\documentclass[11pt,twoside,reqno,nonamelimits]{article}
\usepackage{e-jc}
\usepackage[colorlinks=true,citecolor=black,linkcolor=black,urlcolor=blue]{hyperref}

% \usepackage[pagebackref,colorlinks=true,urlcolor=blue,linkcolor=blue,citecolor=blue]{hyperref}
% \usepackage{hyperref}
% \usepackage{fullpage}
% \usepackage[top=1.65in,bottom=1.7in,left=1.5in,right=1.5in]{geometry}
%\usepackage[top=1.4in,bottom=1.4in,left=1.3in,right=1.3in]{geometry}

% \usepackage[top=1.4in,bottom=1.2in,left=1.2in,right=1.2in]{geometry}
%% \usepackage{makeidx}
%% \usepackage{theorem}
%% \usepackage{array}

%\usepackage{mathptmx}        % PSNFSS
%\usepackage{bookman}         % PSNFSS
%\usepackage{newcent}         % PSNFSS

\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsfonts}
\usepackage{amscd}
\usepackage{amsthm}
\usepackage{amsxtra}
% \usepackage{stmaryrd}
%\usepackage[all,pdf]{xy}  \CompileMatrices     % when generatind PDF files
% \usepackage[all    ]{xy}  \CompileMatrices     % otherwise
% \usepackage{eucal}
\usepackage{mathrsfs}
\usepackage{nicefrac}
\usepackage{bbold}
%% \usepackage{bm}
\usepackage{graphicx}
\usepackage{xcolor}
% \usepackage[raggedright,IT,hang]{subfigure}
\usepackage{enumitem}
\usepackage{wrapfig}
% \usepackage[normalem]{ulem}
% \usepackage{url}

\usepackage[norelsize,boxed,noend,linesnumbered]{algorithm2e}\DontPrintSemicolon
% \RestyleAlgo{ruled}
% 'norelsize' takes care of incompatibility with amsart in old version of algorithm2e.
% Remove it once the new version is on the system
% \begin{algorithm}[htbp OR H]
%   Commands: \TitleOfAlgo{} \For{}{} \Foreach{}{} \If{}{} \eIf{}{}{} \EsleIf{}{} \While{}{} \Return{}

%\usepackage[bf]{titlesec}
% \renewcommand{\thesection}{\Roman{section}}
% \titleformat{\section}{\Large\sc\bfseries}{\thesection.}{1em}{}
% \titleformat{\subsection}{\bfseries}{\thesubsection.}{1em}{}
% \titleformat{\subsubsection}{\sl}{\thesubsubsection.}{1em}{}
% \titleformat{\paragraph}{}{\theparagraph}{}{}
% \titleformat{\appendix}{}{}{}{blah}
% \titleformat{\subparagraph}{}{}{0pt}{}


% tex/defs.tex

\theoremstyle{plain}
\newtheorem{theorem}{Theorem}
\newtheorem*{theorem*}{Theorem}

\newtheorem{proposition}[theorem]{Proposition}
\newtheorem*{proposition*}{Proposition}

\newtheorem{corollary}[theorem]{Corollary}
\newtheorem*{corollary*}{Corollary}

\newtheorem{lemma}[theorem]{Lemma}
\newtheorem*{lemma*}{Lemma}
\newtheorem{observation}[theorem]{Observation}
\newtheorem*{observation*}{Observation}

\newtheorem*{conjecture*}{Conjecture}
\newtheorem{conjecture}[theorem]{Conjecture}

\newtheorem*{question*}{Question}
\newtheorem{question}[theorem]{Question}
\newtheorem*{questions*}{Questions}
\newtheorem{questions}[theorem]{Questions}

\newtheorem*{problem*}{Problem}
\newtheorem{problem}[theorem]{Problem}
\newtheorem*{problems*}{Problems}
\newtheorem{problems}[theorem]{Problems}

\newtheorem*{openproblem*}{Open Problem}
\newtheorem{openproblem}[theorem]{Open Problem}


\newcommand{\Claim}[1]{\textit{Claim #1}}

%\numberwithin{equation}{section}

\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{exercise}[theorem]{Exercise}
\newtheorem*{exercise*}{Exercise}

\theoremstyle{remark}
%\newtheorem{algorithm}[theorem]{Algorithm}
\newtheorem{remark}[theorem]{Remark}
\newtheorem*{remark*}{Remark}
\newtheorem{remarks}[theorem]{Remarks}
\newtheorem*{remarks*}{Remarks}
\newtheorem{claim}[theorem]{Claim}
\newtheorem*{claim*}{Claim}
\newtheorem{fact}[theorem]{Fact}

\newcommand{\extraqed}{}
\newcommand{\authorstwo}[4]{\author[#1 and #3]{#1\\#2\\and\\#3\\#4}}
\newcommand{\titlewsub}[2]{\title[#1]{#1: #2}}
\newcommand{\subclass}[1]{}


% "rems": a numbered list of remarks wrapped in a "remsTh" theorem environment.
% NOTE(review): "remsTh" is not defined anywhere in this file (only "remarks"
% and "remarks*" exist below) -- this environment will fail if used; confirm
% whether "remsTh" is defined elsewhere or whether "remarks" was intended.
\newenvironment{rems}{\begin{remsTh}~\\[-\baselineskip]
\begin{enumerate}\enumTi{\arabic{enumi}.}}
{\end{enumerate}\end{remsTh}}

\DeclareMathOperator{\grad}{grad}


\newcommand{\enumTi}[1]{\renewcommand{\theenumi}{#1}}
\newcommand{\enumTii}[1]{\renewcommand{\theenumii}{#1}}
\newcommand{\enumTiii}[1]{\renewcommand{\theenumiii}{#1}}
\newcommand{\enumTiv}[1]{\renewcommand{\theenumiv}{#1}}

\newcommand{\alphenumi}{\enumTi{\alph{enumi}}}
\newcommand{\Alphenumi}{\enumTi{\Alph{enumi}}}
\newcommand{\romenumi}{\enumTi{\roman{enumi}}}
\newcommand{\itromenumi}{\enumTi{\textit{\roman{enumi}}}}
\newenvironment{aenumeratei}{%
  \begin{enumerate}\alphenumi}{%
  \end{enumerate}}
\newenvironment{renumeratei}{%
  \begin{enumerate}\romenumi}{%
  \end{enumerate}}

% thesis/defs.common.tex

\newcommand{\from}{\leftarrow}
\newcommand{\downto}{\searrow}
\newcommand{\upto}{\nearrow}
\newcommand{\aand}{\mathrel\&}

\newcommand{\Frowney}{${{}_{.\,.}}\above0pt{}^{\frown}$}
\newcommand{\Smiley}{{\ensuremath{{{}_{.\,.}}\above0pt{}^{\smile}}}}

\newlength{\hspaceforlengthglumpf}
\newcommand{\hsfor}[1]{\settowidth{\hspaceforlengthglumpf}{#1}\hspace{\hspaceforlengthglumpf}}
\newcommand{\hsforminus}[1]{\settowidth{\hspaceforlengthglumpf}{#1}\hspace{-\hspaceforlengthglumpf}}

\newcommand{\onespace}{\mspace{1mu}}


\newcommand{\stepnr}[1]{{\footnotesize #1}}

\newcommand{\stbox}[2]{\text{\tiny\parbox{#1}{\centering\baselineskip=0.5 \baselineskip #2}}}

\newcommand{\ubtxt}[3]{\underbrace{#1}_{\stbox{#2}{#3}}}
\newcommand{\obtxt}[3]{\overbrace{#1}^{\stbox{#2}{#3}}}
\newcommand{\utxt}[3]{\mathop{#1}\limits_{\stbox{#2}{#3}}}

\renewcommand{\em}{\sl}

\newcommand{\comment}[1]{\text{\footnotesize[#1]}}


\newcommand{\case}[1]{\par\smallskip\noindent\textit{#1}}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% math relations & Operators

\newcommand{\approxcmmt}[1]{\mathrel{\mathop\approx\limits_{\stbox{1.5cm}{#1}}}}
\newcommand{\eqcmmt}[1]{\mathrel{\mathop=\limits_{\stbox{1.5cm}{#1}}}}
\newcommand{\lecmmt}[1]{\mathrel{\mathop\le\limits_{\stbox{1.5cm}{#1}}}}
\newcommand{\gecmmt}[1]{\mathrel{\mathop\ge\limits_{\stbox{1.5cm}{#1}}}}
\newcommand{\eqcomment}[2]{\mathrel{\mathop=\limits_{\stbox{#1}{#2}}}}
\newcommand{\eqshrtcmmt}[1]{\mathrel{\mathop=\limits_{#1}}}

\newcommand{\eqcmt}[1]{\mathrel{\mathop=\limits_{#1}}}
\newcommand{\lecmt}[1]{\mathrel{\mathop\le\limits_{#1}}}
\newcommand{\gecmt}[1]{\mathrel{\mathop\ge\limits_{#1}}}
\newcommand{\lesssimcmt}[1]{\mathrel{\mathop\lesssim\limits_{#1}}}
\newcommand{\gtrsimcmt}[1]{\mathrel{\mathop\gtrsim\limits_{#1}}}

\newcommand{\musteq}{\stackrel{!}{=}}
\newcommand{\questioneq}{\stackrel{?}{=}}


\newcommand{\then}{\mathrel{\mathop{\;\Rightarrow\;}}}
\newcommand{\into}{\hookrightarrow}
\newcommand{\onto}{\twoheadrightarrow}
\newcommand{\leply}{\mathrel{\le_p}}

\DeclareMathOperator{\id}{id}
\DeclareMathOperator{\ord}{ord}
\DeclareMathOperator{\dist}{dist}
\DeclareMathOperator{\diam}{diam}
\DeclareMathOperator{\divg}{div}
\DeclareMathOperator{\rot}{rot}
\DeclareMathOperator{\relint}{relint}
\DeclareMathOperator{\defect}{def}
\DeclareMathOperator{\codim}{codim}
\DeclareMathOperator{\sgn}{sgn}
\DeclareMathOperator{\vrt}{vert}
\DeclareMathOperator{\supp}{supp}
\DeclareMathOperator{\tr}{tr}
\DeclareMathOperator{\polylog}{polylog}
\DeclareMathOperator{\lnln}{lnln}
%\DeclareMathOperator{\Res}{Res}

\renewcommand{\div}{\divg}
\newcommand{\Id}{\mathrm{Id}}


\DeclareMathOperator{\Img}{Im}
\DeclareMathOperator{\img}{im}

\DeclareMathOperator{\lin}{lin}
\DeclareMathOperator{\spn}{span}
\DeclareMathOperator{\aff}{aff}
\DeclareMathOperator{\cvxhull}{cvxhull}
\DeclareMathOperator{\cvxcone}{cvxcone}
\DeclareMathOperator{\rk}{rk}
\DeclareMathOperator{\vol}{vol}

\DeclareMathOperator{\Real}{Re}
\DeclareMathOperator{\Imag}{Im}

\newcommand{\dg}{d}

\newcommand{\prt}{\partial}
\newcommand{\bd}{\partial}
\newcommand{\Lapl}{\Delta}
\newcommand{\orth}{\bot}
\newcommand{\cb}[1]{{\partial#1}}
\newcommand{\cbs}[2]{{\partial_{#1}#2}}


\newcommand{\GL}{\mathit{GL}}   % general linear group
\newcommand{\SL}{\mathit{SL}}   % special linear group
%\newcommand{\GFtwo}{\mathbb{Z}_2}
\newcommand{\GFtwo}{{\{0,1\}}}  % two-element field, rendered as the set {0,1}
\newcommand{\One}{\mathbf{1}}   % all-ones vector/indicator
\newcommand{\Zero}{\mathbf{0}}  % all-zeros vector
% NOTE(review): \oblong is provided by the "stmaryrd" package, which is
% commented out near the top of the preamble -- \place will fail if used
% while stmaryrd is not loaded.
\newcommand{\place}{{\text{\footnotesize$\oblong$}}}

\newcommand{\Pow}[1]{{\mathbf 2^{#1}}}  % power set, written 2^X

\newcommand{\lt}{\left}
\newcommand{\rt}{\right}
\newcommand{\lft}{\bigl}
\newcommand{\rgt}{\bigr}
\newcommand{\Lft}{\Bigl}
\newcommand{\Rgt}{\Bigr}

\newcommand{\Tp}{{\mspace{-1mu}\scriptscriptstyle\top\mspace{-1mu}}}
\newcommand{\polar}{\vartriangle}
\newcommand{\poOps}{\Diamond}
\newcommand{\blOps}{\sharp}

\newcommand{\lmit}{\;\;\vrule\;\;}
\newcommand{\Mit}{\;\;\vrule\;\;}


\newcommand{\mIt}[1]{{\mathit{#1}}}

% \newcommand{\Mod}{\!\big/\!}
% \newcommand{\MOd}{\!\Big/\!}

%%\newcommand{\cupdj}{\stackrel{.}{\cup}}
%\DeclareMathOperator*{\bigcupdj}{{\bigcup\kern-1.7ex {}^.}\,}
%\newlength{\DOTwidthofabigcup}
%\DeclareMathOperator*{\bigcupdj}{{\settowidth{\DOTwidthofabigcup}{\bigcup}{\bigcup\kern-\DOTwidthofabigcup/2{}^.}}}
%\newcommand{\bigcupdj}{{\bigcup^.}\limits}


\newcommand{\comma}{\text{, }}
\newcommand{\Order}{O}
\newcommand{\OrderEq}{\ensuremath{\Theta}}

\newcommand{\modu}[2]{%
{{}^{\textstyle #1}\!\!\Big/\!\!{}_{\textstyle #2}}}
\newcommand{\restr}[2]{\lt.{#1}\vrule{}_{#2}\rt.}
\newcommand{\vrstr}[2]{\restr{#1}{#2}}
\newcommand{\sstack}[1]{{\substack{#1}}}
\newcommand{\sstck}[1]{{\substack{#1}}}

\newcommand{\abs}[1]{{\lt\lvert{#1}\rt\rvert}}
\newcommand{\pAbs}[2]{{\lt\lvert{#1}\rt\rvert_{#2}}}
\newcommand{\babs}[1]{{\bigl\lvert{#1}\bigr\rvert}}
\newcommand{\Babs}[1]{{\Bigl\lvert{#1}\Bigr\rvert}}
\newcommand{\bbabs}[1]{{\biggl\lvert{#1}\biggr\rvert}}
\newcommand{\BBabs}[1]{{\Biggl\lvert{#1}\Biggr\rvert}}
\newcommand{\sabs}[1]{{\lvert{#1}\rvert}}
\newcommand{\sabstight}[2][2]{{\lvert\mspace{-#1mu}{#2}\mspace{-#1mu}\rvert}}
\newcommand{\floor}[1]{\lt\lfloor{#1}\rt\rfloor}
\newcommand{\close}[1]{\overline{#1}}
\newcommand{\cj}[1]{\overline{#1}}
%\newcommand{\widebar}[1]{\overline{#1}}
\newcommand{\widevec}[1]{\overrightarrow{#1}}

\DeclareMathOperator{\pr}{pr}

\newcommand{\dann}{\Rightarrow}
\newcommand{\pii}{{\pi i}}

\newcommand{\edzpii}{{\frac{1}{2\pii}}}
\newcommand{\nfrac}[2]{{\nicefrac{#1}{#2}}}

\newcommand{\cplmt}{\complement}

\newcommand{\symdiff}{{\vartriangle}}

\newcommand{\AAa}{\mathbb{A}}
\newcommand{\BB}{\mathbb{B}}
\newcommand{\CC}{\mathbb{C}}
\newcommand{\EE}{\mathbb{E}}
\newcommand{\FF}{\mathbb{F}}
\newcommand{\HH}{\mathbb{H}}
\newcommand{\KK}{\mathbb{K}}
\newcommand{\LL}{\mathbb{L}}
\newcommand{\MM}{\mathbb{M}}
\newcommand{\NN}{\mathbb{N}}
\newcommand{\PP}{\mathbb{P}}
\newcommand{\QQ}{\mathbb{Q}}
\newcommand{\RR}{\mathbb{R}}
\newcommand{\Ss}{\mathbb{S}}
\newcommand{\TT}{\mathbb{T}}
\newcommand{\ZZ}{\mathbb{Z}}
\newcommand{\kk}{\mathbb{k}}

\newcommand{\CCpkt}{\C^{\bullet}}

\newcommand{\SPH}{\mathbb{S}}
\newcommand{\PR}{\mathbb{PR}}
\newcommand{\PC}{\mathbb{PC}}

\newcommand{\DDelta}{\mathbb{\Delta}}
\newcommand{\Spx}{\mathbb{\Delta}}

%% E x p e c t a t i o n

\DeclareMathOperator{\bbbExp}{\EE}
\newcommand{\bbblExp}{\bbbExp\displaylimits}
\DeclareMathOperator{\bbbEXp}{\text{\Large$\EE$}}
\newcommand{\bbblEXpo}{\bbbEXp\displaylimits}
\newcommand{\bbblEXp}{\bbbEXp\displaylimits}
\DeclareMathOperator{\bbbEXP}{\text{\huge$\EE$}}
\newcommand{\bbblEXP}{\bbbEXP\displaylimits}

\DeclareMathOperator{\expct}{\mathbf E}
\DeclareMathOperator{\Expct}{\text{\large$\mathbf E$}}

\DeclareMathOperator{\Prb}{\mathbf{P}}
\DeclareMathOperator{\Exp}{\mathbf{E}}
\DeclareMathOperator{\HExp}{\text{\huge$\mathbf{E}$}}
\DeclareMathOperator{\LExp}{\text{\Large$\mathbf{E}$}}
\DeclareMathOperator{\Var}{\mathbf{Var}}
\DeclareMathOperator{\Covar}{\mathbf{Covar}}
\DeclareMathOperator{\IndicatorOp}{\mathbf{I}}
\newcommand{\Ind}{\IndicatorOp}
\newcommand{\Cov}{\Covar}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\newcommand{\Bd}{\mathcal B}

\newcommand{\fB}{{ \mathfrak{B} }}
\newcommand{\fA}{{ \mathfrak{A} }}
\newcommand{\fT}{{ \mathfrak{T} }}
\newcommand{\fS}{{ \mathfrak{S} }}
\newcommand{\fE}{{ \mathfrak{E} }}

\newcommand{\yps}{\upsilon}
\newcommand{\Yps}{\Upsilon}
\newcommand{\eps}{\varepsilon}

\newcommand{\nMtx}[1]{{\begin{smallmatrix}#1\end{smallmatrix}}}
\newcommand{\Mtx}[1]{{\bigl(\begin{smallmatrix}#1\end{smallmatrix}\bigr)}}
\newcommand{\Mtxv}[1]{{\lt(\begin{smallmatrix}#1\end{smallmatrix}\rt)}}
\newcommand{\bMtx}[1]{{\begin{pmatrix}#1\end{pmatrix}}}

\newcommand{\mtt}[1]{{\text{\tt #1}}}
\newcommand{\ID}{\Mtx{1&0\\0&1}}
\newcommand{\bID}{\bMtx{1&0\\0&1}}
\newcommand{\Mabcd}{\Mtx{a&b\\c&d}}
\newcommand{\bMabcd}{\bMtx{a&b\\c&d}}

\newcommand{\scp}[2]{{\lt.\lt( #1 \,\right| #2 \rt)}}
\newcommand{\pNm}[2]{{\lt\lVert #1 \rt\rVert_{#2}}}
\newcommand{\Nm}[1]{{\lt\lVert #1 \rt\rVert}}
\newcommand{\sNm}[1]{{\lVert #1 \rVert}}
\newcommand{\bracket}[2]{{\lt< #1 \mid #2 \rt>}}
\newcommand{\bra}[1]{{\lt< #1 \rt|}}
\newcommand{\ket}[1]{{\lt| #1 \rt>}}
\newcommand{\dual}[1]{\lt< #1 \rt>}
\newcommand{\rsp}[2]{\lt<#1\,,\, #2 \rt>}

\newcommand{\iprod}{\bullet}
\newcommand{\ip}[1]{\lt(#1\rt)}
\newcommand{\ipn}[1]{(#1)}
\newcommand{\ipb}[1]{\bigl(#1\bigr)}
\newcommand{\ipB}[1]{\Bigl(#1\Bigr)}
\newcommand{\ipbg}[1]{\biggl(#1\biggr)}
\newcommand{\ipBg}[1]{\Biggl(#1\Biggr)}


%\definecolor{purp}{named}{Purple}
\newcommand{\xyATabular}[5]{
\vspace{0.1cm}
\begin{tabular}{l@{}l}
   & \fcolorbox{black}{#1}{\parbox[c][0.3cm]{1.1cm}{\centering #2}} \\
\fcolorbox{black}{#3}{\parbox[c][1.1cm]{0.3cm}{#4}} &
        \fbox{\parbox[c][1.1cm]{1.1cm}{\centering #5}}
\end{tabular}
\vspace{0.1cm}
}
\newcommand{\rlstopline}[9]{
\begin{picture}(#1,#3)(0,-#2)
        \put(0,0){\line(1,0){#1}}
        \put(0,0){\line(0,1){#2}}
        \put(0,0){\line(0,-1){#2}}
        \put(#1,0){\line(0,1){#2}}
        \put(#1,0){\line(0,-1){#2}}
        \put(#4,#5){#6}
        \put(#7,#8){#9}
\end{picture}
}
\newcommand{\tridipicture}[2]{
\unitlength=#1
\begin{picture}(5,5)(0,0)
\put(0,5){\line(1,-1){5}}
\put(1,5){\line(1,-1){4}}
\put(0,4){\line(1,-1){4}}
#2
\end{picture}
}

% Circled-digit shorthands via Zapf Dingbats glyphs (octal codes).
% NOTE(review): \ding is provided by the "pifont" package, which is NOT
% loaded in this preamble -- these macros will fail if used.  Confirm they
% are unused leftovers, or add \usepackage{pifont}.
% NOTE(review): the codes for \Kfive ("04C), \Ksix ("05C) and \Kseven ("60C)
% break the "0C0.."0C8 pattern of the neighbours and look like digit
% transpositions of "0C4/"0C5/"0C6 -- verify before use.
\newcommand{\Kone}{\ding{"0C0}}
\newcommand{\Ktwo}{\ding{"0C1}}
\newcommand{\Kthree}{\ding{"0C2}}
\newcommand{\Kfour}{\ding{"0C3}}
\newcommand{\Kfive}{\ding{"04C}}
\newcommand{\Ksix}{\ding{"05C}}
\newcommand{\Kseven}{\ding{"60C}}
\newcommand{\Keight}{\ding{"0C7}}
\newcommand{\Knine}{\ding{"0C8}}
\newcommand{\Kzero}{\ding{"0CA}}


% Damit nach Abkuerzungen nicht derselbe Abstand wie nach einer
% Satzendeinterpunktion erfolgt, wird xspace eingesetzt.
% Die verwendeten Abkuerzungen werden hier der Einfachheit halber
% definiert:
%\newcommand{\Nr}{Nr.}

\newlength{\algotabbingwidth}
\setlength{\algotabbingwidth}{1cm}
\newenvironment{algo}{%
  \begin{tabbing}
    \hspace*{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \kill\\
    }{%
  \end{tabbing}%
}
\newenvironment{algoShort}{%
  \begin{tabbing}
    \hspace*{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \hspace{\algotabbingwidth}\=%
    \kill
    }{%
  \end{tabbing}%
}
%% \newlength{\algoboxwidth}%
%% \newlength{\algolabelwidth}%
%% \newcommand{\algbox}[3]{%
%%   \settowidth{\algolabelwidth}{#1~}%
%%   \setlength{\algoboxwidth}{-\algolabelwidth}%
%%   \addtolength{\algoboxwidth}{-#2cm}%
%%   \addtolength{\algoboxwidth}{\textwidth}%
%%   #1~\parbox[t]{\algoboxwidth}{\raggedright #3\vspace{1mm}
%%     }%
%% }

\newcommand{\STOP}[0]{{STOP}}

\newcommand{\var}[1]{{\ensuremath{\text{\textit{#1}}}}}

\newenvironment{code}[1]{\begin{tabbing}
\hspace*{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=
\kill {{\tt Algorithm} {#1}}\\ \{\+ \\}{\-\\ \} \end{tabbing}}

\newenvironment{codeseg}{\begin{tabbing}
\hspace*{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=\hspace{6mm}\=
\kill }{\end{tabbing}}

\newenvironment{mylist}{%
  \begin{list}{}{%
      \renewcommand{\makelabel}[1]{%
        \hspace{\parindent}{\em{##1}}\quad}%
      \setlength{\parsep}{0mm}%
      \setlength{\labelsep}{0mm}%
      \setlength{\leftmargin}{0mm}%
      \setlength{\listparindent}{\parindent}}%
}{\end{list}}%
\newenvironment{txtlist}{%
  \begin{list}{}{%
      \renewcommand{\makelabel}[1]{%
%       \hspace{\parindent}%
        {\em{##1}}\quad%
      }
%      \setlength{\parsep}{0mm}%
%      \setlength{\labelsep}{0mm}%
%      \setlength{\leftmargin}{0mm}%
%      \setlength{\listparindent}{\parindent}%
}%
}{\end{list}}%

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%\newcommand{\eqm}{\equiv}
\newcommand{\eqm}{=}
%% \renewcommand{\subset}{\subseteq}
%% \renewcommand{\supset}{\supseteq}
% \newcommand{\Pz}[1]{P_{#1}}
% \newcommand{\Pb}[1]{P^{\scriptscriptstyle 0/1}_{#1}}
% \newcommand{\Pm}[1]{P_{#1}}
% \newcommand{\Pse}[1]{P^{\text{se}}_{#1}}

% \input{algodefs}



\numberwithin{theorem}{section}

% \renewcommand{\thesubsection}{\thesection.\alph{subsection}}

\newcommand{\myparagraph}[1]{\textbf{#1}}
\newcommand{\myparagraphwskip}[1]{\smallskip\paragraph{#1}}
\newcommand{\claimpf}[1]{\myparagraph{\textit{#1}}}

\newcommand{\myskip}{\medskip\noindent}
\newcommand{\mypar}{\par\medskip\noindent}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 
%
% ToDo macros
%
%
% There are 2 todo macros: \todo and \TODO.
% The difference is that, when the paper is submitted, the \todo just disappear,
% but the \TODO don't.
% So the things which _have_ to be fixed before submission no matter what, they should be \TODO.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 
% BEGIN comment / uncomment
\newcommand{\todo}[1][ToDo]{{\tiny\color{red}$\scriptscriptstyle\circ$}%
  {\marginpar{\centering\tiny\sf todo$\color{red}\bullet$\color{red}#1}}}
\newcommand{\TODO}[1][Yikes!]{{\tiny\color{red}$\scriptscriptstyle\circ$}%
  {\marginpar{\centering\tiny\sf todo$\color{red}\bullet$\color{red}#1}}}
% \newcommand{\todo}[1][ToDo]{}
% \newcommand{\TODO}[1]{\Yikes}
% END comment / uncomment

\newcommand{\NEW}[1][]{#1}


% \newcommand{\Mark}[1]{\todo[Mark]\colorbox{yellow}{#1}}
\newcommand{\Mark}[1]{\todo[\color{green}Mark]\bgroup\markoverwith{\textcolor{green}{\rule[-0.5ex]{2pt}{0.4pt}}}\ULon{#1}}

% Margin notes for Tomasz Luczak
\newcommand{\Tomasz}[1][{}]{{\color{red}${}^{{}^{\tiny\scriptscriptstyle\star}}\!$}%
  {\marginpar{\centering$\color{red}\bigstar$\tiny\sf\color{red}#1}}}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 
% 
% Long/short version macros
% 
% 
% If there's some text which you want to appear only in the long version,
% put it into \longversion{...}
% If there's some text which you want to appear only in the short version,
% put it into \shortversion{...}
% 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 
\newcommand{\longversion}[1]{}\newcommand{\shortversion}[1]{#1}
% \newcommand{\longversion}[1]{#1}\newcommand{\shortversion}[1]{}

% definitions.tex

\DeclareMathOperator{\Bin}{Bin}

% \logb: the plain upright "log" operator (no base).
\DeclareMathOperator{\logb}{log}
% WARNING(review): \log is globally redefined to REQUIRE a base argument,
% so "\log{2} x" typesets log_2 x.  Any conventional "\log n" elsewhere in
% the document will silently swallow "n" as the base -- use \logb for a
% base-less logarithm.
\renewcommand{\log}[1]{\logb_{#1}}


\DeclareMathOperator{\rc}{rc}
%\DeclareMathOperator{\fool}{fool}
%\DeclareMathOperator{\sep}{sep}
\DeclareMathOperator{\gen}{gen}

\DeclareMathOperator{\Adj}{Adj}
\DeclareMathOperator{\dCC}{dCC}

\DeclareMathOperator{\ir}{ir}

\newcommand{\Matrixd}{M}
\newcommand{\Matrix}[2]{\Matrixd^{#1,#2}}
\newcommand{\Graphd}{G_{\mspace{-3mu}\mbox{\tiny$\scriptstyle\boxtimes$}\mspace{-3mu}}}
\newcommand{\Graph}[2]{\Graphd^{#1,#2}}

\newcommand{\HbipM}{H_{\!\scriptscriptstyle M}}
\newcommand{\HbipMnp}{H_{\!\scriptscriptstyle \Matrix{n}{p}}}
\DeclareMathOperator{\xfmatch}{\nu^\times}

%\newcommand{\Hnnp}{H_{n\times n,p}}
\newcommand{\Gnp}[2]{G_{#1,#2}}


\newcommand{\notp}{{\bar p}}

\newcommand{\alphasq}{\alpha_\Box}


\newcommand{\tmpy}{\Huge\bf This is just so that I don't have to distinguish between renewcommand and newcommand when I (re)define this.}

\newcommand{\omitthis}[1]{} % Just omit it.

\begin{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 
%%
%%
\date{\dateline{}{}}%
%%
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 
% title-and-abstract.tex

\title{The Rectangle Covering Number of Random Boolean Matrices}%
\author{Mozhgan Pourmoradnasseri, Dirk Oliver Theis\thanks{Supported by the Estonian Research Council, ETAG (\textit{Eesti Teadusagentuur}), through PUT Exploratory Grant \#620, and by the European Regional Development Fund through the Estonian Center of Excellence in Computer Science, EXCS.}\\[1ex]
  \small Institute of Computer Science {\tiny of the } University of Tartu\\
  \small \"Ulikooli 17, 51014 Tartu, Estonia\\
  \small \texttt{\{mozhgan,dotheis\}@ut.ee}%
}

\maketitle

\begin{abstract}
  The rectangle covering number of an $n\times n$ Boolean matrix~$M$ is the smallest number of 1-rectangles which are needed to cover all the 1-entries of~$M$.  Its binary logarithm is the Nondeterministic Communication Complexity, and it equals the chromatic number of a graph $\Graphd(M)$ obtained from~$M$ by a construction of Lov\'asz \& Saks.

  We determine the rectangle covering number and related parameters (clique size, independence ratio, fractional chromatic number of $\Graphd(M)$) of random Boolean matrices, where each entry is 1 with probability $p = p(n)$, and the entries are independent.
  % \\
  % \textbf{Keywords:}
\end{abstract}
%\subjclass[2012]{Primary ...........; Secondary ...................}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% arXiv abstract:
%
% The rectangle covering number of an $n\times n$ Boolean matrix $M$ is the smallest number of 1-rectangles which are needed to cover all the 1-entries of $M$.  Its binary logarithm is the Nondeterministic Communication Complexity, and it equals the chromatic number of a graph $G_\boxtimes(M)$ obtained from $M$ by a construction of Lovasz & Saks.
%
% We study the rectangle covering number and related parameters (clique size, independence ratio, fractional chromatic number of $G_\boxtimes(M)$) of random Boolean matrices, where each entry is 1 with probability $p = p(n)$, and the entries are independent.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% \maketitle   % (review) duplicate call commented out: \maketitle is already issued after the author block above

% \todo[remember this]
% \TODO[This {\bf has} to be dealt with, no matter what]

%\input{todo.tex}
% intro.tex

\section{Introduction}\label{sec:intro}
For a positive integer~$n$, a \textit{rectangle} is a product $R = K\times L \subseteq [n]\times[n]$ (with $[n] := \{1,\dots,n\}$).
%%
Given an $n\times n$ matrix~$M$ with 0/1 entries, a \textit{1-rectangle} is a rectangle $R$ with $M_{k,\ell}=1$ for all $(k,\ell)\in R$.  A \textit{rectangle covering} of~$M$ is a collection of 1-rectangles $R^{(1)},\dots,R^{(r)}$ such that $\{(k,\ell)\mid M_{k,\ell}=1\} = \bigcup_{j=1}^{r} R^{(j)}$, i.e., every 1-entry of~$M$ is contained in one of the chosen 1-rectangles.
%%
The \textit{rectangle covering number}~\cite{Lovasz90,Lovasz-Saks:CC-lattice:93}, $\rc(M)$, of~$M$ is the smallest number of 1-rectangles in a rectangle covering of~$M$.
%
The \textit{nondeterministic communication complexity}~\cite{Kushilevitz-Nisan:Book:97} of~$M$ is $\lceil \log{2}(\rc(M))\rceil$.

Rectangle covering is the same as covering by bicliques the edges of the bipartite graph $\HbipM$ whose vertex set is the disjoint union $[n]\uplus[n]$, and whose bipartite adjacency matrix equals~$M$. Hence, the term \textit{biclique covering} is often used.  In Matrix Theory, the name \textit{Boolean rank} \cite{CaenGregoryPullman81,CaenGregoryPullman88,KimBk82} is common.  There, the Boolean rank lower bounds the nonnegative rank~\cite{GregoryPullman83}: Indeed, the rectangle covering number is the best possible lower bound on the nonnegative rank if one considers only zero-nonzero information of the matrix (cf.~\cite{LeeTheis12}).

Rectangle covering coincides with the \textit{2-dimension} of certain posets (cf.~\cite{Fiorini-Kaibel-Pashkovich-Theis:CombLB:13}) and with the strong isometric dimension of certain graphs~\cite{Froncek-Jerebic-Klavzar-Kovar:CPC:07}.  It has applications in coding theory~\cite{Hajiabolhassan-Moazami:code:12}, and connections to extremal set theory~\cite{Hajiabolhassan-Moazami:cover-free:12}.

The rectangle covering number is an important lower bound on the so-called extension complexity~\cite{Yannakakis:91} of polytopes, a quantity of interest in Combinatorial Optimization: Lower bounds on the rectangle covering number were the techniques which allowed to prove exponential lower bounds for sizes of linear programming formulations for a number of combinatorial optimization problems~\cite{Fiorini-Massar-Pokutta-Tiwary-Dewolf:lin-vs-semidef:12,Fiorini-Massar-Pokutta-Tiwary-Dewolf:ACM:15,Braun-Fiorini-Pokutta:rndStable:14}.  \NEW While random 01-matrices have high rank, the matrices occurring in Combinatorial Optimization, so-called slack matrices, have arbitrary real (or rational) entries, and their ranks are typically only polylog in their sizes.  It is important to realize that, while the rank of the actual slack matrix is a lower bound to the extension complexity, the rank of the 01-matrix (each non-zero entry of the slack matrix is replaced by~1) is not.

\mypar%
As hinted above, the rectangle covering number is the chromatic number of the \textit{(Lov\'asz-Saks) rectangle graph,} $\Graphd(M)$, of the matrix~$M$ which has as its vertices the 1-entries of~$M$, with two 1-entries being adjacent, if they span a $2\times 2$ rectangle containing a 0-entry of~$M$.  More precisely:
\begin{align*}
  V(\Graphd(M)) &= \Bigl\{ (k,\ell) \in [n]\times[n] \mid M_{k,\ell} =1 \Bigr\}\\
  E(\Graphd(M)) &= \Bigl\{ \{(k,\ell),(k',\ell')\} \bigm| M_{k,\ell}M_{k',\ell'} = 1 \ \& \  M_{k,\ell'}M_{k',\ell}=0 \Bigr\}.
\end{align*}
The following is implicit in~\cite{Lovasz-Saks:CC-lattice:93}, and implies $\chi(\Graphd(M)) = \rc(M)$.
\begin{proposition}[\cite{Lovasz-Saks:CC-lattice:93}]\label{prop:rectangle-graph}
  The inclusion-wise maximal 1-rectangles of~$M$ are precisely the vertex sets of the inclusion-wise maximal independent sets of~$\Graphd(M)$.
\end{proposition}

Other parameters of~$M$ and $\Graphd$ are used as lower bounds in the application areas: the ratio ``number of 1-entries by size of the largest 1-rectangle'' is the independence ratio of~$\Graphd$; so-called fooling sets in~$M$ coincide with cliques in $\Graphd$; the so-called generalized fooling set bound~\cite{Dietzfelbinger-Hromkovic-Schnitger:96} coincides with the Hall ratio (the supremum of all independence ratios of induced subgraphs) of $\Graphd$; fractional rectangle covering~\cite{KarchmerKushilevitzNisan95} is the same as fractional coloring of~$\Graphd$. (Except for the part about fooling sets, all of these statements follow from Prop.~\ref{prop:rectangle-graph}.)  A fooling set (cf., e.g., \cite{Kushilevitz-Nisan:Book:97,FriesenTheis13}), is a set of 1-entries of~$M$ which, when considered as edges of $\HbipM$ (see above), form a \textit{cross-free matching,} i.e., a matching no two of whose edges induce a $K_{2,2}$ in~$\HbipM$.\footnote{%
  More precisely, a matching $M\subseteq E\subseteq [n]\times[n]$ is \textit{cross-free} if, for every $(k,\ell), (k',\ell')\in M$ we have $(k,\ell)=(k',\ell')$ or $(k,\ell')\notin E$ or $(k',\ell)\notin E$.  } %
Fooling sets are referred to as sets of \textit{independent} entries in Matrix Theory~\cite{CohenRothblum93}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\medskip%
\myparagraph{Our results and connections to related work.}  %
In this paper we study the rectangle covering number, and the parameters listed above, of a random $n\times n$ Boolean matrix~$\Matrix{n}{p}$, where each entry is independently~1 with probability $p=p(n)$ and~0 with probability $1-p$, or, respectively, of the random graph $\Graph{n}{p} := \Graphd(\Matrix{n}{p})$.  As usual, we are interested in $n\to\infty$.

In (Communication) Complexity Theory, random objects are usually used for establishing that hard objects exist in the given model of computation.  In this spirit, some easy results about the (nondeterministic) communication complexity of random functions and related parameters exist, with~$p$ a constant, mostly $p=\nfrac12$.

For us, part of the motivation for investigating the parameters studied in this paper was to see to what extent random Boolean matrices behave similarly to the difficult families of matrices in Combinatorial Optimization whose rectangle covering number one would like to bound.  In these families, the number of zeros per row/column is typically small, i.e., the typical number of 0-entries per row is $\polylog n$.  In terms of random matrices, this prompted us to look at these parameters when $p=p(n)\to 1$ with $n\to\infty$ -- in the spirit of the study of properties of Erd\H{o}s-R\'enyi random graphs.  In an analogy to those random graphs, the results become both considerably more interesting and also more difficult that way.

% fool
\smallskip%
The clique number of $\Graph{n}{\nfrac12}$ (fooling set size) was shown~\cite{Dietzfelbinger-Hromkovic-Schnitger:96} to be $\Theta(\ln n)$, asymptotically almost surely (``a.a.s.'', i.e., the probability of that happening tends to~$1$ with $n\to\infty$).  In the interesting examples from Combinatorial Optimization, the fooling set bound is typically $O(\ln n)$---even worse than the theoretical maximum (from Tur\'an's theorem, cf.~\cite{Fiorini-Kaibel-Pashkovich-Theis:CombLB:13}) of $O(\polylog n)$ for matrices with $O(\polylog n)$ zeros per row/column.  For example, for the Spanning Tree polytope, it was recently shown that the fooling set bound is (almost) useless~\cite{Khoshkhah-Theis:treeFool:2017}.  This is in line with what we observe in random matrices: We give upper and lower bounds for $\omega(\Graph{n}{p})$ for $p\to 1$ and also for $p\to 0$, based on matchings in random (binomial) bipartite graphs (Section~\ref{sec:fool}).

Recently, Izhakian, Janson, and Rhodes~\cite{Izhakian-Janson-Rhodes:PAMS:15} have determined asymptotically the so-called triangular rank of random Boolean matrices with independent Bernoulli entries.  The triangular rank is important in Communication Complexity~\cite{Lovasz-Saks:CC-lattice:93} and in Combinatorial Optimization~\cite{LeeTheis12}, and it is a lower bound to the size of a fooling set.  In Izhakian et al.'s paper, determining the behavior for $p\to0,1$ is posed as an open problem.

% alpha
\smallskip%
The size of the largest monochromatic rectangle in a random Bernoulli matrix was determined in~\cite{Park-Szpankowski:biclusters:05} when~$p$ is bounded away from~$1$, but their technique fails for $p\to 1$.  We determine the asymptotic behavior of that parameter (the independence number of the Lov\'asz-Saks rectangle graph) for $p\to 1$ (Section~\ref{sec:alpha}).  The importance of this parameter lies mainly in the fact that the independence ratio (the number of vertices over the independence number) is a lower bound to the chromatic number.

% chi
\smallskip%
The chromatic number of $\Graph{n}{\nfrac12}$ is given in Sherstov's lecture notes on Communication Complexity\footnote{\url{http://www.cs.ucla.edu/~sherstov/teaching/2012-winter/}}.  The rectangle covering number of Clique-vs-Stable-Set (which is important in the context of the extension complexity of the Stable Set polytope) on random graphs was studied in \cite{Braun-Fiorini-Pokutta:rndStable:14}.  We discuss the chromatic number $\chi(\Graph{n}{p})$ and the fractional chromatic number $\chi^*(\Graph{n}{p})$ in Section~\ref{sec:chi}.

Interestingly, the ratio $\chi/\chi^*$ tends to infinity a.a.s.\ in a certain range of~$p$.  This is not only a notable difference to the situation $p=\nfrac12$, where the chromatic number and the independence ratio (which also lower bounds the fractional chromatic number) coincide a.a.s.  It also distinguishes this random graph model from, say, Erd\H{o}s--R\'enyi random graphs, where, a.a.s.,\ the independence ratio is within a constant factor of the chromatic number.  %We conjecture that there is a gap between $\chi$ and $\chi^*$ for $p > \nfrac 12$.

Since the fractional chromatic number is today the most successfully used (combinatorial) lower bound for extension complexity, it came as a surprise to the authors that random rectangle graphs exhibit a gap between the fractional chromatic number and the chromatic number, at least in some parameter ranges.  This phenomenon coincides with observations about some of the matrix families in Combinatorial Optimization (e.g., related to spanning trees in complete graphs, see, e.g., \cite{Weltge:phd:2015,Khoshkhah-Theis:tree-rc:2017}), where the fractional chromatic number might be a weak lower bound.  We refer the reader to the conclusions for a discussion of concrete conjectures on $\chi$-vs-$\chi^*$.

\smallskip%
An extended abstract (without proofs) of the results of this paper was accepted to the 13th annual conference on Theory and Applications of Models of Computation, in a form tailored to a Complexity Theory audience~\cite{Pourmoradnasseri-Theis:rnd-ndcc:17}.  (The conference was canceled and accepted papers were presented one year later at the April 2017 TAMC.)

% known_facts.tex

\section{Some basic properties of rectangle covering}\label{sec:basics}
We summarize some properties of the rectangle covering number in the following remark.  All proofs are easy to trivial, and we leave them to the reader as a warm-up.

\begin{remark}\label{rem:basics:detprop}\mbox{}
  \begin{enumerate}[label=(\alph*)]
  \item\label{enum:basics:detprop:rc_le_n} Trivial upper bound: If $M$ is an $m\times n$ matrix then $\rc(M) \le \min(n,m)$.
  \item Redundant rows or columns: If the set of 1s of row~$k$ equals the union of the sets of 1s of other rows, then deleting row~$k$ does not change the rectangle covering number.  A special case is a row with no 1s.
    Another special case is this: If $n'$ ($m'$, resp.)\ is the number of distinct, non-0 rows (columns, resp.)\ of~$M$, then $\rc(M) \le \min(n',m')$.
  \item The rectangle covering number is not monotone in the set of 1s.  Changing a 0 into a~1 can affect the rectangle covering number in both directions.
  \end{enumerate}
\end{remark}

\subsection{Hypergraphs and the Binary Logarithm lower bound}
Let~$H=(V,E)$ be a hypergraph and $E' \subset 2^V$.  We say that $E'$ \textit{generates}~$H$, if  every $e\in E$ is a union of $e'\in E'$; in symbols:
\begin{equation*}
  \text{For all $e\in E$:} \quad e = \bigcup_{\substack{e'\in E'\\e'\subseteq e}} e'
\end{equation*}

The \textit{generation number}, $\gen(H)$, of~$H$ is the smallest number of sets in an $E'$ which generates~$H$.
%%
The following is an easy lower bound for the generation number.

\begin{lemma}\label{lem:log2-lbs}\mbox{}
  If~$H$ is a hypergraph with all edges distinct, then $\gen(H) \ge \log{2}\abs{E(H)}$.
\end{lemma}
\begin{proof}
  Let $E'$ generate~$H$, and for each $e\in E(H)$ consider the set $E'_e := \{ e'\in E' \mid e'\subseteq e\}$.  Since
  $ %\begin{equation*}
    e = \bigcup_{e'\in E'_e} e',
  $ %\end{equation*}
  the hyperedge~$e$ is uniquely determined by~$E'_e \in 2^{E'}$.  Hence, $2^{\sabs{E'}} \ge \abs{E(H)}$.
\end{proof}

We leave the proof of the following proposition to the reader.  The trick is to use 1-rectangles ``generated by'' row/column sets (see the definition before Theorem~\ref{thm:alpha}, p.~\pageref{page:generated_rectangle}).
\begin{proposition}\label{prop:basics:sep-rc-equality}
  Let~$H$ be a hypergraph and~$M$ its vertex-hyperedge incidence matrix, i.e., $M_{v,e} = 1$ iff $v\in e$.  Then $\gen(H) = \rc(M)$.
\end{proposition}
This means that hypergraph generation and rectangle covering are equivalent concepts, and we obtain the following lower bound on the rectangle covering number.

\begin{remark}[Log-2-lower-bound]\label{rem:log2-bound}
  If a Boolean matrix has $m'$ distinct rows and $n'$ distinct columns, then $\rc(M) \ge \log{2}(\max(m',n'))$.
\end{remark}
This simple bound has been found independently in Communication Complexity (cf., e.g., \cite{Lovasz90}), Matrix Theory~\cite{BezrukovFroncekRosenbergKovar08}, and Combinatorial Optimization~\cite{Goemans:permutahedron:15}.

% basics.tex

\section{The random graph $\Graph{n}{p}$}\label{sec:random-graph-basics}
We use the conventions
\begin{equation*}
  \notp = 1-p \text{, and } \lambda = \notp n.
\end{equation*}
E.g., the expected number of zeros in each row of $\Matrix{n}{p}$ is~$\lambda$.

We denote the number of vertices of $\Graph{n}{p}$ by~$N$.  It is a $\Bin(n^2,p)$ binomial random variable.

\subsection{The cases when $p$ tends to $0$ or~$1$ very fast}
When $p$ tends to $0$ or~$1$ quickly, the graph is nearly deterministic, and there is nothing much to study in terms of random properties.

Denote by $G_{(r)}$ the graph whose vertices are all pairs $(k,\ell) \in \{1,\dots,r\}\times\{1,\dots,n\} \cup \{1,\dots,n\}\times\{1,\dots,r\}$ with $k\ne \ell$, and let $(k,\ell)$ be adjacent to $(k',\ell')$ iff $k=\ell'$ or $\ell=k'$.

\begin{proposition}\mbox{}
  \begin{enumerate}[label=(\alph*)]
  \item For $p=o(n^{-\nfrac32})$, a.a.s., $\Graph{n}{p}$ is a clique with $p n^2 + O(\sqrt{p}n) = o(\sqrt{n})$ vertices.
  \item For $p = 1-o(n^{-\nfrac32})$, the graph $\Graph{n}{p}$ is, a.a.s., isomorphic to a disjoint union of $(n-R)^2$ isolated vertices with $G_{(R)}$, for $R := (n^2-N) = \notp n^2 + O(\sqrt{\notp}n) = o(\sqrt n)$.
  \end{enumerate}
\end{proposition}
\begin{proof}[Proof (sketch).]
  For (a), by, say, Chebyshev's inequality, we have, a.a.s., $N = pn^2 + O(\sqrt{p}n) = o(\sqrt n)$, since the standard deviation of~$N$ is at most $\sqrt{p}n$.  Moreover, the probability that a fixed row (or column) of $\Matrix{n}{p}$ contains two or more ones is at most $p^2\binom{n}{2} = o(1/n)$, so that, by the union bound, a.a.s., there is no row (or column) which contains two or more ones.  Thus, $\Graph{n}{p}$ is a clique of $o(\sqrt n)$ vertices.

For (b), the same argument shows that the number $R := n^2-N$ of zero-entries of $\Matrix{n}{p}$ is $o(\sqrt n)$ a.a.s., and that, a.a.s., no two of them are in the same row or column.  Hence, after permuting rows and columns, we can transform $\Matrix{n}{p}$ into a matrix all of whose entries are~$1$, except for the entries $(1,1),\dots,(R,R)$, which are~$0$.  So there are $(n-R)^2$ isolated vertices (bottom right part of the matrix), and the top~$R$ rows and leftmost~$R$ columns form a $G_{(R)}$.  The same application of Chebyshev as above completes the proof.
\end{proof}


Hence, the smallest interesting~$p$ is $\Omega(n^{-\nfrac32})$, and the largest interesting~$p$ is $1-\Omega(n^{-\nfrac32})$.

\subsection{The cases when $p$ tends to $0$ or~$1$ not too fast}
If $p n^2$ tends to infinity sufficiently fast, then the number of vertices of $\Graph{n}{p}$ is concentrated near $pn^2$.

Denote the number of edges of $\Graph{n}{p}$ by~$N'$.  For two potential vertices $u,v \in \{1,\dots,n\}^2$, we have $\Prb( u\sim v \mid u,v\in V(\Graph{n}{p})) = 1-p^2 =: \delta$.  We fix this meaning of~$\delta$ for the remainder of the paper.  Now
\begin{equation*}
  \Exp N'
  = p^2(1-p^2) \frac{n^2(n-1)^2}{2}
  = \delta\, \binom{pn^2}{2} + O(p^2n^3)
  = \delta\, \binom{\Exp N}{2} + O(p^2n^3),
\end{equation*}
so that the expected density of $\Graph{n}{p}$ is asymptotic to~$\delta$ if~$N$ is concentrated.
%%
Changing a single matrix entry can affect the number of edges, $N'$, by up to~$2n-1$, so we do not expect very good concentration.  But, if~$p$ is $\Omega(1/n)$ and $1-\Omega(1/n)$, then, an application of Chebyshev's inequality shows that we have, a.a.s.,
\begin{equation}\label{eq:rnd-graph-basics:numo-edges}
  N' = \delta\binom{pn^2}{2} + O\bigl(pn^2\sqrt{\delta}\bigr).
\end{equation}
This is a special case of the concentration of the number of $r$-cliques, which we discuss in Section~\ref{sec:fool}.

Let us take a more precise look at the number of edges, conditioned on the number of vertices.  With $\pi := N/n^2$, we find that
% \begin{equation*}
%   \Prb\bigl( u\sim v  \bigm|  u,v\in V(\Graph{n}{p}), N \bigr)
%   = \frac{ 2\binom{n-4}{N-3} + \binom{n^2-4}{N-2} }{ \binom{n^2-2}{N-2} }
%   = \frac{ (n^2+n-3)(n^2-N) }{ (n^2-2)(n^2-3) }
%   = 1-\pi^2 + O(1/n^2),
% %\delta \binom{N}{2}.
% \end{equation*}
% so that
the probability, conditioned on~$N$, that a fixed 2-by-2 submatrix of $\Matrix{n}{p}$ gives an edge of $\Graph{n}{p}$ equals\footnote{As usual, we denote by $(a)_b := a(a-1)\dots(a-b+1)$ the falling factorial.}
\begin{align*}
  &\frac{ 2\binom{n^2-4}{N-2} + 4\binom{n^2-4}{N-3}/2 }{ \binom{n^2}{N} }
  \\
  &=
  2\frac{ (n^2-N) N (N-1) }{ (n^2)_4 } \, (n^2-N-1 + 2(N-2))
  \\
  &=
  2\frac{ (n^2-N) N (N-1) }{ (n^2)_4 } \, (n^2+N-3)
  \\
  &=
  2\pi^2(1-\pi^2) + O(1/n^2),
\end{align*}
and hence,
\begin{equation}\label{eq:def-density}
  \Exp( N' \mid N )
  =
  \binom{n}{2}^{\!\!2} \cdot 2\pi^2(1-\pi^2)
  =
  (1-\pi^2) \binom{\pi n^2}{2} + O(\pi^2 n^3)
  =
  (1-\pi^2) \binom{N}{2} + O(N^2/n).
\end{equation}

\subsection{Concentration of clique number and chromatic number}
The clique size (fooling set size) and the chromatic number (corresponding to the nondeterministic communication complexity) are concentrated: Changing one row (or column) of the matrix can change each of these numbers by at most~1, so a standard application of McDiarmid's inequality~\cite{mcdiarmid:method:1989} yields the following.

\begin{proposition}%\mbox{}
  \begin{enumerate}[label=(\alph*)]
  \item The size, $\omega(\Graph{n}{p})$, of the largest clique in $\Graph{n}{p}$ is within~$t$ from its mean with probability at least
  % \begin{equation*}
    $\displaystyle 1 - 2e^{-2t^2/n}$.
  % \end{equation*}
\item The chromatic number, $\chi(\Graph{n}{p})$, of $\Graph{n}{p}$ is within~$t$ from its mean with probability at least
  % \begin{equation*}
  $\displaystyle 1 - 2e^{-2t^2/n}$.
  % \end{equation*}
\end{enumerate}
%\qed
\end{proposition}

The bounds are only useful when $\omega$ and $\chi$ are larger than $\sqrt n$.  However, we will see in Sections \ref{sec:fool} and~\ref{sec:chi} that for $p=1-o(1)$, $\omega$ becomes small (even $O(1)$ ultimately) and $\chi$ becomes $O(\polylog n)$.

For the clique number, in that region of~$p$, we can give upper and lower bounds differing by a small constant factor, but for the chromatic number, we do not know any similarly strong concentration results.


% alpha.tex
\section{Independence number (largest 1-rectangle)}\label{sec:alpha}
Let $\alpha(\cdot)$ denote the independence number of a graph.  We extend the results of Park et al.~\cite{Park-Szpankowski:biclusters:05} from $p = \Theta(1)$ to $p=1-o(1)$.

For $K\subseteq \{1,\dots,n\}$, the \textit{1-rectangle\label{page:generated_rectangle} generated by the row-set $K$ in~$M$} is the largest 1-rectangle~$R$ in~$M$ with row set $K$, i.e., $R = K\times L$ with
\begin{equation*}
  L := \Bigl\{ \ell\in\{1,\dots,n\} \mid \forall \; k \in K\colon \ M_{k,\ell}=1 \Bigr\}.
\end{equation*}
The 1-rectangle generated by a set of columns is defined similarly.
%%
Note that every inclusion-wise maximal 1-rectangle is generated by its row-set and also by its column set.

\begin{theorem}\label{thm:alpha}\mbox{}
  \begin{enumerate}[label=(\alph*)]
  \item\label{thm:alpha:small-p} If $\nfrac5n \le p \le \nfrac1e$, then a.a.s., the largest 1-rectangle is generated  by a single row or column, and if $p\gg (\ln n)/n$, its size is $(1+o(1))pn$.
    %%
  \item\label{thm:alpha:large-p} Define
    \begin{equation}\label{eq:alpha:large-p:def-a}
      \begin{aligned}
        a_- &:= \lfloor \log{\nfrac1p}e \rfloor, \\
        a_+ &:= \lceil \log{\nfrac1p}e \rceil, \text{ and}\\
        a   &:= \mbox{\rm argmax}_{b \in \{a_-,a_+\}} b p^b \; = \mbox{\rm argmax}_{b \in\{1,2,3,\dots\}} b p^b.\\
      \end{aligned}
    \end{equation}
    There exists a constant $\lambda_0$, such that if $\nfrac1e \le p \le 1-\nfrac{\lambda_0}{n}$, then, a.a.s., a largest 1-rectangle is generated by~$a$ rows or columns and its size is $(1+o(1))ap^an$.
  \end{enumerate}
\end{theorem}
The result is important for Section~\ref{sec:chi}.  In the proof, we first get rid of rectangles which are squares, using a simple union bound, and then use Chernoff-style concentration for non-square rectangles.

Before we present the proof in \S\S\ref{ssec:alpha:small-p}--\ref{ssec:alpha:large-p}, we discuss the result.   First of all, note that $\log{\nfrac1p}e = 1/\ln(\nfrac1p)$.  The following helps compare the quantities.
\begin{remark}\label{rem:alpha:facts-about-a}
  \begin{enumerate}[label=(\alph*)]
  \item If $p\ge \nfrac1e$, then
    \begin{equation}\label{eq:alpha:p-to-a-lb}
      \nfrac{1}{e^2}
      \le
      \frac{p}{e}
      \le
      p\cdot p^{\log{\nfrac1p}e}
      \le
      p^a
      \le
      \frac{1}{p}\cdot p^{\log{\nfrac1p}e}
      \le
      \frac{1}{pe}
      \le
      \nfrac{1}{e},
    \end{equation}
    i.e., $p^a \approx \nfrac1e$, more accurately $p^a = (1-o_{p\to 1}(1))/e$.
    %%
  \item With $p = 1-\notp = 1 - \nfrac{\lambda}{n}$, the following makes the range of $\alpha(\Graph{n}{p})$ clearer: Since $\notp \le \ln(\nfrac{1}{(1-\notp)}) \le \notp+\notp^2$ holds when $\notp \le 1-\nfrac1e$, we have
    \begin{equation}\label{eq:alpha:a-bounds}
      \frac{1}{e\notp} = \frac{n}{e\lambda}   \le\  p\frac{n}{\lambda} = \frac{p}{\notp}   \le\  \frac{1}{1+\notp}\cdot \frac{1}{\notp} \,\le  \log{\nfrac1p}e  \le  \frac{1}{\notp} =\frac{n}{\lambda}
    \end{equation}
  \end{enumerate}
\end{remark}

\begin{corollary}\label{cor:alpha:large-p:1-o1}
  For $p = 1-\frac{\lambda}{n}$ with $\lambda_0 \le \lambda = o(n)$, we have $\displaystyle%
  \alpha\bigl(\Graph{n}{p}\bigr)
      % &=
      % \frac{n^2}{e\lambda\,(1+\frac{\lambda}{2n})} + O\bigl(\lambda + \frac{n^{\nfrac32}}{\lambda} \ln n\bigr)\\ &
  =
      \frac{n^2}{e\lambda} + O(n).
      $
\end{corollary}
\begin{proof}[Proof of the corollary from Theorem~\ref{thm:alpha}]
 For the given~$p=1-\notp$, if $\nfrac{1}{e}=p^{a}$, we have
 \begin{multline*}
   ap^a
   =
   (1+O(\notp)) \frac{ \log{\nfrac{1}{p}}e }{ e }
   =
   \frac{1+O(\notp)}{e\ln\frac{1}{1-\notp}}
   \\
   =
   \frac{1+O(\notp)}{e\lt( \notp + \notp^2/2 +  \notp^3/3 + \dots\rt)}
   \eqcmt{(*)}
   \frac{1+O(\notp)}{e\notp}
   =
   \frac{1}{e\notp} + O(1)
   =
   \frac{n}{e\lambda} + O(1),
 \end{multline*}
 where equation~(\textasteriskcentered) uses $\notp=o(1)$.  Multiplying by~$n$ and invoking Theorem~\ref{thm:alpha}(\ref{thm:alpha:large-p}), we obtain the desired bound.
\end{proof}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\subsection{Small~$p$: Proof of Theorem~\ref{thm:alpha}~\ref{thm:alpha:small-p}}\label{ssec:alpha:small-p}
Theorem~\ref{thm:alpha}~\ref{thm:alpha:small-p} occurs in a weaker form in \cite{Park-Szpankowski:biclusters:05}, with the additional condition that $p = \Omega(1)$: Their proof does not cover the case $p\to 0$; the following one does.

We say that a rectangle is \textit{bulky}, if it extends over at least~2 rows and also over at least~2 columns.
%%
The proof of Theorem~\ref{thm:alpha} proceeds by considering three types of rectangles:
\begin{enumerate}[label=\arabic*.]
\item those consisting of exactly one row or column (they give the bound in the theorem);
\item square bulky rectangles;
\item bulky rectangles which are not square.
\end{enumerate}

Let us start with the easiest type (1).  The size of such a rectangle is the number of 1s in the chosen row.
\begin{lemma}
  For all $p,n$, a.a.s., there exists a row in $\Matrix{n}{p}$ containing at least~$pn$ 1s.  If $p\gg (\ln n)/n$, for every constant $\eps \in \lt]0,1\rt]$, a.a.s., no row or column has more than $(1+\eps)pn$ 1s.
\end{lemma}
\begin{proof}
  For the first statement, note that the probability that the number of 1s in a fixed row is less than $pn$ is at most $\nfrac12$ (median of a binomial distribution).  Since the rows are independent, the probability that all rows have fewer than $pn$ 1s is at most $2^{-n}$.

  For the second statement, we use an easy Chernoff-type bound (Theorem 4.4(2) in \cite{Mitzenmacher-Upfal:Book}).  Denote by~$X$ the number of 1s in a fixed row of $\Matrix{n}{p}$.  Then
  \begin{equation*}
    \Prb( X \ge (1+\eps)pn ) \le e^{-\eps^2 pn /3} \le e^{-2 \ln n} = n^{-2},
  \end{equation*}
  where the last inequality holds for large enough~$n$, because $pn \gg \ln n$ implies $pn > 6\eps^{-2}\ln n$ for $n$ sufficiently large.  Hence, the probability that a row (or a column) exists which has at least $(1+\eps)pn$ 1s is $o(1)$.
\end{proof}

We now deal with rectangles of type~(2).

\begin{lemma}\label{lem:alpha:small-p:no-square}
  If $p \ge \nfrac5n$, then, a.a.s., there is no square 1-rectangle of size~$\sqrt{pn}\times\sqrt{pn}$.
\end{lemma}
\begin{proof}
  We abbreviate $\kappa := pn$.
  %%
  By the union bound, for the probability~$q=q(n)$ that there exists a 1-rectangle of size~$\sqrt{\kappa}\times\sqrt{\kappa}$, we have
  \begin{equation*}
    q \le
    \binom{n}{\sqrt{\kappa}}^2 p^{\kappa}
    \le
    \biggl( \frac{e^2 n}{p} \biggr)^{\sqrt{\kappa}} p^{\kappa};
  \end{equation*}
  where we used the inequality $\binom{s}{t} \le (es/t)^t$.  Applying $\ln$, we find
  \begin{equation}\label{eq:alpha:small-p:no-square:ln}
    \ln q
    \le
    \sqrt{\kappa} \ln n + 2\sqrt{\kappa} + \sqrt{\kappa} \ln(1/p) - \kappa \ln(1/p)
    =
    \sqrt{\kappa}\Bigl( \ln n + 2 - \bigl(\sqrt{\kappa}-1\bigr)\ln(1/p) \Bigr).
  \end{equation}
  Now we distinguish cases.
  If $(2\ln n)^2/n \le p \le \nfrac1e$, then $\sqrt{\kappa} \ge 2 \ln n$, and hence we can bound the expression in the parentheses in~\eqref{eq:alpha:small-p:no-square:ln} as follows:
  \begin{equation*}
     \ln n + 2 - \bigl(\sqrt{\kappa}-1\bigr)\ln(1/p) \le \ln n + 3 - 2\ln n  \le  - \tfrac12 \ln n,
  \end{equation*}
  for all large enough~$n$.  Hence, $q\to 0$ in this region.
  %%
  If, on the other hand, $5/n \le p \le (2\ln n)^2/n$, then
  \begin{equation*}
    \ln q
    \le
    \sqrt{5}\Bigl( \ln n + 2 - \bigl(\sqrt{5}-1\bigr)\bigl(\ln n - 2\ln(2\ln n)\bigr) \Bigr)
    \le
    - \sqrt{5}\bigl( \sqrt{5}-2 \bigr)\ln n + O(\ln\ln n)
    = -\Omega(\ln n).
  \end{equation*}
  Hence, $q\to 0$ in this region, too, which completes the proof of the lemma.
\end{proof}

Finally, we come to rectangles of type~(3).  Consider the probability, $\varrho$, that $\Matrix{n}{p}$ contains a bulky 1-rectangle of size~$s$.
%%
By Lemma~\ref{lem:alpha:small-p:no-square}, if such a 1-rectangle has dimensions $a\times b$, we must have $a < \sqrt{pn}$ or $b < \sqrt{pn}$, or else $\varrho=o(1)$.  We have $\varrho \le 2\varrho'$, where $\varrho'$ is the probability that $\Matrix{n}{p}$ contains a 1-rectangle of size~$s$ consisting of at least as many columns as rows.  For $\varrho'$, we need to consider only 1-rectangles with $a < \sqrt{pn}$.  Moreover, increasing~$b$ if necessary, w.l.o.g., we may restrict to rectangles generated by a row-set of size $a$, with $2\le a \le n$ (the lower bound $a\ge 2$ comes from the condition that the rectangle be bulky).

\begin{lemma}\label{lem:alpha:small-p:bulky-smallp}
  With $\kappa = pn$, if $5 \le \kappa = O(\polylog n)$, then, a.a.s., there is no bulky rectangle of size at least~$\kappa$.
\end{lemma}
\begin{proof}
  By the remarks above, we have to bound the probability that there exists a row-set of size $a\in\{2,\dots,\sqrt\kappa\}$ which generates a 1-rectangle of size at least $\kappa/a$.

  Firstly, for a given set $K$ of~$a$ rows, we bound the probability that the rectangle it generates has size at least $pn$.
% For that, we use the following well-known inequality \cite[Thm.~1.1]{BollobasBkRndGraphs}: If~$S$ is a $\Bin(n,\varrho)$ r.v., $u>1$, and $m:=\lceil upn\rceil$, then
%   \begin{equation}\label{eq:bollobas}\tag{$*$}
%     \Prb(S\ge m) \le \frac{u}{u-1}\Prb(S=m).
%   \end{equation}
  Denote by~$S$ the number of columns in the rectangle generated by~$K$.  This is a $\Bin(n,p^a)$ r.v.
  and
  % using~\eqref{eq:bollobas} with $u := \frac{\lfloor \kappa/a \rfloor}{p^a n} = 1 +\Omega(1)$,
  we find that
  \begin{equation*}
    \Prb( a\cdot b \ge \kappa)
    =
    \Prb( S \ge \nfrac{\kappa}{a} )
    \le
    \binom{n}{\kappa/a} p^\kappa
    =
    \binom{n}{\kappa/a} \lt( \frac{\kappa}{n} \rt)^\kappa.
  \end{equation*}
  Secondly, we sum over all sets~$K$ of cardinality~$a$, and compute
  \begin{multline*}
    \binom{n}{a} \binom{n}{\kappa/a} p^\kappa (1-p^a)^{n-\kappa/a}
    \le
    n^{a+\kappa/a - \kappa +\kappa\log{n}{\kappa}}
    =
    n^{-\bigl( \kappa(1-\nfrac1a) - a - o(\kappa) \bigr)},
  \end{multline*}
  where $\kappa \log{n}{\kappa} = o(\kappa)$ follows from $\kappa = O(\polylog n)$.

  Now, because $a<\sqrt\kappa$, we have that the exponent on~$\nfrac1n$ is $\kappa(1-\nfrac1a) - o(\kappa) \ge \kappa / 3$, as $a \ge 2$.  Finally, summing over all~$a$, we obtain, as an upper bound for the probability that one of these rectangles has size~$\kappa$ or larger, the expression $n^{-(\kappa/3 -1)}$ which is $o(1)$, as $\kappa \ge 5$.
\end{proof}

% For the remaining case, we will need the following numerical fact, whose proof we leave to the reader.
% \begin{lemma}\label{lem:alpha:small-p:large-p:chernoff-less-1}
%   There exists an $\eps > 0$ such that, for all $p \in \lt]\nfrac18, \nfrac1e\rt]$ and $a\in\{2,3\}$,
%   \begin{equation*}
%     \biggl( a p^{a-2} \biggr)^{p/a} \biggl( \frac{1-p^a}{1-p/a} \biggr)^{1-p/a} \le 1 - \eps.
%   \vspace*{-5ex}%
%   \end{equation*}%
%   \qed
% \end{lemma}

Now we deal with bulky rectangles.
\begin{lemma}\label{lem:alpha:small-p:bulky-largep}
  With  $\kappa := pn$, if $\ln^4 n \le \kappa \le \nfrac{n}{e}$, then, a.a.s., there is no bulky rectangle of size at least~$\kappa$.
\end{lemma}
\begin{proof}
  By the remarks above Lemma~\ref{lem:alpha:small-p:bulky-smallp}, we have to bound the probability that there exists a row-set of size $a\in\{2,\dots,\sqrt{\kappa}\}$ which generates a 1-rectangle of size at least $\kappa/a$.

  For $2 \le a < \sqrt{\kappa}$, let $X_a$ count the number of columns~$\ell$ with $M_{k,\ell}=1$ for $k=1,\dots,a$.  We are going to show that
  \begin{equation*}
    P := \sum_{a=2}^{\sqrt{\kappa}} \binom{n}{a}  \Prb\bigl( X_a \ge \nfrac{\kappa}{a} \bigr) = o(1).
  \end{equation*}
  The r.v.\ $X_a$ has $\Bin(n,p^a)$ distribution.  We compute
  \begin{multline}\label{eq:alpha:small-p:large-p:prbub}
    \Prb\bigl( X_a \ge \nfrac{\kappa}{a} \bigr)
    \le
    \binom{n}{\nfrac{\kappa}{a}} (p^a)^{\kappa/a}
%    \\
    \le
    \lt( \frac{en}{ \nfrac{\kappa}{a} } \rt)^{\kappa/a}  (p^a)^{\kappa/a}
    =
    \lt(  \lt( (ea)^{1/(a-1)} p \rt)^{\kappa/a}  \rt)^{a-1}.
  \end{multline}
  Now there exists a constant $\varrho < 1$ such that
  \begin{equation}\label{eq:alph:small-p:large-p:the-rhos}
    (ea)^{1/(a-1)} \le
    \begin{cases}
      8\varrho,     &\text{for all $a\ge 2$, and}\\
      e\varrho,      &\text{for $a \ge 4$.}
    \end{cases}
  \end{equation}
  Consequently, we distinguish two cases:
  \begin{enumerate}[label={\textit{(\roman{*})}}]
  \item\label{enum:alpha:small-p:case-1} $p\le \nfrac18$ and
  \item\label{enum:alpha:small-p:case-2} $\nfrac18 < p \le \nfrac{1}{e}$.
  \end{enumerate}

  \myparagraph{\it Case~(\ref{enum:alpha:small-p:case-1}): $p\le \nfrac18$.}
  In this case, we compute
  \begin{align*}
    P &= \sum_{a=2}^{\sqrt{\kappa}} \binom{n}{a}  \Prb\bigl( X_a \ge \nfrac{\kappa}{a} \bigr)&& \\
    &\le
    \sum_{a=2}^{\sqrt{\kappa}} \binom{n}{a}  \lt(  \lt( (ea)^{1/(a-1)} p \rt)^{\kappa/a}  \rt)^{a-1} && \comment{by \eqref{eq:alpha:small-p:large-p:prbub}}\\
    &\le
    \sum_{a=2}^{\sqrt{\kappa}} \binom{n}{a} \lt( \varrho^{\kappa/a} \rt)^{a-1}&& \comment{by \eqref{eq:alph:small-p:large-p:the-rhos}}\\
    &\le
    \sum_{a=2}^{\sqrt{\kappa}} \binom{n}{a} \lt( \varrho^{\sqrt{\kappa}} \rt)^{a-1}&& \comment{since $a\le \sqrt{\kappa}$ and $\varrho<1$}\\
    &=
    \varrho^{\sqrt{\kappa}} \sum_{a=2}^{\sqrt{\kappa}} \binom{n}{a} \lt( \varrho^{\sqrt{\kappa}} \rt)^{a-2}&&\\
    &\le
    \varrho^{\sqrt{\kappa}} \; n^2 \; \sum_{a=0}^{\sqrt{\kappa}-2} \binom{n-2}{a} \lt( \varrho^{\sqrt{\kappa}} \rt)^a&& \comment{replacing $a\leadsto a-2$}\\
    &\le \varrho^{\sqrt{\kappa}} \; n^2 \; (1+\varrho^{\sqrt{\kappa}})^{n-2} && \comment{Binomial theorem}\\
    &\le \varrho^{\sqrt{\kappa}} \; n^2 \; e^{n \varrho^{\sqrt{\kappa}}} && \\
    &\le \varrho^{\sqrt{\kappa}} \; n^2 \; e^{n \varrho^{\ln^2 n}} && \comment{because $p\ge (\ln^4n)/n$ and $\varrho<1$}\\
    &= o(1) && \comment{because $\varrho = 1-\Omega(1)$.}\\
  \end{align*}

  \myparagraph{\it Case~(\ref{enum:alpha:small-p:case-2}): $\nfrac18 < p \le \nfrac{1}{e}$.}
  In this case, by~\eqref{eq:alph:small-p:large-p:the-rhos}, the same calculation as in the $p<\nfrac18$-case works if the sum is started with $a=4$.  For the first two terms of the sum, $a=2,3$, we use a Chernoff bound on $X_a$. %, which gives us (e.g., Eqn.~(2.4) in~\cite{Janson-Luczak-Rucinski:Book})
  % \begin{equation}\label{eq:alpha:small-p:large-p:chernoff}
  %   \Prb\bigl( X_a \ge \nfrac{\kappa}{a} \bigr) \le \lt(   \biggl( a p^{a-2} \biggr)^{p/a} \biggl( \frac{1-p^a}{1-p/a} \biggr)^{1-p/a}   \rt)^n.
  % \end{equation}
  The convenient choice is Corollary 21.9 in~\cite{Frieze-Karonski:rndGBook:2015}: With $c := p^{1-a}/a$ and $\mu := \Exp X_a$, we have
  \begin{equation}\label{eq:alpha:small-p:large-p:chernoff}
    \begin{aligned}
      \Prb\bigl( X_a \ge \nfrac{\kappa}{a} \bigr)
      &
      =
      \Prb\bigl( X_a \ge c\mu \bigr)
      \\ &
      \le
      \bigl( e^{1-1/c}/c \bigr)^{c\mu}
      &&\comment{Corollary 21.9 in~\cite{Frieze-Karonski:rndGBook:2015}}
      \\ &
      =
      \bigl( e^{1-1/c}/c \bigr)^{pn/a}
      \\ &
      =
      (1-\Omega(1))^{\Omega(n)} = o(n^{-3})
      &&\text{($*$)},
    \end{aligned}
  \end{equation}
  where ($*$) follows as $a \ge 2$ implies that $c=1+\Omega(1)$ ($c$ is greater than and bounded away from~$1$), which in turn implies that $e^{1-1/c}/c = 1 - \Omega(1)$ (the base of the exponent is less than and bounded away from~$1$).

  % Using Lemma~\ref{lem:alpha:small-p:large-p:chernoff-less-1},
  We conclude
  \begin{multline*}
    P = \sum_{a=2}^{\sqrt{\kappa}} \binom{n}{a}  \Prb\bigl( X_a \ge \nfrac{\kappa}{a} \bigr)
    \\
    =
    \binom{n}{2} \Prb\bigl( X_2 \ge \nfrac{\kappa}{2} \bigr)
    + \binom{n}{3} \Prb\bigl( X_3 \ge \nfrac{\kappa}{3} \bigr)
    + \sum_{a=4}^{\sqrt{\kappa}} \binom{n}{a}  \Prb\bigl( X_a \ge \nfrac{\kappa}{a} \bigr)
    \\
    = o(1) + o(1) + o(1),
  \end{multline*}
  where the first two ``$o(1)$''s follow from~\eqref{eq:alpha:small-p:large-p:chernoff},
  %and Lemma~\ref{lem:alpha:small-p:large-p:chernoff-less-1}
  and the third is the same calculation as in the previous case.
\end{proof}

This concludes the proof of Theorem~\ref{thm:alpha}(\ref{thm:alpha:small-p}).

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Large~$p$: Proof of Theorem~\ref{thm:alpha}(\ref{thm:alpha:large-p})}\label{ssec:alpha:large-p}
Now we prove the part of Theorem~\ref{thm:alpha} about $p \ge \nfrac{1}{e}$.  Again, we first prove a statement about square rectangles.
\begin{lemma}\label{lem:alpha:large-p:no-square-rect}
  For every $\eps > 0$ there exists a constant $\lambda_0$ such that, if $\notp n = \lambda \ge \lambda_0$, then, a.a.s., there is no square 1-rectangle of size
  \begin{equation*}
    \frac{n}{\lambda^{1-\eps}} \times \frac{n}{\lambda^{1-\eps}}
  \end{equation*}
\end{lemma}
\begin{proof}
  This is a direct union bound computation.  With $b := \frac{n}{\lambda^{1-\eps}}$, the probability that such a 1-rectangle exists is at most
  \begin{equation*}
    \binom{n}{b}^{\!\!2} p^{b^2}
    =
    \binom{n}{b}^{\!\!2} (1-\notp)^{b^2}
    \le
    e^{b (2\ln(en/b) - \lambda b/n)}
    =
    e^{b \cdot A_b},
  \end{equation*}
  where
  \begin{align*}
    A_b
    &=
    2 \ln(en/b) - \lambda b/n
    \\
    &=
    2 \ln\bigl(e \, \lambda^{1-\eps} \bigr) - \lambda^\eps
    \\
    &\le -1,
  \end{align*}
  where the last inequality holds if $\lambda \ge \lambda_0$ and $\lambda_0$ is large enough.  The claim follows because $b\to \infty$ (since $\lambda \le n$).
\end{proof}

As above, we need the notion of a ``bulky'' rectangle: Here, we say that a rectangle of dimensions $k\times \ell$ is \textit{bulky,} if $k \le \ell$.  By Lemma~\ref{lem:alpha:large-p:no-square-rect}, in particular, a.a.s., a bulky rectangle must have $k < n/\lambda^{\nfrac23}$.  Again, by exchanging the roles of rows and columns, and multiplying the final probability estimate by~2, we only need to consider 1-rectangles with at least as many columns as rows (i.e., bulky ones).

\begin{proof}[Proof of Theorem~\ref{thm:alpha}(\ref{thm:alpha:large-p})]
  For every $b\in\{1,\dots,n\}$, denote by~$X_b$ the number of columns of the 1-rectangle generated by the row set~$\{1,\dots,b\}$---a random variable with $\Bin(n,p^b)$ distribution.  We prove that, for every $1< u < 2$,
  \begin{equation}\label{eq:alpha:large-p:goal-estimate}
    \sum_{b=1}^{n/\lambda^{\nfrac23}} \binom{n}{b}  \Prb( bX_b \ge u\, ap^an ) = o(1),
  \end{equation}
  which, together with Lemma~\ref{lem:alpha:large-p:no-square-rect}, proves Theorem~\ref{thm:alpha}(\ref{thm:alpha:large-p}).

  We split the proof into two lemmas, dealing with the cases $b\le \log{\nfrac1p}e$ and $b \ge \log{\nfrac1p}e$, respectively, stated below.  Establishing these lemmas completes the proof of Theorem~\ref{thm:alpha}(\ref{thm:alpha:large-p}).
\end{proof}

\begin{lemma}\label{lem:alpha:large-p:b-le-a}
  For every $u\in\lt]1,2\rt[$ there exists a constant $\lambda_0 \ge 1$ such that, for every $\notp \ge \nfrac{\lambda_0}{n}$, and for every $1\le b\le \log{\nfrac1p}e$,
  we have
  \begin{equation*}
    \binom{n}{b} \Prb\Bigl(  X_b \ge u \, \frac{a}{b}p^a n  \Bigr)  = o_u(\nfrac1n).
  \end{equation*}
\end{lemma}

\begin{lemma}\label{lem:alpha:large-p:a-le-b}
  For every $u \in \lt]1,2\rt[$ there exists a constant $\lambda_0$ such that, if $\notp n = \lambda \ge \lambda_0$, and $\log{\nfrac1p}e \le b \le n/\lambda^{\nfrac23}$, then
  \begin{equation*}
    \binom{n}{b} \Prb\Bigl(  X_b \ge u \, \frac{a}{b}p^a n  \Bigr)  = o_u(\nfrac1n).
  \end{equation*}
\end{lemma}

\begin{proof}[Proof of Lemma~\ref{lem:alpha:large-p:b-le-a}]
  Define
  \begin{equation*}
    \delta := \min\biggl( u\,\frac{ap^a}{bp^b} -1 , \; 1 \biggr).
  \end{equation*}
  Note that $\delta \ge u-1 > 0$ by the definition of~$a$ in~\eqref{eq:alpha:large-p:def-a}.  The ``$1$'' in the second argument of the minimum is somewhat arbitrary: the particular version of the Chernoff inequality which we refer to, \cite[Thm~4.4-2]{Mitzenmacher-Upfal:Book}, requires $\delta \le 1$.
  %%
  Using this Chernoff bound in
  \begin{equation*}
    \Prb(X_b \ge u\,ap^an/b) \le \Prb(X_b \ge (1+\delta)\Exp X_b) \le e^{-\delta^2\Exp X_b/3},
  \end{equation*}
  and the inequality $\binom{n}{b} \le (en/b)^b$, we estimate
  \begin{align}
    \ln\lt( \binom{n}{b} \Prb\Bigl(  X_b \ge u\,\frac{a}{b}p^a n  \Bigr) \rt)
    &\le
    b\ln\Bigl(\frac{en}{b}\Bigr) - \delta^2 p^bn/3 \notag\\
    &\le
    n\lt( \frac{b}{n}\ln\Bigl(\frac{en}{b}\Bigr) - \frac{\delta^2}{3e} \rt) &&\comment{since $b\le \log{\nfrac1p}e$}.
    \tag{$*$}\label{eq:alpha:large-p:b-le-a:lnovern}
  \end{align}
  For any real $b\in[1,\log{\nfrac1p}e]$, denote by $A_b$ the term inside the parentheses in~\eqref{eq:alpha:large-p:b-le-a:lnovern}.

  Since $b\mapsto A_b$ is nondecreasing on $[1,n]$, we have, for every $b\in [1,\log{\nfrac1p}e]$,
  \begin{align*}
    A_{b}
    &\le
    A_{\log{\nfrac1p}e} \\
    &\le
    A_{1/\notp}                                                  &&\comment{$A_\cdot$ nondecreasing and $\log{\nfrac1p}e \le \frac{1}{\notp} \le n$, by~\eqref{eq:alpha:a-bounds}}\\
    &\le
    A_{n/\lambda_0}                                              &&\comment{$A_\cdot$ nondecreasing and $1/\notp \le n/\lambda_0 \le n$}\\
    &=
    \frac{  \ln(e\lambda_0) }{ \lambda_0 } - \frac{\delta^2}{3e}  &&\\
    &\le
    \frac{  \ln(e^2\lambda_0) }{ \lambda_0 } - \frac{(u-1)^2}{3e} &&\comment{as $\delta \ge u-1$}
  \end{align*}
  Hence, for sufficiently large $\lambda_0$, depending only on~$u$, we have, for all $b \in [1,\log{\nfrac1p}e]$,
  \begin{equation*}
    A_{b} = -\Omega_u(1),
  \end{equation*}
  so that
  \begin{equation*}
    \binom{n}{b}\Prb(X_b \ge u\,ap^an/b) \le e^{n A_b} = e^{-\Omega_u(n)} = o_u(\nfrac1n),
  \end{equation*}
  which concludes the proof of the lemma.
\end{proof}


\begin{proof}[Proof of Lemma~\ref{lem:alpha:large-p:a-le-b}]
  By Lemma~\ref{lem:alpha:large-p:no-square-rect}, we already know that, if a bulky 1-rectangle generated by~$b$ rows exists with non-$o(1)$ probability, we must have $b < n/\lambda^{\nfrac23}$.

  Define $\delta$ as follows, $0 < u-1 \le \delta := (u-1)\frac{ap^a}{bp^b} \le u\frac{ap^a}{bp^b} -1$, and let
  \begin{equation*}
    \eps :=
    \begin{cases}
      (u-1)^2 /3, &\text{if $\delta \le \nfrac 32$;}\\
      \ln \nfrac52 - 1 + \nfrac25, &\text{otherwise.}
    \end{cases}
  \end{equation*}
  We do the case distinction because we use two slightly different versions of Chernoff in our estimate of 
  \begin{equation*}%\label{eq:alpha:large-p:a-le-b:ln_prb}\tag{P}
    \varrho := \Prb\Bigl( X_b \ge u\frac{a}{b} p^an \Bigr).
  \end{equation*}
  If $\delta \le \nfrac32$, then
  \begin{align*}
    %\hspace*{1em}&\hspace*{-1em}%
    \varrho% \ln\lt(   \binom{n}{b} \Prb\Bigl( X_b \ge u\frac{a}{b} p^an \Bigr)  \rt) &\\
    &\le
    \Prb\Bigl( X_b \ge (1+\delta) \Exp X_b\Bigr)
    \\
    &\le
    e^{- \delta^2 p^b n/3} &&\comment{Chernoff, e.g., \cite[Cor.\ 2.3]{Janson-Luczak-Rucinski:Book}}
    \\
    &\le
    e^{- (u-1)\, (u-1) \frac{a}{b} p^a n/3} &&\comment{definition of $\delta$, and $\delta \ge u-1$}
    \\
    &= 
    e^{- \eps\, \frac{a}{b} p^a n}.
  \end{align*}
  If, on the other hand, $\delta > \nfrac32$, then
  \begin{equation*}
    u \frac{a}{b} p^an
    =
    u\frac{ap^a}{bp^b} \cdot \Exp X_b
    \ge
    (\delta + 1)\cdot \Exp X_b
    \ge
    \tfrac{5}{2} \cdot \Exp X_b,
  \end{equation*}
  and we have, by Eqn.~(2.10) in \cite[Cor.\ 2.4]{Janson-Luczak-Rucinski:Book},
  \begin{equation*}
    \varrho
    \le
    e^{- \eps\, \frac{a}{b} p^a n}.
  \end{equation*}
  In both cases, we conclude
  \begin{align*}
    \hspace*{1em}&\hspace*{-1em}%
    \ln\lt(   \binom{n}{b} \Prb\Bigl( X_b \ge u\frac{a}{b} p^an \Bigr)  \rt) \\
    &\le
    b\ln(en/b) - \eps\, \frac{a}{b} p^a n \\
%
%
    &\le
    b\ln(en/b) -  \eps\, \frac{an}{e^2b} &&\comment{$p^a \ge \nfrac1{e^2}$ by~\eqref{eq:alpha:p-to-a-lb}}
    \\
    &\le
    b\ln(en/b) - \eps\, \frac{n^2}{2e^3\lambda b} &&\comment{$a \ge \lfloor \nfrac{n}{e\lambda} \rfloor$ by~\eqref{eq:alpha:a-bounds}, \& $\lfloor \nfrac{n}{e\lambda} \rfloor \ge \nfrac{n}{2e\lambda}$ as $\lambda\le \nfrac{n}{e}$}
    \\
    &\le
    b\ln(e^2\lambda) - \eps\, \frac{n^2}{2e^3\lambda b} &&\comment{as $b\ge \log{\nfrac1p}e \ge \nfrac{n}{e\lambda}$, by~\eqref{eq:alpha:a-bounds}}
    \\
    &\le
    \frac{n}{\lambda^{\nfrac23}}\ln(e^2\lambda)
    -
    \frac{ \eps }{ 2e^3} \frac{n}{\lambda^{\nfrac13}  }    &&\comment{as $b\le n/\lambda^{\nfrac23}$}
    \\
    &=
    \frac{n}{\lambda^{\nfrac13}} \lt( - \frac{ \eps }{ 2e^3} + o_{\lambda\to\infty}(1) \rt).
  \end{align*}
  Hence, if $\lambda$ is at least a large enough constant, $\lambda_0$, then
  \begin{equation*}
    \binom{n}{b} \Prb\Bigl( X_b \ge u\frac{a}{b} p^an \Bigr) = e^{-\Omega_u(n^{2/3})} = o(\nfrac{1}{n}),
  \end{equation*}
  and the lemma is proven.
\end{proof}

%fool.tex

\section{Clique number (fooling sets)}\label{sec:fool}
Recall that $\HbipM$ is the bipartite graph with bipartite adjacency matrix~$M$, and note that $\HbipMnp$ is the ``usual'' Erd\H{o}s-Renyi random bipartite graph: each edge is picked independently with probability~$p$.

The next lemma, whose easy proof we leave to the reader, gives the relationship between fooling sets in Communication Complexity, cross-free matchings, and cliques in the rectangle graph.  Recall that a matching~$F$ in a bipartite graph~$H$ is called \textit{cross-free,} if for every two edges $e=(u_1,u_2),f=(v_1,v_2) \in F$, none of the ``cross edges'' $(u_1,v_2)$, $(u_2,v_1)$ is in~$H$.
\begin{lemma}[Trivial]
  Let~$M$ be an $n\times n$ matrix, and~$F\subseteq\{1,\dots,n\}\times\{1,\dots,n\}$.  The following are equivalent.
  \begin{enumerate}[label=(\alph*)]
  \item Fooling set property: for all $(k,\ell)\in F$, $M_{k,\ell} = 1$, and for all $(k',\ell')\in F\setminus\{(k,\ell)\}$, $M_{k,\ell'}M_{k',\ell}=0$;
  \item The set~$F$ is a cross-free matching in $\HbipM$;
  \item $F \subseteq V(\Graphd(M))$, and~$F$ is a clique in $\Graphd(M)$.
  \end{enumerate}
  %%\qed
\end{lemma}
It is known that the decision problem whether a bipartite graph has a cross-free matching of a given size is NP-hard~\cite{Shitov13fool}.

Recall $\delta = 1-p^2$, and $\notp = 1-p$.  Denote by $\nu(H)$ the size of the largest matching of a bipartite graph~$H$.  Clearly, $\omega(\Graphd(M)) \le \nu(\HbipM)$.
%%
Let $a(q)$ be a function with the property that, a.a.s., every Erd\H{o}s-Renyi random graph $\Gnp{n}{q}$ on~$n$ vertices with edge-probability~$q$ has an independent set of size at least~$a(q)$.

\begin{theorem}\label{thm:fool}\mbox{}
  \begin{subequations}
    \begin{enumerate}[label=(\alph*)]
    \item\label{thm:fool:o-sqrtn}%
      For $n^{-3/2} \le p = o(1/\sqrt n)$, a.a.s., we have
      \begin{equation}
        \omega(\Graph{n}{p}) = (1-o(1))\nu(\HbipMnp).
      \end{equation}
      In particular, if in addition $p \gg \nfrac1n$, then, a.a.s., $\omega(\Graph{n}{p}) = (1-o(1))n$.
    \item\label{thm:fool:alpha}%
      If $pn - \ln n \to \infty$, then, a.a.s., $\omega(\Graph{n}{p}) \ge a(p^2)$.
    \item\label{thm:fool:ub}%
      If $p \gg \sqrt{(\ln n)/n}$ and $\notp \ge n^{-o(1)}$, then, a.a.s.,
      \begin{equation*}
        \omega(\Graph{n}{p}) \le 2 \log{\nfrac1\delta}(pn^2).
      \end{equation*}
    \item\label{thm:fool:notp-small}%
      If $a \in \lt]0,4\rt[$ is a constant and $\notp = n^{-a}$, then $\displaystyle \omega(\Graph{n}{p}) \le \nfrac{4}{a}+1$.  %%
      If, in addition, $a<1$, then $\displaystyle \omega(\Graph{n}{p}) = \lfloor \nfrac{4}{a} \rfloor+1$.
    \end{enumerate}
  \end{subequations}
\end{theorem}

The proof of Theorem~\ref{thm:fool} is spread over the remainder of this section.  %
In the first subsection, we study the expected number of cliques of the relevant sizes and obtain Theorem~\ref{thm:fool}(\ref{thm:fool:ub}).
Subsection~\ref{ssec:fool:2nd_moment} estimates the variance of the number of cliques of constant size.  %
Finally, in Subsection~\ref{ssec:fool:bipartite-matching}, we study the connection to matchings in Erd\H{o}s-Renyi random bipartite graphs and prove items (\ref{thm:fool:o-sqrtn}) and~(\ref{thm:fool:alpha}).


\mypar%
By combining the upper bound in~\ref{thm:fool:ub} with the lower bound in~\ref{thm:fool:alpha} applied to the classical theorems about independent sets and cliques in Erd\H{o}s-Renyi random graphs (cf., e.g., \cite{Janson-Luczak-Rucinski:Book}), we immediately obtain the following bounds.
\begin{corollary}\mbox{}
  \begin{enumerate}[label=(\alph*)]
  \item\label{cor:fool:uptoconst-p}%
    For~$p$ in the range $n^{-o(1)} \le p \le 1-\Omega(1)$, we have, a.a.s.,
    \begin{equation*}
      2\bigl( \log{\nfrac1\delta} n - \log{\nfrac1\delta}\log{\nfrac1\delta}pn \bigr)
      \le
      \omega(\Graph{n}{p})
      \le
      2 \log{\nfrac1\delta}(pn^2)
    \end{equation*}
  \item\label{cor:fool:very-small-p}%
    For~$p$ in the range $\sqrt{(\ln n)/n} \ll p \le 1/\ln n$ we have, a.a.s.,
    \begin{equation*}
      \frac{2}{p}\bigl( \ln pn - \ln\ln pn \bigr)
      \le
      \omega(\Graph{n}{p})
      \le
      2 \log{\nfrac1\delta}(pn^2)
    \end{equation*}
  % \item\label{cor:fool:small-notp}%
  %   For every constant $a \in \lt]0,4\rt[$, if $1-p=\notp = n^{-a}$, then, a.a.s.,
  %   \begin{equation}
  %     \Bigl\lfloor \frac{2+a}{a} \Bigr\rfloor
  %     \le
  %     \omega(\Graph{n}{p})
  %     \le
  %     \frac{4+a}{a}
  %   \end{equation}
  \end{enumerate}
\end{corollary}

In the range given in~\ref{cor:fool:uptoconst-p}, the upper bound is within a factor of
\begin{equation*}
  (1-o(1))\frac{ \ln n + \ln(pn) }{ \ln n }  \le  2
\end{equation*}
of the lower bound.
%%
If~$p$ tends to~$0$ faster, however, the upper bound in~\ref{cor:fool:very-small-p} is off by a factor of $1/p$ from the lower bound, which leaves ``room for improvement''; cf.~Section~\ref{sec:openquestions}.

\subsection{Upper bounds: The number of $r$-cliques}\label{ssec:fool:markov}
Let the random variable $X = X_r = X_{r,n,p}$ count the number of $r$-cliques in $\Graph{n}{p}$.  For a set $F \subseteq [n]\times[n]$, denote by $A_F$ the event that~$F$ is a clique in $\Graph{n}{p}$.
%%
We have
\begin{equation}\label{eq:fool:numofool_RV}
  X_r = \sum_F \Ind[ A_F ],
\end{equation}
where the sum ranges over all~$F$ of the form $F = \{ (k_1,\ell_1),\dots,(k_r,\ell_r) \}$, with all the $k_j$'s distinct, and all the~$\ell_j$'s distinct.  There are $r!\,\binom{n}{r}^{\!\!2}$ of these sets~$F$, and hence
\begin{equation*}
  \Exp X_r = r!\,\binom{n}{r}^{\!\!2} \, p^r \, \delta^{^{\binom{r}{2}}}.
\end{equation*}

Elementary calculus shows that, for fixed $r\ge 2$, $p\mapsto r!\,\binom{n}{r}^{\!\!2} \, p^r \, \delta^{^{\binom{r}{2}}}$ is increasing on $[0,1/\sqrt r]$ and decreasing on $[1/\sqrt r,1]$ (see the proof of the (\ref{enum:fool:exp_numo:n})-part of Lemma~\ref{lem:fool:exp_numo}).  The following lemma describes for which values of~$r$ the expectation $\Exp X_r$ tends to 0 or infinity, resp., in the relevant range of~$p$.

\newcommand{\fThres}{\textstyle n^{-\nfrac12}\,\sqrt{\ln n}}
\begin{lemma}\label{lem:fool:exp_numo}
  \begin{enumerate}[label=(\alph*)]
    %%
  \item\label{enum:fool:exp_numo:n}%
    If $\displaystyle e/n \; \le \; p \; \le \; \fThres$, %
    then %
    $\displaystyle \Exp X_n \to \infty$.
    %%
  \item\label{enum:fool:exp_numo:Cn}%
    For constants $c > 1$, $\eps >0$ %
    if $\displaystyle p \, = \, c \, \fThres$,
    with
 %   \begin{equation}
     $r := (1+\eps)\,\frac{n}{c^2} $
 %   \end{equation}
    we have $\Exp X_r \to 0$.
    %%
  \item\label{enum:fool:exp_numo:o_n}%
    If $p \gg \fThres$ and $1-p = \notp \ge n^{-o(1)}$, letting
%    \begin{align*}
      $r_- := 2 \log{\nfrac1\delta}(pn^2) - 2\log{\nfrac1\delta}\log{\nfrac1\delta}(pn^2)$ and $r_+ := 2 \log{\nfrac1\delta}(pn^2)$,
%      \tfrac{r-1}{2}\ln(\nfrac1\delta) &=  \ln(pn^2) - \ln r,
%    \end{align*}
    we have $\Exp X_{r_-} \to \infty$, and $\Exp X_{r_+} \to 0$.
    %%
  \item\label{enum:fool:exp_numo:const-fool}%
    If $a \in \lt]0,4\rt[$ is a constant and $1-p = \notp = n^{-a}$, then
    $\Exp X_r \to 0$      if $\displaystyle r > \nfrac{4}{a}+1$, and
    $\Exp X_r \to \infty$ if $\displaystyle r < \nfrac{4}{a}+1$.
  \end{enumerate}
\end{lemma}
\begin{proof}\mbox{}\\
\textit{(\ref{enum:fool:exp_numo:n}).} %
First of all, we prove that for $r\ge 2$, the function $p\mapsto \Exp X_{r,p}$ is non-decreasing on $\lt]0,r^{-\nfrac12}\rt]$ and non-increasing on $\lt[r^{-\nfrac12},1\rt[$.

Clearly, only the function
\begin{equation*}
  f\colon p \mapsto p (1-p^2)^{(r-1)/2}
\end{equation*}
is of interest.  Taking the derivative, we obtain
\begin{equation*}
  f'(p) = (1-p^2)^{(r-1)/2} - (r-1) p^2 (1-p^2)^{(r-3)/2}.
\end{equation*}
For $0<p<1$, the equation $f'(p) = 0$ is equivalent to
\begin{equation*}
  0 = 1-p^2 - (r-1) p^2 = 1 - r p^2.
\end{equation*}
For $p < 1/\sqrt{r}$, we have $f'(p) > 0$, and for $p > 1/\sqrt{r}$, we have $f'(p) < 0$.

Now, for $p=e/n$, using Stirling's formula, we have
\begin{equation*}
  \Exp X_n
  =
  n! \lt(\frac{e}{n}\rt)^n \lt( 1-\frac{e^2}{n^2}\rt)^{\binom{n}{2}}
  = \Theta( \sqrt{n} ),
\end{equation*}
so $\Exp X_n$ tends to infinity with $n\to\infty$.

Finally, let $p = \fThres$.  We have
\begin{multline*}
  \frac{\ln \Exp X_n}{n}
  =
  \frac{ \ln\bigl(  n! \, p^n \, \delta^{\binom{n}{2}} \bigr) }{n}
  =
  -1+ o(1) + \ln n - \ln(\nfrac1p) - \tfrac{n-1}{2} \ln(\nfrac1\delta)
  %
  \\
  \ge
  -1+ o(1) + \ln n - \ln(\nfrac1p) - \tfrac{n-1}{2} p^2,
\end{multline*}
where we used $\ln(\nfrac1\delta) = \ln(1/(1-p^2)) \le p^2 + O(p^4)$ and $np^4 = o(1)$ in the last inequality.  Replacing $p$, we get
\begin{equation*}
  \frac{\ln \Exp X_n}{n}
  \ge
  % \ln n
  % - \tfrac12\ln n +
  \tfrac12\ln\ln n
  % - \tfrac12\ln n
  + O(1),
\end{equation*}
which proves the claim in~(\ref{enum:fool:exp_numo:n}) for this particular value of~$p$.  

\mypar%
  \textit{(\ref{enum:fool:exp_numo:Cn}).} %
  First of all, note that, for $4 \le r < n$, using the estimates %\todo[??]
  \begin{eqnarray*}
    \sqrt{r} \, \Bigl( \frac{r}{e} \Bigr)^r         \le&\displaystyle r! &\le r\Bigl( \frac{r}{e} \Bigr)^r, \text{ and}\\
    \tfrac{1}{3\sqrt r} \, e^{r-r^2/(n-r)} \Bigl( \frac{n}{r} \Bigr)^r \le&\displaystyle \binom{n}{r} &\le
    e^r \Bigl( \frac{n}{r} \Bigr)^r,
  \end{eqnarray*}
  we have
  \begin{multline}\label{eq:fool:bounds_for_Exp_fool}\tag{$*$}
    1 - \tfrac{r}{n-r} - O(\tfrac{\ln r}{r})
    \le
 %   \\
    \frac{ \ln\bigl(  r! \binom{n}{r}^2\, p^r \, \delta^{\binom{r}{2}} \bigr) }{r}
    -
    \biggl(
    \ln(n^2) - \ln(\nfrac1p) - \tfrac{r-1}{2}\ln(\nfrac1\delta) - \ln r
    \biggr)
 %   \\
    \le
    1 + \tfrac{\ln r}{r}.
  \end{multline}
  (We will use this for~(\ref{enum:fool:exp_numo:o_n}), too.)

  Now, with $c>1$, $p = c\,\fThres$ and $r = (1+\eps) n / c^2 = (1-\Omega(1))n$, we get
  \begin{align*}
    \frac{\ln \Exp X_r}{r}
    &=
    \ln(n^2) - \ln(\nfrac1p) - \tfrac{r-1}{2}\ln(\nfrac1\delta) - \ln r
    + O(1)
    \\
    &=
    \ln(n^2)
    - \tfrac12 \ln(n/\ln n)
    - \tfrac{r-1}{2}\ln(\nfrac1\delta)
    - \ln n
    +
    O(1)
    \\
    &=
    \tfrac12 \ln n
    - \tfrac{r-1}{2}\ln(\nfrac1\delta)
    + O( \ln\ln n )
    \\
    &=
    \tfrac12 \ln n
    - \tfrac{r-1}{2}\bigl( p^2 + O(p^4) \bigr)
    + O(\ln\ln n)
    \\
    &=
    - \tfrac{\eps}{2} \ln n
    + O(\ln\ln n),
  \end{align*}
  which proves $\Exp X_r \to 0$.

  \mypar%
  \textit{(\ref{enum:fool:exp_numo:o_n}).} %
  With $r := r_+ = 2 \ln(pn^2)/\ln(\nfrac1\delta)$, using the upper bound from~\eqref{eq:fool:bounds_for_Exp_fool}, we get
  \begin{align*}
    \frac{\ln \Exp X_r}{r}
    &\le
    \ln(pn^2)
    - \tfrac{r-1}{2}\ln(\nfrac1\delta)
    - \ln r
    + 1 + \tfrac{\ln r}{r}\\
    &=
    \tfrac12 \ln(\nfrac1\delta)
    - \ln r
    + 1 + \tfrac{\ln r}{r}\\
    &= -\Omega(1),
  \end{align*}
  where the last equation follows from $r \to \infty$ (due to $\notp \ge n^{-o(1)}$), which also implies $\Exp X_r \to 0$.

  On the other hand, with $r := r_- = 2 \log{\nfrac1\delta}(pn^2) - 2\log{\nfrac1\delta}\log{\nfrac1\delta}(pn^2)$, using the lower bound from~\eqref{eq:fool:bounds_for_Exp_fool}, we get
  \begin{align*}
    \frac{\ln \Exp X_r}{r}
    &\ge
    \ln(pn^2)
    - \tfrac{r-1}{2}\ln(\nfrac1\delta)
    - \ln r
    + 1 -O(\tfrac{\ln r}{r})\\
    &\ge
    \bigl( \log{\nfrac1\delta}\log{\nfrac1\delta}(pn^2) \bigr) \ln(\nfrac1\delta)
    - \ln r
    + 1 - O(\tfrac{\ln r}{r})\\
    &=
    \ln \log{\nfrac1\delta}(pn^2)
    - \ln r
    + 1 - O(\tfrac{\ln r}{r})\\
    &\ge
    -\ln 2 + 1 - O(\tfrac{\ln r}{r})\\
    &= \Omega(1).
  \end{align*}
  Again, the last equation and the conclusion $\Exp X_r \to \infty$ follow from $r\to\infty$.

  \mypar%
  \textit{(\ref{enum:fool:exp_numo:const-fool}).} %
  Finally, let $0 < a < 4$ be a constant and $1-p = \notp = n^{-a}$.  Noting that $\delta = (1+p)\notp = \Theta(\notp)$, if $r=O(1)$, we have
  \begin{align*}
    \Bigl( \Exp X_r \Bigr)^{\nfrac1r}
    = \Theta\bigl( n^2 \notp^{(r-1)/2} \bigr)
    = \Theta\bigl( n^{2-a(r-1)/2} \bigr),
  \end{align*}
  which implies $\Exp X_r \to 0$ if $\displaystyle r > \nfrac{4}{a}+1$, and $\Exp X_r \to \infty$ if $\displaystyle r < \nfrac{4}{a}+1$.
\end{proof}

From this lemma, we immediately get the upper bound on $\omega$ in Theorem~\ref{thm:fool}(\ref{thm:fool:ub}).

\begin{proof}[Proof of Theorem~\ref{thm:fool}(\ref{thm:fool:ub})]
  Follows from Lemma~\ref{lem:fool:exp_numo}(\ref{enum:fool:exp_numo:o_n}) together with Markov's inequality.
\end{proof}

Item~\ref{enum:fool:exp_numo:n} of the lemma suggests the question, for which $p$ the value of $\omega$ drops from $(1-o(1))n$ to $(1-\Omega(1))n$.  If the expectation is ``right'', this happens crossing from $p=\sqrt{(\ln n)/n}$ to $p=(1+\eps)\sqrt{(\ln n)/n}$.  This is supported by the fact that our lower bounds in this region---in the next subsection---appear to be quite simple, in that they only consider one fixed maximal matching in $\HbipMnp$, and delete edges from it until it becomes cross-free.

\subsection{Second moment calculation}\label{ssec:fool:2nd_moment}
\begin{lemma}\label{fool:variance}
  If $r=O(1)$ and $p\delta \gg \nfrac1n$, then $\displaystyle \Var(X_r) = o\bigl( \bigl(\Exp X_r\bigr)^{\!2\,} \bigr)$.
\end{lemma}
\begin{proof}
  With the notations as in equation~\eqref{eq:fool:numofool_RV}, let $F_0 := \{ (1,1),\dots,(r,r) \}$, and abbreviate $A_0 := A_{F_0}$.  We have
  \begin{equation*}
    \Exp(X^2) = \Exp X \cdot \sum_{F} \Prb( A_F \mid A_0 )
  \end{equation*}
  where the sum ranges over all~$F$ of the form $F = \{ (k_1,\ell_1),\dots,(k_r,\ell_r) \}$, with all the $k_j$'s distinct, and all the~$\ell_j$'s distinct, as in~\eqref{eq:fool:numofool_RV}.

  If $F\subset \{r+1,\dots,n\}\times\{1,\dots,n\}$, % or $F \subset \{1,\dots,n\}\times\{r+1,\dots,n\}$,
  then the events $A_F$ and~$A_0$ are clearly independent, so that, with the following sum ranging over these~$F$, we have
  \begin{equation*}
    \sum_{F} \Prb( A_F \mid A_0 ) = \frac{ (n-r)_r }{ (n)_r }  \Exp X.
  \end{equation*}
  Consequently, we have
  \begin{equation*}
    \Exp(X^2) = \frac{ (n-r)_r }{ (n)_r } \bigl( \Exp X \bigr)^2  + \Exp X \cdot \sum_{F} \Prb( A_F \mid A_0 ),
  \end{equation*}
  where the last sum ranges over all~$F$ with %both
  $F \cap \{1,\dots,r\}\times\{1,\dots,n\} \ne \emptyset$.  %and $F \cap \{1,\dots,n\}\times\{1,\dots,r\} \ne \emptyset$
  For each such~$F$,
  \begin{equation*}
    \Prb( A_F \mid A_0 ) = O\biggl( \frac{1}{p\delta n} \biggr)^{O(r^2)} \Prb( A_F ),
  \end{equation*}
  with absolute constants in the big-$O$s.

  Hence, if $r=O(1)$ and $p\delta \gg \nfrac1n$,
  \begin{equation*}
    \Exp(X^2)
    =
    \frac{ (n-r)_r }{ (n)_r } \bigl( \Exp X \bigr)^2
    +
    O\biggl( \frac{1}{p\delta n} \biggr)^{O(r^2)}\bigl( \Exp X \bigr)^2
    =
    (1+o(1)) \bigl( \Exp X \bigr)^2.
  \end{equation*}
  This proves the statement of the lemma.
\end{proof}

The lemma proves that the number of edges of $\Graph{n}{p}$ is a.a.s.\ as given in equation~\eqref{eq:rnd-graph-basics:numo-edges} on page~\pageref{eq:rnd-graph-basics:numo-edges}.

\begin{proof}[Proof of Theorem~\ref{thm:fool}(\ref{thm:fool:notp-small})]
  The upper bound, for general~$a$ is in Lemma~\ref{lem:fool:exp_numo}(\ref{enum:fool:exp_numo:const-fool}).  The lower bound when $a<1$ follows from Lemma~\ref{lem:fool:exp_numo}(\ref{enum:fool:exp_numo:const-fool}) and Lemma~\ref{fool:variance}.
\end{proof}

\subsection{Lower bounds: Cross-free sub-matchings}\label{ssec:fool:bipartite-matching}
Let $\xfmatch(\cdot)$ denote the size of the largest cross-free matching of a bipartite graph.

Let $H$ be a bipartite graph, and $m=\{e_1,\dots,e_r\} \subseteq E(H)$ a matching in~$H$.  Define the graph~$G'=G'(H,m)$ with vertex set $V(G') = \{1,\dots,r\}$ and $\{k,\ell\}\in E(G')$ if $e_k$, $e_\ell$ induce a $K_{2,2}$ in~$H$.    Then $\displaystyle \xfmatch(H) \ge \alpha(G')$ holds: for any independent set~$A$ of $G'$, the set $\{ e_j \mid j \in A\}$ is a cross-free matching in~$H$.

Our strategy for obtaining a large cross-free matching will be this: fix a large matching~$m$ in $\HbipMnp$, then bound the independence number of the corresponding random graph~$G'_{n,p}(m) := G'(\HbipMnp,m)$.  This random graph behaves similarly to an Erd\H{o}s-Renyi random graph with $\abs{m}$ vertices and edge-probability $p^2$.  The following technical lemma takes care of the dependency issues which arise.

Let $\Gnp{r}{q}$ denote the Erd\H{o}s-Renyi random graph with~$r$ vertices and edge probability~$q$.
\begin{lemma}\label{lem:fool:bipartite}
  For all positive integers~$n,r,a$, and $p\in[0,1]$, we have
  \begin{equation*}
    \Prb\Bigl( \xfmatch(\HbipMnp) < a \quad\&\quad  \nu(\HbipMnp) \ge r \Bigr) \quad\le\quad \Prb\bigl( \alpha(\Gnp{r}{p^2}) < a \bigr).
  \end{equation*}
\end{lemma}
\newcommand{\thematchings}{\mathcal M}
\begin{proof}
  Let $\thematchings$ be the set of matchings of size~$r$ of $K_{n,n}$, and for each $m\in\thematchings$ denote by $C_m$ the event that $\HbipMnp$ contains~$m$.  Fix a matching $m\in\thematchings$.  For every edge~$e\in E(K_{n,n})$, we have
  \begin{equation*}
    \Prb\bigl( e \in \HbipMnp \mid C_m \bigr) = p,
  \end{equation*}
  and these events are jointly independent.
  Hence, for each potential edge $e'$ of $G'_{n,p}(m)$,
  \begin{equation*}
    \Prb\bigl( e' \in G'_{n,p}(m) \mid C_m \bigr) = p^2,
  \end{equation*}
  again with joint independence of the events.

  Now, denote by~$A$ the event that there does not exist a cross-free matching of size at least~$a$ in $\HbipMnp$.  By the discussion above, $A$ and $C_m$ together imply $\alpha(G'_{n,p}(m)) < a$, so that
  \begin{equation*}
        \Prb\bigl( A                         \mid C_m \bigr)
    \le \Prb\bigl( \alpha(G'_{n,p}(m)) < a \mid C_m \bigr)
    =   \Prb\bigl( \alpha(\Gnp{r}{p^2}) < a \bigr).
  \end{equation*}
  It follows that
  \begin{multline*}
    \Prb\Bigl( \xfmatch(\HbipMnp) < a \quad\&\quad  \nu(\HbipMnp) \ge r \Bigr)
    = \Prb\bigl( A \cap \bigcup_m C_m \bigr)
    \le \sum_m \Prb\bigl( A \cap C_m \bigr)
    \\
    = \sum_m \Prb\bigl( A \mid C_m \bigr)\Prb(C_m)
    \le \Prb\bigl( \alpha(\Gnp{r}{p^2}) < a \bigr),
  \end{multline*}
  which concludes the proof of the lemma.
\end{proof}


\begin{remark}\label{rem:fool:use-lem:fool:bipartite}
  We will use Lemma~\ref{lem:fool:bipartite} in the following way: If $p$, $r_-$, $r_+$ are such that both
  \begin{equation}\label{eq:fool:scholie-lem-bipartite-cond}
    \begin{aligned}
      \Prb\bigl( \nu(\HbipMnp) < r_+\bigr)           &= o(1), \text{ and}\\
      \Prb\bigl( \alpha(\Gnp{r_+}{p^2}) < r_- \bigr) &= o(1),
    \end{aligned}
  \end{equation}
  then, a.a.s., $\Graphd(\Matrix{n}{p})$ has a clique of size~$r_-$.  Indeed,
  \begin{align*}
    &\Prb\bigl( \omega(\Graphd(\Matrix{n}{p})) < r_- \bigr)
    \\
    &\le \Prb\Bigl( \xfmatch(\HbipMnp) < r_- \quad\&\quad \nu(\HbipMnp) \ge r_+ \Bigr) + \Prb\bigl( \nu(\HbipMnp) < r_+ \bigr)
    \\
    &\le \Prb\bigl( \alpha(\Gnp{r_+}{p^2}) < r_- \bigr) + \Prb\bigl( \nu(\HbipMnp) < r_+ \bigr) &&\comment{Lemma~\ref{lem:fool:bipartite}}
    \\
    &= o(1)+o(1) && \comment{by~\eqref{eq:fool:scholie-lem-bipartite-cond}}.
  \end{align*}
\end{remark}

We are now ready to prove the first two items of Theorem~\ref{thm:fool}.  We start with the easiest part.

\begin{proof}[Proof of Theorem~\ref{thm:fool}(\ref{thm:fool:alpha})]
  This is a direct consequence of Remark~\ref{rem:fool:use-lem:fool:bipartite} with $r_- := a(p^2)$ and $r_+ := n$, since, if $pn - \ln n \to \infty$, then $\nu(\HbipMnp) = n$, a.a.s. (e.g., \cite[Thm~4.1]{Janson-Luczak-Rucinski:Book}).
\end{proof}

\begin{proof}[Proof of Theorem~\ref{thm:fool}(\ref{thm:fool:o-sqrtn})]
  Let $\eps > 0$ be a constant.  Proceeding as in Remark~\ref{rem:fool:use-lem:fool:bipartite}, with $r_- := r$ and $r_+ := (1+\eps)r$, if both a.a.s.\ $\nu(\HbipMnp) \ge r$ and a.a.s.\ $\alpha(\Gnp{r}{p^2}) \ge (1-\eps)r$, then, a.a.s.,
  \begin{equation*}
    (1-\eps) \nu(\HbipMnp) \le \omega(\Graphd(\Matrix{n}{p})) \le \nu(\HbipMnp).
  \end{equation*}
  Letting $\eps$ tend to~0 then gives the desired result.

  For $n^{-3/2} \le p=o(1/n)$, a.a.s., the number of edges of $\Gnp{n}{p^2}$ is $o(1)$, and hence $\alpha(\Gnp{n}{p^2}) = (1-o(1))n$, while easy arguments show that a.a.s.\ $\nu(\HbipMnp) = \Omega(n)$ with concentration in a window of size $O(\sqrt n)$.   Hence the conditions~\eqref{eq:fool:scholie-lem-bipartite-cond} are satisfied.

  For $p=\Omega(1/n)$, a classical result by Karp \& Sipser~\cite{KarpSipser81:match} states that there is a function $h\colon]0,\infty[\to[0,1]$ with $\lim_{c\to\infty}h(c)=1$ such that if $p=c/n$, then, a.a.s., $\nu(\HbipMnp) = (1-o(1))h(c)\,n$.  Since $p=o(1/\sqrt n)$, a.a.s., the number of edges of $\Gnp{n}{p^2}$ is $o(n)$, and hence $\alpha(\Gnp{n}{p^2}) = (1-o(1))n$.  It follows that $\omega(\Graphd(\Matrix{n}{p})) = (1-o(1))\nu(\HbipMnp)$.  In particular, if $p \gg 1/n$, then, a.a.s., $\nu(\HbipMnp) = (1-o(1))n$.
\end{proof}

% chi.tex
\section{(Fractional) Chromatic number}\label{sec:chi}
We start with the easy case $p \le \nfrac12$.

\begin{corollary}
  In the range $1/n \ll p \le \nfrac12$, we have $\chi(\Graph{n}{p}) = (1-o(1))n$.
\end{corollary}
\begin{proof}
  Remark~\ref{rem:basics:detprop}(\ref{enum:basics:detprop:rc_le_n}) gives the upper bound. Theorem~\ref{thm:fool}(\ref{thm:fool:o-sqrtn}) gives the lower bound for small~$p=o(1/\sqrt n)$.

  For, say, $\nfrac1e \ge p = \Omega(n^{-\nfrac14})$, Theorem~\ref{thm:alpha}(\ref{thm:alpha:small-p}) plus an easy concentration argument yield $\alpha(\Graph{n}{p}) = (1+o(1))pn$ a.a.s.,
  and for $\nfrac1e \le p \le \nfrac12$, the value of~$a$ in equation~\eqref{eq:alpha:large-p:def-a} of Theorem~\ref{thm:alpha}(\ref{thm:alpha:large-p}) is~1, so that $\alpha(\Graph{n}{p}) = (1+o(1))pn$ there, too.

  We conclude
  $\displaystyle%\begin{equation*}
    \chi(\Graph{n}{p})
    \ge
    N/\alpha(\Graph{n}{p})
    =
    ((1-o(1)) pn^2)/((1-o(1)) pn)
    =
    (1-o(1))n.
  $%\end{equation*}
\end{proof}

The case $p > \nfrac12$ is more interesting.

\subsection{The fractional chromatic number}
We briefly review the definition of fractional chromatic number and (equivalently) fractional rectangle covering number.

Let~$R$ be a random independent set of~$G$, drawn according to a distribution~$\pi$, and consider
$\displaystyle%\begin{equation}\label{eq:chi:def-min-cover-prb}
\gamma(\pi) := \min_{u\in V(G)} \Prb( u \in R )
$.   %\end{equation}
A convenient definition of the fractional chromatic number of~$G$ is then $\chi^*(G) := \min_\pi 1/\gamma(\pi)$, where the minimum is taken over all distributions~$\pi$ on the set of independent sets of~$G$.  It is OK to restrict the supports of the distributions~$\pi$ to a subset~$\mathcal I$ of all independent sets of~$G$, as long as~$\mathcal I$ contains all inclusion-wise maximal independent sets.  In particular, for a Lov\'asz-Saks rectangle graph, we can assume w.l.o.g., that~$R$ takes values in the 1-rectangles of the corresponding matrix.

Recall the following well-known inequalities:\footnote{See \cite[Vol.~B]{Schrijver:Book:03}, Theorem~64.13, for the last inequality.}
\begin{equation}\label{eq:coloring-parameters}
  \lt.
  \begin{array}[c]{r}
    \displaystyle \ir(G) := \frac{\abs{V(G)}}{\alpha(G)} \\[2ex]
    \displaystyle \omega(G)
  \end{array}
  \rt\}
  \le
  \sup_{U\subseteq V(G)} \frac{\abs{U}}{\alpha(G[U])}
  \le
  \chi^*(G)
  \le
  \chi(G)
  \lecmt{(*)}
  \bigl( 1+\ln\alpha(G) \bigr) \, \chi^*(G).
\end{equation}
The number $\ir(G)$ is called the \textit{independence ratio.}
%%
Theorem~\ref{thm:alpha}(\ref{thm:alpha:large-p}) allows us to bound it:
%
With $\nfrac{\lambda}{n} = \notp=1-p$ as always, we have a.a.s.
\begin{multline}\label{eq:chi:lb-ir}
  \frac{N}{\alpha(\Graph{n}{p})}
  \ge
  \frac{(1+o(1))pn^2}{(1+o(1))\, n/(e\ln(1/p))}
  =
  \\
  (1+o(1))\; ep\ln(1/p) \ n
  =
  (1+o(1))\; (1-\notp/2-\notp^2/6+O(\notp^3))\; e \lambda,
\end{multline}
where the last equation is the Taylor expansion
\begin{equation*}
  \ln(1/p)
  =
  \sum_{j=1}^\infty \frac{\notp^j}{j}
  =
  \frac{\lambda}{n} \lt( 1 + \notp/2 + \notp^2/3 + \dots \rt).
\end{equation*}
For $\notp=o(1)$, this is asymptotic to $e \lambda$.  It is worth noting that the first inequality in~\eqref{eq:chi:lb-ir} becomes an asymptotic equality if $\notp=o(1)$.


\mypar%
We now give upper bounds on $\chi^*(\Graph{n}{p})$.
%%
To prove an upper bound~$b$ on~$\chi^*$ for a fixed matrix~$M$, we have to give a distribution~$\pi$ on the 1-rectangles of~$M$ such that, if~$R$ is sampled according to~$\pi$, then, for all $(k,\ell)$ with $M_{k,\ell}=1$, we have
$\displaystyle%\begin{equation*}
  \Prb( (k,\ell) \in R ) \ge \nfrac{1}{b}.
$  %\end{equation*}

To prove an ``a.a.s.'' upper bound for a random matrix, we have to show that
\begin{equation}\label{eq:chi:fracchi-ub:prob_rnd-rect-covers-everything}
  \Prb\biggl( \exists (k,\ell)\colon\  \Prb\bigl( (k,\ell) \in R  \mid  \Matrix{n}{p} \ \&\  \Matrix{n}{p}_{k,\ell}=1 \bigr) < \nfrac1b  \biggr) = o(1).
\end{equation}

Our random 1-rectangle~$R$ within the random matrix $\Matrix{n}{p}$ is sampled as follows.  Let $K$ be a random subset of $[n]$, by taking each $k$ into $K$ independently, with probability~$q$.  Then let $R := K\times L$ be the 1-rectangle generated (p.~\pageref{page:generated_rectangle}) by the row-set~$K$, i.e., $L := \{ \ell \mid \forall k \in K\colon \Matrix{n}{p}_{k,\ell}=1\}$.

Let the random variable $Z_\ell$ count the number of 0s in column~$\ell$, and set $Z := \max_\ell Z_{\ell}$.
%% 
For $k,\ell\in \{1,\dots,n\}$, conditioned on $\Matrix{n}{p}$ and $\Matrix{n}{p}_{k,\ell} =1$, the probability that $(k,\ell) \in R$ equals
\begin{equation*}
  q(1-q)^{Z_\ell} \ge q(1-q)^Z,
\end{equation*}
so that for every positive integer~$z$, using $\nfrac1b = q(1-q)^z$ in~\eqref{eq:chi:fracchi-ub:prob_rnd-rect-covers-everything},
\begin{equation}\label{eq:chi:fracchi-ub:prb_in-rnd-rect}
  \Prb\biggl( \exists (k,\ell)\colon\  \Prb\bigl( (k,\ell) \in R  \mid  \Matrix{n}{p} \ \&\  \Matrix{n}{p}_{k,\ell}=1 \bigr) < q(1-q)^z  \biggr)
  =
  \Prb( Z > z ).
\end{equation}
To obtain upper bounds on $\chi^*$, we give a.a.s.\ upper bounds on~$Z$, and choose~$q$ accordingly.  The following theorem summarizes the results.

\begin{theorem}\label{thm:chi:fracchi-ub}
  \begin{subequations}
    Let $p = 1 - \notp = 1 - \nfrac{\lambda}{n}$ as usual, and $\notp < \nfrac12$.
    \begin{enumerate}[label=(\alph*)]
    \item\label{enum:chi:fracchi-ub:omega(logn)}%
      If $\ln n \ll \lambda < n/2$, then, a.a.s.,
      $\displaystyle%\begin{equation}
        \chi^*(\Graph{n}{p}) \le {\textstyle (1+o(1))} \; e\lambda
      $%\end{equation}
    \item\label{enum:chi:fracchi-ub:Theta(logn)}%
      If $\lambda = \Theta(\ln n)$, then, a.a.s.,
      $\displaystyle%\begin{equation}
        \chi^*(\Graph{n}{p}) = O(\ln n).
      $%\end{equation}
    \item\label{enum:chi:fracchi-ub:o(logn)}%
      If $1 \ll \lambda = o(\ln n)$, then, a.a.s.,
      $\displaystyle%\begin{equation}
        \chi^*(\Graph{n}{p})
        \le
        {\textstyle (1+o(1))} \;
        e\max\Bigl(   2\lambda,  \frac{\ln n}{\ln((\ln n)/\lambda)}   \Bigr)
      $%\end{equation}
    \end{enumerate}
  \end{subequations}
\end{theorem}
\begin{proof}
  %
  \textit{The bound in \ref{enum:chi:fracchi-ub:omega(logn)}.}
  For every constant $t>0$, let
  \begin{equation*}
    \psi(t) := 1/\bigl( (1+t) \ln(1+t) - t  \bigr).
  \end{equation*}
%%
  With
  \begin{equation*}
    h(t) = h(t,n) := \frac{\lambda}{\psi(t) \ln n},
  \end{equation*}
  using a standard Chernoff estimate (Theorem~2.1, Equation (2.5) in~\cite{Janson-Luczak-Rucinski:Book}) we find that
  \begin{equation*}
    \Prb\bigl( Z_1 \ge (1+t)\lambda \bigr) \le e^{-\lambda/\psi(t)} \le e^{-h(t)}/n,
  \end{equation*}
  so that, by the union bound,
  \begin{equation}\label{eq:chi:fracchi-ub:bound_on_Z_w_h}
    \Prb\bigl( Z \ge (1+t)\lambda \bigr) \le e^{-h(t)}.
  \end{equation}

  For every fixed $t>0$, $h(t)$ tends to infinity with~$n$, so that the RHS in~\eqref{eq:chi:fracchi-ub:bound_on_Z_w_h} is $o(1)$.  Using that in~\eqref{eq:chi:fracchi-ub:prb_in-rnd-rect}, we obtain
  \begin{equation*}
    \Prb\biggl( \exists (k,\ell)\colon\  \Prb\bigl( (k,\ell) \in R  \mid  \Matrix{n}{p} \ \&\  \Matrix{n}{p}_{k,\ell}=1 \bigr) < q(1-q)^{(1+t)\lambda}  \biggr)
    =
    \Prb( Z > (1+t)\lambda )
    = o(1),
  \end{equation*}
  and, taking $q := \frac{1}{(1+t)\lambda}$, we obtain, a.a.s.,
  \begin{equation*}
    \chi^* \le \frac{1}{q(1-q)^{(1+t)\lambda}} \le \frac{1+t}{1-\frac{1}{(1+t)\lambda}}e\lambda,
  \end{equation*}
  where we used $(1-\eps)^k \ge (1-k\eps^2)e^{-k\eps}$ for $\eps < 1$.
%%
  Since this is true for every~$t>0$, we conclude that, a.a.s., $\chi^* \le (1+o(1))e\lambda$.

  \mypar%
  \textit{The bounds in \ref{enum:chi:fracchi-ub:Theta(logn)}, \ref{enum:chi:fracchi-ub:o(logn)}.}  %
  Here we use a slightly different Chernoff bound, Lemma~\ref{lem:binomial-ub} below.

  For~\ref{enum:chi:fracchi-ub:Theta(logn)}, suppose that $\lambda \le C\ln n$ for a constant $C>1$.  Using Lemma~\ref{lem:binomial-ub} below with $\alpha = e^2 C \ln n$, we obtain
  \begin{equation*}
    \Prb\bigl( Z_1 \ge e^2 C \ln n \bigr)
    =
    O\bigl(\nfrac{1}{\sqrt{\ln n}}\bigr)
    e^{-\lambda} \Bigl( \frac{eC\ln n}{e^2 C \ln n} \Bigr)^{\alpha}
    =
    O\bigl(\nfrac{1}{\sqrt{\ln n}}\bigr)
    e^{-\ln n},
  \end{equation*}
  and thus
  \begin{equation*}
    \Prb\bigl( Z \ge e^2 C\ln n \bigr) = o(1).
  \end{equation*}
  We conclude similarly as above: with $q := \frac{1}{e^2C\ln n}$ we obtain, a.a.s., $\chi^* \le e^3 C\ln n$.

  Finally, for~(\ref{enum:chi:fracchi-ub:o(logn)}), if $\lambda = o(\ln n)$, let $\eps > 0$ be a constant, and use Lemma~\ref{lem:binomial-ub} again, with
  \begin{equation*}
    \alpha := \max\biggl( 2\lambda, \ \frac{ (1+\eps)\ln  n}{  \ln\bigl( \frac{\ln n}{e\lambda} \bigr) } \biggr).
  \end{equation*}
  We find that
  \begin{equation*}
    \Prb\bigl( Z_1 \ge \alpha \bigr)
    =
    o\bigl( e^{- \alpha \ln(\alpha/e\lambda) } \bigr),
  \end{equation*}
  and the usual calculation (Appendix~\ref{ssec:apx:chi:swift-caculation}) shows that $\alpha\ln(\alpha/e\lambda) \ge \ln n$, which implies
  \begin{equation*}
    \Prb\bigl( Z \ge \alpha \bigr) = o(1).
  \end{equation*}
  We conclude similarly as above: with $q := \frac{1}{\alpha}$, we obtain, a.a.s.,
  \begin{equation*}
    \chi^* \le e\alpha = e\max\lt( 2\lambda, (1+\eps) \frac{\ln n}{  \ln\bigl( \frac{\ln n}{e\lambda} \bigr) } \rt).
  \end{equation*}
  One obtains the statement in the theorem by letting $\eps$ tend to~0; the factor~$e$ inside the logarithm in the denominator of~$\alpha$ is irrelevant as $n\to \infty$.
\end{proof}

We have no good reference for the following simple Chernoff estimate (it is almost exactly Theorem~5.4 in~\cite{Mitzenmacher-Upfal:Book}, except that we allow $\lambda\to\infty$ slowly).  For the sake of completeness, we include it here (and its proof in Appendix~\ref{ssec:apx:chi:binomial-ub}).
\begin{lemma}\label{lem:binomial-ub}
  Let $\notp = \lambda/n$ with $1 < \lambda = o(n)$, and $2\lambda \le \alpha \le n/2$.  The probability that a $\Bin(n,\notp)$ random variable is at least $\alpha$ is at most
  \begin{equation}
    O\bigl(\nfrac{1}{\sqrt \alpha}\bigr) \cdot e^{-\lambda} \Bigl( \frac{e\lambda}{\alpha} \Bigr)^\alpha.
  \end{equation}
\end{lemma}

%%
To summarize, comparing with~\eqref{eq:chi:lb-ir}, we can determine the fractional chromatic number accurately in the region $\ln n \ll \lambda \ll n$.  For $\lambda = \Theta(\ln n)$ and for $\lambda=\Theta(n)$, we can determine $\chi^*$ up to a constant,  but for $\lambda = o(\ln n)$, there is a large gap between our upper and lower bounds.

Inequality~($*$) in~\eqref{eq:coloring-parameters} gives us corresponding upper bounds on~$\chi$:
\begin{corollary}\label{cor:chi:chi-ub-from-fracchi}\mbox{}
    \begin{enumerate}[label=(\alph*)]
    \item If $\ln n \ll \lambda = O(n/\ln n)$,  then, a.a.s., $\displaystyle \chi(\Graph{n}{p}) = O(\lambda \ln n)$.
    \item If $\lambda = \Theta(\ln n)$,  then, a.a.s., $\displaystyle \chi(\Graph{n}{p}) = O(\ln^2 n)$.
    \item If $1 \ll \lambda = o(\ln n)$, then, a.a.s., $\displaystyle \chi(\Graph{n}{p}) = O\Bigl(
      \max\Bigl(   \lambda\ln n,  \frac{\ln^2 n}{\ln((\ln n)/\lambda)}   \Bigr) \Bigr)
      $%\end{equation}
    \end{enumerate}
    \qed
\end{corollary}


\subsection{Binary-Logarithm of the number of distinct rows, and the ratio $\chi/\chi^*$}
The binary logarithm of the number of distinct rows of the matrix (Remark~\ref{rem:log2-bound}) is a lower bound on the chromatic number of the rectangle graph.  This bound is not generally available for the chromatic number of general graphs.  The following proposition shows these bounds.

\begin{proposition}\label{prop:chi:log-2-lb}\mbox{}
  \begin{enumerate}[label=(\alph*)]
  \item\label{enum:chi:log-2-lb:large-p}%
    If $\nfrac12 \ge \notp = \Omega(\nfrac1n)$, then, a.a.s., the 2-Log lower bound on $\chi(\Graph{n}{p})$ is $(1-o(1))\log_2 n$.
  \item\label{enum:chi:log-2-lb:small-p}%
    If $\notp = n^{-\gamma}$ for $1 < \gamma \le \nfrac32$, then, a.a.s., the 2-Log lower bound on $\chi(\Graph{n}{p})$ is $(1-o(1))(2-\gamma)\log_2 n$.
  \end{enumerate}
\end{proposition}
\begin{proof}
  Directly from the following Lemma~\ref{lem:chi:numo_distinct_rows} about the number of distinct rows, with $\lambda = n^{1-\gamma}$.
\end{proof}
\begin{lemma}\label{lem:chi:numo_distinct_rows}\mbox{}
  \begin{enumerate}[label=(\alph*)]
  \item\label{enum:apx-chi:numo_distinct_rows:large_p}%
    If $\nfrac12 \ge \notp = \Omega(\nfrac1n)$, then, a.a.s., $\Matrix{n}{p}$ has $\Theta(n)$ distinct non-zero rows.
  \item\label{enum:apx-chi:numo_distinct_rows:small_p}%
    With $\notp = \lambda/n$, if $n^{-\nfrac12} \le \lambda \le \nfrac12$, then, a.a.s., $\Matrix{n}{p}$ has $\Omega(\lambda n)$ distinct non-zero rows.
  \end{enumerate}
  (The constants in the big-Omegas are absolute.)
\end{lemma}
For the sake of completeness, we sketch the proof in Appendix~\ref{ssec:apx:chi:distinct-rows}.

\mypar
Erd\H{o}s--R\'enyi random graphs have the property that the chromatic number is within a small constant factor from the lower bound one obtains from the independence ratio.
%%
For random rectangle graphs, this is not the case.  Indeed, Theorem~\ref{thm:chi:fracchi-ub}(\ref{enum:chi:fracchi-ub:o(logn)}), together with Proposition~\ref{prop:chi:log-2-lb}, shows that, a.a.s.,
\begin{equation*}
  \frac{  \chi(\Graph{n}{p})  }{ \chi^*(\Graph{n}{p})  }
  \ge
  (1+o(1))
  \frac{ \log_2 n }{ \  \frac{ \ln n }{ \ln\bigl(\frac{\ln n}{\lambda}\bigr) } \  }
  =
  \Omega\!\lt( \ln\Bigl( \frac{\ln n}{\lambda} \Bigr) \rt),
\end{equation*}
which is $\Omega(\ln\ln n)$ if $\lambda = \ln^{o(1)}n$.

This gap is more pronounced in the (not quite as interesting) situation when $\lambda =o(1)$.  Consider, e.g., $\lambda = n^{-\eps}$, for some $\eps = \eps(n) = o(1/\ln\ln n)$, say.  Similarly to the proofs of Theorem~\ref{thm:chi:fracchi-ub}, we obtain that $\chi^*(\Graph{n}{p}) \le e\max(10,2/\eps)$.  (The $\max$-term comes from the somewhat arbitrary upper bound $Z \le \max(10,2/\eps)$.)   For the 2-Log lower bound on~$\chi$, we have $(1-\eps)\log_2 n$, by Proposition~\ref{prop:chi:log-2-lb}, and thus
\begin{equation*}
  \frac{  \chi(\Graph{n}{p})  }{ \chi^*(\Graph{n}{p})  }
  =
  \Omega( \eps\ln n ).
\end{equation*}

% conclusion.tex

\section{Conclusion and open questions}\label{sec:openquestions}
The most important question for future research is to decide whether the gap between $\chi^*$ and $\chi$ goes away for $\lambda = \Omega(\ln n)$, or is inherent in the random graph model.  Recall that the largest possible ratio $\chi/\chi^*$ is $O(\ln n)$.  We conjecture the following.

\begin{conjecture}[Separation between $\chi^*$ and $\chi$, for $p=1-o(1)$]\label{conj:chi:separate-large-p}
  There are constants $c,d$ such that in the range $\ln^{c} n \le \lambda \le n^d$, a.a.s.,
  \begin{equation*}
    \frac{  \chi( \Graph{n}{p} )  }{  \chi^*( \Graph{n}{p} )  } = \Omega\Bigl( \frac{\ln n}{\ln\ln n} \Bigr).
  \end{equation*}
\end{conjecture}

\begin{conjecture}[Separation between $\chi^*$ and $\chi$, for constant~$p$]\label{conj:chi:separate-const-p}
  There is a strictly increasing function $c=c(p)$ such that, for every constant $p\ge\nfrac12$, a.a.s.,
  \begin{equation*}
    \frac{  \chi( \Graph{n}{p} )  }{  \chi^*( \Graph{n}{p} )  } = (1+o(1)) \, c(p).
  \end{equation*}
\end{conjecture}
Our results show that $c(\nfrac12) = 1$ and that, if $c(\cdot)$ exists, then $\lim_{p\to 1} c(p) = \infty$.

In order to tackle these two conjectures, it appears necessary to improve the techniques for proving lower bounds for $\chi(\Graph{n}{p})$.
%%
Our upper bounds rely on bounding the fractional chromatic number $\chi^*$, and then using the fact that the chromatic number is at most $O(\ln n)$ from $\chi^*$.  It is possible that these can be improved.  But our upper bounds for the fractional chromatic number don't appear to be optimal, either, when $\lambda=o(\ln n)$.  Our lower bound is the fractional chromatic number, which we can determine accurately in the region $\ln n \ll \lambda \ll n$.  Obviously, other bounds are needed to prove $\chi > \chi^*$.

Last but not least, it would be interesting to know whether $\chi$ is concentrated on a small range of values, for some range of~$p$, as is the case for, e.g., both Erd\H{o}s--R\'enyi and regular random graphs.


\mypar%
As for the clique number, in the region $p=1-\Omega(1)$, it seems to be difficult to understand satisfactorily.  The first moment upper bound on the number of fooling sets, Lemma~\ref{lem:fool:exp_numo}, suggests that $\omega(\Graph{n}{p}) = (1-o(1))n$ for $n^{-\nfrac12} \le p \le n^{-\nfrac12}\sqrt{\ln n\,}$, but it seems plausible that $\omega = (1-\Omega(1))n$ already for $p > p_0 = o(n^{-\nfrac12}\sqrt{\ln n\,})$.
\begin{question}\label{question:fool:n}
  Is it true that, a.a.s., $\omega(\Graph{n}{p}) = (1-o(1))n$ for $n^{-\nfrac12} \le p \le n^{-\nfrac12}\sqrt{\ln n\,}$?
\end{question}
More poignantly,
\begin{equation*}
   \omega(\Graph{n}{1/\sqrt n}) = \mathrm{?}
\end{equation*}

Answering this would likely require giving better lower bounds on $\omega(\Graph{n}{p})$ than the ones obtained by fixing the diagonal (and then using $\alpha(G_{n,p})$).

Another question about the fooling set size of random matrices presents itself naturally:
\begin{question}\label{question:fool:unimodal}
  Is $\omega(p)$ unimodal as a function of~$p$? I.e., is there a $q=q(n)$ such that if, for all~$n$, $p_1(n)<p_2(n)<q(n)$, a.a.s., $\omega(\Graph{n}{p_1}) \le \omega(\Graph{n}{p_2})$,
  and
  if, for all~$n$, $q(n)<p_1(n)<p_2(n)$, a.a.s., $\omega(\Graph{n}{p_1}) \ge \omega(\Graph{n}{p_2})$?
  % with the following property.  For every $\eps=\eps(n)=o(p(1-p))$, if $p+\eps < p_1$ for all~$n$,
  % then a.a.s., $\omega(\Graph{n}{p}) \le \omega(\Graph{n}{p+\eps})$; if $p > p_1$ for all~$n$, then, a.a.s., $\omega(\Graph{n}{p}) \ge \omega(\Graph{n}{p+\eps})$.
\end{question}
It seems plausible that such a $q$ would coincide with the $p_0$ above.


\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR }
% \MRhref is called by the amsart/book/proc definition of \MR.
\providecommand{\MRhref}[2]{%
  \href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2}
}
\providecommand{\href}[2]{#2}
\begin{thebibliography}{10}

\bibitem{BezrukovFroncekRosenbergKovar08}
Sergei Bezrukov, Dalibor Fron{\v{c}}ek, Steven~J. Rosenberg, and Petr
  Kov{\'a}{\v{r}}, \emph{On biclique coverings}, Discrete Math. \textbf{308}
  (2008), no.~2-3, 319--323. \MR{2378030 (2008j:05272)}

\bibitem{BollobasBkRndGraphs}
B{\'e}la Bollob{\'a}s, \emph{Random graphs}, second ed., Cambridge Studies in
  Advanced Mathematics, vol.~73, Cambridge University Press, Cambridge, 2001.
  \MR{1864966 (2002j:05132)}

\bibitem{Braun-Fiorini-Pokutta:rndStable:14}
G{\'{a}}bor Braun, Samuel Fiorini, and Sebastian Pokutta, \emph{Average case
  polyhedral complexity of the maximum stable set problem}, Approximation,
  Randomization, and Combinatorial Optimization. Algorithms and Techniques,
  {APPROX/RANDOM} 2014, September 4-6, 2014, Barcelona, Spain, 2014,
  pp.~515--530.

\bibitem{CohenRothblum93}
Joel~E. Cohen and Uriel~G. Rothblum, \emph{Nonnegative ranks, decompositions,
  and factorizations of nonnegative matrices}, Linear Algebra Appl.
  \textbf{190} (1993), 149--168. \MR{1230356 (94i:15015)}

\bibitem{CaenGregoryPullman81}
D.~de~Caen, D.~A. Gregory, and N.~J. Pullman, \emph{The {B}oolean rank of
  zero-one matrices}, Proceedings of the {T}hird {C}aribbean {C}onference on
  {C}ombinatorics and {C}omputing ({B}ridgetown, 1981) (Cave Hill Campus,
  Barbados), Univ. West Indies, 1981, pp.~169--173. \MR{657202 (83f:05009)}

\bibitem{CaenGregoryPullman88}
\bysame, \emph{The {B}oolean rank of zero-one matrices {II}}, Proceedings of the
  {F}ifth {C}aribbean {C}onference on {C}ombinatorics and {C}omputing
  ({B}ridgetown, 1988) (Cave Hill Campus, Barbados), Univ. West Indies, 1988,
  pp.~120--126.

\bibitem{Dietzfelbinger-Hromkovic-Schnitger:96}
Martin Dietzfelbinger, Juraj Hromkovi{\v{c}}, and Georg Schnitger, \emph{A
  comparison of two lower-bound methods for communication complexity}, Theoret.
  Comput. Sci. \textbf{168} (1996), no.~1, 39--51, 19th International Symposium
  on Mathematical Foundations of Computer Science (Ko{\v{s}}ice, 1994).
  \MR{1424992 (98a:68068)}

\bibitem{Fiorini-Kaibel-Pashkovich-Theis:CombLB:13}
Samuel Fiorini, Volker Kaibel, Kanstantin Pashkovich, and Dirk~Oliver Theis,
  \emph{Combinatorial bounds on nonnegative rank and extended formulations},
  Discrete Math. \textbf{313} (2013), no.~1, 67--83.

\bibitem{Fiorini-Massar-Pokutta-Tiwary-Dewolf:lin-vs-semidef:12}
Samuel Fiorini, Serge Massar, Sebastian Pokutta, Hans~Raj Tiwary, and {Ronald
  de} Wolf, \emph{Linear vs.\ semidefinite extended formulations: Exponential
  separation and strong lower bounds}, STOC, 2012.

\bibitem{Fiorini-Massar-Pokutta-Tiwary-Dewolf:ACM:15}
Samuel Fiorini, Serge Massar, Sebastian Pokutta, Hans~Raj Tiwary, and Ronald~De
  Wolf, \emph{Exponential lower bounds for polytopes in combinatorial
  optimization}, Journal of the ACM (JACM) \textbf{62} (2015), no.~2, 17.

\bibitem{FriesenTheis13}
Mirjam Friesen and {Dirk Oliver} Theis, \emph{Fooling-sets and rank in nonzero
  characteristic}, The Seventh European Conference on Combinatorics, Graph
  Theory and Applications (Jaroslav Ne{\v{s}}et{\v{r}}il and Marco Pellegrini,
  eds.), CRM series, vol.~16, CRM, 2013, pp.~383--390.

\bibitem{Frieze-Karonski:rndGBook:2015}
Alan Frieze and Micha{\l} Karo{\'n}ski, \emph{Introduction to random graphs},
  Cambridge University Press, 2015.

\bibitem{Froncek-Jerebic-Klavzar-Kovar:CPC:07}
Dalibor Froncek, Janja Jerebic, Sandi Klavzar, and Petr Kov{\'{a}}r,
  \emph{Strong isometric dimension, biclique coverings, and {S}perner's theorem},
  Combinatorics, Probability {\&} Computing \textbf{16} (2007), no.~2,
  271--275.

\bibitem{Goemans:permutahedron:15}
Michel~X. Goemans, \emph{Smallest compact formulation for the permutahedron},
  Mathematical Programming \textbf{153} (2015), no.~1, 5--11.

\bibitem{GregoryPullman83}
D.~A. Gregory and N.~J. Pullman, \emph{Semiring rank: {B}oolean rank and
  nonnegative rank factorizations}, J. Combin. Inform. System Sci. \textbf{8}
  (1983), no.~3, 223--233. \MR{783759 (86h:15008)}

\bibitem{Hajiabolhassan-Moazami:code:12}
Hossein Hajiabolhassan and Farokhlagha Moazami, \emph{Secure frameproof code
  through biclique cover}, Discrete Mathematics \& Theoretical Computer Science
  \textbf{14} (2012), no.~2, 261--270.

\bibitem{Hajiabolhassan-Moazami:cover-free:12}
\bysame, \emph{Some new bounds for cover-free families through biclique
  covers}, Discrete Mathematics \textbf{312} (2012), no.~24, 3626--3635.

\bibitem{Izhakian-Janson-Rhodes:PAMS:15}
Zur Izhakian, Svante Janson, and John Rhodes, \emph{Superboolean rank and the
  size of the largest triangular submatrix of a random matrix}, Proceedings of
  the American Mathematical Society \textbf{143} (2015), no.~1, 407--418.

\bibitem{Janson-Luczak-Rucinski:Book}
Svante Janson, Tomasz {\L}uczak, and Andrzej Rucinski, \emph{Random graphs},
  Wiley-Interscience Series in Discrete Mathematics and Optimization,
  Wiley-Interscience, New York, 2000. \MR{1782847 (2001k:05180)}

\bibitem{KarchmerKushilevitzNisan95}
Mauricio Karchmer, Eyal Kushilevitz, and Noam Nisan, \emph{Fractional covers
  and communication complexity}, SIAM J. Discrete Math. \textbf{8} (1995),
  no.~1, 76--92. \MR{1315960 (96e:68062)}

\bibitem{KarpSipser81:match}
Richard~M. Karp and Michael Sipser, \emph{Maximum matchings in sparse random
  graphs}, FOCS, 1981, pp.~364--375.

\bibitem{Khoshkhah-Theis:treeFool:2017}
Kaveh Khoshkhah and Dirk~Oliver Theis, \emph{Fooling sets and the spanning tree
  polytope}, Preprint arXiv:1701.00350 (2017).

\bibitem{Khoshkhah-Theis:tree-rc:2017}
\bysame, \emph{On the combinatorial lower bound for the extension complexity of
  the spanning tree polytope}, Preprint arXiv:1702.01424 (2017).

\bibitem{KimBk82}
Ki~Hang Kim, \emph{Boolean matrix theory and applications}, Monographs and
  Textbooks in Pure and Applied Mathematics, vol.~70, Marcel Dekker Inc., New
  York, 1982, With a foreword by Gian-Carlo Rota. \MR{655414 (84a:15001)}

\bibitem{Kushilevitz-Nisan:Book:97}
Eyal Kushilevitz and Noam Nisan, \emph{Communication complexity}, Cambridge
  University Press, Cambridge, 1997. \MR{1426129 (98c:68074)}

\bibitem{LeeTheis12}
Troy Lee and {Dirk Oliver} Theis, \emph{Support based bounds for positive
  semidefinite rank}, Tech. Report arXiv:1203.3961, arXiv, 2012.

\bibitem{Lovasz-Saks:CC-lattice:93}
L.~Lov{\'a}sz and M.~Saks, \emph{Communication complexity and combinatorial
  lattice theory}, Journal of Computer and System Sciences \textbf{47} (1993),
  322--349.

\bibitem{Lovasz90}
L{\'a}szl{\'o} Lov{\'a}sz, \emph{Communication complexity: a survey}, Paths, flows, and
  VLSI-Layout (B.~Korte, L.~Lov{\'a}sz, H.J. Pr{\"o}mel, and A.~Schrijver,
  eds.), Springer, 1990, pp.~235--265.

\bibitem{mcdiarmid:method:1989}
Colin McDiarmid, \emph{On the method of bounded differences}, Surveys in
  combinatorics \textbf{141} (1989), no.~1, 148--188.

\bibitem{Mitzenmacher-Upfal:Book}
M.~Mitzenmacher and E.~Upfal, \emph{Probability and computing --- randomized
  algorithms and probabilistic analysis}, Cambridge, 2006.

\bibitem{Park-Szpankowski:biclusters:05}
Gahyun Park and Wojciech Szpankowski, \emph{Analysis of biclusters with
  applications to gene expression data}, International Conference on Analysis
  of Algorithms DMTCS proc. AD, vol. 267, 2005, p.~274.

\bibitem{Pourmoradnasseri-Theis:rnd-ndcc:17}
Mozhgan Pourmoradnasseri and Dirk~Oliver Theis, \emph{Nondeterministic
  communication complexity of random boolean functions (extended abstract)},
  Proceedings of the 13th annual conference on Theory and Applications of
  Models of Computation, 2016.

\bibitem{Schrijver:Book:03}
Alexander Schrijver, \emph{Combinatorial optimization. {P}olyhedra and
  efficiency.}, Algorithms and Combinatorics, vol.~24, Springer-Verlag, Berlin,
  2003.

\bibitem{Shitov13fool}
Yaroslav Shitov, \emph{On the complexity of {B}oolean matrix ranks}, Linear
  Algebra and Its Applications \textbf{439} (2013), 2500--2502.

\bibitem{Weltge:phd:2015}
Stefan Weltge, \emph{Sizes of linear descriptions in combinatorial
  optimization}, Ph.D. thesis, University of Magdeburg, 2016.

\bibitem{Yannakakis:91}
Mihalis Yannakakis, \emph{Expressing combinatorial optimization problems by
  linear programs}, J. Comput. System Sci. \textbf{43} (1991), no.~3, 441--466.
  \MR{1135472 (93a:90054)}

\end{thebibliography}
\appendix
% appendices.tex
\section{Omitted proofs}\label{apx:omitted_proofs}
%
\subsection{Section~\ref{sec:chi} (chromatic number): The ``usual calculation''}\label{ssec:apx:chi:swift-caculation}
With
\begin{equation*}
  \alpha := \max\biggl( 2\lambda, \ \frac{ (1+\eps)\ln  n}{  \ln\bigl( \frac{\ln n}{e\lambda} \bigr) } \biggr),
\end{equation*}
we have to show that
\begin{equation*}
  \alpha\ln(\alpha/e\lambda) \ge \ln n.
\end{equation*}
We write it down informally. In the following list of inequalities, each one is implied by the next one:
\begin{align*}
  \alpha\ln(\alpha/e\lambda) &\ge \ln n &&\comment{replace $\alpha$ by the 2nd term in the max}\\
  (1+\eps)\frac{\ln\lt(\frac{\alpha}{e\lambda}\rt)}{ \ln\lt( \frac{\ln n}{e\lambda} \rt) }&\ge 1\\
  \alpha &\ge  \ln^{1/(1+\eps)} n\\%  &&\comment{replace $\alpha$ by the 2nd term in the max}\\
  \frac{ (1+\eps)\ln  n}{  \ln\bigl( \frac{\ln n}{e\lambda} \bigr) } &\ge  \ln^{1/(1+\eps)} n &&\comment{is true.}
\end{align*}

\subsection{Section~\ref{sec:chi} (chromatic number): Chernoff}\label{ssec:apx:chi:binomial-ub}
\begin{proof}[Proof of Lemma~\ref{lem:binomial-ub}]
  Using Thm~1.1 in~\cite{BollobasBkRndGraphs} (here we need the assumption $\alpha \ge 2\lambda$), and the usual estimates for binomial coefficients, we find that said probability (for~$n$ sufficiently large) is at most an absolute constant times
  \begin{multline*}
    \Prb\Bigl( \Bin(n,\notp) = \alpha \Bigr)
    \le
    \frac{1.1}{\sqrt{2\pi \alpha (n-\alpha)/n}} \Bigl( \frac{\lambda}{\alpha} \Bigr)^\alpha \Bigl( \frac{n-\lambda}{n-\alpha} \Bigr)^{n-\alpha}
    \\
    \le
    \frac{1}{\sqrt \alpha} \Bigl( \frac{\lambda}{\alpha} \Bigr)^\alpha \Bigl( 1 + \frac{\alpha-\lambda}{n-\alpha} \Bigr)^{n-\alpha}
    \le
    \frac{1}{\sqrt \alpha} \Bigl( \frac{\lambda}{\alpha} \Bigr)^\alpha e^{\alpha-\lambda},
  \end{multline*}
  as promised.
\end{proof}


\subsection{Section~\ref{sec:chi} (chromatic number): Number of distinct rows}\label{ssec:apx:chi:distinct-rows}
\begin{proof}[Proof of Lemma~\ref{lem:chi:numo_distinct_rows}]
  For notational convenience, for $k=1,\dots,n$, let
  \begin{equation*}
    S_k := \{ \ell \mid \Matrix{n}{p}_{k,\ell} = 0 \}.
  \end{equation*}
  The $S_k$ are random sets, where the events $\ell \in S_k$ are all independent and have probability~$\notp$.
  %%
  For $m \ge 0$, with $\Zero := \{1,\dots,n\}$, denoting by
  \begin{equation*}
    X_m := \abs{ \{ S_1,\dots,S_m \} \setminus \{\Zero\} },
  \end{equation*}
  the number of distinct non-zero rows among the first~$m$ rows of $\Matrix{n}{p}$, we need to show that $X_n = \Omega(n)$ (resp., $X_n = \Omega(\lambda n)$).
  %%
  This is quite easy for $\notp = \Omega(\nfrac1n)$, i.e., Item~(\ref{enum:apx-chi:numo_distinct_rows:large_p}).  Here, we just prove it in the case that $\notp \le \nfrac{1}{2n}$, i.e., Item~(\ref{enum:apx-chi:numo_distinct_rows:small_p}).

  Denote by $A_{m+1}$ the event that the $(m+1)$st row is zero or a duplicate of the first~$m$ rows, i.e., that
  \begin{equation*}
    S_{m+1} \in \{\Zero,S_1,\dots,S_m\}.
  \end{equation*}
  We enumerate the distinct sets: $\{S_1,\dots,S_m\} =: \{S_{k_1},\dots,S_{k_{X_m}}\}$.  Now, for $m\ge 1$, we have
  \begin{multline*}
    \Prb\Bigl( A_{m+1} \Bigm|   \abs{S_1},\dots,\abs{S_m}, X_m \Bigr)
    %\\
    \shoveright{%
      =
      \Prb\Bigl( S_{m+1} \in \{\Zero,S_1,\dots,S_m\}    \Bigm|    \abs{S_1},\dots,\abs{S_m}, X_m\Bigr)
    }\\\shoveright{%
    =
    \Prb(S_{m+1} = \Zero) + \sum_{j=1}^{X_m} \Prb\Bigl( S_{m+1} = S_{k_j} \Bigm|  \abs{S_1},\dots,\abs{S_m},  X_m \Bigr)
    }\\%\shoveright{%
    =
    \notp^n + \sum_{j=1}^{X_m} \notp^{\sabstight{S_{k_j}}} p^{n-\sabstight{S_{k_j}}}
    % }\\
    \le
    \notp^n + p^n + \max(0,X_m-1)\notp \onespace p^{n-1},
  \end{multline*}
  where the last inequality comes from the fact that, since the $S_{k_j}$ are all distinct, at most one of them has cardinality~0.
  %%
  Hence, for $m\ge 2$,
  \begin{align*}
    \Prb\bigl( A_{m+1} \bigm|    X_m, X_1=1 \bigr)
    &\le
    \notp^n + p^n + (X_m-1)\notp p^{n-1}
    \\
    &\le
    \notp^n + p^n -\notp p^{n-1} + \notp p^{n-1} X_m.
  \end{align*}
  Now, for $m \ge 1$,
  \begin{multline*}
    \Exp\bigl( X_{m+1} \bigm | X_m, X_1=1 \bigr)
    =
    X_m + 1 - \Prb(A_{m+1}\mid X_m, X_1=1),
    \\
    \ge
    X_m + 1 - \notp^n - p^n + \notp p^{n-1} - \notp p^{n-1} X_m
    \\
    =
    1 + \notp p^{n-1} - \notp^n - p^n + (1-\notp p^{n-1}) X_m.
  \end{multline*}
  Using the law of total probability and solving the recursion\footnote{%
    The recursion: $\displaystyle \mu_{m+1} = \alpha + \beta\mu_m = \ldots = \alpha \sum_{j=0}^{m-1} \beta^j + \beta^m \mu_1 = \alpha\frac{1-\beta^m}{1-\beta} + \beta^m \mu_1$.
    %%
  }, %
  we find that
  \begin{equation*}
    \Exp\bigl( X_m \bigm | X_1=1 \bigr)
    \ge
    (1 + \notp p^{n-1} - \notp^n - p^n) \frac{1 - (1-\notp p^{n-1})^{m-2}}{\notp p^{n-1}}
    + (1-\notp p^{n-1})^{m-1}
  \end{equation*}
  With $\lambda := \notp n$ again, note that, since by our assumption above $\lambda \le \nfrac12$, using the Bernoulli inequalities $1-tn \le (1-t)^n \le 1-tn+t^2\binom{n}{2}$ for $t<1$, we have
  \begin{equation*}
    \frac12 \le 1 - \lambda \le p^n \le p^{n-1} \le 1 - \lambda\lt( \frac{n-1}{n} - \lambda\frac{n-1}{n}\rt) \le 1,
  \end{equation*}
  so that
  \begin{equation*}
    (1-\notp p^{n-1})^{m-2} \le (1-\nfrac{\notp}{2})^{m-2} \le 1   -  \frac{\lambda}{2}\lt( \frac{m-2}{n} - \frac{\lambda}{2}\frac{m-2}{n} \rt).
  \end{equation*}
  We conclude that, for $m=n$,
  \begin{multline*}
    \Exp\bigl( X_m \bigm | X_1=1 \bigr)
    \ge
    (1  - p^n) \frac{1 - (1-\notp p^{n-1})^{m-2}}{\notp p^{n-1}}
    \\
    \ge
    \lambda \lt( \frac{n-1}{n} - \lambda\frac{n-1}{n}\rt) \cdot \frac{   \frac{\lambda}{2} \lt( \frac{m-2}{n} - \frac{\lambda}{2}\frac{m-2}{n} \rt)  }{ \lambda/n }
    \ge (1+o(1)) \frac{\lambda n}{2}.
  \end{multline*}
  Since $\Prb(X_1=1) = \Prb(S_1=\Zero) = (1-\notp^n) = 1-o(1)$, this implies $\Exp X_n \ge \Exp(X_n\mid X_1=1)\Prb(X_1=1) \ge (1-o(1))\nfrac{\lambda n}{2}.$

  To obtain the a.a.s.\ statement from the one about the expectation, we use Martingale-based concentration bound (Corollary 2.27 in~\cite{Janson-Luczak-Rucinski:Book}): as changing one row can affect $X_n$ by at most~1, we get
  \begin{equation*}
    \Prb\bigl(  X_n \le \nfrac{\lambda n}{4} \bigr)
    \le
    \Prb\bigl(  X_n \le \Exp X_n - \nfrac{\lambda n}{4} \bigr)
    \le
    e^{-\nfrac{(\lambda n)^2}{32n}}
    =
    e^{-\Omega(\lambda^2 n)}
    = o(1),
  \end{equation*}
  where the last equation follows from the condition $n^{-\nfrac32} = o(\notp)$.
\end{proof}


\end{document}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: nil
%%% fill-column: 999999
%%% End:
