\documentclass[ALCO,ThmDefs,Unicode,epreuves]{cedram}
\OneNumberAllTheorems
\usepackage{youngtab,ytableau}
\usepackage[all]{xy}
%Color
\definecolor{ltgrey}{RGB}{180, 187, 198}
%Spacing
%\newcommand{\hs}{{\hspace{3mm}}}
%\newcommand{\hsm}{{\hspace{1mm}}}
\newcommand{\hsmH}{}
%Math commands
\newcommand{\Ssf}{\mathsf{S}}
\newcommand{\Xsf}{\mathsf{X}}
\newcommand{\glfrak}{\mathfrak{gl}}
\newcommand{\defg}[1]{\emph{#1}}
\newcommand{\C}{{\mathbb{C}}}
\newcommand{\Z}{{\mathbb{Z}}}
\newcommand{\Q}{{\mathbb{Q}}}
\newcommand{\g}{\mathfrak{g}}
\newcommand{\Symm}{{\mathfrak{S}}}
\newcommand{\Flags}{{\mathcal{F}\ell ags}}
\newcommand{\Hess}{{\mathcal{H}ess}}
\newcommand{\Rep}{{\mathcal{R}ep}}
\DeclareMathOperator{\Lie}{Lie}
\DeclareMathOperator{\Stab}{Stab}
\DeclareMathOperator{\row}{row}
\DeclareMathOperator{\col}{col}
\DeclareMathOperator{\NR}{NR}
\DeclareMathOperator{\src}{src}
\DeclareMathOperator{\tgt}{tgt}
\DeclareMathOperator{\sk}{sk}
\DeclareMathOperator{\SK}{SK}
\DeclareMathOperator{\asc}{asc}
\DeclareMathOperator{\inv}{inv}
\DeclareMathOperator{\ind}{ind}
\DeclareMathOperator{\spanrm}{span}
\DeclareMathOperator{\ch}{ch}
\DeclareMathOperator{\indexrm}{index}
\DeclareMathOperator{\partsrm}{parts}
\DeclareMathOperator{\Aut}{Aut}
\DeclareMathOperator{\GL}{GL}
%\newtheorem{fact}[cdrthm]{Fact}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newcommand*{\mk}{\mkern -1mu}
\newcommand*{\Mk}{\mkern -2mu}
\newcommand*{\mK}{\mkern 1mu}
\newcommand*{\MK}{\mkern 2mu}
\hypersetup{urlcolor=purple, linkcolor=blue, citecolor=red}
\newcommand*{\romanenumi}{\renewcommand*{\theenumi}{\roman{enumi}}}
\newcommand*{\Romanenumi}{\renewcommand*{\theenumi}{\Roman{enumi}}}
\newcommand*{\alphenumi}{\renewcommand*{\theenumi}{\alph{enumi}}}
\newcommand*{\Alphenumi}{\renewcommand*{\theenumi}{\Alph{enumi}}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% Auteur
\author{\firstname{Megumi} \lastname{Harada}}
\address{Department of Mathematics and Statistics\\
McMaster University\\
1280 Main Street West\\
Hamilton\\
Ontario L8S4K1, Canada}
\email{Megumi.Harada@math.mcmaster.ca}
\urladdr{http://www.math.mcmaster.ca/Megumi.Harada/}
\thanks{The first author is partially supported by an NSERC Discovery Grant and a Canada Research Chair (Tier 2) award.}
\author{\firstname{Martha} \middlename{E.} \lastname{Precup}}
\address{Department of Mathematics and Statistics\\
Washington University in St. Louis \\
One Brookings Drive\\
St. Louis\\
Missouri 63130, U.S.A.}
\email{martha.precup@wustl.edu}
\urladdr{http://www.math.northwestern.edu/~mprecup/}
%%%%% Sujet
\keywords{Stanley--Stembridge conjecture, symmetric functions, e-positivity, Hessenberg varieties, abelian ideal}
\subjclass{14M17, 05E05}
%%%%% Gestion
%\DOI{10.5802/alco.76}
%\datereceived{2018-07-30}
%\daterevised{2019-01-18}
%\datererevised{2019-04-18}
%\dateaccepted{2019-03-20}
%%%%% Titre et résumé
\title[Abelian Hessenberg varieties and the Stanley--Stembridge conjecture]
{The cohomology of abelian Hessenberg varieties and the Stanley--Stembridge conjecture}
\begin{abstract}
We define a subclass of Hessenberg varieties called abelian Hessenberg varieties, inspired by the theory of abelian ideals in a Lie algebra developed by Kostant and Peterson. We give an inductive formula for the $\mathfrak{S}_n$-representation on the cohomology of an abelian regular semisimple Hessenberg variety with respect to the action defined by Tymoczko. Our result implies that a graded version of the Stanley--Stembridge conjecture holds in the abelian case, and generalizes results obtained by Shareshian--Wachs and Teff. Our proof uses previous work of Stanley, Gasharov, Shareshian--Wachs, and Brosnan--Chow, as well as results of the second author on the geometry and combinatorics of Hessenberg varieties. As part of our arguments, we obtain inductive formulas for the Poincar\'e polynomials of regular abelian Hessenberg varieties.
\end{abstract}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
\maketitle
\section{Introduction}\label{sec:intro}
Hessenberg varieties in type A are subvarieties of the full flag variety $\Flags(\C^n)$ of nested sequences of linear subspaces in $\C^n$. These varieties are parameterized by a choice of linear operator $\Xsf \in \glfrak (n,\C)$ and Hessenberg function $h: \{1,2,\ldots,n\} \to \{1,2,\ldots,n\}$. We denote the corresponding Hessenberg variety by $\Hess(\Xsf , h)$. The geometry and (equivariant) topology of Hessenberg varieties has been studied
extensively since the late 1980s~\cite{DeMari1987, DeMProSha92}. This
subject lies at the intersection of, and makes connections between,
many research areas such as geometric representation theory,
combinatorics,
and
algebraic geometry and topology.
In this manuscript, we are concerned with the connection between the geometry and topology of Hessenberg varieties and the famous Stanley--Stembridge conjecture in combinatorics, which states that the chromatic symmetric function of the incomparability graph of a so-called $(3+1)$-free poset is $e$-positive, \ie it is a non-negative linear combination of elementary symmetric functions~\cite[Conjecture~5.5]{StanleyStembridge1993} (see also~\cite{Stanley1995}). Guay-Paquet has subsequently proved that this conjecture, which we refer to below as the ``original Stanley--Stembridge conjecture,'' can be reduced to the statement that the chromatic symmetric function of the incomparability graph of a unit interval order is $e$-positive~\cite{Guay-Paquet2013}, and we refer to the latter statement as the ungraded Stanley--Stembridge conjecture.
Shareshian and Wachs linked the ungraded Stanley--Stembridge conjecture to Hessenberg varieties via the ``dot action'' $\Symm_n$-representation on the cohomology ring of a regular semisimple Hessenberg variety defined by Tymoczko~\cite{Tym08}, as we now explain. The Hessenberg variety $\Hess(\Ssf , h)$ is called a regular semisimple Hessenberg variety if $\Ssf $ is a regular semisimple element of $\glfrak (n,\C)$. Shareshian and Wachs established a bijection between Hessenberg functions and unit interval orders~\cite[Proposition~4.1]{ShareshianWachs2016}; their bijection associates each Hessenberg function $h$ to the incomparability graph of a unit interval order, here denoted by $\Gamma_h$. In addition, Shareshian and Wachs defined the chromatic quasisymmetric function $X_{\Gamma}(\underline{x}, t)$ of a graph $\Gamma$, which refines Stanley's chromatic symmetric function in the sense that $X_{\Gamma}(\underline{x},1)$ is Stanley's chromatic symmetric function. They then formulated a conjecture relating the chromatic quasisymmetric function of the graph $\Gamma_h$ to the image of the character of the dot action representation on $H^*(\Hess(\Ssf , h))$ under the characteristic map. This conjecture, known as the Shareshian--Wachs conjecture, provides the link between Hessenberg varieties and chromatic symmetric (and quasi-symmetric) functions. We discuss it in greater detail in Section~\ref{sec: Stanley--Stembridge conjecture and dot action} below.
Since cohomology rings are naturally graded by degree, the Shareshian--Wachs conjecture actually suggests that one should consider a graded version of the Stanley--Stembridge conjecture.
Specifically, the ``graded Stanley--Stembridge conjecture''
(see~\cite[Conjecture~10.4]{ShareshianWachs2016}) states that the coefficient of $t^i$ in the chromatic quasisymmetric function $X_{\Gamma_h}(\underline{x},t)$ is $e$-positive. We discuss this refined version of the ungraded Stanley--Stembridge conjecture in Section~\ref{sec: Stanley--Stembridge conjecture and dot action} and state it formally in Conjecture~\ref{conj:Stanley--Stembridge}.
To emphasize, the statement of the Shareshian--Wachs conjecture provides the necessary link between the cohomology of Hessenberg varieties and the graded Stanley--Stembridge conjecture, thus yielding a new way of attacking both the ungraded and the graded versions of the conjecture.
The Shareshian--Wachs conjecture was proved in 2015 by Brosnan and Chow~\cite{BrosnanChow2015} (also independently by Guay-Paquet~\cite{Guay-Paquet2016}) by showing a remarkable relationship between the Betti numbers of different Hessenberg varieties. (Direct computations of cohomology rings of certain Hessenberg varieties also yield partial proofs of the Shareshian--Wachs conjecture; see~\cite{AHHM, AHM2017}.)
As we have explained above, it then follows that in order to prove the graded Stanley--Stembridge conjecture, it suffices to
prove that the cohomology
$H^{2i}(\Hess(\Ssf ,h))$ for each $i$ is a non-negative combination of the tabloid representations $M^\lambda$~\cite[Part~II, Section~7.2]{Ful97} of $\Symm_n$ for $\lambda$ a partition of $n$. In other words, given the decomposition
\begin{equation}\label{eq: decomp intro}
H^{2i}(\Hess(\Ssf ,h)) = \sum_{\lambda \vdash n} c_{\lambda, i} M^\lambda
\end{equation}
in the representation ring $\Rep (\Symm_n)$ of $\Symm_n$, it suffices to show that the coefficients $c_{\lambda,i}$ are non-negative.
The above discussion explains the motivation for this manuscript, and we now describe our main results.
Let $h: \{ 1, 2, \ldots,n\} \to \{ 1,2,\ldots,n \}$ be a Hessenberg function. Our approach to the graded Stanley--Stembridge conjecture is by induction. Roughly, the idea is as follows. From any Hessenberg function $h$ we can construct the corresponding incomparability graph $\Gamma_h$ (made precise in Section~\ref{sec: graphs and orientations}). Previous results of Stanley show that the acyclic orientations of $\Gamma_h$, and their corresponding sets of sinks, encode information about the coefficients $c_{\lambda,i}$. We develop this idea further by decomposing the set of acyclic orientations according to their sink sets, and make a key observation (Proposition~\ref{proposition: max sink set induction}) that, if the size of a sink set is maximal, then the set of acyclic orientations with that fixed sink set corresponds precisely to the set of \emph{all} acyclic orientations on a smaller incomparability graph. This observation sets the stage for an inductive argument.
Any Hessenberg function corresponds uniquely to a certain subset $I_h$ of the negative roots of $\glfrak (n,\C)$. In this manuscript, in the special case when $I_h$ is abelian (cf. Definition~\ref{def: abelian} below), we are able to fully implement an argument yielding an inductive formula for the coefficients of the tabloid representations.
A rough statement of our main result is as follows (definitions are in Section~\ref{sec: Hessenberg basics}, Section~\ref{sec: Stanley--Stembridge conjecture and dot action} and Section~\ref{sec: sink sets and induction}); the precise statement is Theorem~\ref{theorem:induction}. The idea is that the coefficients $c_{\lambda,i}$ above, associated to a Hessenberg variety in $\Flags(\C^n)$ for $n\geq 3$, can be computed using the coefficients associated to certain Hessenberg varieties in the flag variety $\Flags(\C^{n-2})$.
\begin{theorem}\label{theorem:main}
Let $n\geq 3$ be a positive integer and $h: \{1,2,\ldots,n\} \to \{1,2,\ldots,n\}$ be a Hessenberg function such that the ideal $I_h$ is abelian. Let $\Ssf $ denote a regular semisimple element of the Lie algebra $\glfrak (n,\C)$. Let $i \geq 0$ be a non-negative integer. Regard the cohomology $H^{2i}(\Hess(\Ssf ,h))$ as a $\Symm_n$-representation using Tymoczko's dot action.
Then, in
the representation ring $\Rep (\Symm_n)$ we have the equality
\begin{equation}\label{eq:main inductive step_1}
H^{2i}(\Hess(\Ssf , h)) = c_{(n),i} M^{(n)} + \sum_{T \in\SK_2(\Gamma_h)} \left( \sum_{\substack{\mu \,\vdash (n-2)\\ \mu=(\mu_1,\mu_2)}} c_{\mu, i-\deg(T)}^T M^{(\mu_1+1,\mu_2+1)} \right)
\end{equation}
where the set $\SK_2(\Gamma_h)$ is a certain collection of subsets of the vertices of $\Gamma_h$ and the coefficients $c^T_{\mu, i-\deg(T)}$ are the coefficients as in~\eqref{eq: decomp intro} associated to a Hessenberg function $h_T: \{1,2,\ldots,n-2\} \to \{1,2,\ldots,n-2\}$ for a Hessenberg variety in $\Flags(\C^{n-2})$.
\end{theorem}
The technical details of the induction argument leading to Theorem~\ref{theorem:main}
require the use, among other things, of Brosnan and Chow's proof of the Shareshian--Wachs conjecture, as well as the second author's combinatorial characterization of the Betti numbers of regular Hessenberg varieties. In fact, the technical core of the paper consists of two inductive formulas for the Poincar\'e polynomials of regular Hessenberg varieties in the abelian case. These formulas are stated in Proposition~\ref{prop: reg step} and Proposition~\ref{prop:induction step} and are of independent interest.
It is quite straightforward to prove the graded Stanley--Stembridge conjecture for the abelian case based on our inductive formula in Theorem~\ref{theorem:main}, and we record this argument in Corollary~\ref{corollary: graded SS for abelian}. Our result generalizes previous results. Indeed, in the case when $h$ satisfies $h(3)=\cdots=h(n)=n$, Shareshian and Wachs obtained results on the corresponding chromatic quasisymmetric function which, given Brosnan and Chow's proof of the Shareshian--Wachs conjecture, imply Corollary~\ref{corollary: graded SS for abelian} for that case. Separately, Teff~\cite[Theorem~4.20]{Teff2013a} proved the case when $h$ corresponds to a maximal standard parabolic Lie subalgebra $\mathfrak{p}$ of $\glfrak (n,\C)$. Both instances are special cases of our result, as we explain in Section~\ref{sec: abelian Hessenberg varieties}. Separately, we also note that Gebhard and Sagan have proved the original Stanley--Stembridge conjecture for a collection of graphs called $K_{\alpha}$-chains~\cite[Corollary~7.7]{GebhardSagan2001}. Their result does not subsume, nor is it subsumed by, the case considered in this manuscript, but it is of independent interest. Since the first version of this manuscript appeared on the arXiv, Cho and Huh have posted another independent proof of the graded Stanley--Stembridge conjecture in the same case we consider below~\cite{Cho-Huh2017}.
As part of our arguments, we define the height of an ideal of negative roots using the lower central series of an ideal in a Lie algebra. An ideal is abelian precisely when the height is either $1$ or $0$, so we can interpret Theorem~\ref{theorem:main} as a ``base case'' for an argument for the graded Stanley--Stembridge conjecture using induction on the height of the ideal $I_h$. We intend to explore this further in future work.
As already mentioned, the graded Stanley--Stembridge conjecture implies the ungraded Stanley--Stembridge conjecture simply by summing over all $i$, or, in the language of chromatic quasisymmetric functions, by ``setting $t$ equal to $1$''. We record this fact in Proposition~\ref{prop: classical from graded}. We note here that the ``abelian case'' considered in Theorem~\ref{theorem:induction} (and Corollary~\ref{corollary: graded SS for abelian}) corresponds, in combinatorial language, to the case in which the vertices of the graph $\Gamma_h$ can be partitioned into two disjoint cliques. The fact that the coefficients $c_{\lambda} = \sum_{i\geq 0} c_{\lambda, i}$ are non-negative in this case was originally stated by Stanley in~\cite[Corollary~3.6]{Stanley1995} as a corollary to~\cite[Theorem~3.4]{Stanley1995}; moreover, this fact is also equivalent to~\cite[Remark~4.4]{StanleyStembridge1993}. However,~\cite[Theorem~3.4]{Stanley1995} is incorrect as stated~\cite{Stanley-personal}, and the equivalence of~\cite[Remark~4.4]{StanleyStembridge1993} and~\cite[Corollary~3.6]{Stanley1995} is not explicit in~\cite{StanleyStembridge1993, Stanley1995}. Thus, our Corollary~\ref{corollary: graded SS for abelian} (together with Proposition~\ref{prop: classical from graded}) records a new and explicit proof of this fact.
We now give a brief overview of the contents of the paper. Section~\ref{sec:background} is devoted to background material. Specifically, Section~\ref{sec: Hessenberg basics} is a crash course on Hessenberg varieties. Section~\ref{sec: Stanley--Stembridge conjecture and dot action} establishes the terminology for discussing the $\Symm_n$-representations $H^{2i}(\Hess(\Ssf ,h))$, and gives a more detailed account of the relation between the Stanley--Stembridge conjecture and our results. Section~\ref{sec: graphs and orientations} recalls the language of incomparability graphs in the setting of Hessenberg functions and states a result of Stanley connecting acyclic orientations on this graph to the $\Symm_n$-representations above. Section~\ref{sec: Ph tableaux} recounts Gasharov's definition of a $P_h$-tableau and a result relating these $P_h$-tableaux to the same $\Symm_n$-representations above. We then begin our work in earnest in Section~\ref{sec: abelian Hessenberg varieties} where we define abelian Hessenberg varieties and briefly discuss the relation between this notion and the cases of Hessenberg varieties previously studied in the literature. In Section~\ref{sec: sink sets and induction} we focus attention on the sink sets of an acyclic orientation of an incomparability graph, and introduce the notion of sink-set size. In Section~\ref{sec: sink sets, ideals and representations} we link the subjects of Sections~\ref{sec: abelian Hessenberg varieties} and~\ref{sec: sink sets and induction} using a new invariant of an ideal called the height. Sections~\ref{section: inductive formula} and~\ref{section: the proofs} form the technical core of the paper, where we state and prove our main results. Finally, Section~\ref{section: conjecture} states a conjecture which, if true, would represent a first step towards generalizing the techniques in this paper to prove the full Stanley--Stembridge conjecture for all possible heights.
%%%%%%%%%%%%%%%%
\section{The setup and background}\label{sec:background}
Let $n$ be a positive integer. We denote by $[n]$ the set of positive integers $\{1,2,\ldots,n\}$.
We work in type A throughout, so $\GL (n,\C)$ is the group of invertible $n\times n$ complex matrices and $\glfrak (n,\C)$ is the Lie algebra of $\GL (n,\C)$ consisting of all $n\times n$ complex matrices.
\subsection{Hessenberg varieties}\label{sec: Hessenberg basics}
Hessenberg varieties in Lie type A are subvarieties of the (full) flag variety
$\Flags(\C^n)$, which is the collection of sequences of nested linear subspaces of $\C^n$:
%\[
%\Flags(\C^n) :=
%\{ V_{\bullet} = (\{0\} \subset V_1 \subset V_2 \subset \cdots V_{n-1} \subset
%V_n = \C^n) \mid \dim_{\C}(V_i) = i \ \textrm{for all} \ i=1,\ldots,n\}.
%\]
\[
\Flags(\C^n) :=
\{ V_{\bullet} = (\{0\} \subset V_1 \subset \cdots \subset V_{n-1} \subset
V_n = \C^n) %\mid
\mid \dim_{\C}(V_i) = i \}.
\]
A Hessenberg variety in $\Flags(\C^n)$ is specified by two pieces of data: a Hessenberg function and a choice of an element in $\glfrak (n,\C)$. We have the following.
\begin{defi}\label{definition:Hessenberg function}
A \defg{Hessenberg function} is a function $h: [n] \to [n]$ such that $h(i) \geq i$ for all $i \in [n]$ and $h(i+1) \geq h(i)$ for all $i \in [n-1]$. We frequently write a Hessenberg function by listing its values in sequence,
\ie $h = (h(1), h(2), \ldots, h(n))$.
\end{defi}
We now introduce some terminology associated to a given Hessenberg function.
\begin{defi}\label{definition:Hessenberg subspace}
Let $h: [n] \to [n]$ be a Hessenberg function. The associated \defg{Hessenberg space} is the linear subspace $H$ of $\glfrak (n,\C)$ specified as follows:
\begin{equation}\label{eq:Hessenberg subspace}
\begin{aligned}
H &:= \{ A = (a_{ij})_{i,j\in[n]} \in \glfrak (n,\C) \mid a_{ij}
= 0 \textup{ if } i > h(j) \} \\
&= \spanrm _{\C} \{E_{ij} \mid i, j \in [n] \textup{ and } i \leq h(j) \}
\end{aligned}
\end{equation}
where $E_{ij}$ is the usual elementary matrix with a $1$ in the $(i,j)$-th entry and
$0$'s elsewhere.
\end{defi}
It is important to note that $H$ is frequently \emph{not} a Lie subalgebra of $\glfrak (n,\C)$. However, it \emph{is} stable under the conjugation action of the usual maximal torus $T$ (of invertible diagonal matrices) in $\GL (n,\C)$, and the $E_{ij}$ appearing in~\eqref{eq:Hessenberg subspace} are exactly the $T$-eigenvectors. It is also straightforward to see that
\begin{equation}\label{eq:Hess stable under b}
[\mathfrak{b}, H] \subseteq H
\end{equation}
where $[ \cdot, \cdot ]$ denotes the usual Lie bracket in $\glfrak (n,\C)$ and
$\mathfrak{b} = \Lie(B)$ is the Lie algebra of the Borel subgroup $B$ of upper-triangular matrices in $\GL (n,\C)$.
Let $\mathfrak{h} \subseteq \glfrak (n,\C)$ denote the Cartan subalgebra of diagonal matrices, and let $t_i$ denote the coordinate on $\mathfrak{h}$ reading off the $(i,i)$-th matrix entry along the diagonal. Denote the root system of $\glfrak (n,\C)$ by $\Phi$. Then the positive roots $\Phi^+$ of $\glfrak (n,\C)$ are $\Phi^+ = \{ t_i - t_j \mid 1 \leq i < j \leq n\}$ where $\gamma = t_i - t_j \in \Phi^+$ corresponds to the root space spanned by $E_{ij}$, denoted $\mathfrak{g}_{\gamma}$. Similarly, the negative roots $\Phi^-$ of $\glfrak (n,\C)$ are $\Phi^- = \{ t_i - t_j \mid 1 \leq j < i \leq n\}$. We denote the simple positive roots in $\Phi^+$ by $\Delta = \{\alpha_i := t_i - t_{i+1} \; \vert \; 1 \leq i \leq n-1\}$.
Note that the pairs $(i,j)$ with $i > j$ and $i \leq h(j)$ correspond
precisely to those negative roots $\gamma \in \Phi^-$ whose associated root spaces $\g_{\gamma}$
are contained in $H$. Motivated by this, we fix the following notation:
\[
\Phi_h^- := \{ t_i - t_j \in \Phi^- \mid E_{ij} \in H \} =
\{ t_i - t_j \mid i > j \textup{ and } i \leq h(j) \}
\]
and
\[
\Phi_h := \Phi_h^- \sqcup \Phi^+ = \{ t_i - t_j \in \Phi \mid i \leq h(j) \}.
\]
It is clear that $h$ is uniquely determined by either $\Phi_h^-$ or $\Phi_h$.
Recall that an \defg{ideal} (also called an \defg{upper-order ideal}) $I$ of $\Phi^-$ is defined to be a collection of (negative) roots such that if $\alpha\in I$, $\beta\in \Phi^-$, and $\alpha+\beta\in \Phi^-$, then $\alpha+\beta\in I$.
The relation~\eqref{eq:Hess stable under b} immediately implies that
\begin{equation*}\label{eq:definition Ih}
I_h:= \Phi^- \setminus \Phi_h^-
\end{equation*}
is an ideal in $\Phi^-$. We call it the \defg{ideal corresponding to $h$}. In fact, the association taking a Hessenberg function to its corresponding ideal $I_h$ defines a bijection from the set of Hessenberg functions to ideals in $\Phi^-$, as noted by Sommers and Tymoczko in~\cite[Section~10]{SomTym06}.
It is conceptually useful to express the sets $\Phi_h, \Phi_h^-,$ and $I_h$ pictorially. We illustrate this by an example.
\begin{exam} \label{ex: pictures}
Let $n=6$. Figure~\ref{picture for h} contains the pictures corresponding to the Hessenberg function $h=(3,4,5,6,6,6)$. The leftmost square grid contains a star in the $(i,j)$-th box exactly if the $(i,j)$-th matrix entry is allowed to be non-zero for $A \in H$, or equivalently, either $i=j$, or, the corresponding root $t_i - t_j$ of $\glfrak (n,\C)$ is contained in $\Phi_h$. The center square grid contains a star in the $(i,j)$-th box precisely if the corresponding root is contained in $\Phi_h^-$. Finally, the rightmost grid contains a star in the $(i,j)$-th box if and only if the corresponding root is contained in $I_h$, \ie it is the complement of $\Phi_h$. This illustrates why some authors refer to $I_h$ as (the roots corresponding to) the ``opposite Hessenberg space''.
\begin{figure}
\[\ytableausetup{centertableaux}
\Phi_h: \,\begin{ytableau} \star & \star & \star & \star & \star & \star \\ \star & \star & \star & \star & \star & \star \\ \star & \star & \star & \star & \star & \star \\ \empty & \star & \star & \star & \star & \star \\ \empty & \empty & \star & \star & \star & \star\\ \empty & \empty & \empty & \star & \star & \star \end{ytableau}
\quad
\Phi_h^-: \, \begin{ytableau} \empty & \empty & \empty & \empty & \empty & \empty \\ \star & \empty & \empty & \empty & \empty & \empty \\ \star & \star & \empty & \empty & \empty & \empty \\ \empty & \star & \star & \empty & \empty & \empty \\ \empty & \empty & \star & \star & \empty & \empty\\ \empty & \empty & \empty & \star & \star & \empty \end{ytableau}
\quad
I_h: \, \begin{ytableau} \empty & \empty & \empty & \empty & \empty & \empty \\ \empty & \empty & \empty & \empty & \empty & \empty \\ \empty & \empty & \empty & \empty & \empty & \empty \\ \star & \empty & \empty & \empty & \empty & \empty \\ \star & \star & \empty & \empty & \empty & \empty\\ \star & \star & \star & \empty & \empty & \empty \end{ytableau}
\]
\caption{The pictures of $\Phi_h$, $\Phi_h^-$, and $I_h$ for $h=(3,4,5,6,6,6)$.}
\label{picture for h}
\end{figure}
\end{exam}
Let $h:[n]\to[n]$ be a Hessenberg function and $\Xsf $ be an $n\times n$ matrix in $\glfrak (n,\C)$, which we also consider as a linear operator $\C^n \to \C^n$. Then the \defg{Hessenberg variety} $\Hess(\Xsf ,h)$ associated to $h$ and $\Xsf $ is defined to be
\begin{equation}\label{eq:def-general Hessenberg}
\Hess(\Xsf ,h) := \{ V_{\bullet} \in \Flags(\C^n) \;
\vert \; \Xsf V_i \subset
V_{h(i)} \text{ for all } i\in[n]\} \subset \Flags(\C^n).
\end{equation}
In this paper we focus on certain special cases of Hessenberg varieties. Let $\lambda = (\lambda_1, \lambda_2, \ldots, \lambda_n)$ be a composition of $n$ in the sense that $\lambda_1+\lambda_2+\cdots + \lambda_n=n$ and $\lambda_i \geq 0$ for all $i$. A linear operator is \defg{regular of Jordan type $\lambda$} if its standard Jordan canonical form has block sizes given by $\lambda_1$, $\lambda_2$, etc., and no two distinct blocks have the same eigenvalue. Note that if $g \in \GL (n,\C)$, then $\Hess(\Xsf ,h)$ and $\Hess(g\Xsf g^{-1},h)$ can be identified via the action of $\GL (n,\C)$ on $\Flags(\C^n)$~\cite{Tym06}. For concreteness in what follows, for a given $\lambda$ as above we set the notation
%\[
%\Xsf _\lambda \textup{ is a (fixed) matrix in Jordan canonical form, which is regular of Jordan type $\lambda$}
%\]
\[
\Xsf _\lambda \textup{ is a (fixed) regular matrix in Jordan canonical form of Jordan type $\lambda$}
\]
and we refer to the corresponding Hessenberg variety $\Hess(\Xsf _\lambda, h)$ as a \defg{regular Hessenberg variety}.
Two special cases are of particular interest. Namely, if $\lambda=(n,0,\ldots,0)=(n)$, then we may take the corresponding regular operator to be the regular nilpotent operator which we denote by $\mathsf{N}$, \ie $\mathsf{N}$ is the matrix whose Jordan form consists of exactly one Jordan block with corresponding eigenvalue equal to $0$. The regular Hessenberg variety $\Hess(\mathsf{N},h)$ is called a \defg{regular nilpotent Hessenberg variety}. Similarly let $\Ssf $ denote a regular semisimple matrix in $\glfrak (n,\C)$, \ie a matrix which is diagonalizable with distinct eigenvalues. This corresponds to the other extreme case, namely, $\lambda = (1,1,1,\ldots,1)$. We call $\Hess(\Ssf ,h)$ a \defg{regular semisimple Hessenberg variety}.
%%%%%%%%%%%%%%%%%%
\subsection{The Stanley--Stembridge conjecture in terms of Tymoczko's dot action representation} \label{sec: Stanley--Stembridge conjecture and dot action}
As already discussed in the Introduction, the main motivation of this manuscript is to study a graded version of the Stanley--Stembridge conjecture (Conjecture~\ref{conj:Stanley--Stembridge} below), stated in terms of the $\Symm_n$-representation on the cohomology rings of regular semisimple Hessenberg varieties defined by Tymoczko~\cite{Tym08}.
Tymoczko's \defg{dot action} preserves the grading on these cohomology rings (which is concentrated in even degrees).
The structure of this section is as follows. We first review basic facts and establish notation for partitions and $\Symm_n$-representations.
We then give in Conjecture~\ref{conj:Stanley--Stembridge}
a precise statement of the graded Stanley--Stembridge conjecture. We also state the ungraded Stanley--Stembridge conjecture, and briefly recount how a solution to Conjecture~\ref{conj:Stanley--Stembridge} implies the ungraded Stanley--Stembridge conjecture (cf. discussion in~\cite{BrosnanChow2015, ShareshianWachs2016}). The rest of the section is a brief review of standard representation theory facts and a statement of a fundamental result of Brosnan and Chow.
A \defg{partition} of $n$ is a sequence $\lambda=(\lambda_1,\lambda_2,\ldots,\lambda_n) \in \Z^n$ satisfying $\lambda_1+\lambda_2+\cdots+\lambda_n=n$ and $\lambda_1 \geq \lambda_2 \geq \cdots \geq \lambda_n \geq 0$. If $\lambda$ is a partition of $n$ we write $\lambda \vdash n$.
We say a partition $\lambda \vdash n$ \defg{has $k$ parts} and write $\partsrm (\lambda)=k$ if $\lambda_k \neq 0$ and $\lambda_{k+1}=\cdots = \lambda_n=0$. (Alternatively, $\lambda$ has $k$ parts if and only if the Young diagram corresponding to $\lambda$ has precisely $k$ rows.) For simplicity if $\partsrm (\lambda)=k$ then we write $\lambda=(\lambda_1,\ldots,\lambda_k)$ instead of $\lambda=(\lambda_1,\ldots,\lambda_k, 0, 0,\ldots,0)$.
Moreover, for $\nu \vdash n$ a partition of $n$, we let $\Symm_\nu \subseteq \Symm_n$ denote the \defg{Young subgroup} of $\Symm_n$ corresponding to $\nu$. Concretely, if $\nu = (\nu_1, \nu_2, \ldots, \nu_k)$ has $k$ parts then $\Symm_\nu$ is the subgroup
\[
\Symm_{1,\ldots,\nu_1} \times \Symm_{\nu_1+1, \ldots, \nu_1+\nu_2} \times \cdots \times
\Symm_{(\sum_{\ell=1}^{k-1} \nu_{\ell})+1, \ldots, n} \subseteq \Symm_n
\]
where $\Symm_{i, i+1,\ldots, j}$ denotes the permutations of the set $\{i, i+1,\ldots, j\}$ for each $1\leq i \leq j \leq n$.
%% NOTE(review): the source appears to be missing text here (the opening of the
%% definition of a $P_h$-tableau and its first two conditions); the following is
%% a conservative reconstruction, consistent with the visible condition (3) and
%% with Gasharov's definition -- please verify against the original manuscript.
\begin{defi}\label{defi2.22}
Let $h: [n] \to [n]$ be a Hessenberg function and let $\lambda \vdash n$. A \defg{$P_h$-tableau of shape $\lambda$} is a filling of the Young diagram of $\lambda$ with the integers in $[n]$ such that
\begin{enumerate}
\item\label{defi2.22_1} each element of $[n]$ appears exactly once,
\item\label{defi2.22_2} if $i\in [n]$ appears immediately to the right of $j\in [n]$ then $i > h(j)$, and
\item\label{defi2.22_3} if $i\in [n]$ appears immediately below $j\in [n]$ then $j \leq h(i)$.
\end{enumerate}
\end{defi}
\begin{exam} Let $n=5$ and let $h=(2,3,4,5,5)$.
Then there are nine $P_h$-tableaux of shape $(2,2,1)$:
\[
\young(13,24,5)\quad \young(14,25,3) \quad \young(13,25,4)\quad \young(14,35,2)\quad
\young(15,24,3) \quad \young(24,13,5) \quad \young(24,15,3)\quad \young(25,14,3) \quad \young(35,24,1)
\]
\end{exam}
Recall that every partition $\lambda \vdash n$ has a dual partition $\lambda^{\vee}$ whose Young diagram is the transpose of the Young diagram of $\lambda$. The following theorem, which gives a positive, combinatorial formula for the coefficients $d_{\lambda}$, is due to Gasharov~\cite{Gasharov1996}. There is also a graded version of the theorem, due to Shareshian and Wachs~\cite[Theorem~6.3]{ShareshianWachs2016}, but we will only need the ungraded version below.
\begin{theorem}\label{thm: irreducible coefficients} Let $n$ be a positive integer and let $h: [n]\to [n]$ be a Hessenberg function. Let $d_{\lambda}$ denote the coefficients appearing in~\eqref{eq:decomp into Specht}. Then
\[
d_\lambda = \lvert \{ \textup{ $P_h$-tableaux of shape $\lambda^{\vee}$ } \} \rvert.
\]
\end{theorem}
%%%%%%%%%%%%%%%%%%%%%
\section{Abelian Hessenberg varieties} \label{sec: abelian Hessenberg varieties}
In the previous sections we outlined the motivation behind this paper and recalled some background. We are finally ready to begin our own arguments in earnest, and the first task is to establish the terminology (and hypothesis) which allows us to make our arguments -- namely, the definition of an abelian ideal and an abelian Hessenberg variety. We also briefly discuss how our special case relates to other situations that have been studied previously in the literature.
In Section~\ref{sec:background} we defined an ideal of $\Phi^-$ associated to a Hessenberg function $h$. We now introduce the definition which is central to this manuscript.
\begin{defi}\label{def: abelian}
We say that an ideal $I\subseteq \Phi^-$ is \defg{abelian} if $\alpha+\beta\notin \Phi^-$ for all $\alpha, \beta\in I$. \end{defi}
The notion of abelian ideals is not new in the context of Lie theory. However, as far as we are aware, its use in the study of Hessenberg varieties is new. The following definition is not essential to this paper but we include it because it frequently arises in the literature.
\begin{defi}
Let $I$ be an ideal in $\Phi^-$. We say that $I$ is \defg{strictly negative} if $-\Delta\cap I$ is empty.
\end{defi}
Note that if $I=I_h$ is the ideal of $\Phi^-$ associated to a Hessenberg function $h$, then $-\Delta \cap I$ is empty if and only if $-\Delta \subseteq \Phi_h$. The following is well-known, which partly explains why it is common practice in the study of Hessenberg varieties to assume that $I_h$ is strictly negative.
\setbox\toto=\hbox{\cite[Theorem~3.4]{Precup2015}}
\begin{lemma}[\box\toto]
Let $h$ be a Hessenberg function and $\Xsf \in \glfrak (n,\C)$ be a semisimple matrix. Then the corresponding semisimple Hessenberg variety $\Hess(\Xsf ,h)$ is connected if and only if $I_h$ is strictly negative.
\end{lemma}
\begin{exam} In the case $n=4$, there are $8$ abelian ideals in $\Phi^-$. The reader may check that these correspond to the Hessenberg functions $(1,4,4,4)$, $(2,2,4,4)$, $(2,3,4,4)$, $(2,4,4,4)$, $(3,3,3,4)$, $(3,3,4,4)$, $(3,4,4,4)$ and $(4,4,4,4)$. Among these, those that are strictly negative are
\[
(4,4,4,4), (3,4,4,4), (3,3,4,4), (2,4,4,4), (2,3,4,4)
\]
and their corresponding ideals $I_h$ are, respectively,
\[
\emptyset, \{ t_4-t_1 \}, \{ t_4-t_1, t_4-t_2\}, \{ t_4-t_1, t_3-t_1 \}, \{ t_4-t_1, t_4-t_2, t_3-t_1 \}.
\]
\end{exam}
The following extends the notion of abelian ideals to their corresponding Hessenberg varieties.
\begin{defi} We say that the Hessenberg variety $\Hess(\Xsf ,h)$ and the corresponding Hessenberg function $h$ are \defg{abelian} if $I_h$ is abelian.
\end{defi}
Recall that Hessenberg functions are in bijection with natural unit interval orders as shown by Shareshian and Wachs in~\cite[Proposition~4.1]{ShareshianWachs2016}. Under this identification, the Hessenberg function $h$ is abelian if and only if the longest chain (\ie totally ordered subset) of the associated natural unit interval order has length one. We thank Timothy Chow for the following remark.
\begin{rema}\label{rem: alt-def} There is also a purely combinatorial characterization of abelian Hessenberg functions as follows. Define the \emph{index} $\indexrm (h)$ of a Hessenberg function to be the largest integer $i$ such that $h(i)<n$, with the convention that $\indexrm (h)=0$ if no such $i$ exists. By the definition of $I_h$, the ideal $I_h$ fails to be abelian exactly when there exist $j>k>\ell$ with $j>h(k)$ and $k>h(\ell)$. Now, since $h(k)<n$ implies $k\leq \indexrm (h)$, it follows that $h$ is abelian if and only if $h(\ell)\geq \indexrm (h)$ for all $\ell\in [n]$.
\end{rema}
\begin{lemma}\label{lemma: equivalency sink and independence}
Let $h: [n]\to [n]$ be a Hessenberg function with incomparability graph $\Gamma_h$, and let $T=\{\ell_1 < \ell_2 < \cdots < \ell_k\}$ be a subset of $[n]$. Then the following are equivalent:
\begin{enumerate}
\item\label{lemma4.1_1} $T$ is a sink set of $\Gamma_h$,
\item\label{lemma4.1_2} $\ell_{i+1} > h(\ell_i)$ for all $i \in [k-1]$, and
\item\label{lemma4.1_3} $T$ is an independent set in $\Gamma_h$.
\end{enumerate}
In particular, the cardinality of any maximum independent subset of vertices is equal to the cardinality of any maximum sink set.
\end{lemma}
\begin{proof} We first show that~\eqref{lemma4.1_1} implies~\eqref{lemma4.1_2}. Suppose $T$ is a sink set. We wish to show $ \ell_{i+1}> h(\ell_i)$ for all $i$, $1 \leq i \leq k-1$. If $k=1$, the condition is vacuous and there is nothing to check. If $k>1$, suppose for a contradiction that there exists $i \in [k-1]$ with $\ell_{i+1} \leq h(\ell_i)$. Then by construction of $\Gamma_h$ there exists an edge $e$ between $\ell_i$ and $\ell_{i+1}$. For any orientation $\omega$ of $\Gamma_h$, we must have either $\tgt_{\omega}(e)=\ell_i$ or $\tgt_{\omega}(e)=\ell_{i+1}$, and not both. Thus $\ell_i$ and $\ell_{i+1}$ cannot be simultaneously contained in $\sk(\omega)$, contradicting the fact that $T$ is a sink set.
Next we prove that~\eqref{lemma4.1_2} implies~\eqref{lemma4.1_3}. Note that since $\ell_1 < \ell_2 < \cdots < \ell_k$ by assumption, if $\ell_{i+1} > h(\ell_i)$ for all $i \in [k-1]$ then it follows that $\ell_b>h(\ell_a)$ for any pair $a<b$ in $[k]$, so no two vertices of $T$ are joined by an edge of $\Gamma_h$, \ie $T$ is an independent set in $\Gamma_h$.
Now suppose that $\phi_T(j) \leq \phi_T(h(i))$ and assume, for a contradiction, that $j > h(i)$. Then we must have $\phi_T(h(i)) \leq \phi_T(j)$. This means $\phi_T(j) = \phi_T(h(i))$. Since $\phi_T$ is injective on $[n] \setminus T$ and $j \in [n] \setminus T$, this means $h(i) \in T$. Moreover, it follows from the definition of $\phi_T$ that $h(i) > j$. This contradicts the initial assumption that $j > h(i)$, so we conclude $j \leq h(i)$ as desired. Finally, $\phi_T(j) \leq \phi_T(h(i))$ if and only if $\phi_T(j) \leq h_T(\phi_T(i))$ by definition of $h_T$, completing the proof.
\end{proof}
We have already observed that the edges of an incomparability graph $\Gamma_h$ associated to a Hessenberg function are in one-to-one correspondence with the set of negative roots in $\Phi^-_h$.
Our construction of a ``smaller'' graph $\Gamma_{h_T}\cong \Gamma_h - T$, suggests that there should be a correspondence between negative roots in $\Phi_{h_T}^-$ and a certain subset of $\Phi_h^-$ which is determined by $T$. We now make this precise.
By Lemma~\ref{lem: induction graph}, we may describe the roots $\Phi_{h_T}^-$ and the ideal $I_{h_T}$ corresponding to $h_T$ using those of $h$ as follows:
\[
\Phi_{h_T}^- = \{ t_{\phi_T(i)} - t_{\phi_T(j)} \mid t_i-t_j \in \Phi_h^- \textup{ and } i,j \notin T \}
\]
and
\[
I_{h_T} = \{ t_{\phi_T(i)} - t_{\phi_T(j)} \mid t_i-t_j\in I_h \textup{ and } i,j \notin T \}.
\]
In our computations below, it will also be convenient to consider the subset of negative roots in $\Phi_h^-$ and $I_h$ which correspond to $\Phi_{h_T}^-$ and $I_{h_T}$, respectively, under the map $\phi_T$. We set the notation
\[
\Phi_h^-[T]: = \{ t_i-t_j \mid t_i - t_j \in \Phi_h^- \textup{ and } i,j \notin T \}
\]
and
\[
I_h[T]: = \{ t_i-t_j \mid t_i - t_j \in I_h \textup{ and } i,j\notin T \}.
\]
There is an obvious bijection from $\Phi_h^-[T]$ to $\Phi_{h_T}^-$ and $I_h[T]$ to $I_{h_T}$ given by $t_i-t_j \mapsto t_{\phi_T(i)} - t_{\phi_T(j)}$.
Finally, we observe that the construction of the smaller graph $\Gamma_h - T \cong \Gamma_{h_T}$ from the data of $\Gamma_h$ also extends to orientations. Specifically,
let $\omega\in \mathcal{A}_k(\Gamma_h)$ be any acyclic orientation such that $\sk(\omega)=T$. Then the orientation $\omega$ naturally induces, by restriction, an orientation on $\Gamma_h-T = \Gamma_{h_T}$ (since the edges of $\Gamma_h - T$ are a subset of those of $\Gamma_h$). We denote this acyclic orientation on $\Gamma_{h_T}$ by $\omega_T$.
\begin{exam}\label{ex: omega induces omegaT} We continue with Example~\ref{example: smaller graph}. In the pictures below, we draw an orientation $\omega$ of $\Gamma_h$ on the left, and its corresponding induced orientation $\omega_T$ of $\Gamma_{h_T}$ on the right. For visualization purposes the sink set $T$ and its incident edges are highlighted in red.
\vspace*{.15in}
\[\xymatrix{1 \ar@[red][r] & {\color{red}2} & 3 \ar@[red][l] \ar@/_1.5pc/[ll] \ar@[red]@/^1.5pc/[rr] & 4 \ar[l] \ar@[red][r] \ar@[red]@/_1.5pc/[ll] & {\color{red}5} & & &
1 & 2 \ar[l] & 3 \ar[l]
}\]
\end{exam}
%%%%%%%%%%%%%%%%%%%%%
\subsection{Sink sets of maximal cardinality and an inductive description of acyclic orientations}\label{subsec:sink sets}
The main observation of the present section, recorded in Proposition~\ref{proposition: max sink set induction}, is that if $k$ is maximal, then the sets appearing on the RHS of the sink set decomposition~\eqref{eq: sink set decomposition} are in bijective correspondence with the set of \emph{all} acyclic orientations corresponding to the graphs $\Gamma_{h_T}$ for $T\in \SK_k(\Gamma_h)$. Moreover, this natural bijection gives a tight relationship between the number of ascending edges $\asc(\omega)$ of the orientation $\omega$ of the original graph $\Gamma_h$ with the number $\asc(\omega_T)$ of the induced orientation on the smaller graph $\Gamma_{h_T}$, where $\omega_T$ is described at the end of Section~\ref{sec: Sink sets and induced subgraphs} above. These ascending edge statistics record the degree -- \ie the grading in $H^*(\Hess(\Ssf ,h))$ -- in Theorem~\ref{thm: Stanley} and Corollary~\ref{corollary: triv coeff is nonneg}, so it is this relation which allows us to prove our ``graded'' results in Section~\ref{sec: main proofs}.
We begin by making precise the notion of a sink set of maximum possible size.
\begin{defi}\label{definition: max sink set size}
We define the \defg{maximum sink-set size} $m(\Gamma_h)$ to be the maximum of the cardinalities of the sink sets $\sk(\omega)$ associated to all possible acyclic orientations of $\Gamma_h$, \ie
\begin{equation}\label{eq:definition max sink set length}
m(\Gamma_h) = \max \{ \lvert \sk(\omega) \rvert \mid \omega \in {\mathcal{A}}(\Gamma_h) \}.
\end{equation}
\end{defi}
Note that the maximum clearly exists since $\lvert \sk(\omega) \rvert$ is bounded above by $n$. Furthermore, by Lemma~\ref{lemma: equivalency sink and independence}, the maximal sink-set size of $\Gamma_h$ is also the cardinality of a maximum independent set of vertices in $\Gamma_h$.
\begin{exam}\label{ex: max sink set size} Continuing Example~\ref{example: smaller graph},
the sink set $T=\{2,5\}$ given in that example is in fact maximal, \ie $m(\Gamma_h)=2$. Indeed, in this case any set of three vertices must have at least one edge incident with two of them, and thus cannot be independent. Finally, we note that for this orientation we have $\asc(\omega) = 5$, \ie there are $5$ edges pointing to the right.
\end{exam}
Let $m=m(\Gamma_h)$ be the maximum sink-set size for a fixed incomparability graph $\Gamma_h$ and Hessenberg function $h$ as in Definition~\ref{definition: max sink set size}.
We need some terminology. Suppose $T\in \SK(\Gamma_h)$. Any acyclic orientation $\omega$ with sink set $T$ must have some number of edges oriented to the right, as determined by the vertices in $T$.
\begin{defi} Suppose $T\in \SK(\Gamma_h)$. We define the \defg{degree of $T$} to be
\[
\deg(T): = \min\{\asc(\omega) \mid \omega\in \mathcal{A}(\Gamma_h),\; \sk(\omega)=T \}.
\]
\end{defi}
The next lemma shows that in practice it is easy to compute $\deg(T)$ for any $T\in \SK(\Gamma_h)$. Suppose $T = \{\ell_1 < \ell_2 < \cdots < \ell_k\}$ is an independent set. We explicitly construct an acyclic orientation $\omega$ of $\Gamma_h$ with sink set precisely $T$ as follows. We first consider the set of edges $e$ in $\Gamma_h$ which are incident to a vertex in $T$. Note that any such $e$ is incident to only one vertex, say $\ell_i$, in $T$, because $T$ is independent. We assign an orientation to any such $e$ by requiring $\tgt_\omega(e)=\ell_i$.
Next consider all edges in $\Gamma_h$ which are not incident to any vertex in $T$. To any such edge $e=\{v, v'\}$ where $v < v'$ we assign the orientation which makes the edge ``point to the left''; more precisely, $\tgt_{\omega}(e)=v$. The above clearly defines an acyclic orientation $\omega$ on $\Gamma_h$.
%To finish the argument we must prove that it is acyclic. To see this, first note that no cycle can contain any vertex in $T$, since all edges have to ``point in'' to such a vertex (and for a cycle, at least one edge has to ``point out'' and one edge has to ``point in''). So the only possibility is that a cycle contains only vertices that are not in $T$. But all edges of the form $e=\{v,v'\}$ for $v<v'$ ``point to the left'', so no such cycle can occur.
Suppose, for a contradiction, that $ht(I)=k'$ for some $k'>k$. Note that $k\geq 1$ since $I$ is nonempty, so we have $k'\geq 2$. We claim that if this is the case, then $\mathcal{R}_{k'}(I)\neq \emptyset$, contradicting the assumption that $k$ is maximal. Recall that $I_i$ denotes the $i$-th ideal in the lower central series of $I=I_1$. By definition, if $ht(I)=k'$ then $I_{k'}\neq \emptyset$. Let $\gamma_{k'} \in I_{k'}$ so $ \mathfrak{g}_{\gamma_{k'}} \subseteq \mathcal{I}_{k'} = [\mathcal{I}, \mathcal{I}_{k'-1}]$. By definition of the Lie bracket, there exists $\gamma_{k'-1} \in I_{k'-1}$ and $\alpha_{k'} \in I$ such that $\gamma_{k'}=\alpha_{k'}+ \gamma_{k'-1}$. Applying the same reasoning, $\mathfrak{g}_{\gamma_{k'-1}} \subseteq \mathcal{I}_{k'-1} = [\mathcal{I}, \mathcal{I}_{k'-2}]$ so there exists $\gamma_{k'-2} \in I_{k'-2}$ and $\alpha_{k'-1} \in I$ such that $\gamma_{k'-1} = \alpha_{k'-1}+\gamma_{k'-2}$. Continue in this way to obtain $\gamma_i \in I_i$ for each $1\leq i \leq k'$ and $\alpha_i \in I$ for each $2\leq i \leq k'$ such that
\begin{equation}\label{eq: alphas}
\gamma_{i} = \alpha_i + \gamma_{i-1} \textup{ for all $2\leq i \leq k'$}.
\end{equation}
Set $\alpha_1=\gamma_1$ and consider the set $R'=\{ \alpha_1, \alpha_2,\ldots, \alpha_{k'-1}, \alpha_{k'} \}$. For each $i$ such that $1\leq i \leq k'$, $\alpha_i \in \Phi^-$ so we may write $\alpha_i = t_{a_i} - t_{b_i}$ for some $a_i, b_i \in [n]$ such that $b_i < a_i$. Since $T$ is a sink set, Lemma~\ref{lemma: equivalency sink and independence} implies that $\ell_{i+1} > h(\ell_i)$ for all $i\in [k-1]$. Therefore each $\beta_i =t_{\ell_{i+1}} - t_{\ell_i} \in I_h$ as desired. The fact that $R_T$ is a subset of height $k-1$ follows directly from the definition.
We now claim~\eqref{eq: map T to RT} is a bijection. From the definition it is straightforward to see that it is injective, so it suffices to prove that it is also surjective.
Suppose $R\in \mathcal{R}_{k-1}(I_h)$ is a set of height $k-1$. By definition there exist $q_1, q_2,\ldots, q_{k-1},q_k\in [n]$ such that $q_1<q_2<\cdots<q_k$ and $R=\{ t_{q_2}-t_{q_1}, t_{q_3}-t_{q_2},\ldots, t_{q_k}-t_{q_{k-1}} \}$. Since each $t_{q_{i+1}}-t_{q_i}$ lies in $I_h$, we have $q_{i+1}>h(q_i)$ for all $i\in [k-1]$. Set $T:=\{ q_1, q_2,\ldots, q_k \}$. Lemma~\ref{lemma: equivalency sink and independence} now implies that $T$ is a sink set. By definition, $R=R_{T}$ is the image of $T$ under~\eqref{eq: map T to RT} so our function is surjective.
Our last assertion follows directly from the fact that $|\SK_k(\Gamma_h)| = |\mathcal{R}_{k-1}(I_h)|$ for all $k\geq 2$ together with Lemma~\ref{lemma: computing ht}. This completes the proof.
\end{proof}
The above lemma establishes, in particular, a bijection between $\SK_2(\Gamma_h)$, the set of sink sets of size $2$, and $\mathcal{R}_1(I_h)$. By Proposition~\ref{lemma: abelian ht 1} we know $\mathcal{R}_1(I_h) \cong I_h$ is the set of all singleton subsets of $I_h$, so this implies that
\[
\lvert\SK_2(\Gamma_h) \rvert = \lvert I_h \rvert.
\]
More concretely, the bijection~\eqref{eq: map T to RT} associates to a sink set $T = \{j, i\} \subseteq [n]$ with $i>j$ the subset $\{t_i - t_j \} \subseteq I_h$ of height $1$.
\begin{exam}
Continuing Example~\ref{example: smaller graph} with the acyclic orientation drawn therein, the sink set is $\{2,5\}$ and
the associated (singleton) subset of $I_h$ of height $1$ is $\{ t_5-t_2 \} \subseteq I_h$.
\end{exam}
Our next proposition makes the connection between the maximum sink-set size and the coefficients $d_\lambda$ determining the representation $H^*(\Hess(\Ssf ,h))$.
\begin{prop}\label{proposition:dlambda zero}
Let $h: [n]\to [n]$ be a Hessenberg function. Then
\[
m(\Gamma_h) = \max\{ i \mid d_{\lambda}\neq 0 \textup{ for some } \lambda \vdash n \textup{ with $i$ parts}\}
\]
where the $d_\lambda$ are the non-negative coefficients appearing in~\eqref{eq:decomp into Specht}.
\end{prop}
We first need the following lemma.
\begin{lemma}\label{lem: P-tab and ind sets}
If $T=\{ i_1, i_2,\ldots, i_k \}$ is a subset of $[n]$ whose elements fill a single row in a $P_h$-tableau, then $T$ is an independent set of vertices in $\Gamma_h$.
\end{lemma}
\begin{proof}
Suppose the elements of $T$ are listed in increasing order (in the order they appear in the row of the $P_h$-tableau). By condition~\eqref{defi2.22_2} in Definition~\ref{def: P-tab}, we get $i_j>h(i_{j-1})$ for all $j$ such that $2\leq j \leq k$. Lemma~\ref{lemma: equivalency sink and independence} now implies that $T$ is an independent set of vertices.
\end{proof}
\begin{proof}[Proof of Proposition~\ref{proposition:dlambda zero}]
Let
\[
\ind(\Gamma_h) := \max\{ |T| \mid T\subseteq V(\Gamma_h) \textup{ and } T \textup{ is independent} \}.
\]
By Lemma~\ref{lemma: equivalency sink and independence} it suffices to show that
\begin{equation}\label{eq: ind sets and dlambda=0}
\ind(\Gamma_h) = \max\{ i \mid d_{\lambda}\neq 0 \textup{ for some } \lambda \vdash n \textup{ with $i$ parts}\}.
\end{equation}
Suppose $\lambda\vdash n$ is a partition of $n$ with $k$ parts such that $d_{\lambda}\neq 0$. By Theorem~\ref{thm: irreducible coefficients} there exists at least one $P_h$-tableau of shape $\lambda^{\vee}$. Since $\lambda$ has $k$ parts, $\lambda^{\vee}$ has $k$ boxes in the first row. By Lemma~\ref{lem: P-tab and ind sets} the entries in the first row of this $P_h$-tableau form an independent set of vertices in $\Gamma_h$. Therefore the LHS of~\eqref{eq: ind sets and dlambda=0} is greater than or equal to the RHS.
To prove the opposite inequality, let $T = \{\ell_1, \ell_2, \ldots, \ell_m\}$, where $\ell_1 < \ell_2 < \cdots < \ell_m$, be an independent subset of vertices in $\Gamma_h$ of maximal size. By Lemma~\ref{lemma: equivalency sink and independence} we know $\ell_{i+1} > h(\ell_{i})$ for all $i\in [m-1]$. Consider the partition $\lambda^\vee = (m, 1, \ldots, 1)$ of $n$ of ``hook shape'' with first row containing $m$ boxes and all other rows containing only one box. Also consider the filling of the Young diagram of shape $\lambda^\vee$ given by filling the top row with $\ell_1, \ldots, \ell_m$ in increasing order, and filling the remaining boxes by $[n] \setminus \{\ell_1, \ldots, \ell_m\}$ in increasing order from top to bottom. We claim that this is a $P_h$-tableau of shape $\lambda^\vee$. By construction, conditions~\eqref{defi2.22_1} and~\eqref{defi2.22_2} of Definition~\ref{def: P-tab} are already met, so we have only to check condition~\eqref{defi2.22_3}. Note that, for a pair $i$ and $j$ with $i$ appearing immediately below $j$, the condition~\eqref{defi2.22_3} (namely, that $j \leq h(i)$) holds automatically if $j < i$ (since $h(i) \geq i$ by definition of Hessenberg functions). Since $\lambda^\vee$ is of hook shape, the only places where condition~\eqref{defi2.22_3} must be checked is along the leftmost column of $\lambda^\vee$, and since by construction the filling contains entries which increase from top to bottom starting at the second row, the argument above implies that the only remaining place where condition~\eqref{defi2.22_3} must be checked is for the entry $\ell_1$ in the top-left box of $\lambda^\vee$ and the entry $\ell' := \min ([n] \setminus \{\ell_1,\ldots,\ell_m\})$ in the unique box in the second row, for which we must show that $\ell_1 \leq h(\ell')$. Suppose for a contradiction that $\ell_1 > h(\ell')$ (and hence $\ell' < \ell_1$). This implies there is no edge connecting $\ell'$ with $\ell_i$ for any $i$, $1 \leq i \leq m$.
Thus $T' = \{\ell', \ell_1, \ldots, \ell_m\}$ is a sink set of $\Gamma_h$ by Lemma~\ref{lemma: equivalency sink and independence}. Since $|T'|=m+1$, this contradicts the maximality of $m=|T|$. Thus $\ell_1 \leq h(\ell')$ and hence the above filling is indeed a $P_h$-tableau. By construction of $\lambda^\vee$, its dual partition $\lambda$ has $m$ parts, proving that the RHS of Equation~\eqref{eq: ind sets and dlambda=0} is greater than or equal to the LHS.
\end{proof}
\looseness-1
The following is now straightforward. In the case that $I_h$ is abelian, the corresponding restriction on the partitions that can appear in the RHS of~\eqref{eq: decomp into Mlambda} is quite striking.
\begin{coro}\label{corollary: max sink set gives bound on lambda}
Let $h: [n] \to [n]$ be a Hessenberg function and let $c_{\lambda}$ and $c_{\lambda,i}$ be the coefficients appearing in~\eqref{eq: decomp into Mlambda}. Then $c_{\lambda}=c_{\lambda,i}=0$ for all $\lambda \vdash n$ with more than $m(\Gamma_h) = ht(I_h)+1$ parts and for all $i \geq 0$. In particular, if $I_h$ is abelian, then $c_{\lambda}=c_{\lambda,i}=0$ for all $\lambda \vdash n$ with more than $2$ parts and for all $i \geq 0$.
\end{coro}
\begin{proof}
It follows from Proposition~\ref{proposition:dlambda zero} that, under the hypotheses, $d_{\lambda}=0$ for all $\lambda$ with more than $m(\Gamma_h)$ parts. Now apply Lemma~\ref{lemma: dlambda zero implies all other zero} to $H^*(\Hess(\Ssf ,h))$. For the abelian case, if $I_h$ is non-empty then this follows from Propositions~\ref{lemma: abelian ht 1} and~\ref{lem: root subsets}. If $I_h$ is empty, then $h=(n,n,\ldots,n)$ and $\Hess(\Ssf ,h) = \Flags(\C^n)$. The corresponding graph $\Gamma_h$ has the property that every vertex is connected to every other vertex, implying that $m(\Gamma_h)=1$ and hence $c_{\lambda}=c_{\lambda,i}=0$ for all $\lambda$ with $2$ or more parts and all $i\geq 0$. Hence the conclusion holds in this case as well.
\end{proof}
We have already indicated that our strategy for proving Theorem~\ref{theorem:main} is by induction, using the association of $\Gamma_h$ with $\Gamma_{h_T} = \Gamma_h -T$ for a sink set $T$ as in Lemma~\ref{lem: induction graph}. Let $\Ssf _T$ denote any regular semisimple element in $\glfrak (n-|T|, \C)$.
It will be useful for us to know that vanishing conditions on the coefficients of the dot action representation on $H^*(\Hess(\Ssf ,h))$ imply vanishing conditions for the analogous coefficients of $H^*(\Hess(\Ssf _T,h_T))$. To state the lemma precisely we introduce some terminology. For each partition $\mu$ of $n-|T|$ we define $c_{\mu, i}^T$ (respectively $d_{\mu,i}^T$) to be the coefficient of $M^{\mu}$ (respectively $\mathcal{S}^{\mu}$) for the decomposition of the $\Symm_{n-|T|}$-representation $H^{2i}(\Hess(\Ssf _T, h_T))$ in $\Rep (\Symm_{n-|T|})$.
\begin{lemma}\label{lem: maximal sink set induction}
Let $h: [n] \to [n]$ be a Hessenberg function, and let $T \in\SK_k(\Gamma_h)$ be a sink set of $\Gamma_h$. Then
\begin{enumerate}
\item\label{lemma5.13_1} $m(\Gamma_{h_T}) \leq m(\Gamma_h)$ and
\item\label{lemma5.13_2} $c_\mu^T = 0$ and $c_{\mu,i}^T = 0$ for all $\mu \vdash (n-\lvert T \rvert)$ with more than $m(\Gamma_h)$ parts and all $i \geq 0$.
\end{enumerate}
In particular, if $I_h$ is abelian and $T \in\SK_2(\Gamma_h)$, then $c^T_{\lambda,i}=0$ for all $\lambda$ with more than $2$ parts and all $i \geq 0$.
\end{lemma}
\begin{proof}
We begin with the first claim. Since $\Gamma_{h_T}$ is by definition an induced subgraph of $\Gamma_h$, if there exists an independent set of vertices in $\Gamma_{h_T}$, then the corresponding subset is also independent in $\Gamma_h$.
It follows from Lemma~\ref{lemma: equivalency sink and independence} that $m(\Gamma_{h_T}) = \max \{ \lvert T' \rvert \mid T'\subseteq V(\Gamma_{h_T}) \textup{ is independent in } \Gamma_{h_T} \}$ so we conclude
$m(\Gamma_{h_T}) \leq m(\Gamma_h)$ as desired. The second statement follows from the first by Corollary~\ref{corollary: max sink set gives bound on lambda}.
\end{proof}
%%%%%%%%%%%%%%%%%%%%%
\section{An inductive formula for the coefficients of the dot action}\label{section: inductive formula}
In this section, we state our main theorem, which gives an inductive formula which, in the case when $I_h$ is abelian, expresses Tymoczko's ``dot action'' representation on $H^{2i}(\Hess(\Ssf ,h))$ as a combination of trivial representations together with a sum of tabloid representations with coefficients associated to \emph{smaller} Hessenberg varieties in $\Flags(\C^{n-2})$. To illustrate this result, we give an extended example when $n=6$. We also state three technical results -- one (simple) lemma and two propositions -- and give a proof of Theorem~\ref{theorem:induction} based on these three results. Each of the Propositions below are themselves inductive formulas, and are of interest in their own right. The proofs of the two propositions are postponed to Section~\ref{section: the proofs}.
\begin{theorem}\label{theorem:induction}
Let $n$ be a positive integer and $n \geq 3$. Let $h: [n] \to [n]$ be a Hessenberg function such that $I_h$ is abelian, and let $i \geq 0$ be a non-negative integer. In the representation ring $\Rep (\Symm_n)$ we have the equality
\begin{equation}\label{eq:main inductive step}
H^{2i}(\Hess(\Ssf , h)) = c_{(n),i} M^{(n)} + \sum_{T \in\SK_2(\Gamma_h)} \left( \sum_{\substack{\mu \,\vdash (n-2)\\ \mu=(\mu_1,\mu_2)}} c_{\mu, i-\deg(T)}^T M^{(\mu_1+1,\mu_2+1)} \right).
\end{equation}
\end{theorem}
We first illustrate the theorem via an extended example.
\begin{exam}
Let $n=6$ and $h=(3,4,5,6,6,6)$ as in Example~\ref{ex: pictures}. Then $I_h$ is abelian, and $|I_h| = 6$. Thus, there are six maximum dimensional sink sets in $\SK_2(\Gamma_h)$. The graphs below show the acyclic orientation $\omega\in \mathcal{A}_2(\Gamma_h)$ such that $\asc(\omega)=\deg(T)$ for each $T\in \SK_2(\Gamma_h)$. In each case, the sink set $T$ and incident edges are highlighted in red and we display the corresponding acyclic orientation of $\Gamma_h-T\cong \Gamma_{h_T}$ on the right.
\vspace*{.15in}
\[\xymatrix{ {\color{red}1} & {2} \ar@[red][l] \ar@[red]@/^1.5pc/[rr] & 3 \ar@[red][r] \ar[l] \ar@[red]@/_1.5pc/[ll] & {\color{red}4} & {5} \ar@/_1.5pc/[ll] \ar@[red][l] & {6} \ar[l] \ar@[red]@/_1.5pc/[ll] &
1 & 2 \ar[l] & 3 \ar[l] & 4 \ar[l]
}\]
\vspace*{.05in}
\[\xymatrix{ {\color{red}1} & {2} \ar@[red][l] & 3 \ar[l] \ar@[red]@/_1.5pc/[ll] \ar@[red]@/^1.5pc/[rr] & {4} \ar@/_1.5pc/[ll] \ar@[red][r] \ar[l] & {\color{red}5} & {6} \ar@[red][l] \ar@/_1.5pc/[ll] &
1 & 2 \ar[l] & 3 \ar[l] \ar@/_1.5pc/[ll] & 4 \ar[l]
}\]
\vspace*{.05in}
\[\xymatrix{ {\color{red}1} & {2} \ar@[red][l] & 3 \ar[l] \ar@[red]@/_1.5pc/[ll] & {4} \ar[l] \ar@/_1.5pc/[ll] \ar@[red]@/^1.5pc/[rr] & { 5} \ar[l] \ar@/_1.5pc/[ll] \ar@[red][r] & {\color{red}6} &
1 & 2 \ar[l] & 3 \ar@/_1.5pc/[ll] \ar[l] & 4 \ar@/_1.5pc/[ll] \ar[l]
}\]
\vspace*{.05in}
\[\xymatrix{ {1} \ar@[red][r] & {\color{red}2} & 3 \ar@/_1.5pc/[ll] \ar@[red][l] \ar@[red]@/^1.5pc/[rr] & {4} \ar@[red]@/_1.5pc/[ll] \ar@[red][r] \ar[l] & {\color{red}5} & {6} \ar@[red][l] \ar@/_1.5pc/[ll] &
1 & 2 \ar[l] & 3 \ar[l] & 4 \ar[l]
}\]
\vspace*{.05in}
\[\xymatrix{ {1} \ar@[red][r] & {\color{red}2} & 3 \ar@[red][l] \ar@/_1.5pc/[ll] & {4} \ar[l] \ar@[red]@/_1.5pc/[ll] \ar@[red]@/^1.5pc/[rr] & { 5} \ar[l] \ar@/_1.5pc/[ll] \ar@[red][r] & {\color{red}6} &
1 & 2 \ar[l] & 3 \ar[l] & 4 \ar@/_1.5pc/[ll] \ar[l]
}\]
\vspace*{.05in}
\[\xymatrix{ {1} \ar@[red]@/^1.5pc/[rr] & {2} \ar@[red][r] \ar[l] & {\color{red}3} & {4} \ar@[red][l] \ar@/_1.5pc/[ll] \ar@[red]@/^1.5pc/[rr] & { 5} \ar[l] \ar@[red]@/_1.5pc/[ll] \ar@[red][r] & {\color{red}6} &
1 & 2 \ar[l] & 3 \ar[l] & 4 \ar[l]
}\]
Each of the graphs $\Gamma_{h_T}$ in the right column above corresponds to one of the Hessenberg functions: $(2,3,4,4)$, $(3,3,4,4)$, $(3,4,4,4)$, $(2,4,4,4)$. Since the graphs are symmetric, $\Gamma\setminus\{1,5\} \cong \Gamma\setminus\{2,6\}$ and $\Hess(\Ssf ',(3,3,4,4))\cong \Hess(\Ssf ', (2,4,4,4))$ where $\Ssf '\in \glfrak (n-2,\C)$ is a regular semisimple element. The representation $H^*(\Hess(\Ssf ', h_T))$ for each $T\in \SK_2(\Gamma_h)$ is as shown in the table below. The reader can confirm this using the graded version of Theorem~\ref{thm: irreducible coefficients}, namely~\cite[Theorem~6.3]{ShareshianWachs2016}, together with~\eqref{eq:Mlambda in terms of Slambda}.
\begin{equation*}
\renewcommand{\arraystretch}{1.2}
\begin{array}{c|l|l|l}
\textup{Hessenberg function $h_T$:} & (2,3,4,4) & (3, 3, 4, 4) & (3,4,4,4) \\ \hline
H^0(\Hess(\Ssf ', h_T)) & M^{(4)} & M^{(4)} & M^{(4)}\\
H^2(\Hess(\Ssf ', h_T)) & M^{(4)} + M^{(3,1)} + M^{(2,2)} & 2M^{(4)}+ M^{(3,1)} & 3M^{(4)}\\
H^4(\Hess(\Ssf ', h_T)) & M^{(4)}+ M^{(3,1)} + M^{(2,2)} & 2M^{(4)}+ 2 M^{(3,1)} & 4M^{(4)} + M^{(3,1)} \\
H^6(\Hess(\Ssf ', h_T))& M^{(4)} & 2M^{(4)}+ M^{(3,1)} & 4M^{(4)} + M^{(3,1)}\\
H^8(\Hess(\Ssf ', h_T))& \empty & M^{(4)} & 3M^{(4)}\\
H^{10}(\Hess(\Ssf ', h_T)) & \empty & \empty & M^{(4)}
\end{array}
\end{equation*}
Next we see that $\deg(\{1,4\}) = \deg(\{ 1,5 \}) = \deg(\{1,6\}) = 2$, $\deg(\{ 2,5 \})=\deg(\{ 2,6 \}) = 3$, and $ \deg(\{3,6\}) = 4$ from the graphs above. We now have all the information we need to compute $H^*(\Hess(\Ssf , h))$ in all degrees as the shifted sum of $M^{(\mu_1+1, \mu_2+1)}$'s where $M^{(\mu_1,\mu_2)}$ appears in the representations above. The next two tables show how to shift these representations using $\deg(T)$ in order to obtain $H^*(\Hess(\Ssf ,h))$.
\begin{equation*}
\renewcommand{\arraystretch}{1.2}
\begin{array}{c | c | c | c }
\textup{$T\in \SK_2(\Gamma_h)$:} & \{ 1, 4\} & \{ 1, 5\} & \{ 1,6 \} \\ \hline
H^2(\Hess(\Ssf ,h)) & & & \\
H^4(\Hess(\Ssf ,h)) & M^{(5,1)} & M^{(5,1)} & M^{(5,1)} \\
H^6(\Hess(\Ssf ,h)) & M^{(5,1)} \Mk + \Mk M^{(4,2)}\Mk +\Mk M^{(3,3)} & 2M^{(5,1)}+ M^{(4,2)} & 3M^{(5,1)} \\
H^8(\Hess(\Ssf ,h)) & M^{(5,1)}\Mk +\Mk M^{(4,2)}\Mk +\Mk M^{(3,3)} & 2M^{(5,1)}\Mk +\Mk 2 M^{(4,2)} & 4M^{(5,1)} \Mk +\Mk M^{(4,2)} \\
H^{10}(\Hess(\Ssf ,h)) & M^{(5,1)} & 2M^{(5,1)}+ M^{(4,2)} & 4M^{(5,1)} \Mk + \Mk M^{(4,2)} \\
H^{12}(\Hess(\Ssf ,h)) & \empty & M^{(5,1)} & 3M^{(5,1)} \\
H^{14}(\Hess(\Ssf ,h)) & \empty & \empty & M^{(5,1)} \\
H^{16}(\Hess(\Ssf ,h)) & & & \\ \hline \hline
\textup{$T\in \SK_2(\Gamma_h)$:} & \{ 2, 5 \} & \{ 3,6 \} & \{ 2,6 \}\\ \hline
H^4(\Hess(\Ssf ,h)) & & & \\
H^6(\Hess(\Ssf ,h)) & M^{(5,1)} & & M^{(5,1)} \\
H^8(\Hess(\Ssf ,h)) & M^{(5,1)}\Mk +\Mk M^{(4,2)}\Mk + \Mk M^{(3,3)} & M^{(5,1)} & 2M^{(5,1)}\Mk + \Mk M^{(4,2)} \\
H^{10}(\Hess(\Ssf ,h)) & M^{(5,1)}\Mk +\Mk M^{(4,2)} \Mk +\Mk M^{(3,3)} & M^{(5,1)} \Mk + \Mk M^{(4,2)} \Mk + \Mk M^{(3,3)} & 2M^{(5,1)}\Mk + \Mk 2M^{(4,2)} \\
H^{12}(\Hess(\Ssf ,h)) & M^{(5,1)} & M^{(5,1)}\Mk +\Mk M^{(4,2)} \Mk +\Mk M^{(3,3)} & 2M^{(5,1)} \Mk +\Mk M^{(4,2)} \\
H^{14}(\Hess(\Ssf ,h)) & \empty & M^{(5,1)} & M^{(5,1)} \\
H^{16}(\Hess(\Ssf ,h)) & \empty & \empty & \empty \\
\end{array}
\end{equation*}
For example, we get,
\[
H^{8}(\Hess(\Ssf ,h)) = c_{(6),4} M^{(6)}+11 M^{(5,1)} + 6 M^{(4,2)} + 2M^{(3,3)}.
\]
\end{exam}
As mentioned above, we prove Theorem~\ref{theorem:induction} using the following three results, recorded as a Lemma and two Propositions, each of which are themselves inductive formulas. Indeed, Lemma~\ref{lem: two-part induction} expresses the number $N_{\lambda', \lambda}$ associated to two partitions of $n$ in terms of the same value associated to two partitions of the smaller integer $n-2$. Proposition~\ref{prop: reg step} gives a formula for the Poincar\'e polynomial of $\Hess(\mathsf{N},h) \subseteq \Flags(\C^n)$ in terms of Poincar\'e polynomials of regular nilpotent Hessenberg varieties in $\Flags(\C^{n-2})$, and Proposition~\ref{prop:induction step}
is of a similar flavor.
Throughout the remainder of this section and the next, for a positive integer $n \geq 3$, we let $\mathsf{N}'$ and $\Ssf '$ denote choices of regular nilpotent and regular semisimple elements, respectively, in $\glfrak (n-2,\C)$.
\begin{lemma}\label{lem: two-part induction}
Let $n$ be a positive integer and $n \geq 3$. Let $\mu=(\mu_1, \mu_2)$ and $\mu' = (\mu'_1, \mu'_2)$ be any partitions of $n-2$ with at most $2$ parts. Then
\begin{equation}\label{eq: N equality}
\dim \left(M^{(\mu_1+1, \mu_2+1)}\right)^{\Symm_{(\mu_1'+1, \mu_2'+1)}} =
\dim \left(M^{\mu}\right)^{\Symm_{\mu'}} +1.
\end{equation}
\end{lemma}
\begin{proof}
Recall from~\eqref{eq:def Nlambdanu} and the related discussion that $N_{\mu, \mu'} = \dim (M^{\mu})^{\Symm_{\mu'}}$
and the matrix $N=(N_{\mu, \mu'})$ is symmetric. To prove the lemma it clearly suffices to prove the formula
\begin{equation}\label{eq: formula for N}
N_{(a,b),(c,d)} = b+1
\end{equation}
for any $a,b,c,d \geq 0$ integers with $a+b=c+d=k$ for a fixed positive integer $k$ and $a \geq c$, since this would imply that the LHS and RHS of~\eqref{eq: N equality} are equal, thus proving~\eqref{eq: N equality}. To prove~\eqref{eq: formula for N}, we recall that in general $N_{\mu,\mu'}$ is the number of matrices $A=(a_{ij})$ with $a_{ij} \geq 0$ integers such that $\row(A)=\mu$ and $\col(A)=\mu'$ (see~\cite[Corollary~7.12.3]{Stanley-EnumCombVol2}), where $\row(A)$ is the vector obtained from a matrix by taking row-wise sums, and $\col(A)$ is the vector obtained by taking column-wise sums,
\[
\row(A):=(r_1, r_2,\ldots) % \textup{ where } r_i=\sum_j a_{ij}
\]
where $r_i = \sum_j a_{ij}$
and
\[
\col(A):=(c_1, c_2,\ldots ) % \textup{ where } c_j=\sum_i a_{ij}.
\]
where $c_j = \sum_i a_{ij}$.
In our case, since both $(a,b)$ and $(c,d)$ have only $2$ parts, this is equal to the number of matrices
\[
\begin{pmatrix} \alpha & \beta \\ \gamma & \delta \end{pmatrix} \textup{ such that }\alpha+\beta=a, \gamma + \delta = b, \alpha + \gamma=c, \textup{ and } \beta+\delta = d.
\]
It is both straightforward to see and well-known that this is the number of ways to fill a Young diagram of shape $(a,b)$ with $c$ many $1$'s and $d$ many $2$'s, such that the rows are weakly increasing. Since there are only $2$ rows in this Young diagram, the filling is completely determined by the number of boxes in the 2nd row which contain a $1$. Since $a \geq c$, it follows that $d\geq b$, and this number of boxes is between $0$ and $b$. Thus there are precisely $b+1$ many such fillings, proving~\eqref{eq: formula for N} as desired.
\end{proof}
\begin{rema} It is also a well-known fact that $\dim(M^{\lambda})^{\Symm_{\mu}} = |\Symm_{\mu}\backslash\Symm_n/\Symm_{\lambda}|$. When both $\lambda$ and $\mu$ have two parts, there is a set of coset representatives for $\Symm_{\mu}\backslash\Symm_n/\Symm_{\lambda}$ known as \emph{bigrassmannian permutations}. These elements play an important role in the combinatorial properties of the symmetric group and in Schubert calculus.
\end{rema}
For any $\Xsf \in \glfrak (n,\C)$ and Hessenberg function $h:[n] \to [n]$, we denote by $P(\Hess(\Xsf ,h),t)$ the Poincar\'e polynomial (with variable $t$) associated to the Hessenberg variety $\Hess(\Xsf ,h)$. For the varieties considered in this paper, all Poincar\'e polynomials are concentrated in even degrees.
\begin{prop}\label{prop: reg step} Let $n$ be a positive integer and $n\geq 3$. Let $h: [n]\to [n]$ be a Hessenberg function such that $I_h$ is abelian.
Let $\mathsf{N}$ be a regular nilpotent element of $\glfrak (n,\C)$ and $\mathsf{N}'$ be a regular nilpotent element of $\glfrak (n-2, \C)$. Then
\[
P(\Hess(\mathsf{N},h), t) = \sum_{i=0}^{|\Phi_h^-|} c_{(n),i} \,t^{2i} +\sum_{T\in \SK_2(\Gamma_h)} t^{2\deg(T)} P(\Hess(\mathsf{N}', h_T), t).
\]
In particular, the $2i$-th Betti number of $\Hess(\mathsf{N},h)$ satisfies
\[
\dim H^{2i}(\Hess(\mathsf{N},h)) = c_{(n),i}+\sum_{T\in \SK_2(\Gamma_h)} \dim H^{2i-2\deg(T)}(\Hess(\mathsf{N}',h_T)).
\]
\end{prop}
\begin{prop}\label{prop:induction step} Let $n$ be a positive integer, $n\geq 3$. Let $h: [n]\to [n]$ be a Hessenberg function such that $I_h$ is abelian. Let $\Xsf _{\nu}$ be the regular element of $\glfrak (n,\C)$ associated to $\nu = (\mu_1+1, \mu_2+1) \vdash n$ and $\Xsf _{\mu}$ be a regular element of $\glfrak (n-2, \C)$ associated to $\mu = (\mu_1, \mu_2) \vdash (n-2)$. Then
\begin{equation}\label{eq:induction equation}
P(\Hess(\Xsf _{\nu}, h), t) = P(\Hess(\mathsf{N}, h), t) + \sum_{T\in \SK_2(\Gamma_h)} t^{2\deg(T)} P(\Hess(\Xsf _{\mu}, h_T), t).
\end{equation}
In particular, the $2i$-th Betti number of $\Hess(\Xsf _{\nu}, h)$ satisfies
\begin{multline*}
\dim H^{2i}(\Hess(\Xsf _{\nu},h))\\ =
\dim H^{2i}(\Hess(\mathsf{N}, h))
+ \sum_{T\in \SK_2(\Gamma_h)} \dim H^{2i-2\deg(T)} (\Hess(\Xsf _{\mu}, h_T)).
\end{multline*}
\end{prop}
Below, we give a proof of Theorem~\ref{theorem:induction} using the three results above.
The basic idea of the proof is as follows. A priori, the assertion of Theorem~\ref{theorem:induction} is an equality in the representation ring $\Rep (\Symm_n)$. We first reduce this problem to a collection of equalities of integers by taking $\Symm_\nu$-invariants for varying $\nu \vdash n$ and using Proposition~\ref{prop: Brosnan Chow}. Next, we repeatedly use Brosnan and Chow's Theorem~\ref{thm: BrosnanChow main thm} to relate these $\Symm_\nu$-invariant subspaces to the Betti numbers of other regular Hessenberg varieties. In this manner, the problem is reduced to an induction on the Poincar{\'e} polynomials of regular Hessenberg varieties.
\begin{proof}[Proof of Theorem~\ref{theorem:induction}]
Since $h$ is an abelian Hessenberg function, we know from Corollary~\ref{corollary: max sink set gives bound on lambda} that $c_{\lambda,i}=0$ for all $\lambda \vdash n$ with $3$ or more parts. In other words, by the abelian assumption, we know
\[
H^{2i}(\Hess(\Ssf ,h)) = \sum_{\substack{\lambda \vdash n \\ \lambda \textup{ has at most $2$ parts} }} c_{\lambda,i} M^\lambda.
\]
Therefore the LHS of~\eqref{eq:main inductive step}, %\eqref{theorem:induction}
can be written as a linear combination of $M^{\lambda}$'s for $\lambda$ with at most $2$ parts.
By inspection, the same is true of the RHS of~\eqref{eq:main inductive step} %\eqref{theorem:induction}.
An application of Lemma~\ref{lemma: restrict to submatrix} implies that in order to prove~\eqref{eq:main inductive step} %\eqref{theorem:induction}
it suffices to prove the equality
\begin{multline} %\label{eq: nu-fixed pts}
%\begin{split}
% &
\dim (H^{2i}(\Hess(\Ssf ,h)))^{\Symm_\nu} = c_{(n),i} \dim (M^{(n)})^{\Symm_\nu} \\
% & \quad\quad\quad\quad\quad\quad\quad\quad\quad
+ \sum_{T\in \SK_2(\Gamma_h)} \left( \sum_{\substack{\mu\vdash (n-2)\\ \mu=(\mu_1,\mu_2)}} c_{\mu, i-\deg(T)}^T \dim (M^{(\mu_1+1, \mu_2+1)})^{\Symm_{\nu}}\right)
%\end{split}
\end{multline}
for all $\nu \vdash n$ with at most $2$ parts.
Note that since $M^{(n)}$ is the trivial $1$-dimensional $\Symm_n$-representation, we have $(M^{(n)})^{\Symm_\nu} = M^{(n)}$ for all $\nu \vdash n$, and in particular, $\dim (M^{(n)})^{\Symm_\nu} = 1$ for all $\nu \vdash n$. We also know from Theorem~\ref{thm: BrosnanChow main thm} that
\[
\dim (H^{2i}(\Hess(\Ssf ,h)))^{\Symm_\nu} = \dim H^{2i}(\Hess(\Xsf _{\nu},h))
\]
where $\Xsf _\nu$ denotes a regular element of $\glfrak (n,\C)$ with Jordan block sizes given by $\nu \vdash n$. It follows that it suffices to prove
\begin{multline}\label{eq: nu-fixed pts2}
\dim H^{2i} (\Hess(\Xsf _{\nu} , h))\\ = c_{(n),i}
+ \sum_{T\in \SK_2(\Gamma_h)} \left(\sum_{\substack{\mu \vdash (n-2)\\ \mu=(\mu_1,\mu_2)}} c_{\mu, i-\deg(T)}^T \dim (M^{(\mu_1+1, \mu_2+1)})^{\Symm_{\nu}}\right)
\end{multline}
for all $\nu \vdash n$ with at most two parts.
To prove~\eqref{eq: nu-fixed pts2} we take cases.
First, we consider~\eqref{eq: nu-fixed pts2} for the unique case in which $\nu \vdash n$ has only one part, namely $\nu=(n)$. In this case $\Symm_{\nu}=\Symm_n$ and $\Xsf _{\nu}$ is the $n\times n$ nilpotent matrix in Jordan form with a single Jordan block.
%regular nilpotent element $\mathsf{N}$ of $\glfrak (n,\C)$.
Recall also that $\dim(M^{(\mu_1+1,\mu_2+1)})^{\Symm_n} =1$ since the multiplicity of the trivial representation in any $M^{(\mu_1+1,\mu_2+1)}$ is $1$. Thus, we first observe that the RHS of~\eqref{eq: nu-fixed pts2} is
\begin{equation}\label{eq: (n)-case}
c_{(n),i} + \sum_{T\in \SK_2(\Gamma_h)}\; \sum_{\substack{\mu \vdash (n-2)\\ \mu=(\mu_1,\mu_2)}} c_{\mu, i-\deg(T)}^T
\end{equation}
and second, to simplify~\eqref{eq: nu-fixed pts2} further, we recall that the coefficients $c^T_{\mu, i-\deg(T)}$ appearing there are associated to $H^{2i-2\deg(T)}(\Hess(\Ssf ', h_T))$ by the equality
\[
H^{2i-2\deg(T)}(\Hess(\Ssf ', h_T)) = \sum_{\mu \vdash (n-2)} c^T_{\mu, i-\deg(T)} M^{\mu}.
\]
Now the assumption that $I_h$ is abelian implies $c^T_{\mu, i-\deg(T)} = 0$ for all $\mu \,\vdash (n-2)$ with more than $2$ parts by Lemma~\ref{lem: maximal sink set induction}. Thus we have
\[
H^{2i-2\deg(T)}(\Hess(\Ssf ', h_T)) = \sum_{\substack{\mu \vdash (n-2)\\ \mu=(\mu_1,\mu_2)}} c^T_{\mu, i-\deg(T)} M^\mu
\]
and taking $\Symm_{n-2}$-invariants we obtain
\begin{align*}
\dim H^{2i-2\deg(T)}(\Hess(\mathsf{N}', h_T)) &= \dim (H^{2i-2\deg(T)}(\Hess(\Ssf ', h_T)))^{\Symm_{n-2}}\\
&= \sum_{\substack{\mu \vdash (n-2)\\ \mu=(\mu_1,\mu_2)}} c^T_{\mu, i-\deg(T)}
\end{align*}
\looseness-1
where the first equality follows from Theorem~\ref{thm: BrosnanChow main thm}.
Therefore~\eqref{eq: (n)-case} can be rewritten as
\begin{equation*}\label{eq: nu-fixed pts trivial piece}
c_{(n),i} + \sum_{T \in\SK_2(\Gamma_h)} \dim H^{2i-2\deg(T)}(\Hess(\mathsf{N}', h_T))
\end{equation*}
and now~\eqref{eq: nu-fixed pts2} follows for the case $\nu=(n)$ and $\Xsf _\nu=\mathsf{N}$ by Proposition~\ref{prop: reg step}.
Next, we consider the case in which $\nu = (\mu'_1+1, \mu_2'+1)\vdash n$ for some $\mu'=(\mu_1', \mu_2') \vdash (n-2)$, \ie the case in which $\nu$ has exactly two parts. Using an argument similar to the above, the RHS of~\eqref{eq: nu-fixed pts2} for $\nu=(\mu_1'+1,\mu_2'+1) \vdash n$ can be expressed as
\begin{multline*}
c_{(n),i} + \sum_{T\in \SK_2(\Gamma_h)} \sum_{\substack{\mu\vdash (n-2)\\ \mu=(\mu_1,\mu_2)}} c_{\mu, i-\deg(T)}^T \dim(M^{(\mu_1+1, \mu_2+1)})^{\Symm_{(\mu_1'+1,\mu_2'+1)}} \\
= c_{(n),i} + \sum_{T\in \SK_2(\Gamma_h)} \sum_{\substack{\mu\vdash (n-2)\\ \mu=(\mu_1,\mu_2)}} c_{\mu, i-\deg(T)}^T \left( \dim (M^{\mu})^{\Symm_{\mu'}} + 1 \right)
\end{multline*}
by Lemma~\ref{lem: two-part induction}. Now the above equation becomes:
\begin{multline*}
c_{(n),i} + \sum_{T\in \SK_2(\Gamma_h)} \sum_{\substack{\mu\vdash (n-2)\\ \mu=(\mu_1,\mu_2)}} c_{\mu, i-\deg(T)}^T + \sum_{T\in \SK_2(\Gamma_h)} \sum_{\substack{\mu\vdash (n-2)\\ \mu=(\mu_1,\mu_2)}} c_{\mu, i-\deg(T)}^T \dim (M^{\mu})^{\Symm_{\mu'}}\\
= \dim(H^{2i}(\Hess(\mathsf{N}, h))) + \sum_{T\in \SK_2(\Gamma_h)} \sum_{\substack{\mu\vdash (n-2)\\ \mu=(\mu_1,\mu_2)}} c_{\mu, i-\deg(T)}^T \dim (M^{\mu})^{\Symm_{\mu'}}
\end{multline*}
\looseness-1
where in the last expression, both $\mu=(\mu_1, \mu_2)$ and $\mu'=(\mu_1', \mu_2')$ are partitions of $n-2$, and the last equality follows from the case $\nu=(n)$ proven above.
Since $\nu = (\mu_1'+1, \mu_2'+1)$, it follows from Proposition~\ref{prop:induction step} that to prove~\eqref{eq: nu-fixed pts2} it is enough to prove
\begin{equation}\label{eq: two-parts}
\dim H^{2i-2\deg(T)} (\Hess(\Xsf _{\mu'}, h_T)) = \sum_{\substack{\mu\vdash (n-2)\\ \mu=(\mu_1,\mu_2)}} c_{\mu, i-\deg(T)}^T \dim (M^{\mu})^{\Symm_{\mu'}}
\end{equation}
for each $T\in \SK_2(\Gamma_h)$. To see this, recall that the coefficients $c_{\mu, i-\deg(T)}^T$ are defined by the equality
\[
H^{2i - 2\deg(T)}(\Hess(\Ssf ', h_T)) = \sum_{\mu\vdash (n-2)} c^T_{\mu, i-\deg(T)} M^\mu
\]
in $\Rep (\Symm_n)$. Moreover, as in the argument above, since $I_h$ is abelian we know from Lemma~\ref{lem: maximal sink set induction} that $c^T_{\mu, i-\deg(T)} = 0$ for all $\mu \vdash (n-2)$ with more than $2$ parts. This observation, together with taking $\Symm_{\mu'}$-invariants for $\mu'=(\mu_1',\mu_2') \vdash (n-2)$, yields
\begin{equation}\label{eq: mu-fixed points n-2}
\dim (H^{2i-2\deg(T)}(\Hess(\Ssf ', h_T)))^{\Symm_{\mu'}} =
\sum_{\substack{\mu\vdash (n-2)\\ \mu=(\mu_1,\mu_2)}} c_{\mu, i-\deg(T)}^T \dim(M^\mu)^{\Symm_{\mu'}}.
\end{equation}
Now another application of Theorem~\ref{thm: BrosnanChow main thm} on the LHS of~\eqref{eq: mu-fixed points n-2} yields the equality in~\eqref{eq: two-parts} as desired. Hence~\eqref{eq: nu-fixed pts2} holds for all $\nu$ with at most $2$ parts, concluding the proof.
\end{proof}
%%%%%%%%%%%%%%%%%%%%%
\section{Proofs of Propositions~\ref{prop: reg step} and~\ref{prop:induction step} and the abelian graded Stanley--Stembridge conjecture}\label{section: the proofs}
\looseness-1
In this section, we prove the two main inductive propositions from the previous section. The arguments involve the combinatorics of $\Symm_n$ and root systems. Given all of the preparation in the previous sections, the arguments are lengthy but not particularly difficult. \defg{Throughout this section we work in the setting of Propositions~\ref{prop: reg step} and~\ref{prop:induction step} and Theorem~%\ref{theorem:main}
\ref{theorem:induction}. Thus we always assume that $n \geq 3$, that $h:[n]\to [n]$ is a Hessenberg function such that $I_h$ is abelian, and that any partition has at most two parts.}
\subsection{The proof of Proposition~\ref{prop: reg step}}
We begin with a proof of Proposition~\ref{prop: reg step}. This is much simpler than the proof of Proposition~\ref{prop:induction step}, which occupies the bulk of this section, due to the fact that the cohomology of the regular nilpotent Hessenberg variety $\Hess(\mathsf{N},h)$ is related to the subspace of $H^*(\Hess(\Ssf ,h))$ which is invariant under the entire group $\Symm_n$, as opposed to a Young subgroup $\Symm_\nu$ for some $\nu \vdash n$. The fact that $\dim(M^\lambda)^{\Symm_n}=1$ for any partition $\lambda$ then allows us to use Theorem~\ref{thm: Stanley} to translate our problem into the language of acyclic orientations. Our ``sink-set decomposition''~\eqref{eq: sink set decomposition}, and the inductive description of acyclic orientations given in Proposition~\ref{proposition: max sink set induction}, then yields the result. We now make this sketch precise.
\begin{proof}[Proof of Proposition~\ref{prop: reg step}]
We begin by observing that
\[
P(\Hess(\mathsf{N},h), t) = \sum_{i=0}^{\lvert \Phi_h^- \rvert} \dim H^{2i}(\Hess(\mathsf{N},h)) t^{2i} =
\sum_{i=0}^{\lvert \Phi_h^- \rvert} \dim (H^{2i}(\Hess(\Ssf ,h)))^{\Symm_n}t^{2i}
\]
where the first equality is the definition of the Poincar\'e polynomial, together with the fact that
$\dim_{\C}(\Hess(\mathsf{N},h)) = \lvert \Phi_h^- \rvert$~\cite[Corollary~2.7]{Precup2015}, and the second equality is by Brosnan and Chow's
Theorem~\ref{thm: BrosnanChow main thm}.
Since $H^{2i}(\Hess(\Ssf ,h)) = \sum_{\nu\, \vdash n} c_{\nu,i} M^\nu$ by definition of the coefficients $c_{\nu,i}$, by taking $\Symm_n$-invariants we obtain
\begin{equation*}
\begin{aligned}
P(\Hess(\mathsf{N},h),t)
& = \sum_{i=0}^{\lvert \Phi_h^- \rvert} \left( \sum_{\nu \vdash n} c_{\nu,i} \dim(M^\nu)^{\Symm_n} \right) t^{2i} \\
& = \sum_{i=0}^{\lvert \Phi_h^- \rvert} \left( \sum_{\nu \vdash n} c_{\nu,i} \right) t^{2i} \;\;
\textup{ since $\dim (M^{\nu})^{\Symm_n} = 1$ } \\
& = \sum_{i=0}^{\lvert \Phi_h^- \rvert} \left( \sum_{\substack{\nu\vdash n \textup{ and } \nu \\ \textup{has at most $2$ parts}}} c_{\nu,i} \right) t^{2i} \;\; \textup{ by Lemma~\ref{lem: maximal sink set induction} }\\
%since $I_h$ is abelian} \\
& = \sum_{i=0}^{\lvert \Phi_h^- \rvert} c_{(n),i} t^{2i} + \sum_{i=0}^{\lvert \Phi_h^- \rvert} \left( \sum_{\substack{\nu\vdash n \textup{ and } \nu \\ \textup{has $2$ parts}}} c_{\nu,i} \right) t^{2i}.
\end{aligned}
\end{equation*}
A similar argument yields
\[
P(\Hess(\mathsf{N}', h_T),t) = \sum_{i=0}^{\lvert \Phi_{h_T}^- \rvert} \left( \sum_{\mu \vdash (n-2)} c^T_{\mu, i} \right) t^{2i}
\]
for any $T \in\SK_2(\Gamma_h)$. The above equalities imply that in order to prove the proposition it suffices to prove
\begin{equation}\label{eq: triv case0}
\sum_{i=0}^{|\Phi_h^-|} \left( \sum_{\substack{\nu\vdash n \textup{ and } \nu \\ \textup{has $2$ parts}}} c_{\nu,i} \right) \,t^{2i} = \sum_{T\in \SK_2(\Gamma_h)} t^{2\deg(T)}
\sum_{i=0}^{\lvert \Phi_{h_T}^- \rvert} \left( \sum_{\mu \vdash (n-2)} c^T_{\mu, i} \right) t^{2i}.
\end{equation}
Applying Theorem~\ref{thm: Stanley} and our previous combinatorial analysis of acyclic orientations we have
\begin{equation}\label{eq: triv case1}
\sum_{i=0}^{|\Phi_h^-|} \left( \sum_{\substack{\nu\vdash n \textup{ and } \nu \\ \textup{has $2$ parts}}} c_{\nu,i}\,\right) \, t^{2i} = \sum_{i=0}^{|\Phi_h^-|} | \{ \omega \in \mathcal{A}_2(\Gamma_h) \mid \asc(\omega)=i \}|\,t^{2i}.
\end{equation}
By our sink-set decomposition from~\eqref{eq: sink set decomposition} and an application of Proposition~\ref{proposition: max sink set induction}, the above equation becomes:
\begin{multline}\label{eq: triv case1b}
%\begin{aligned}
\sum_{T\in \SK_2(\Gamma_h)} \sum_{i=0}^{|\Phi_h^-|} |\{\omega\in \mathcal{A}_2(\Gamma_h) \mid \asc(\omega)=i \textup{ and } \sk(\omega)=T\} |\,t^{2i} \\
%&\quad\quad\quad\quad\quad\quad\quad\quad\quad\textup{by our sink-set decomposition from~\eqref{eq: sink set decomposition}} \\
= \sum_{T\in \SK_2(\Gamma_h)} \sum_{i=\deg(T)}^{|\Phi_h^-|} |\{\omega_T \in \mathcal{A}(\Gamma_{h_T}) \mid \asc(\omega_T)
%\\
=i-\deg(T)\} |\,t^{2i}\\
%%%%&\quad\quad\quad\quad\quad\quad\quad\quad\quad\textup{ by Proposition~\ref{proposition: max sink set induction}, } \\
%\end{aligned}
\end{multline}
where the sum over the index $i$ ranges between $\deg(T)$ and $\lvert \Phi_h^- \rvert$ because it follows from Proposition~\ref{proposition: max sink set induction} that if $\sk(\omega)=T$ then $\asc(\omega)\geq \deg(T)$.
For each $T\in \SK_2(\Gamma_h)$ we shift the index $i$ of the sum appearing on the RHS of~\eqref{eq: triv case1b} to get
\begin{multline}\label{eq: triv case2}
\sum_{i=\deg(T)}^{|\Phi_h^-|} |\{\omega_T \in \mathcal{A}(\Gamma_{h_T}) \mid \asc(\omega_T) =i-\deg(T)\} |\,t^{2i}\\
\begin{aligned}
&= t^{2\deg(T)} \sum_{i=0}^{|\Phi_h^-|-\deg(T)} |\{ \omega_T\in \mathcal{A}(\Gamma_{h_T}) \mid \asc(\omega_T) = i \}|\,t^{2i} \\
&= t^{2\deg(T)} \sum_{i=0}^{|\Phi_{h_T}^-|} |\{ \omega_T\in \mathcal{A}(\Gamma_{h_T}) \mid \asc(\omega_T) = i \}|\,t^{2i}
\end{aligned}
\end{multline}
where the last equality follows from that fact that $|\Phi_h^-|-\deg(T) \geq |\Phi_{h_T}^-|$ by Lemma~\ref{lem: deg(T) property1} and $|\{ \omega\in \mathcal{A}(\Gamma_{h_T}) \mid \asc(\omega) = i \}|=0$ for all $i>|\Phi_{h_T}^-|$ since $\asc(\omega_T)\leq |E(\Gamma_{h_T})|=|\Phi_{h_T}^-|$ for all $\omega_T\in \mathcal{A}(\Gamma_{h_T})$.
Putting together Corollary~\ref{corollary: sum clambda for all i} with the above equation~\eqref{eq: triv case2} we obtain
\begin{multline}\label{eq: triv case3}
%\begin{aligned}
\sum_{i=\deg(T)}^{|\Phi_h^-|} |\{\omega_T \in \mathcal{A}(\Gamma_{h_T}) \mid \asc(\omega_T)
=i-\deg(T)\} |\,t^{2i}\\
= t^{2\deg(T)} \sum_{i=0}^{|\Phi^-_{h_T}|} \left( \sum_{\mu \vdash (n-2)} c_{\mu,i}^T\,\right) \, t^{2i}
%\end{aligned}
\end{multline}
for each $T\in \SK_2(\Gamma_h)$. Finally, Equations~\eqref{eq: triv case1},~\eqref{eq: triv case1b}, and~\eqref{eq: triv case3} together imply~\eqref{eq: triv case0} as desired.
\end{proof}
%%%%%%%%%%%%%%%%%%%%%
\subsection{Proof of Proposition~\ref{prop:induction step}} \label{sec: main proofs}
In this section we prove Proposition~\ref{prop:induction step}. This argument is the technical heart of this paper and is rather involved, so a sketch of the overall picture may be helpful. Our starting point is the explicit and purely combinatorial formula for the Betti numbers $b_{2i}$ of the regular Hessenberg variety $\Hess(\Xsf _{\nu}, h)$ given by the second author in~\cite{Precup2016} which expresses $b_{2i}$ as the number of permutations $w\in \Symm_n$ satisfying certain conditions related to $\nu \vdash n$ and $h$. Our assumptions that $I_h$ is abelian and that all partitions have at most $2$ parts simplifies the combinatorics of the Poincar\'e polynomial. From there, the remainder of the argument is a careful analysis of the sets of permutations in question, which boils down to the combinatorics of $\Symm_n$ and the root system of type A. There are two points worth mentioning. First, it turns out to be important that the formula for the Poincar\'e polynomial in~\cite{Precup2016} is valid for any two-part composition $n=\nu_1+\nu_2$ where $\nu=(\nu_1,\nu_2)$ is not necessarily a partition, \ie we may have $\nu_1 < \nu_2$ instead of the more customary $\nu_1 \geq \nu_2$. Accordingly, in this section, the standing hypotheses on $\nu=(\nu_1, \nu_2)$ are as follows:
\[
\nu_1, \nu_2 \in \Z, \quad \nu_1+\nu_2=n, \quad \nu_1 \geq 0, \nu_2 \geq 0.
\]
Allowing this level of generality allows us to prove an important special case in our arguments below. Secondly, in order to reduce the argument to the special case mentioned in the previous sentence, we make use of a set of shortest coset representatives (also used by the second author in~\cite{Precup2016}) for the right cosets $W\backslash \Symm_n$ where $W\subseteq \Symm_n$ is a certain Young subgroup of $\Symm_n$.
To begin, we state the formula for the Betti numbers of regular Hessenberg varieties given in~\cite{Precup2016}. We prepare some terminology. For each $w\in \Symm_n$ we define the \defg{inversion set of $w$} as
\[
N^-(w):= \{ \gamma\in \Phi^- \mid w(\gamma)\in \Phi^+ \},
\]
\ie for each $w\in \Symm_n$, the set $N^-(w)$ consists of the negative roots which become positive under the action of $w$. In Lie type A this can be expressed quite concretely. Indeed, let $\gamma=t_i-t_j$ for some $i>j$. The action of $\Symm_n$ on roots is given by
\[
w(t_i - t_j) = t_{w(i)} - t_{w(j)}.
\]
Thus $\gamma = t_i - t_j \in N^-(w)$ if and only if $i>j$ and $w(i)<w(j)$, \ie
\[
N^-(w) = \{ t_i - t_j \in \Phi^- \mid i>j \textup{ and } w(i)<w(j) \}.
\]
% NOTE(review): a span of text appears to have been lost here in extraction
% (likely including the Betti number formula from~\cite{Precup2016} and the
% definition of the set $\mathcal{D}_{\nu}(\beta)$ referenced below); the
% bridging sentence that follows is reconstructed -- confirm against the
% published version.
Suppose $\beta = t_a - t_b \in I_h$, so that $a>b$ and $a>h(b)$. If $w \in \mathcal{D}_{\nu}(\beta)$ then by definition of $\mathcal{D}_{\nu}(\beta)$ we must have
\[
w^{-1}(t_{\nu_1}-t_{\nu_1+1}) = t_{w^{-1}(\nu_1)} - t_{w^{-1}(\nu_1+1)} = t_a - t_b,
\]
or equivalently
\begin{equation}\label{eq:wa and wb}
w(a)=\nu_1 \textup{ and } w(b)=\nu_1+1,
\end{equation}
so the $b$-th entry in the one-line notation of $w$ is $\nu_1+1$ and the $a$-th entry is $\nu_1$.
In the arguments that follow it will be useful to choose a specific element of $\mathcal{D}_\nu(\beta)$ for each $\beta \in I_h$. We define this element as follows.
\begin{defi} \label{def: w-nu-beta}
Suppose $\beta=t_a-t_b \in I_h$. We define a permutation in $\Symm_n$, denoted $w_{\nu, \beta}$, by:
\begin{enumerate}
\item\label{defi7.3_1} $w_{\nu,\beta}(a)=\nu_1$ and $w_{\nu,\beta}(b)=\nu_1+1$ (\ie $w_{\nu,\beta}$ satisfies the condition~\eqref{eq:wa and wb}) and
\item\label{defi7.3_2} the remaining entries in the one-line notation of $w_{\nu,\beta}$ list the integers $[n] \setminus \{\nu_1, \nu_1+1\}$ in increasing order from left to right.
\end{enumerate}
\end{defi}
\begin{exam} \label{ex: w-nu-beta example}
Let $n=6$ and $\beta= t_5-t_2$, so $a=5$ and $b=2$. Let $\nu=(4,2)$. Then $w_{\nu,\beta}(2)=\nu_1+1=5$ and $w_{\nu,\beta}(5)=\nu_1=4$, and the remaining entries are filled, in increasing order, by $[6] \setminus \{4,5\} = \{1,2,3,6\}$. The one-line notation of $w_{\nu,\beta}$ is
$[
\begin{matrix}
1 & \mathbf{5} & 2 & 3 & \mathbf{4} & 6
\end{matrix}]$
where condition~\eqref{eq:wa and wb} determines the entries in bold.
\end{exam}
We need the following.
\begin{lemma} \label{lem: wmin satisfies the Hess. condition}
Let $w_{\nu,\beta}$ be as above and suppose $I_h$ is abelian. Then
\begin{enumerate}
\item\label{lemma7.5_1} If $(i,j)\in \inv(w_{\nu,\beta})$ then $\{ i,j \} \cap \{ a,b \}\neq \emptyset$, and
\item\label{lemma7.5_2} $w_{\nu,\beta} \in \mathcal{D}_{\nu}(\beta)$.
\end{enumerate}
\end{lemma}
\begin{proof}
To prove~\eqref{lemma7.5_1}, we will show the contrapositive, \ie if $\{ i,j \}\cap \{a,b\} = \emptyset $ then $(i,j)\notin \inv(w_{\nu,\beta})$. Suppose $(i,j)$ is such that $i>j$ and $\{i,j\}\cap \{a,b\} = \emptyset$. Since $\{i,j\}\cap \{a,b\} = \emptyset$ we have $\{ w_{\nu,\beta}(i), w_{\nu,\beta}(j) \} \cap \{\nu_1, \nu_1+1\} =\emptyset$, and it follows that $w_{\nu, \beta}(i)>w_{\nu, \beta}(j)$ by condition~\eqref{defi7.3_2} in Definition~\ref{def: w-nu-beta}. Therefore $(i,j) \notin \inv(w_{\nu, \beta})$.
Now we prove~\eqref{lemma7.5_2}. By definition, $w_{\nu, \beta}^{-1}(\alpha_{\nu})=\beta$ so we need only show that $w_{\nu,\beta}^{-1}(J_{\nu}) \subseteq \Phi_h$. We take cases. First consider the case in which $\alpha\in J_{\nu}$ and $\alpha+\alpha_{\nu}\in \Phi$, \ie $\alpha$ and $\alpha_\nu$ correspond to adjacent vertices in the Dynkin diagram. Seeking a contradiction, suppose $w^{-1}_{\nu, \beta}(\alpha)\in I_h$. Since $\alpha+\alpha_\nu \in \Phi$ we also have
$w_{\nu,\beta}^{-1}(\alpha + \alpha_\nu) = w_{\nu,\beta}^{-1}(\alpha) + w_{\nu,\beta}^{-1}(\alpha_\nu) \in I_h$ since $I_h$ is an ideal. On the other hand, this is a contradiction since $I_h$ is abelian.
Next, consider the case in which $\alpha+\alpha_{\nu}\notin \Phi$, \ie $\alpha$ and $\alpha_\nu$ are not adjacent in the Dynkin diagram. This means that $\alpha=t_{i}-t_{i+1}$ where $\{ i,i+1 \}\cap \{ \nu_1, \nu_1+1 \}= \emptyset$. Condition~\eqref{defi7.3_2} in Definition~\ref{def: w-nu-beta} implies
$w_{\nu, \beta}^{-1}(i) < w_{\nu, \beta}^{-1}(i+1)$, so that $w_{\nu,\beta}^{-1}(\alpha) \in \Phi^+ \subseteq \Phi_h$, as desired.
\end{proof}
% NOTE(review): a substantial span of text appears to have been lost here in
% extraction (including Lemma~"claim: Stab map", Examples "ex: w to tau
% correspondence" and "ex: Phi to Phi[T] correspondence", and the opening of
% Definition "def: sigma-nu", all of which are referenced below); the
% definition below is reconstructed from Example "ex: sigma-nu translation" --
% confirm against the published version.
\begin{defi}\label{def: sigma-nu}
We define a permutation $\sigma_{\nu} \in \Symm_n$ by the following two conditions:
\begin{enumerate}
\item\label{defi7.14_1} $\sigma_{\nu}(\nu_1)=1$ and $\sigma_{\nu}(\nu_1+1)=2$, and
\item\label{defi7.14_2} for $i \notin \{\nu_1, \nu_1+1\}$,
\[
\sigma_{\nu}(i) = \left\{ \begin{array}{ll} i+2 & \textup{if } i < \nu_1 \\ i & \textup{if } i > \nu_1+1 \end{array}\right. .
\]
\end{enumerate}
\end{defi}
Note that $\sigma_{\nu}$ is uniquely determined by the value of $\nu_1$.
\begin{exam}\label{ex: sigma-nu translation}
Using the same set-up as in Examples~\ref{ex: w to tau correspondence} and~\ref{ex: Phi to Phi[T] correspondence}, recall that $n=5$, $\nu=(3,2)$, and $\beta=t_5-t_2$. Since $\nu_1=3$ and $\nu_1+1=4$ we get
\[
\sigma_{\nu} = \left[
\begin{matrix}
3 & 4 & \mathbf{1} & \mathbf{2} & 5
\end{matrix}
\right]
\]
where condition~\eqref{defi7.14_1} in Definition~\ref{def: sigma-nu} determines the entries in bold and condition~\eqref{defi7.14_2} determines the rest. Consider the translation $w\mapsto \sigma_{\nu}w$ for each $w\in \mathcal{D}_{\nu}(\beta)$, displayed in the table below.
\begin{equation*}
\begin{array} {c|c}
w\in \mathcal{D}_{\nu}(\beta) & \sigma_{\nu}w \in \Symm_5 \\ \hline
\vrule height4mm depth0pt width0pt
\left[
\begin{matrix}
1& \mathbf{4} & 2 & 5 & \mathbf{3}
\end{matrix}
\right] & \left[
\begin{matrix}
3& \mathbf{2} & 4 & 5 & \mathbf{1}
\end{matrix}
\right] \\[2pt]
\left[
\begin{matrix}
2& \mathbf{4} & 1 & 5 & \mathbf{3}
\end{matrix}
\right] & \left[
\begin{matrix}
4& \mathbf{2} & 3 & 5 & \mathbf{1}
\end{matrix}
\right] \\[2pt]
\left[
\begin{matrix}
1& \mathbf{4} & 5 & 2 & \mathbf{3}
\end{matrix}
\right] &\left[
\begin{matrix}
3& \mathbf{2} & 5 & 4 & \mathbf{1}
\end{matrix}
\right] \\[2pt]
\left[
\begin{matrix}
5& \mathbf{4} & 1 & 2 & \mathbf{3}
\end{matrix}
\right] & \left[
\begin{matrix}
5& \mathbf{2} & 3 & 4 & \mathbf{1}
\end{matrix}
\right]\\[2pt]
\left[
\begin{matrix}
5& \mathbf{4} & 2 & 1 & \mathbf{3}
\end{matrix}
\right] &\left[
\begin{matrix}
5& \mathbf{2} & 4 & 3 & \mathbf{1}
\end{matrix}
\right]
\end{array}
\end{equation*}
Note that translation by $\sigma_{\nu}$ sends $4\mapsto 2$ and $3\mapsto 1$ in the one-line notation for $w$, but the rest of the entries of $w$ remain in the same relative order in the one-line notation for $\sigma_{\nu}w$ as they were in the one-line notation of $w$.
\end{exam}
The next lemma shows that translating the set $\mathcal{D}_{\nu}(\beta)$ by $\sigma_{\nu}$ does not change the inversions of $w$ that are also elements of $\Phi^-[T]$.
\begin{lemma} \label{lem: sigma-nu translation}
Suppose $\sigma_{\nu}$ is defined as above. For all $w\in \mathcal{D}_{\nu}(\beta)$ we have
\[
N^-(\sigma_\nu w) \cap \Phi^-[T] = N^-(w) \cap \Phi^-[T].
\]
\end{lemma}
\begin{proof}
Recall that $\Phi^-[T] = \{ t_i - t_j\in \Phi^- \mid \{i,j\}\cap \{a,b\} = \emptyset \}$. By definition, $w(a)=\nu_1$ and $w(b) = \nu_1+1$ so $\sigma_{\nu}w(a) = 1$ and $\sigma_{\nu}w(b)=2$. Thus the $a$-th and $b$-th entry of $\sigma_{\nu}w$ in one-line notation is determined. By Condition~\eqref{defi7.14_2} in Definition~\ref{def: sigma-nu}, $\sigma_{\nu}$ preserves the relative order of the values in the one-line notation of $w$ which are not in positions $a$ or $b$. It follows that for $(i,j)$ with $\{i,j\} \cap \{a,b\} = \emptyset$ and $i>j$, we have $(i,j) \in \inv(w)$ if and only if $(i,j) \in \inv(\sigma_\nu w)$.
\end{proof}
The next lemma relates the grading computation for $\sigma_\nu w$ to the grading computation for $\tau$, up to a translation by $\deg(T)$. This explains why it is useful to introduce the translation by $\sigma_{\nu}$. One of the key points in the proof is that the LHS of~\eqref{eq:shift by degT} can be related to the edges of $\Gamma_h$ which contribute to the computation of $\deg(T)$.
\begin{lemma} \label{lem: degree shifts}
Let $w\in \mathcal{D}_{\nu}(\beta)$ for some $\beta= \beta_T\in I_h$ corresponding to $T\in \SK_2(\Gamma_h)$. Let $w=w_{\nu, \beta}\tau$ be the decomposition of $w$ given in Lemma~\ref{claim: Stab map} for a unique $\tau\in \Stab(a,b)$. Then
\begin{equation}\label{eq:shift by degT}
|N^-(\sigma_{\nu}w)\cap \Phi_h^-| = \deg(T) + |N^-(\tau)\cap \Phi_h^-[T]|.
\end{equation}
\end{lemma}
\begin{proof} Since $\Phi^- = (\Phi^- \setminus \Phi^-[T]) \sqcup \Phi^-[T]$ we also have
\[
\Phi_h^- = (\Phi_h^- \cap (\Phi^- \setminus \Phi^-[T])) \sqcup (\Phi_h^- \cap \Phi^-[T]).
\]
Since $\Phi_h^- \cap \Phi^-[T]$ is the set $\Phi_h^-[T]$ by definition, we conclude
\[
|N^-(\sigma_{\nu}w)\cap \Phi_h^-| = |N^-(\sigma_{\nu}w) \cap \Phi_h^-\cap(\Phi^- \setminus \Phi^-[T])| + |N^-(\sigma_{\nu}w)\cap \Phi_h^-[T]|.
\]
Hence to prove~\eqref{eq:shift by degT}
it suffices to prove that
\begin{equation}\label{eq: eq1}
|N^-(\sigma_{\nu}w) \cap \Phi_h^-\cap(\Phi^- \setminus \Phi^-[T])| = \deg(T)
\end{equation}
and
\begin{equation}\label{eq: eq2}
N^-(\sigma_{\nu}w)\cap \Phi_h^-[T] = N^-(\tau)\cap \Phi_h^-[T].
\end{equation}
We first prove~\eqref{eq: eq1}. By definition, $\Phi^- \setminus \Phi^-[T] = \{ t_i - t_j \in \Phi^- \mid \{i,j\}\cap \{a,b\}\neq \emptyset \}$. Since $w \in \mathcal{D}_{\nu}(\beta)$, we know $w(a)=\nu_1$ and $w(b) = \nu_1+1$ by~\eqref{eq:wa and wb}, and by construction of $\sigma_\nu$ this implies $\sigma_{\nu} w (a)= 1$ and $\sigma_{\nu} w(b) = 2$. It follows that $1$ is in the $a$-th position of the one-line notation for $\sigma_{\nu} w$ and $2$ is in the $b$-th position. Using the identification $N^-(\sigma_\nu w) \cong \inv (\sigma_\nu w)$, we obtain
\begin{equation*}
N^-(\sigma_{\nu}w) \cap (\Phi^- \setminus \Phi^-[T]) = \{ (b,j) \mid 1\leq j < b \} \cup \{ (a,j) \mid 1\leq j < a \}
\end{equation*}
and therefore
\begin{multline*}
N^-(\sigma_{\nu}w)\cap \Phi_h^- \cap (\Phi^- \setminus \Phi^-[T])
\\
= \{ (b,j) \mid 1\leq j < b \textup{ and } b \leq h(j) \} \cup
%\\
\{ (a,j) \mid 1\leq j < a \textup{ and } a\leq h(j) \}.
\end{multline*}
Since $T=\{a,b\}$, the elements in the sets above correspond to edges of $\Gamma_h$ that are incident to the vertices in $T$ and which must be oriented to the right in order for $a$ and $b$ to be sinks. Thus,~\eqref{eq: eq1} now follows immediately from Lemma~\ref{lem: deg(T) property1}.
Next, in order to prove~\eqref{eq: eq2} we note that $N^-(\sigma_\nu w) \cap \Phi^-[T] = N^-(w) \cap \Phi^-[T]$ by Lemma~\ref{lem: sigma-nu translation}. Intersecting both sides with $\Phi_h^-$ we obtain
\begin{equation}\label{eq:intersect with PhihT}
N^-(\sigma_\nu w) \cap \Phi_h^-[T] = N^-(w) \cap \Phi_h^-[T].
\end{equation}
Next we claim
\begin{equation}\label{eq:w and tau}
N^-(w) \cap \Phi_h^-[T] = N^-(\tau) \cap \Phi_h^-[T].
\end{equation}
As in the argument above, to see this it suffices to prove $N^-(w) \cap \Phi^-[T] = N^-(\tau) \cap \Phi^-[T]$, since~\eqref{eq:w and tau} follows by intersecting both sides with $\Phi_h^-$.
Suppose $t_i-t_j \in N^-(w) \cap \Phi^-[T]$. We wish to show $t_i-t_j \in N^-(\tau) \cap \Phi^-[T]$. By assumption we know $i>j$ and $\{i,j\} \cap \{a,b\} = \emptyset$ and $w_{\nu,\beta} \tau(i)=w(i) < w(j) =w_{\nu,\beta} \tau(j)$. Suppose in order to obtain a contradiction that $\tau(i) > \tau(j)$. Then $(\tau(i), \tau(j)) \in \inv(w_{\nu,\beta})$ by the above. Moreover, since $\tau \in \Stab(a,b)$ and $\{i,j\} \cap \{a,b\} = \emptyset$, we have $\{\tau(i), \tau(j)\} \cap \{a,b\} = \emptyset$ also. This contradicts part~\eqref{lemma7.5_1} of Lemma~\ref{lem: wmin satisfies the Hess. condition}. Thus $\tau(i) < \tau(j)$, or equivalently $t_i-t_j \in N^-(\tau) \cap \Phi^-[T]$ as desired. Conversely, suppose $t_i-t_j \in N^-(\tau) \cap \Phi^-[T]$.
Then $\tau(i)<\tau(j)$ and $\{\tau(i), \tau(j)\} \cap \{a,b\} = \emptyset$ and $w_{\nu, \beta}\tau(i) = w(i) < w(j) = w_{\nu, \beta}\tau(j)$ by part~\eqref{lemma7.5_1} of Lemma~\ref{lem: wmin satisfies the Hess. condition}, so $t_i-t_j \in N^-(w) \cap \Phi^-[T]$, as desired.
% NOTE(review): the source is truncated at this point; confirm the end of this
% argument against the published version.