%~Mouliné par MaN_auto v.0.21.1 2018-12-14 10:15:11
\documentclass[ALCO, ThmDefs, Unicode]{cedram}
\OneNumberAllTheorems
%\usepackage{url}
%\usepackage{graphicx}
%\usepackage{amsmath}
%\usepackage{amssymb}
%\usepackage{pgf}
\usepackage{tikz}
%\usepackage[all]{xy}
%\usepackage{enumitem}
%\usepackage[margin=3.5cm]{geometry}
%\usepackage{amsthm}
%\usepackage{todonotes}
\usepackage{mathrsfs}
%\usepackage{bbm}
% Math operators (upright roman, via \DeclareMathOperator).
\DeclareMathOperator{\id}{id}
\DeclareMathOperator{\Inv}{Inv}
\DeclareMathOperator{\BoundedPairs}{BoundedPairs}
\DeclareMathOperator{\rpp}{rpp}
% Semantic names for the maps appearing in the bijection.
\newcommand{\lessthan}{\prec}
\newcommand{\MacdonaldMap}{M}
\newcommand{\TransitionMap}{T_{\pi}}
\newcommand{\BoundedPairTransMap}{BT_{\pi}}
\newcommand{\RT}{RT_{\pi}}
% \Bpoly = bounded-pair generating polynomial; \SpecializedBpoly = its principal specialization.
\newcommand{\Bpoly}{\mathfrak{F}}
\newcommand{\SpecializedBpoly}{f}
\DeclareMathOperator{\defect}{Defect}
\DeclareMathOperator{\comaj}{comaj}
\newcommand{\codes}{\mathcal{C}}
\newcommand{\given}{\ : \ }
\newcommand{\Z}{\mathbb{Z}}
% Calligraphic letters for the sets used in the transition recurrences.
\newcommand{\Tset}{\mathcal{U}}
\newcommand{\Xset}{\mathcal{X}}
\newcommand{\Yset}{\mathcal{Y}}
% Bold letters for words (reduced words and their bounded companions).
\newcommand{\bfa}{\mathbf{a}}
\newcommand{\bfb}{\mathbf{b}}
% Outcome flags returned by the bounded bumping algorithm.
\newcommand{\deleted}{\mathsf{deleted}}
\newcommand{\bumped}{\mathsf{bumped}}
\newcommand{\outcome}{\mathsf{outcome}}
\newcommand{\dvalue}{k}
\newcommand{\rp}{\mathcal{RP}}
\newcommand{\cd}{cD}
% Script letters for the push/bump/delete/insert operations on words.
\newcommand{\Push}{\mathscr{P}}
\newcommand{\Bump}{\mathscr{B}}
\newcommand{\Delete}{\mathscr{D}}
\newcommand{\Insert}{\mathscr{I}}
% Formal variable of the q-analog (fraktur to avoid clashes with other uses of q).
\newcommand{\q}{\mathfrak{q}}
%%%%%%%%%%%%%%
% Bold keyword labels used in the algorithm environments.
\newcommand{\Input}{\textbf{Input}}
\newcommand{\Output}{\textbf{Output}}
%%%%%%%%%%%%%%%%
% Unit cell used to draw tableau/pipe-dream diagrams.
\setlength{\unitlength}{0.06em}
\newlength{\cellsize} \setlength{\cellsize}{18\unitlength}
\newsavebox{\cell}
% An empty 18x18 square (in \unitlength units).  The % after the opening
% brace prevents the line-end newline from adding a spurious space token
% (and hence extra width) inside the saved box.
\sbox{\cell}{%
\begin{picture}(18,18)
\put(0,0){\line(1,0){18}}
\put(0,0){\line(0,1){18}}
\put(18,0){\line(0,1){18}}
\put(0,18){\line(1,0){18}}
\end{picture}}
% Typeset one cell of a diagram: an empty argument produces only a strut
% (no outline), otherwise the argument is centered inside the \cell square.
% NOTE: \def (not \newcommand) is required here, for two reasons:
%   (1) \newcommand errors with "already defined" on every call after the
%       first, since \thearg/\nothing persist between invocations;
%   (2) the scratch macro must expand to the actual argument #1 (the
%       original literal {[1]} made the \ifx empty test never succeed).
\newcommand\cellify[1]{%
\def\thearg{#1}%
\def\nothing{}%
\ifx\thearg\nothing
\vrule width0pt height\cellsize depth0pt\else
\hbox to 0pt{\usebox{\cell} \hss}\fi
\vbox to \cellsize{%
\vss
\hbox to \cellsize{\hss$#1$\hss}%
\vss}}
%\newcommand\tableau[1]{\vtop{\let\\\\ \baselineskip -16000pt \lineskiplimit 16000pt \lineskip 0pt\ialign{&\cellify{##}\\#1\crcr}}}
% Blackboard-bold "!" from the bbold font (slot "21), used for the
% q-factorial [k]\Fact to distinguish it from the ordinary factorial.
\DeclareSymbolFont{bbsymbol}{U}{bbold}{m}{n}
\DeclareMathSymbol{\Fact}{\mathbin}{bbsymbol}{"21}
%\newcommand{\Fact}{\boldsymbol{!\!!}}
% Rotated \asymp used as the "elbows" (non-crossing tile) symbol.
\newcommand{\elbows}{\mathbin{\text{\rotatebox[origin=c]{45}{$\asymp$}}}}
% Numbered environments (cedram's enonce) for open problems and algorithms.
\newenvironment{open}{\begin{enonce}{Open Problem}}{\end{enonce}}
\newenvironment{algorithm}{\begin{enonce}{Algorithm}}{\end{enonce}}
\graphicspath{{./figures/}}
% Small manual kerns for fine-tuning math spacing (m/M = negative, K = positive).
\newcommand*{\mk}{\mkern -1mu}
\newcommand*{\Mk}{\mkern -2mu}
\newcommand*{\mK}{\mkern 1mu}
\newcommand*{\MK}{\mkern 2mu}
\hypersetup{urlcolor=purple, linkcolor=blue, citecolor=red}
% Switch the label style of the current (first-level) enumerate counter.
\newcommand*{\romanenumi}{\renewcommand*{\theenumi}{\roman{enumi}}}
\newcommand*{\Romanenumi}{\renewcommand*{\theenumi}{\Roman{enumi}}}
\newcommand*{\alphenumi}{\renewcommand*{\theenumi}{\alph{enumi}}}
\newcommand*{\Alphenumi}{\renewcommand*{\theenumi}{\Alph{enumi}}}
\title{A bijective proof of Macdonald's reduced word formula}
\author{\firstname{Sara} \middlename{C.} \lastname{Billey}}
\address{Department of Mathematics, University of Washington, Box 354350, Seattle, WA 98195, USA}
\thanks{Billey was partially supported by grant DMS-1101017 from the NSF} \email{billey@math.washington.edu}
\author{\firstname{Alexander} \middlename{E.} \lastname{Holroyd}}
\address{Microsoft Research, 1 Microsoft Way, Redmond, WA 98052, USA}
\email{holroyd@math.ubc.ca}
\author{\firstname{Benjamin} \middlename{J.} \lastname{Young}}
\address{Department of Mathematics, 1222 University of Oregon, Eugene, OR 97403, USA}
\email{bjy@uoregon.edu}
\begin{abstract}
We give a bijective proof of Macdonald's reduced word identity using pipe dreams and Little's bumping algorithm. This proof extends to a principal specialization due to Fomin and Stanley. Such a proof has been sought for over 20 years. Our bijective tools also allow us to solve a problem posed by Fomin and Kirillov from 1997 using work of Wachs, Lenart, Serrano and Stump. These results extend earlier work by the third author on a Markov process for reduced words of the longest permutation.
\end{abstract}
\begin{document}
%\newtheorem{open}{Open Problem}
\maketitle
\section{Introduction}\label{s:intro}
Macdonald gave a remarkable formula connecting a weighted sum of reduced words for a permutation $\pi$ with the number of terms in a Schubert polynomial $\mathfrak{S}_\pi(x_1,\ldots, x_n)$. For a permutation $\pi\in S_n$, let $\ell(\pi)$ be its inversion number and let $R(\pi)$ denote the set of its reduced words. (See Section~\ref{s:background} for definitions.)
\begin{theo}[Macdonald {\cite[(6.11)]{M2}}] \label{t:macdonald}
Given a permutation $\pi \in S_n$ with $\ell(\pi)=p$, one has
\begin{equation}\label{e:macdonald.formula}
\sum_{(a_1,a_2,\ldots, a_p) \in R(\pi)} a_1\cdot a_2 \cdots a_p \ = p! \
\mathfrak{S}_\pi(1,\ldots, 1).
\end{equation}
\end{theo}
For example, the permutation $[3,2,1] \in S_3$ has 2 reduced words, $R([3,2,1])=\{(1,2,1), (2,1,2)\}$. The inversion number is $\ell([3,2,1])=3$, and the Schubert polynomial $\mathfrak{S}_{\pi}(x_1,x_2,x_3)$ is the single term $x_1^2x_2$. We observe that Macdonald's formula holds: $1\cdot 2\cdot 1 + 2 \cdot 1 \cdot 2 =3!\cdot 1$.
In this paper, we give a bijective proof of Theorem~\ref{t:macdonald}. Such a proof has been sought for over 20 years. It has been listed as an open problem in both~\cite{Fomin-Kirillov} and~\cite{Stanley.perms}. Fomin and Sagan have stated that they have a bijective proof, but that it is unpublished due to its complicated nature; see~\cite{Fomin-Kirillov}. Moreover, we give several generalizations as discussed below. Our proof builds on the work of the third author on a Markov process on reduced words for the longest permutation~\cite{BY2014}.
The Schubert polynomial $\mathfrak{S}_\pi$ can be expressed as a sum over \emph{reduced pipe dreams} (or \emph{RC graphs}) corresponding to $\pi$, and its evaluation at $(1,\ldots,1)$ is simply the number of such pipe dreams. (See Section~\ref{s:background} for definitions, and~\cite{LS1,BJS,Fomin-Stanley,FK,billey-bergeron} for history and proofs.) Thus, the right side of~\eqref{e:macdonald.formula} is the number of pairs $(\mathbf{c},D)$, where $\mathbf{c}=(c_1,\ldots, c_p)$ is a word with $1 \leq c_i \leq i$ for each $i$, and $D$ is a pipe dream for $\pi$. A word $c$ with this property is sometimes called a \emph{sub-staircase word}. The left side is the number of pairs $(\mathbf{a},\mathbf{b})$ where $\mathbf{a}\in R(\pi)$ and $\mathbf{b}$ is a word satisfying $1\leq b_i\leq a_i$ for each $i=1,\ldots,p$. Our bijection is between pairs $(\mathbf{a},\mathbf{b})$ and $(\mathbf{c},D)$ that satisfy these conditions. The bijection and its inverse are presented in the form of explicit algorithms. Moreover, both maps are uniform over the permutation $\pi$ in the sense that they have natural descriptions that explicitly involve only $(\mathbf{a},\mathbf{b})$ (respectively, $(\mathbf{c},D)$), and not $\pi$ (although of course $\pi$ can be recovered from $\mathbf{a}$ or $D$). Indeed, if we interpret permutations $\pi\in S_n$ as permutations of $\Z$ that fix all but finitely many elements, then our maps do not even explicitly involve $n$.
The outline of the bijection is quite simple given some well-known properties of Schubert polynomials, together with the \emph{bumping algorithm} for reduced words. The bumping algorithm is an important tool for studying reduced words, originally introduced and developed by Little~\cite{little2003combinatorial} and further studied by Garsia in~\cite{saga}. These properties and objects will be defined in Section~\ref{s:background}.
In the first step, we give a modification of Little's bumping algorithm that also acts on pipe dreams, and use it to give a bijective interpretation to the Lascoux--Schützenberger transition equation for Schubert polynomials. Essentially the same construction has been given by Buch~\cite[p.~11]{Knutson.2012}. The key idea is to iteratively apply the corresponding transition map to $D$ until we reach the empty pipe dream, while recording a sequence of instructions that encode which inversions/insertions are needed in order to reverse the process. We call the resulting sequence a transition chain, denoted $Y(D)$.
\begin{figure}
\begin{center}
\includegraphics[width=\textwidth]{chain2.pdf}
\caption{An example of the bijection $\MacdonaldMap$ for $\pi=[1,4,3,2]$ where the pair $(\mathbf{a}, \mathbf{b})$ is mapped to $(\mathbf{c}, D)$ with $\mathbf{a} =(2,3,2)$, $\mathbf{b}=(2, 1, 2)$, $\mathbf{c}=(1,1,2)$, and $D$ is the pipe dream in the top left corner of the picture. Its transition chain is $Y(D)=((1,3), (2,2),(2,2),(1,1))$. Each vertical pair in the picture also demonstrates the bijection for a different permutation; note the permutations on the wires agree on the vertical pairs.}\label{fig:bijection}
\end{center}
\end{figure}
Next we apply the bumping algorithm on reduced words and their wiring diagrams, using the reverse of the transition chain $Y(D)$ to provide instructions. The word $\mathbf{c}$ tells us where to insert new crossings; when adding the $i$th new crossing it should become the $c_i$th entry in the word. The height of the added crossing is determined by placing its feet on the wire specified by the corresponding step of the transition chain. Each new crossing is immediately pushed higher in value, initiating a Little bump. The result is a reduced wiring diagram for $\pi$ corresponding to a reduced word $\mathbf{a}=(a_1,a_2,\ldots, a_p)$. If we keep track of how many times each column is pushed in the bumping processes, we obtain a word $\mathbf{b}=(b_1,\ldots,b_p)$ of the same length such that $a_i \geq b_i$ for all $i$, as required. See Figure~\ref{fig:bijection} for an illustration of the algorithm. It turns out that each step is reversible.
Our bijective proof extends to a $\q$-analog of~\eqref{e:macdonald.formula} that was conjectured by Macdonald and subsequently proved by Fomin and Stanley. To state this formula, let $\q$ be a formal variable. Define the $\q$-analog of a positive integer $k$ to be $[k]=[k]_{\q}:=1+\q+\q^2+\cdots + \q^{k-1}$. The $\q$-analog of the factorial $k!$ is defined to be $[k]\Fact=[k]_\q\Fact := [k][k-1]\cdots [1]$. (We use the blackboard bold symbol $\Fact$ to distinguish it from the ordinary factorial, and the symbol $\q$ for the formal variable to avoid notation conflicts.) For $\mathbf{a}=(a_1,a_2,\ldots, a_p)
\in R(\pi)$, define the co-major index to be the sum of the ascent locations:
\[
\comaj(\mathbf{a}) := \sum_{\substack{1 \leq i < p: \\ a_i < a_{i+1}}} i.
\]
%\begin{samepage}
\begin{theo}[Fomin and Stanley {\cite{Fomin-Stanley}}] \label{t:macdonald.q.analog}
Given a permutation $\pi \in S_n$ with $\ell(\pi)=p$, one has
\begin{equation}\label{e:fomin.stanley.formula}
\sum_{\mathbf{a}=(a_1,a_2,\ldots, a_p) \in R(\pi)} [a_1]\cdot [a_2] \cdots [a_p]\ \q^{\comaj(\mathbf{a})} \ = {[p]\Fact} \, \mathfrak{S}_\pi(1,\q,\q^2,\ldots, \q^{n-1}).
\end{equation}
\end{theo}
%\end{samepage}
Continuing with the example $\pi=[3,2,1]$, we observe that the $\q$-analog formula indeed holds: $[1]\cdot [2]\cdot [1]\q + [2] \cdot [1] \cdot [2]\q^2 = (1+\q)\q + (1+ \q)^2\q^2 = (1+\q+\q^2)(1+\q)\q = [3]{\Fact} \cdot \mathfrak{S}_{[3,2,1]} (1,\q,\q^2) $.
In 1997, Fomin and Kirillov published a further extension to Theorem~\ref{t:macdonald.q.analog}. They interpreted the right side of the formula in terms of reverse plane partitions, and asked for a bijective proof. See Theorem~\ref{t:fomin.kirillov.2}. Using our methods together with results of Lenart~\cite{Lenart.2004}, and Serrano and Stump~\cite{Serrano.Stump.FPSAC,Serrano.Stump.2012}, we provide a bijective proof.
We want to comment briefly on how the bijections in this paper were found. We were fully aware of Little's bumping algorithm so we hoped it would play a role. Many details of the exact formulation we describe here were found through extensive experimentation by hand and by computer. Experimentally, we found the transition chains to be the key link between a bounded pair and its image under $M$. As the proof was written up, we chose to suppress the dependence on the transition chains in favor of clearer descriptions of the maps.
The outline of the paper is as follows. In Section~\ref{s:background}, we give the notation and background information on reduced words, Schubert polynomials, Little bumps, etc. The key tool for our bijection comes from the Transition Equation for Schubert polynomials. We give a bijective proof of this equation in Section~\ref{s:trans}. In Section~\ref{s:trans.bounded.pairs}, we extend the Transition Equation to bounded pairs, by which we mean pairs $(\bfa,\bfb)$ satisfying $1\leq b_{i}\leq a_{i}$, as discussed above. In Section~\ref{s:bij.proof}, we spell out the main bijection proving Theorem~\ref{t:macdonald}. The principal specialization of Macdonald's formula given in Theorem~\ref{t:macdonald.q.analog} is described in Section~\ref{s:specialization}, along with some interesting properties of the co-major index on reduced words. In Section~\ref{s:fk}, we discuss the Fomin--Kirillov theorems and how our bijection is related to them. Finally, in Section~\ref{s:future} we discuss some intriguing open problems and other formulas related to Macdonald's formula.
\eject
\section{Background}\label{s:background}
\subsection{Permutations}\label{ss:permutations}
We recall some basic notation and definitions relating to permutations which are standard in the theory of Schubert polynomials. We refer the reader to~\cite{LS1,M2,manivel-book} for more information.
Let $S_n$ be the symmetric group of all permutations $\pi=[\pi(1),\ldots,\pi(n)]$ of $\{1,\ldots,n\}$. An \emph{inversion} of $\pi\in S_n$ is an ordered pair $(i,j)$, such that $ i < j $ and $\pi(i) > \pi(j)$. The \emph{length} $\ell(\pi)$ is the number of inversions of $\pi$. We write $t_{ij}$ for the transposition which swaps $i$ and $j$, and we write $s_i = t_{i, i+1}$ $(1 \leq i \leq n-1)$. The $s_i$ are called \emph{simple transpositions}; they generate $S_n$ as a Coxeter group. Composition of permutations is defined via $\pi\tau(i):=\pi(\tau(i))$.
An alternate notation for a permutation $\pi \in S_n$ is its
\emph{Lehmer code}, or simply its \emph{code}, which is the $n$-tuple
\[
(L(\pi)_1, L(\pi)_2, \ldots, L(\pi)_n)
\]
where $L(\pi)_i$ denotes the number of inversions $(i,j)$ with first coordinate $i$. Note, $0 \leq L(\pi)_i \leq n-i$ for all $1 \leq i \leq n$. The permutation $\pi$ is said to be
\emph{dominant} if its code is a weakly decreasing sequence.
\subsection{Reduced words}\label{ss:reduced words}
A \emph{word} is a $k$-tuple of integers. The \emph{ascent set} of a word $\mathbf{a}=(a_1, \ldots, a_{k})$ is $\{i: a_{i}\pi(j)$. This occurs if and only if $(i,j)$ is an inversion of $\pi$, which in turn is equivalent to the wire labels $(\pi(j),\pi(i))$ being an inversion of $\pi^{-1}$. Many of the arguments below depend on the positions of the inversions for $\pi$ not for $\pi^{-1}$. Reversing any word for $\pi$ gives a word for $\pi^{-1}$. Thus, if we label the wires $1,2,3,\ldots$ in increasing order down the right side of a wiring diagram instead of the left, then the corresponding wires travel right to left, and appear in the order $\pi^{-1}$ down the left side. Thus, the $i$-wire and the $j$-wire cross in the \emph{right-labeled wiring diagram} for $\bfa \in R(w)$ if and only if $(i,j)$ is an inversion of $\pi$.
The wiring diagrams shown on the first row of Figure~\ref{fig:bijection} are all right-labeled wiring diagrams. For example, the word $(1,3,2)$ corresponding to the second wiring diagram from the left is a reduced word for the permutation $[2,4,1,3]=[3,1,4,2]^{-1}$.
\subsection{Bounded bumping algorithm}\label{ss:little algorithm}
Little's bumping algorithm~\cite{little2003combinatorial}, also known as a ``Little bump'', is a map on reduced words. It was introduced to study the decomposition of Stanley symmetric functions into Schur functions in a bijective way. Later, the Little algorithm was found to be related to the Robinson--Schensted--Knuth map~\cite{little2} and the Edelman--Greene map~\cite{hamaker-young}; it has been extended to signed permutations~\cite{billey-hamaker-roberts-young}, affine permutations~\cite{lam-shimozono}, and the subset of involutions in $S_{n}$~\cite{Hamaker-Marberg-Pawlowski.2016}. The key building block of our bijective proofs is an enhancement of Little's algorithm which we call the bounded bumping algorithm. We describe it below, after setting up notation.
\begin{defi}
Let $\mathbf{a} = (a_1, \ldots, a_k)$ be a word. Define the \emph{decrement-push}, \emph{increment-push}, \emph{deletion} and \emph{insertion} of $\mathbf{a}$ at column $t$, respectively, to be
\begin{align*}
\Push^-_t \bfa &= (a_1, \ldots, a_{t-1}, a_t-1, a_{t+1}, \ldots, a_k); \\
\Push^+_t \bfa &= (a_1, \ldots, a_{t-1}, a_t+1, a_{t+1}, \ldots, a_k); \\
\Delete_t \bfa &= (a_1, \ldots, a_{t-1}, a_{t+1}, \ldots, a_k); \\
\Insert_t^{x} \bfa &= (a_1, \ldots, a_{t-1},x, a_{t}, \ldots, a_k).
\end{align*}
\end{defi}
In~\cite{BY2014}, the notation $\Push^{\uparrow}$ was used to represent $\Push^-$, and $\Push^{\downarrow}$ was used to represent $\Push^+$ based on the direction of a crossing in the wiring diagram.
\begin{defi}
Let $\mathbf{a}$ be a word. If $\Delete_t \bfa$ is reduced, then we say that $\mathbf{a}$ is \emph{nearly reduced at $t$}.
\end{defi}
The term ``nearly reduced'' was coined by Lam~\cite[Chapter~3]{LLMSSZ}, who uses ``$t$-marked nearly reduced''. Words that are nearly reduced at $t$ may or may not also be reduced; however, every reduced word $\bfa$ is nearly reduced at some index $t$. For instance, a reduced word $\bfa$ of length $k$ is nearly reduced at $1$ and at $k$.
In order to define the bounded bumping algorithm, we need the following lemma, which to our knowledge first appeared in~\cite[Lemma~4]{little2003combinatorial}, and was later generalized to arbitrary Coxeter systems by Lam and Shimozono using the strong exchange property. The statement can also be checked for permutations by considering the wiring diagram.
\begin{lemm}[{\cite[Lemma~21]{lam-shimozono}}]\label{lem:nearly_reduced}
If $\bfa$ is not reduced, but is nearly reduced at $t$, then $\bfa$ is nearly reduced at exactly one other column $t' \neq t$. In the wiring diagram of $\bfa$, the two wires crossing in column $t$ cross in exactly one other column $t'$.
\end{lemm}
\begin{defi}
In the situation of Lemma~\ref{lem:nearly_reduced}, we say that $t'$ \emph{forms a defect with} $t$ in $\bfa$, and write $
\defect_t(\bfa) = t'$.
\end{defi}
\begin{figure}
\centering
\includegraphics[width=1.65in]{bump1.pdf}
\includegraphics[width=1.65in]{bump2.pdf}
\includegraphics[width=1.65in]{bump3.pdf} \\[.2in]
\includegraphics[width=1.65in]{bump4.pdf}
\includegraphics[width=1.65in]{bump5.pdf}
\includegraphics[width=1.65in]{bump6.pdf}
\caption{An example of the sequence of wiring diagrams for the words $\bfa'$ which appear when running the bounded bumping algorithm on input $\bfa =(4, 3, 5, 6, 4, 3, 5), \ \bfb = (2,2,2,2,2,2,2),\ t_0=4,
\text{ and } \epsilon=-.$ The arrows indicate which crossing will move in the next step. After the first step, row $7$ contains a wire with no swaps, which is therefore not shown.}
\label{fig:little.bump}
\end{figure}
A crucial point is that the definitions of ``reduced'', ``nearly reduced'', and the $
\defect$ map make sense even if we are given only the word $\bfa$, but not the corresponding permutation $\pi\in S_n$, nor even its size $n$. Indeed, we can take $n$ to be any integer greater than the largest element of $\bfa$; it is easily seen that the three notions coincide for all such $n$. An alternative, equivalent viewpoint is to interpret all our permutations as permutations of $\Z^+:=\{1,2,\ldots\}$ that fix all but finitely many elements; we can abbreviate such a permutation $\pi=[\pi(1),\pi(2),\ldots]$ to $\pi=[\pi(1),\ldots,\pi(n)]$ where $n$ is any integer such that all elements greater than $n$ are fixed. Let $S_{\infty}$ be the set of all such permutations on $\Z^{+}$.
Our central tool is a modification of the bumping algorithm introduced by Little in~\cite{little2003combinatorial}. We call our modified version the \emph{bounded} bumping algorithm. This algorithm will be used twice in the proof of Theorem~\ref{t:macdonald}, in two different contexts.
\begin{defi}
A word $\bfb$ is a \emph{bounded word} for another word $\bfa$ if the words have the same length and $1\leq b_i\leq a_i$ for all $i$. A \emph{bounded pair} (for a permutation $\pi$) is an ordered pair $(\bfa,\bfb)$ such that $\bfa$ is a reduced word (for $\pi$) and $\bfb$ is a bounded word for $\bfa$. Let $\BoundedPairs(\pi)$ be the set of all bounded pairs for $\pi$.
\end{defi}
For example, for the simple transposition $s_k$, the set is
\[
\BoundedPairs(s_k) = \bigl\{\bigl((k),(i)\bigr): 1\leq i \leq k\bigr\}.
\]
\begin{algorithm}[Bounded Bumping Algorithm]\label{algorithm:little bump}
\
\noindent \Input: $(\bfa, \bfb, t_0, \epsilon)$, where $\bfa$ is a word that is nearly reduced at $t_0$, and $\bfb$ is a bounded word for $\bfa$, and $\epsilon \in \{-, +\}=\{-1,+1 \}$ is a direction.
\noindent \Output: $\Bump^{
\epsilon}_{t_0}(\bfa, \bfb) = (\bfa', \bfb', i, j, \outcome)$, where $\bfa'$ is a reduced word, $\bfb'$ is a bounded word for $\bfa'$, $i$ is the row and $j$ is the column of the last crossing pushed in the algorithm, and $\outcome$ is a binary indicator explained below.
\begin{enumerate}
\item Initialize $\bfa'\leftarrow \bfa,\, \bfb'
\leftarrow \bfb, \, t \leftarrow t_0$.
\item Push in direction $\epsilon$ at column $t$, i.e.\ set $\bfa' \leftarrow
\Push^{\epsilon}_{t}\bfa'$ and $\bfb' \leftarrow
\Push^{\epsilon}_{t}\bfb'$.
\item If $b'_t = 0$, return $(\Delete_t \bfa', \Delete_t
\mathbf{b}', \bfa'_{t}, t, \deleted)$ and
\textbf{stop}.
\item If $\bfa'$ is reduced, return $(\bfa', \bfb',
\bfa'_{t}, t, \bumped)$ and \textbf{stop}.
\item Set $t \leftarrow
\defect_t(\bfa')$ and
\textbf{return to step~2.}
\end{enumerate}
\end{algorithm}
The principal difference between the above algorithm and Little's map $\theta_r$ in~\cite{little2003combinatorial} is the presence of the bounded word $\bfb$, which indicates the number of times each column is allowed to be decremented before being deleted. The stopping rule in step~3 is not present in Little's algorithm. As discussed above, one consequence is that when $\epsilon=-$, our map can never output a word containing a $0$: if a push results in a $0$ then it is immediately deleted and the algorithm stops. In contrast, in Little's original algorithm, the entire word is instead shifted by $+1$ in this situation, changing the permutation (and also stopping, since the word is reduced). Indeed, Little's bumping algorithm in the $+1$ direction on a reduced word $\bfa$ maps to $\bfa'$ if and only if
\[
\Bump^{+}_j(\mathbf{a}, \mathbf{b}) = (\mathbf{a}',\mathbf{b}', i,j,
\bumped)
\]
regardless of the choice of bounded word $\bfb$ for $\bfa$.
Since this algorithm is the main tool used in the paper, we will give several examples. In Figure~\ref{fig:little.bump}, we show the sequence of wiring diagrams for the words $\bfa'$ in the algorithm when it is run on the input
\[
\bfa =(4, 3, 5, 6, 4, 3, 5), \ \bfb = (2,2,2,2,2,2,2),\ t_0=4, \text{ and } \epsilon=-.
\]
The result is
\[
\Bump^{-}_{4}(\bfa, \bfb) =
\bigl((3, 2, 4, 5, 4, 3, 4), (1,1,1,1,2,2,1),2,2, \bumped\bigr).
\]
Note that Little's bumping algorithm maps $(4, 3, 5, 6, 4, 3, 5)$ to $(3, 2, 4, 5, 4, 3, 4)$ using the exact same sequence of pushes as in Figure~\ref{fig:little.bump} as expected since $\outcome=\bumped$.
On the other hand, with input $\widetilde{\mathbf{b}} = (2,2,2,2,2,2,1)$ the bounded bumping algorithm stops after the third push in the sequence because $\widetilde{b}_7=1$, so
\[
\Bump^{-}_{4}(\bfa, \widetilde{\mathbf{b}}) =
\bigl((4, 3, 4, 5, 4, 3), (2,2,1,1,2,2), 4, 7, \deleted\bigr).
\]
Another good example for the reader to consider is when the input word $\bfa$ is a consecutive sequence such as
\[
\Bump^{-}_{1}\bigl((6,5,4,3), (3,3,3,3)\bigr)=\bigl((5,4,3,2),
(2,2,2,2),2,4,\bumped\bigr).
\]
We now make some remarks about this algorithm. The initial input word $\bfa$ may or may not be reduced, but, if we reach step~$5$ then $\bfa'$ is always not reduced but nearly reduced at $t$, so the $\defect$ map makes sense.
Suppose that the input word $\bfa$ is a word for a permutation $\pi\in S_n$. Pushes may in general result in words with elements outside the interval $[1,n-1]$. Specifically, in the case $\epsilon=+$, step~2 may result in a word $\bfa'$ with an element $a_t'=n$. As mentioned above, this can be interpreted as a word for a permutation in $S_{n+1}$. In fact, in this case the algorithm will immediately stop at step~4, since this new word is necessarily reduced. On the other hand, in the case $\epsilon=-$, if step~2 ever results in a word with $a_t'=0$, we must have $b_t'=0$ as well, so the algorithm will immediately stop at step~3, and the $0$ will be deleted. Note that it is also possible for a non-zero element of $\bfa$ to be deleted at step~3, since $b_{i}'\pi(s)$. Note that $r$ is the position of the largest descent in $\pi$, and $s$ is the largest value such that $\pi(r) > \pi(s)$. If $\bfa$ is a reduced word for $\pi$, then there exists a unique column $t_0$ containing the $\{r,s\}$-wire crossing in the right-labeled wiring diagram for $\bfa$. One can easily verify that $\ell(\pi t_{rs})=\ell(\pi) -1$, and hence $\bfa$ is nearly reduced in column $t_0$. The original proof due to Lascoux and Schützenberger~\cite{LS2} uses Monk's formula for computing products of Schubert classes in the cohomology ring of the flag manifold. See also~\cite[4.16]{M2}. We give a bijective proof using pipe dreams in the next section.
%\begin{samepage}
\begin{theo}[Transition Equation for Schubert polynomials \cite{LS2}] \label{t:transitionA}
For all permutations $\pi$ with $\pi \neq \id$, the Schubert polynomial $\mathfrak{S}_{\pi}$ is determined by the recurrence
\begin{equation}\label{e:transA}
\mathfrak{S}_{\pi}= x_{r}\mathfrak{S}_{\nu} + \sum_{\substack{ q0$, the number of bounded pairs satisfies the following recursive formula:
\begin{equation}\label{e:transB}
\Bpoly_{\pi}(\mathbf{1})= p\ \Bpoly_{\nu}(\mathbf{1}) \ + \sum_{\substack{ q0$.
\begin{algorithm}[Bounded Transition]\label{algorithm:bt}
Suppose $\pi \neq \id$ is given, and let $(r,s)$ and $\nu$ be defined as in Theorem~\ref{t:transitionBP}.
\noindent \Input: A bounded pair $(\mathbf{a}, \mathbf{b})$ for $\pi$.
\noindent \Output: $\BoundedPairTransMap(\mathbf{a},\mathbf{b})=((\mathbf{a}',
\bfb'), \dvalue)\in \Xset(\pi)$.
\begin{enumerate}
\item Let $t_{0}$ be the unique column containing the $\{r,s\}$-wire crossing in the right-labeled wiring diagram for $\bfa$.
\item Compute $\Bump^{-}_{t_0}(\mathbf{a},\mathbf{b}) = (\bfa',\bfb',i, j, \outcome)$.
\item If $\outcome=\deleted$, then $j$ is the last crossing pushed in the bounded bumping algorithm before being deleted so $j \in [1,p]$. Return $((\mathbf{a}', \bfb'), j)$ and \textbf{stop}. By Proposition~\ref{t:little}$\MK$\eqref{pr2.7item:3}, we know $(\mathbf{a}',
\bfb')$ is a bounded pair for $\nu$.
\item If $\outcome=\bumped$, return $((\mathbf{a}', \bfb'), 0)$ and
\textbf{stop}. Proposition~\ref{t:little}$\MK$\eqref{pr2.7item:2} shows that one of the wires crossing in column $j$ of the right-labeled wiring diagram for $\mathbf{a}'$ is the $r$-wire, the other is labeled by some $q|\TransitionMap(D)|
\\ (\mathbf{c}, \TransitionMap(D),0) & |D|=|\TransitionMap(D)|,
\end{cases}
\]
where $\widehat{\mathbf{c}}:=(c_1,\ldots,c_{p-1})$.
%\noindent
Since $\TransitionMap$ is a bijection, $\RT$ is a bijection with well defined inverse $\RT^{-1}:
\Yset(\pi) \longrightarrow \codes(\pi) \times \rp(\pi)$.
The map $\MacdonaldMap_\pi: \BoundedPairs(\pi) \longrightarrow
\codes(\pi) \times \rp(\pi)$ can be written as the composition of three bijections, $\MacdonaldMap_\pi = \RT^{-1} \circ (\MacdonaldMap
\times \id) \circ \BoundedPairTransMap$, hence is itself a bijection. This concludes the induction.
\end{proof}
Observe from the proof above, we can show by induction that $M_{\pi}^{-1} = \BoundedPairTransMap ^{-1}\circ (\MacdonaldMap^{-1} \times\id) \circ \RT$. Thus, we can write out the algorithm for $M^{-1}$ analogously to Algorithm~\ref{algorithm:mac}.
\begin{coro}
The inverse of $M$ is given by Algorithm~\ref{algorithm:inverse.mac}.
\end{coro}
\begin{algorithm}[Inverse Macdonald Map]\label{algorithm:inverse.mac}
\
\noindent \Input: $(\mathbf{c}, D)$ where $D$ is a reduced pipe dream for some permutation $\pi$ and $\mathbf{c}=(c_1,c_2,\ldots, c_p)$ is a sub-staircase word of length $p =\ell(\pi)$.
\noindent \Output: $\MacdonaldMap^{-1}(\mathbf{c},D)= (\mathbf{a},\mathbf{b})$, a bounded pair for $\pi$.
\begin{enumerate}
\item If $\pi$ is the identity, then we must have $\mathbf{c}=()$ and $D =\{\}$. Return $(\mathbf{a}, \mathbf{b}) =((),())$ and \textbf{stop}.
\item Compute $\TransitionMap(D)=D' \in \Tset(\pi)$. Say $D'$ is a reduced pipe dream for $\nu t_{q,r}$ where $(r,s)$ is the lex largest inversion for $\pi$, $\nu = \pi t_{r,s}$ and $1 \leq q
\leq r$. If $q0$, the polynomials $\SpecializedBpoly_{\pi}(\q)$ satisfy the following recursive formula:
\begin{equation}\label{e:trans.qanalog}
\SpecializedBpoly_{\pi}(\q)= (1+\q+\cdots + \q^{p-1}) \q^{r-1} \SpecializedBpoly_{\nu}(\q) \ + \sum_{\substack{ qj$, then $\comaj(\mathbf{a}_{p+1}^j)-\comaj(\mathbf{a})=0$.
Next, consider the relative order of $a_{p}, j, k $ and how it affects the comaj difference word. The possible orders correspond with the 6 permutations in $S_3$. For instance, if $ja_{1}+1.
\end{cases}
\]
Thus, all 4 cases for $p=1$ satisfy the statements in the lemma.
Assume the lemma holds by induction for all reduced words up to length $p\geq 1$. We will show it holds for all reduced words of length $p+1$. Let
\[
\mathbf{a'}=(a_{1},\ldots,a_{p},k)
\]
be a reduced word extending $\mathbf{a}$. Thus, $h^j_i(\mathbf{a'})= h^j_i(\mathbf{a})$ for $1\leq i \leq p+1$. Let
\[
h=h^j_{p+1}(\mathbf{a})=s_{a_{p}}\cdots s_{a_{1}}(j),
\]
then $h^j_{p+2}(\mathbf{a'}) = s_k(h)$.
Our goal is to compute the augmented comaj differences $v^j_i(\mathbf{a}')$ for $1\leq i\leq p+2$. We will treat the three cases $1\leq i\leq p$, $i=p+1$ and $i=p+2$ separately.
First consider the case $1 \leq i \leq p$. The bounded bumping algorithm preserves the ascent set of a word by Proposition~\ref{t:little}$\MK$\eqref{pr2.7item:4}, so one can observe
that
\[
\comaj(y_i^j(\mathbf{a})) =
\comaj(a_1,a_2,\ldots,a_{i-1},h^j_i(\mathbf{a})-1/2,a_i,\ldots,
a_p).
\]
This fact will allow us to compute the augmented comaj differences
without knowing the exact sequence of pushes required in the bounded bumping algorithm. Using the above observation,
\begin{align*}
\comaj(y_i^j(\mathbf{a'})) =&\comaj(y_i^j(\mathbf{a}))+ (p+1)\cdot \delta,
\end{align*}
while
\[
\comaj(\mathbf{a'}) = \comaj(\mathbf{a}) + p\cdot \delta,
\]
where
\[
\delta:=
\begin{cases}
1,&a_pk\geq h\\
h-1 & a_pk\geq h$, the interval is $[h,h+p]$, and in the other two cases the interval is $[h-1,h+p-1]$.
Finally, consider the case $i=p+2$. Again, the value $v_{p+2}^j(\mathbf{a'})$ is straightforward to calculate from the definition of the augmented comaj vector given $k$ and the fact that $s_{k}(h)=h^j_{p+2}(\mathbf{a'})$ mentioned above:
\[
v_{p+2}^j(\mathbf{a'}) =
\begin{cases}
s_{k}(h)-1 & k\geq s_k(h)\\
p+s_{k}(h) & k < s_k(h).
\end{cases}
\]
Thus, $v_{p+2}^j(\mathbf{a'})$ will be an extreme value in the interval $[s_{k}(h) -1, s_{k}(h) +p]$ as required for the lemma. All that remains to prove the lemma is to ascertain how $v_{p+2}^j(\mathbf{a'})$ relates to $[h,h+p]$ when $a_{p}>k\geq h$ or $[h-1,h+p-1]$ otherwise. This again breaks into cases depending on if $s_{k}(h)=h,h-1,h+1$. We leave this straightforward verification to the reader.
\end{proof}
\begin{proof}[Proof of Theorem~\ref{t:transitionBP.qanalog}]
As mentioned in the introduction to this section, we will show that the bijection $\BoundedPairTransMap$ from Algorithm~\ref{algorithm:bt} preserves the $\q$-weight in the following sense. Assume $\BoundedPairTransMap(\mathbf{a},\mathbf{b}) = ((\mathbf{e},\mathbf{f}), \dvalue) \in \Xset(\pi)$. Let $j = \pi(r)$ so that $h_{p+1}^j(\bfa)=r$. We will show that the combined weight defined in~\eqref{e:combined.weight} satisfies
\begin{equation}\label{e:q.weights}
\q^{(\mathbf{a},\mathbf{b})} =
\begin{cases}
\q^{v_\dvalue^j(\mathbf{e})}
\q^{(\mathbf{e},\mathbf{f})} & \dvalue >0 \\
\q^{(\mathbf{e},\mathbf{f})} & \dvalue = 0.
\end{cases}
\end{equation}
Once this is complete, we know from Lemma~\ref{l:aug.comaj.diff} that $v^j(\bfa)$ is a permutation of $[r-1,r+p-1]$. Hence, Theorem~\ref{t:transitionBP.qanalog} follows by a straightforward verification.
Every $((\mathbf{e},\mathbf{f}), \dvalue) \in \Xset(\pi)$ corresponds with a pair $q\leq r$ such that $(\mathbf{e},\mathbf{f})$ is a bounded pair for $\nu t_{q,r}$. Recall, $\dvalue =0$ if and only if $q0$, $(\mathbf{e},\mathbf{f})$ is a bounded pair for $\nu$. The computation for $\BoundedPairTransMap(\mathbf{a},\mathbf{b})$ removed a letter from column $\dvalue$ on the last step. The crossing removed had its right foot on the wire labeled $r$ in the right-labeled diagram for $\mathbf{e}$, or equivalently the wire labeled $j=\pi(r)$ when the diagram is labeled increasing along the left side. Therefore, the row of the removed crossing is $h_\dvalue^j(\mathbf{e})-1$. We also must have $y_\dvalue^j(\mathbf{e}) = \mathbf{a}$ by definition of the $y_\dvalue^j$ map and the fact that the bounded bumping algorithm is reversible by Proposition~\ref{t:little}$\MK$\eqref{pr2.7item:1}. So $h_\dvalue^j(\mathbf{e})-1 = a_\dvalue - b_\dvalue$. In all columns $i \neq \dvalue$, the difference $a_i -b_i$ is preserved by every push step in the bounded bumping algorithm. Using the
notation
\[
v_\dvalue^j(\mathbf{e}) = \comaj(\mathbf{a}) -
\comaj(\mathbf{e}) + h_\dvalue^j(\mathbf{e})-1,
\]
we have shown
$\q^{(\mathbf{a},\mathbf{b})} = \q^{v_\dvalue^j(\mathbf{e})}
\q^{(\mathbf{e},\mathbf{f})}$.
\end{proof}
\section{Fomin--Kirillov Formulas}\label{s:fk}
Fomin and Kirillov~\cite{Fomin-Kirillov} gave several identities generalizing Macdonald's formula, and posed the problem of finding bijective proofs. We show that our bijection implies a bijective proof of one of these identities involving dominant permutations. We first state the identity, starting with an important special case. In the interest of brevity, we will assume the reader has some familiarity with plane partitions and standard Young tableaux. More information on these objects may be found in the cited references.
Let $w_{0}=[n,n-1,\ldots, 1] \in S_{n}$. The following formula specializes to Macdonald's formula~\eqref{e:macdonald.formula} when $x=0$ and the coefficient of the leading term is $\#R(w_{0})$. The last quantity equals the number of standard Young tableaux of staircase shape with $n-1$ rows, as proved by Stanley~\cite{S3}, and later bijectively by Edelman and Greene~\cite{edelman-greene}.
\begin{theo}[{\cite[Theorem~1.1]{Fomin-Kirillov}}] \label{t:fomin.kirillov}
We have the following identity of polynomials in $x$ for the permutation $w_0 \in S_n$:
\begin{equation}\label{e:fomin.kirillov.formula}
\sum_{(a_1,\ldots, a_{\binom{n}{2}}) \in R(w_0)} (x+a_1) \cdots (x+a_{\binom{n}{2}}) \ = \binom{n}{2} ! \prod_{1\leq i < j\leq n} \frac{2x+i+j-1}{i+j-1}.
\end{equation}
\end{theo}
The second factor on the right side of~\eqref{e:fomin.kirillov.formula} counts the number of plane partitions with maximum entry $x$. For a permutation $\pi=[\pi(1),\ldots, \pi(n)]\in S_n$, write $1^{x} \times
\pi =[1,2,\ldots, x, \pi(1)+x, \pi(2)+x, \ldots,
\pi(n)+x]$. Via theorems of Wachs~\cite{wachs} and Proctor~\cite{proctor1, proctor2, koike-terada}, the second factor on the right of~\eqref{e:fomin.kirillov.formula} is also the number of terms in the Schubert polynomial for $1^{x} \times \pi$ when $x$ is a nonnegative integer. A bijection between the sets $R(1^{x} \times \pi)$ and $R(\pi)$ is given by $(a_1,a_2,\ldots, a_p) \mapsto (x+a_1,x+a_2,\ldots, x+a_p)$.
Fomin and Kirillov gave a $\q$-analog of the above identity in which, moreover, $w_0$ is generalized to an arbitrary dominant permutation. A \emph{dominant permutation} is one whose code is weakly decreasing. For any partition $\lambda \vdash p$, let $\sigma_{\lambda}$ be the dominant permutation in $S_{p+1}$ whose code is $\lambda$ followed by zeros. Let $\rpp^\lambda(x)$ be the set of \emph{weak reverse plane partitions} whose entries are all in the range $[0,x]$ for $x \in \mathbb{N}$. This is the set of $x$-bounded fillings of $\lambda$ with rows and columns weakly increasing to the right and down. Given a weak reverse plane partition $P$, let $|P|$ be the sum of its entries. Let
\[
[\rpp^\lambda(x)]_\q = \sum_{P\in \rpp^\lambda(x)} \q^{|P|}.
\]
\begin{theo}[{\cite[Theorem~3.1]{Fomin-Kirillov}}] \label{t:fomin.kirillov.2}
For any partition $\lambda \vdash p$ and its associated dominant permutation $\sigma_\lambda$, we have the following identity for all $x \in \mathbb{N}$:
\begin{align}\label{e:fomin.kirillov.formula.2}
\sum_{(a_1,a_2,\ldots, a_p) \in R(\sigma_{\lambda})} \q^{\comaj(a_1,a_2,\ldots, a_p)} &[x+a_1]\cdot [x+a_2] \cdots [x+a_p] \\
&= [p]! \ \mathfrak{S}_{1^{x} \times \sigma_{\lambda}}(1,\q,\q^2,\ldots, \q^{x+p})\\
&= [p]! \ \q^{b(\lambda)}\ [\rpp^{\lambda}(x)]_{\q}
\end{align}
where $b(\lambda) = \sum_i (i-1) \lambda_i$.
\end{theo}
The first equality is given by Macdonald's $\q$-formula. The second follows from the theorem of Wachs~\cite{wachs} proving that for every vexillary permutation $\pi$, its Schubert polynomial is a flagged Schur function of shape determined by sorting the Lehmer code of $\pi$. (Dominant permutations are vexillary). Using our bijection for Macdonald's formula, we can now give a complete bijective proof of Theorem~\ref{t:fomin.kirillov.2} as requested in~\cite[Open Problem~1]{Fomin-Kirillov}.
\begin{proof}
Fix a partition $\lambda$ and $x \in \mathbb{N}$. We construct a bijection~$FK$ from bounded pairs for $\sigma_{\lambda}$ to the set of sub-staircase words of length $|\lambda|$ times the set of reverse plane partitions for $\lambda$ bounded by $x$ as follows.
\begin{enumerate}
\item Given a bounded pair $(\mathbf{a},\mathbf{b})$ for $\sigma_{\lambda}$, let $(\mathbf{c},D) = \MacdonaldMap(\mathbf{a},\mathbf{b})$ be the corresponding $\cd$-pair using the Macdonald Map specified in Section~\ref{s:bij.proof}.
\item From $D$, read the vectors of row numbers $\mathbf{i}_D=(i_1,\ldots,i_p)$ and diagonal numbers $\mathbf{r}_{D}$, as described in Section~\ref{s:trans}. (Note the contrast with earlier proofs, where we used column numbers; the vector $\mathbf{i}_D$ is sometimes called a compatible sequence for $\mathbf{r}_{D}$ -- see~\cite{BJS}.)
\item Let $(P_D,Q_D)$ be the insertion tableau and recording tableau of the Edelman--Greene bijection~\cite{edelman-greene} applied to the reduced word $\mathbf{r}_{D}$. Let $(P_D^T,Q_D^T)$ be the transposes of these tableaux. (In the terminology of~\cite{Serrano.Stump.2012}, this is Edelman--Greene ``column insertion''.)
\item Let $I_D=\mathbf{i}_D\circ Q_D^T$ be the tableau with the same shape as $Q_D^T$ in which the entry $t$ replaced with $i_t$, for each $t=1,\ldots,p$. By Lenart's bijection~\cite[Remark~4.12$\MK$(2)]{Lenart.2004} (see also~\cite[Theorem~3.3]{Serrano.Stump.2012}), the map $D \mapsto I_D$ is a weight preserving bijection from reduced pipe dreams for $\sigma_{\lambda}$ to column strict tableaux of shape $\lambda$ with row bounds $(1+x, 2+x, 3+x, \ldots)$. Call this family of $x$-flagged tableaux $\mathcal{FT}(\lambda,x)$. (The terminology of flagged tableaux is related to flagged Schur functions, see~\cite{wachs}.)
\item From the $x$-flagged tableau $I_D$, construct the filling $K_D$ by subtracting $u$ from every entry in row $u$. Note that the rows and columns are weakly increasing in $K_D$ and every entry is in the interval $[0,x]$, so $K_D$ is a reverse plane partition. Serrano and Stump prove in~\cite{Serrano.Stump.FPSAC} that this is the bijection used by Fomin and Kirillov in Theorem~\ref{t:fomin.kirillov.2} for the second equality.
\end{enumerate}
The resulting map $FK: (\mathbf{a},\mathbf{b}) \to (\mathbf{c}, K_D)$ is a bijection since each step is a bijection. It remains only to show that the $\q$-weight is preserved. This follows from our bijective proof of Theorem~\ref{t:macdonald.q.analog} and the fact that specializing $x_i$ to $\q^{i-1}$ in the Schubert polynomial specializes $x^T$ to $\q^{b(\lambda)} \q^{|K_D|}$.
\end{proof}
\section{Future Directions}\label{s:future}
We briefly mention some related open problems and connections to the literature here. Recall that pipe dreams can be encoded as bounded pairs, but most bounded pairs do not encode pipe dreams. In fact, in Section~\ref{s:trans}, we gave a simple test for this in terms of lexicographic order on certain related pairs. Perhaps there is another statistic based on these pairs which could be added to Macdonald's formula to find another generalization.
\begin{open}
Is there an analog of the bounded pair polynomial in Definition~\ref{d:bounded.pair.poly} which specializes to the Schubert polynomial when certain parameters are set to 0? Is there a common generalization for the Transition Equation for Schubert polynomials, bounded pairs, and its $\q$-analog?
\end{open}
Proctor's formula for plane partitions of staircase shape has a particularly nice factored form. This was key to the elegant formula in~\eqref{e:fomin.kirillov.formula}. Can the staircase shape be replaced, in any sense, with a more general partition $\lambda$?
In some sense the answer is ``no''. There exist rather general determinantal formulas for the partition function of the dimer model on a planar bipartite graph~\cite{kasteleyn}, and for ensembles of nonintersecting lattice paths in a directed acyclic graph~\cite{gessel-viennot}; it is famously possible to apply either formula to yield a determinantal formula for reverse plane partitions of arbitrary shapes (see, for instance, the book~\cite{bressoud} for an introduction to this approach to plane partition enumeration). As observed in~\cite{Fomin-Kirillov}, typically this determinant cannot be written as a product of nice factors. Nonetheless, the more general Fomin--Kirillov formula in Theorem~\ref{t:fomin.kirillov.2} makes it desirable to improve these enumerative results as much as possible.
\begin{open}
Is there a nice formula for $|\rpp^{\lambda}(x)|$ or $[\rpp^{\lambda}(x)]_\q$ for any large class of partitions $\lambda$, as in the case of staircase shapes as noted in Theorem~\ref{t:fomin.kirillov}?
\end{open}
Stembridge~\cite[Theorem~1.1]{stembridge} gives a formula for a weighted enumeration of maximal saturated chains in the Bruhat order for any Weyl group which is very similar to Macdonald's formula. This formula is related to the study of degrees of Schubert varieties, see~\cite{chevalley, postnikov-stanley}, and has no obvious direct connection to Theorem~\ref{t:macdonald}. Stanley~\cite[Equation~(23)] {Stanley.perms} stated the following version of Stembridge's weighted enumeration formula in the case of $S_{n}$ and noted the similarity to Macdonald's formula. Given $w \in S_n$ of length $p$, let
\[
\begin{split}
\mathcal{T}(w) := \Bigl\{&\bigl((i_1, j_1), (i_2, j_2), \ldots, (i_p, j_p)\bigr) \;:\; w = t_{i_1,j_1}t_{i_2,j_2}\cdots t_{i_p,j_p} \\ &\quad\text{and }
\ell(t_{i_1,j_1}t_{i_2,j_2}\cdots t_{i_k,j_k}) = k \text{ for all } 1 \leq k
\leq p \Bigr\}.
\end{split}
\]
\begin{theo}
For $w=w_0 \in S_n$, we have
\begin{equation}\label{eqn:mysterious}
\sum_{((i_1, j_1), (i_2, j_2), \ldots, (i_p, j_{p})) \in \mathcal{T}(w_0)} (j_1-i_1)(j_2-i_2) \cdots(j_{p} - i_{p}) = \binom{n}{2}!.
\end{equation}
\end{theo}
\begin{open}
Can~\eqref{eqn:mysterious} be proven bijectively, using a similar technique to our $\MacdonaldMap$ bijection?
\end{open}
The left side of~\eqref{eqn:mysterious} has a natural interpretation in terms of pairs $(\bfa, \mathbf{b})$ where $\bfa$ is a word of transpositions $(i_k,j_k)$, and $\mathbf{b} = (b_1, \ldots, b_p)$ is a sort of ``bounded word'' with bounds $i_k \leq b_k < j_k$. However, no analogue of Little's bumping map is known for maximal saturated Bruhat chains. Worse,~\eqref{eqn:mysterious} is only known to hold for the longest word $w_0$ and a few special cases. It would be necessary to find a generalization of~\eqref{eqn:mysterious} to other $w$ before the strategy outlined in this paper could apply.
The Grothendieck polynomials are the $K$-theory analog of Schubert polynomials for the flag manifolds~\cite{lascoux.schutzenberger.grothendieck}. There is a Transition Formula for these polynomials~\cite{lascoux.grothendieck.transition}. Anders Buch asked the following question.
\begin{open}
What is the analog of Macdonald's formula for Grothendieck polynomials and what is the corresponding bijection?
\end{open}
Fomin--Kirillov~\cite{fomin-kirillov-yang-baxter} state a Macdonald-type formula for the longest word $w_0$, as a corollary of their work on the degenerate Hecke algebra. Curiously the Stirling numbers of the second kind appear on the right side of the formula. There are also some recent partial results on this open problem due to Reiner, Tenner and Yong~\cite{RTY.2016}. In particular, see their Definition~6.2 and Conjecture~6.3.
\section{Appendix}\label{s:appendix}
For $\mathbf{a} = (4)$ with $\comaj(\mathbf{a}) = 0$ and $j=5$, we have $v^5(\mathbf{a})=(4, 3)$.
\[
\begin{array}{|c|c|c|c|c|c|c|}
\hline i & h^5_i(\mathbf{a}) & \text{insert} & y_i^5(\mathbf{a}) & \comaj(y_i^5(\mathbf{a})) &h^5_i(\mathbf{a})-1 & v^5_i(\mathbf{a}) \\
\hline 0 & 5 & (4 4) & (5 4) & 0 & 4 & 4
\\
1 & 4 & (4 3) & (5 4) & 0 & 3 & 3
\\
\hline
\end{array}
\]
For $\mathbf{a} = (4, 3)$ with $\comaj(\mathbf{a}) = 0$ and $j=5$, we have $v^5(\mathbf{a})=(4, 3, 2)$.
\[
\begin{array}{|c|c|c|c|c|c|c|}
\hline i & h^5_i(\mathbf{a}) & \text{insert} & y_i^5(\mathbf{a}) & \comaj(y_i^5(\mathbf{a})) &h^5_i(\mathbf{a})-1 & v^5_i(\mathbf{a}) \\
\hline 0 & 5 & (4 4 3) & (5 4 3) & 0 & 4 & 4
\\
1 & 4 & (4 3 3) & (5 4 3) & 0 & 3 & 3
\\
2 & 3 & (4 3 2) & (5 4 3) & 0 & 2 & 2
\\
\hline
\end{array}
\]
For $\mathbf{a} = (4, 3, 5)$ with $\comaj(\mathbf{a}) = 2$ and $j=5$, we have $v^5(\mathbf{a})=(5, 4, 3, 2)$.
\[
\begin{array}{|c|c|c|c|c|c|c|}
\hline i & h^5_i(\mathbf{a}) & \text{insert} & y_i^5(\mathbf{a}) & \comaj(y_i^5(\mathbf{a})) &h^5_i(\mathbf{a})-1 & v^5_i(\mathbf{a}) \\
\hline 0 & 5 & (4 4 3 5) & (5 4 3 5) & 3 & 4 & 5
\\
1 & 4 & (4 3 3 5) & (5 4 3 5) & 3 & 3 & 4
\\
2 & 3 & (4 3 2 5) & (5 4 3 5) & 3 & 2 & 3
\\
3 & 3 & (4 3 5 2) & (5 4 5 3) & 2 & 2 & 2
\\
\hline
\end{array}
\]
For $\mathbf{a} = (4, 3, 5, 6)$ with $\comaj(\mathbf{a}) = 5$ and $j=5$, we have $v^5(\mathbf{a})=(6, 5, 4, 3, 2)$.
\[
\begin{array}{|c|c|c|c|c|c|c|}
\hline i & h^5_i(\mathbf{a}) & \text{insert} & y_i^5(\mathbf{a}) & \comaj(y_i^5(\mathbf{a})) &h^5_i(\mathbf{a})-1 & v^5_i(\mathbf{a}) \\
\hline 0 & 5 & (4 4 3 5 6) & (5 4 3 5 6) & 7 & 4 & 6
\\
1 & 4 & (4 3 3 5 6) & (5 4 3 5 6) & 7 & 3 & 5
\\
2 & 3 & (4 3 2 5 6) & (5 4 3 5 6) & 7 & 2 & 4
\\
3 & 3 & (4 3 5 2 6) & (5 4 5 3 6) & 6 & 2 & 3
\\
4 & 3 & (4 3 5 6 2) & (5 4 5 6 3) & 5 & 2 & 2
\\
\hline
\end{array}
\]
For $\mathbf{a} = (4, 3, 5, 6, 4)$ with $\comaj(\mathbf{a}) = 5$ and $j=5$, we have $v^5(\mathbf{a})=(6, 5, 4, 3, 7, 2)$.
\[
\begin{array}{|c|c|c|c|c|c|c|}
\hline i & h^5_i(\mathbf{a}) & \text{insert} & y_i^5(\mathbf{a}) & \comaj(y_i^5(\mathbf{a})) &h^5_i(\mathbf{a})-1 & v^5_i(\mathbf{a}) \\
\hline 0 & 5 & (4 4 3 5 6 4) & (5 4 3 5 6 4) & 7 & 4 & 6
\\
1 & 4 & (4 3 3 5 6 4) & (5 4 3 5 6 4) & 7 & 3 & 5
\\
2 & 3 & (4 3 2 5 6 4) & (5 4 3 5 6 4) & 7 & 2 & 4
\\
3 & 3 & (4 3 5 2 6 4) & (5 4 5 3 6 4) & 6 & 2 & 3
\\
4 & 3 & (4 3 5 6 2 4) & (5 4 5 6 3 4) & 10 & 2 & 7
\\
5 & 3 & (4 3 5 6 4 2) & (4 3 5 6 4 3) & 5 & 2 & 2
\\
\hline
\end{array}
\]
For $\mathbf{a} = (4, 3, 5, 6, 4, 3)$ with $\comaj(\mathbf{a}) = 5$ and $j=5$, we have $v^5(\mathbf{a})=(6, 5, 4, 3, 7, 8, 9)$.
\[
\begin{array}{|c|c|c|c|c|c|c|}
\hline i & h^5_i(\mathbf{a}) & \text{insert} & y_i^5(\mathbf{a}) & \comaj(y_i^5(\mathbf{a})) &h^5_i(\mathbf{a})-1 & v^5_i(\mathbf{a}) \\
\hline 0 & 5 & (4 4 3 5 6 4 3) & (5 4 3 5 6 5 4) & 7 & 4 & 6
\\
1 & 4 & (4 3 3 5 6 4 3) & (5 4 3 5 6 5 4) & 7 & 3 & 5
\\
2 & 3 & (4 3 2 5 6 4 3) & (5 4 3 5 6 5 4) & 7 & 2 & 4
\\
3 & 3 & (4 3 5 2 6 4 3) & (5 4 5 3 6 5 4) & 6 & 2 & 3
\\
4 & 3 & (4 3 5 6 2 4 3) & (5 4 5 6 3 5 4) & 10 & 2 & 7
\\
5 & 3 & (4 3 5 6 4 2 3) & (5 4 5 6 5 3 4) & 11 & 2 & 8
\\
6 & 4 & (4 3 5 6 4 3 3) & (5 4 5 6 5 3 4) & 11 & 3 & 9
\\
\hline
\end{array}
\]
For $\mathbf{a} = (4, 3, 5, 6, 4, 3, 5)$ with $\comaj(\mathbf{a}) = 11$ and $j=5$, we have $v^5(\mathbf{a})=(7, 6, 5, 4, 8, 9, 10, 3)$.
\[
\begin{array}{|c|c|c|c|c|c|c|}
\hline i & h^5_i(\mathbf{a}) & \text{insert} & y_i^5(\mathbf{a}) & \comaj(y_i^5(\mathbf{a})) &h^5_i(\mathbf{a})-1 & v^5_i(\mathbf{a}) \\
\hline 0 & 5 & (4 4 3 5 6 4 3 5) & (5 4 3 5 6 5 4 5) & 14 & 4 & 7
\\
1 & 4 & (4 3 3 5 6 4 3 5) & (5 4 3 5 6 5 4 5) & 14 & 3 & 6
\\
2 & 3 & (4 3 2 5 6 4 3 5) & (5 4 3 5 6 5 4 5) & 14 & 2 & 5
\\
3 & 3 & (4 3 5 2 6 4 3 5) & (5 4 5 3 6 5 4 5) & 13 & 2 & 4
\\
4 & 3 & (4 3 5 6 2 4 3 5) & (5 4 5 6 3 5 4 5) & 17 & 2 & 8
\\
5 & 3 & (4 3 5 6 4 2 3 5) & (5 4 5 6 5 3 4 5) & 18 & 2 & 9
\\
6 & 4 & (4 3 5 6 4 3 3 5) & (5 4 5 6 5 3 4 5) & 18 & 3 & 10
\\
7 & 4 & (4 3 5 6 4 3 5 3) & (4 3 5 6 4 3 5 4) & 11 & 3 & 3
\\
\hline
\end{array}
\]
%\section*{Acknowledgments}\label{s:ack}
\longthanks{Many thanks to Connor Ahlbach, Sami Assaf, Anders Buch, Sergey Fomin, Ira Gessel, Zachary Hamaker, Avi Levy, Peter McNamara, Maria Monks Gillespie, Alejandro Morales, Richard Stanley, Dennis Stanton, Joshua Swanson, and Marisa Viola for helpful discussions on this work.}
\bibliographystyle{amsplain-ac}
\bibliography{ALCO_Billey_42}
\end{document}