% The Chicago Journal of Theoretical Computer Science, Volume 1996, Article 5
% Published by MIT Press, Cambridge, Massachusetts USA
% Copyright 1996 by Massachusetts Institute of Technology
%
% Running style for the journal's page layout.
\newtoks\rstyle
\rstyle={cjlook}
%
% Load the cjstruct class.  \ifx against \undeftex (presumably never
% defined) tests whether \documentclass exists: LaTeX 2.09 falls back to
% the old \documentstyle form, LaTeX2e uses \documentclass.
\ifx\documentclass\undeftex
\documentstyle[v1.1,published]{cjstruct}
\else
\documentclass[v1.1,published]{cjstruct}
\fi
%
\usestyleresource{amstex}
\usestyleresource{multirow}
%\usestyleresource{inputvb}
%
% Local macro definitions for this article.
%
% Compatibility shims: when not running under LaTeX2e (\ifltwoe false),
% provide the 2e text-font commands in terms of the 2.09 declarations,
% with an italic correction appended.
\ifltwoe\else\newcommand{\textit}[1]{{\it #1\/}}\fi
\ifltwoe\else\newcommand{\emph}[1]{{\em #1\/}}\fi
\ifltwoe\else\newcommand{\textup}[1]{{\rm #1\/}}\fi
\ifltwoe\else\newcommand{\textbf}[1]{{\bf #1\/}}\fi
% \ltwoemm: a no-op under 2e, but a "$" under 2.09 -- used to toggle math
% mode around symbols (e.g. \ldots in the body) where 2.09 needs it.
\ifltwoe\newcommand{\ltwoemm}{\relax}\else\newcommand{\ltwoemm}{$}\fi
%
% Make @ a letter temporarily so the kernel-internal \@beginparpenalty
% can be used; restore the "other" catcode afterwards.
\catcode`\@=11
\ifx\nopagebreakbeginpar\undeftex
\newcommand{\nopagebreakbeginpar}{\@beginparpenalty10000}
\fi
\catcode`\@=12
%
% \nohyphenl: permit a line break before #1 without inserting a hyphen.
\newcommand\nohyphenl[1]{{\discretionary{}{}{}{#1}}}
% Custom column separation for hand-built tabulars in the body.
\newlength{\customtabcolsep}
\setlength{\customtabcolsep}{0.25em}
%
% \statepredicate: state-predicate names set upright as \mathord atoms
% (\mathstyleclass is provided by the cjstruct class).
\mathstyleclass{\statepredicate}{\mathord}{\textup}
%
% "size" as an upright operator-like symbol for tree sizes.
\newcommand{\treesize}{\mathord{\mbox{\textup{size}}}}
% Define \filename, or redefine it if the class already supplies one.
\ifx\filename\undeftex
\newcommand{\filename}[1]{\emph{#1}}
\else
\renewcommand{\filename}[1]{\emph{#1}}
\fi
% Big-O notation.
\newcommand{\orderof}[1]{O(#1)}
\newcommand{\emailaddr}[1]{\texttt{#1}}
\newcommand{\url}[1]{\texttt{#1}}
% Literal tilde character for URLs.
\newcommand{\urltilde}{\char`~}
% Set-builder notation { #1 | #2 } with thin spaces inside the braces.
\newcommand{\setof}[2]{\{\,#1\mid#2\,\}}
% Case analysis: a left brace over a two-column array of value/condition
% rows supplied in #1 (rows separated by \\).
\newcommand{\condval}[1]{\left\{\begin{array}{ll}#1\end{array}\right.}
\newcommand{\floor}[1]{\left\lfloor#1\right\rfloor}
% Single-letter names used in the protocol's state descriptions
% (presumably orientation and mode fields of a vertex -- see body).
\newcommand{\vertorientation}{o}
\newcommand{\vertmode}{m}
%
% progstmtlist: a ragged-right list environment for program statements
% with item and paragraph separation suppressed.  The \setlength
% assignments occur just after \begin{list}, before the first \item
% consults them; the \begingroup/\endgroup pair keeps \raggedright local.
\newenvironment{progstmtlist}{%
\begingroup\raggedright
\begin{list}{}{}
\setlength{\itemsep}{0ex}
\setlength{\parsep}{0ex}
}{%
\end{list}
\endgroup
}
%
% Program-text notation.  \kwd, \comment, and \idr restyle commands
% already provided by the document class: keywords bold, comments
% parenthesized, identifiers in text italics.
\renewcommand{\kwd}[1]{\textbf{#1}}
\renewcommand{\comment}[1]{(#1)}
\renewcommand{\idr}[1]{\text{\textit{#1}}}
% Typed declarations and program operators, with proper math spacing
% classes (relation for = and :=, binary for the conjunction).
\newcommand{\oftype}{\colon}
\newcommand{\equtest}{\mathrel{=}}
\newcommand{\assigned}{\mathrel{:=}}
\mathstyleclass{\progfunc}{\mathord}{\textit}
\newcommand{\progand}{\mathbin{\wedge}}
%
% \procstate: angle-bracketed tuple describing a processor's state.
\newcommand{\procstate}[1]{\langle #1 \rangle}
%
% \setsize: |#1| with the bars given explicit open/close classes so
% spacing is correct regardless of context.
\newcommand{\setsize}[1]{\mathopen{|}#1\mathclose{|}}
% Port states: a parenthesized arrow labeled above by #1.
\newcommand{\leftportstate}[1]{(\stackrel{#1}{\leftarrow})}
\newcommand{\rightportstate}[1]{(\stackrel{#1}{\rightarrow})}
% \orientededge: a single solid 4ex bar, labeled above by #1.
\newcommand{\orientededge}[1]{\stackrel{#1}{\rule[.4ex]{4ex}{.3ex}}}
% \disorientededge: four 1ex bars at alternating heights (a "broken"
% bar), labeled above by #1.  The trailing %'s suppress spurious spaces.
\newcommand{\disorientededge}[1]{%
\stackrel{#1}{%
\rule[.4ex]{1ex}{.3ex}%
\rule[0ex]{1ex}{.3ex}%
\rule[.4ex]{1ex}{.3ex}%
\rule[0ex]{1ex}{.3ex}%
}%
}
%
% Marker for interactions in state diagrams/tables.
\newcommand{\interactionmark}{*}
% Subscripted counting symbols (presumably counts of ignored and
% disoriented events -- see the analysis in the body).
\newcommand{\numberignored}{\#_I}
\newcommand{\numberdisoriented}{\#_D}
%
% Proof bodies are set in the normal (upright) font shape.
\newcommand{\prooffontshape}{\fontshape{n}\selectfont}
%
% Macros for the publisher's comment
%
% The circle R symbol for a registered trade name is adapted from the
% circled c copyright symbol defined in The TeXBook by Donald Knuth.
%
% Superscripted R-in-a-circle: a tiny upright R overlaid (via \ooalign)
% on the \mathhexbox20D circle, wrapped in \(...\) so it works in text.
\def\registered{\({}^{\ooalign%
{\hfil\raise.07ex\hbox{{\tiny\textup{R}}}%
\hfil\crcr\scriptsize\mathhexbox20D\cr}}\)}
%
% Macros for the editors' comment
%
% The cross symbol indicates a recently deceased editor.
%
% \deceased: prefix an editor's name with a dagger; defined only if the
% document class has not already provided it.
\ifx\deceased\undeftex
\newcommand{\deceased}[1]{\dag#1}
\fi
%
% end of local macro definitions for this article
%
\begin{document}
%
\begin{articleinfo}
%
\publisher{The MIT Press}
%
\publishercomment{%
{\Large
\begin{center}
Volume 1996, Article 5\\
\textit{5 December 1996}
\end{center}
}
\par\noindent
ISSN 1073-0486\@. MIT Press Journals, 55 Hayward St., Cambridge, MA
02142 USA; (617)253-2889;
\emph{journals-orders@mit.edu, journals-info@mit.edu}\@.
Published one article at a time in \LaTeX\ source form on the
Internet\@. Pagination varies from copy to copy\@. For more
information and other articles see:
\begin{itemize}
\item \emph{http://www-mitpress.mit.edu/jrnls-catalog/chicago.html}
\item \emph{http://www.cs.uchicago.edu/publications/cjtcs/}
\item \emph{gopher.mit.edu}
\item \emph{gopher.cs.uchicago.edu}
\item anonymous \emph{ftp} at \emph{mitpress.mit.edu}
\item anonymous \emph{ftp} at \emph{cs.uchicago.edu}
\end{itemize}
\sentence
\par\noindent
The \emph{Chicago Journal of Theoretical Computer Science} is
abstracted or indexed in \emph{Research Alert,\registered
SciSearch,\registered Current Contents\registered/Engineering
Computing \& Technology}, and \emph{CompuMath Citation Index\@.\registered}
}
%
\copyrightstmt{%
\copyright 1996 The Massachusetts Institute of Technology\@.
Subscribers are licensed to use journal articles in a variety of
ways, limited only as required to insure fair attribution to authors
and the journal, and to prohibit use in a competing commercial
product\@. See the journal's World Wide Web site for further
details\@. Address inquiries to the Subsidiary Rights Manager, MIT
Press Journals; (617)253-2864;
\emph{journals-rights@mit.edu}\@.\pagebreak[2]
}
%
\journal{Chicago\nolinebreak[1] Journal of Theoretical Computer\nolinebreak[1] Science}
%
\journalcomment{%
The \emph{Chicago Journal of Theoretical Computer Science}
is a peer-reviewed scholarly
journal in theoretical computer science\@. The journal is committed
to providing a forum for significant results on theoretical
aspects of all topics in computer science\@.
\par
\begin{trivlist}
\item[\emph{Editor in chief:}] Janos Simon
\item[\emph{Consulting editors:}]
Joseph Halpern, Stuart A.\ Kurtz, Raimund Seidel
\item[\emph{Editors:}]
\begin{tabular}[t]{lll}
Martin Abadi & Greg Frederickson & John Mitchell \\
Pankaj Agarwal & Andrew Goldberg & Ketan Mulmuley \\
Eric Allender & Georg Gottlob & Gil Neiger \\
Tetsuo Asano & Vassos Hadzilacos & David Peleg \\
L\'aszl\'o Babai & Juris Hartmanis & Andrew Pitts \\
Eric Bach & Maurice Herlihy & James Royer \\
Stephen Brookes & Ted Herman & Michael Merritt \\
Jin-Yi Cai & Steve Homer & Alan Selman \\
Anne Condon & Neil Immerman & Nir Shavit \\
Cynthia Dwork & \deceased{Paris Kanellakis} & Eva Tardos \\
David Eppstein & Howard Karloff & Sam Toueg \\
Ronald Fagin & Philip Klein & Moshe Vardi \\
Lance Fortnow & Phokion Kolaitis & Jennifer Welch \\
Steven Fortune & Stephen Mahaney & Pierre Wolper
\end{tabular}
\vspace{1ex}
\item[\emph{Managing editor:}] Michael J.\ O'Donnell
\vspace{1ex}
\item[\emph{Electronic mail:}] \emph{chicago-journal@cs.uchicago.edu}
\end{trivlist}
\sentence
\pagebreak[0]
}
%
\bannerfile{cjtcs-banner.tex}
%
\volume{1996}
%
\articleid{5}
%
\selfcitation{
@article{cj96-05,
author={H.\ James Hoover and Piotr Rudnicki},
title={Uniform Self-Stabilizing Orientation of Unicyclic
Networks under Read/Write Atomicity},
journal={Chicago Journal of Theoretical Computer Science},
volume={1996},
number={5},
publisher={MIT Press},
month={December},
year={1996}
}
}
%
\begin{retrievalinfo}
\end{retrievalinfo}
%
\title{Uniform Self-Stabilizing Orientation of Unicyclic Networks
under Read/Write\nolinebreak[1] Atomicity}
\shorttitle{Uniform Self-Stabilizing Orientation}
\collectiontitle{Special Issue on Self-Stabilization}
\editorname{Shlomi Dolev, Jennifer Welch}
%
\editorcomment{\samepage%
This article is included in the \emph{Special Issue on
Self-Stabilization}, edited by Shlomi Dolev and Jennifer Welch\@.
The article presents a randomized self-stabilizing orientation
algorithm for unicyclic networks\@. The algorithm is designed for a
system of identical processors that supports only read/write
atomicity\@. Under such settings, randomization is necessary to
break symmetry and orient the ring\@. The expected stabilization
time of the algorithm is \(\orderof{n^2}\)\@. The correctness of the
algorithm depends in part on a two-processor token-passing
protocol\@. The correctness of the token-passing protocol is
established by a state-space search computing the probability that
the scheduling adversary can avoid self-stabilization\@.
\par
The editor for this article is Vassos Hadzilacos\@.
}
%
\begin{authorinfo}
%
% Correction to error in cjstruct.sty version 1.1.
%
% Applied only when the class reports exactly version 1.1 (\ifx compares
% the replacement texts of \cjversion and \fixversion).  The corrected
% \printname records the author string globally and, in info mode,
% echoes it as a list item.
\def\fixversion{1.1}
\ifx\cjversion\fixversion
\renewcommand{\printname}[1]{
\global\authorstring={#1}
\global\authorstrue
\ifinfo\item Printed name: #1\fi
}
\fi
%
% End of correction.
%
\printname{H.\ James Hoover}
\collname{}{Hoover, H. James}
\begin{institutioninfo}
\instname{University of Alberta}
\department{Department of Computing Science}
\address{}{Edmonton}{Alberta}{Canada}{T6G 2H1}
\end{institutioninfo}
\email{hoover@cs.ualberta.ca}
\end{authorinfo}
%
\begin{authorinfo}
\printname{Piotr Rudnicki}
\collname{}{Rudnicki, Piotr}
\begin{institutioninfo}
\instname{University of Alberta}
\department{Department of Computing Science}
\address{}{Edmonton}{Alberta}{Canada}{T6G 2H1}
\end{institutioninfo}
\email{piotr@cs.ualberta.ca}
\end{authorinfo}
%
\shortauthors{Hoover, Rudnicki}
%
\support{%
H.~James Hoover's research was supported by the Natural
Sciences and Engineering Research Council of Canada, grant OGP
38937\@. Piotr Rudnicki's research was supported by the Natural
Sciences and Engineering Research Council of Canada, grant OBP
09207\@.
}
%
\begin{editinfo}
\submitted{25}{7}{1995}\reported{30}{10}{1995}
\submitted{26}{2}{1996}\reported{11}{5}{1996}
\submitted{23}{8}{1996}\reported{6}{9}{1996}
\submitted{6}{9}{1996}\reported{16}{9}{1996}\published{5}{12}{1996}
\end{editinfo}
%
\end{articleinfo}
%
\begin{articletext}
%
\begin{abstract}
%
\apar{Abstract-1}
A distributed system is \emph{self-stabilizing} if its behavior is
correct regardless of its initial state\@. A \emph{ring} is a
distributed system in which all processors are connected in a cycle
and each processor communicates directly with only its two neighbors\@.
A ring is \emph{oriented} when all processors have a consistent notion
of their left and right neighbors\@. A ring is \emph{uniform} when all
processors run the same program and have no distinguishing attributes,
such as processor IDs\@. A well-known self-stabilizing uniform
protocol for ring orientation is that of \cite{cj96-05-17}\@. For a ring
of size \(n\), this protocol will stabilize in expected
\(\orderof{n^2}\) processor activations\@. This assumes that processors
are scheduled by a \emph{distributed demon}---one in which the
communication registers between processors can be atomically updated
(a read followed by a write), and the processors have the ability to
make random choices\@.
%
\apar{Abstract-2}
This paper generalizes the notion of orienting a ring to that of
orienting a \emph{unicyclic} network, that is, a ring with trees
attached\@. We present a very simple protocol for the self-stabilizing
orientation of such unicyclic networks\@. It has the same expected
\(\orderof{n^2}\) processor activation performance as the Israeli and Jalfon
protocol for rings\@. But ours works under the more general scheduling
assumption of a \emph{read/write demon}---one in which a read or write
of a communication register is atomic, but an update (a read followed
by a write) is not\@. We similarly assume the ability to make random
choices\@. A subresult of our protocol is a uniform self-stabilizing
algorithm for the two-processor token-passing problem under the
read/write demon\@.
%
\apar{Abstract-3}
Our protocol is uniform in the sense that all processors of the same
degree are identical\@. In addition, observations of the behavior of
the protocol on an edge yield no information about the degree of the
processors at the ends of the edge\@.
%
\end{abstract}
%
\asection{1}{Self-Stabilization and Ring Orientation}
%
\apar{1-1}
The prototypical distributed system is a collection of discrete-state
machines connected by a network\@. Such a system is
\emph{self-stabilizing} if it has the property that regardless of its
current state, it will eventually enter and remain within a
well-defined set of stable states, called the \emph{target set} (or
the \emph{safe} or \emph{legitimate set})\@. Although a longstanding
notion in control theory, its first application to computing systems
is generally credited to Dijkstra~\cite{cj96-05-06,cj96-05-07}\@.
%
\apar{1-2}
Dijkstra's original paper was virtually ignored for a decade, with the
exception of \cite{cj96-05-21}\@. It took almost another decade before
there was sufficient material\footnote{Self-stabilization is now an
active research area, with a number of World Wide Web sites easily
located by a search engine\@.} to warrant the survey by
\cite{cj96-05-25}\@. Although our paper is self-contained, we assume
that the reader is familiar with the general perspective of
self-stabilization as would be provided by Schneider's survey\@.
%
\begin{sloppypar}
\apar{1-3}
Two fundamental problems of self-stabilization are \emph{mutual exclusion}
\cite{cj96-05-01,cj96-05-03,cj96-05-04,cj96-05-05,cj96-05-06,cj96-05-07,%
cj96-05-09,cj96-05-11,cj96-05-12,cj96-05-13,cj96-05-18,cj96-05-20,%
cj96-05-21,cj96-05-22,cj96-05-23,cj96-05-24}
and \emph{ring orientation}
\cite{cj96-05-02,cj96-05-15,cj96-05-17,cj96-05-26}\@.
Mutual exclusion can be cast in terms of a \emph{token-passing}
problem, in which the privilege of entering the critical section is
conferred by the possession of a token that is passed among the
processors\@. Ring orientation is the problem of obtaining consensus
among the processors for a consistent notion of left and right\@. That
is, if processor \(A\) thinks its right neighbor is processor \(B\),
then processor \(B\) must think that its left neighbor is
processor~\(A\)\@.
\end{sloppypar}
%
\apar{1-4}
On a ring network topology, mutual exclusion and orientation are
closely related\@. For example, orienting a ring first, and then passing
a token from left to right is one way to ensure fair exclusive access
to a resource\@. On the other hand, if a token is being passed among
processors in a ring, as it moves between processors it induces an
orientation (call left the direction it arrives from, and right the
direction it leaves)\@. If the token ever arrives at a processor from a
different direction than it departed, then the passage of the token
through the ring will have established a consistent orientation\@.
%
\apar{1-5}
Both of these problems become more difficult when processors are
required to be identical and anonymous (i.e., they lack any
distinguishing attribute such as a processor
ID)\@. Self-stabilization becomes even more interesting in a
message-passing model of communication, rather than the
shared-variable model we use here\@. (See \cite{cj96-05-19}, for
example\@.)
%
\apar{1-6}
Israeli and Jalfon \cite{cj96-05-17} solve the problem of the uniform
self-stabilizing orientation of a ring of processors\@. In their model,
each processor is a finite-state machine that can make random choices
and can communicate directly only with its two neighbors\@. Processors
communicate through shared variables (or registers)\@. Activation of
the processors is under the control of a \emph{distributed scheduling
demon}\@. Under the distributed demon, an atomic transition of a
processor consists of reading all input registers, changing the state,
and then writing all output registers\@.
%
\apar{1-7}
In their paper, Israeli and Jalfon show that no deterministic protocol
exists for the self-stabilizing orientation of a ring\@. They then give
a protocol that achieves orientation in expected \(\orderof{n^2}\)
processor activations under the distributed demon\@. Their protocol is
moderately complex\@. It is composed of two self-stabilizing
subprotocols, one of which is randomized and the other of which is
deterministic\@. The subprotocols are then combined into the main
protocol using Dolev, Israeli, and Moran's technique~\cite{cj96-05-09}
for the fair composition of self-stabilizing protocols\@.
%
\apar{1-8}
In more recent results, Hoepman~\cite{cj96-05-14} gives a
deterministic uniform algorithm for orienting rings of odd length, and
Tsai and Huang~\cite{cj96-05-27} give results for rings of arbitrary
size under the central demon, and odd-length rings under the
distributed demon\@.
%
\apar{1-9}
Our result has much in common with the scheduler-luck games of Dolev,
Israeli, and Moran \cite{cj96-05-10,cj96-05-08}, as will be mentioned
in Section~2.4\@.
%
\asubsection{1.1}{Outline}
%
\begin{sloppypar}
\apar{1.1-1}
This paper is organized as follows\@. Section~2 introduces our standard
model of computation, scheduling demon, and the general-state
exploration method for determining self-stabilization\@. Section~3 then
applies the state-exploration method to show that the two-processor
token-passing problem is self-stabilizing (in the standard model)\@.
Section~4 introduces the extension of orientation from rings to
unicyclic networks\@. Section~5 then develops a uniform protocol for
orienting unicyclic networks on a custom computation model that
assumes a primitive operation for arbitrating conflicts between
adjacent processors\@. Section~6 implements the custom model
on the standard model by implementing the arbitration primitive using
the self-stabilizing token-passing protocol, thus establishing the
main result of the paper\@.
\end{sloppypar}
%
\asection{2}{The Standard Model}
%
\asubsection{2.1}{Processor Networks}
%
\apar{2.1-1}
Our orientation protocol will ultimately be executed on what we call
the \emph{standard model} of computation\@. It is a \emph{processor
network} in which processors are placed at the vertices of the
network, and all communication between processors occurs over I/O
ports associated with the edges\@. Each processor at a vertex of degree
\(k\) in the network has exactly \(k\) ports, numbered \(0\), \(1\),
\ltwoemm\ldots\ltwoemm, \(k-1\)\@. Each port consists of an input register and an output
register, details of which appear later\@.
%
\apar{2.1-2}
A processor network is described by a connected graph \(G=(V,E)\) with
vertex set \(V=\{0,\dots,n-1\}\) and edge set
\(E\subset(V\times\{0,1,2,\dots\})\times{(V\times\{0,1,2,\dots\})}\)\@.
Processor \(P_{v}\) is associated with vertex \(v\)\@. Each port of
\(P_{v}\) is assigned to a distinct edge of \(E\) incident on vertex
\(v\)\@. An edge \(((v,i),(w,j))\) of \(E\) indicates that processor
\(P_{v}\) has its \(i\) port connected to port \(j\) of processor
\(P_{w}\)\@. The connection ensures that the output register of port
\(i\) is connected to the input register of port \(j\), and vice
versa\@. We use this elaborate edge description because the notion of
orientation requires the ability to specify a particular port number,
and it is possible that two processors may be connected by multiple
edges\@.
%
\asubsection{2.2}{Basic Processor Operations}
%
\apar{2.2-1}
The \emph{state} of a processor consists of the contents of its port
registers, along with the current state of the program controlling the
processor\@. The basic atomic operations of a processor are as follows:
\emph{read} on port \(i\), which transfers the contents of the
neighbor's output register to the input register of port \(i\);
\emph{write} on port \(i\), which writes a new value into the output
register of port \(i\); make an \emph{internal step}, which simply
changes the state of the internal program of the processor without
altering the port registers; and make a uniform \emph{random choice}
from a finite set\@. In the case that the two processors on an edge
activate simultaneously, a read on a port will complete before the
write of the other processor to that port\@. Thus, when multiple
processors are activated, executions in this model can be serialized
by the simple expedient of scheduling all the read operations before
all the write operations\@.
%
\asubsection{2.3}{The Scheduling Demon}
%
\apar{2.3-1}
Reasoning about distributed systems usually assumes that state changes
cannot be observed while they are occurring\@. That is, the operations
that change the state are atomic, and are considered to occur
instantaneously\@. Alternatively, one can think of atomic operations as
sequences of internal actions that must be completed before the
scheduler can activate another processor\@.
%
\apar{2.3-2}
Reasoning proceeds by analyzing how the system behaves when the
execution of the atomic operations are scheduled by various styles of
demon\@. For example, the \emph{central demon} (introduced by Dijkstra in
\cite{cj96-05-06}) schedules only one atomic shared-variable update
operation at a time, while the \emph{distributed demon} (introduced by
Brown and co-workers in \cite{cj96-05-03}) is permitted to schedule
simultaneous atomic update operations from different processors\@. The
highly adversarial \emph{read/write demon} (introduced by Dolev,
Israeli, and Moran in \cite{cj96-05-09}) breaks shared-variable-update
operations into read and write phases, and allows interleaving of the
phases among processors\@.
%
\apar{2.3-3}
We assume the read/write demon for our execution scheduler\@. In this
model, the actions of the processors are serializable, that is, if two
or more processors are activated simultaneously, it is possible to
activate them sequentially and achieve the same result\@. The scheduler
decides which single processor to activate at each step, and actively
works to defeat the protocol\@.
%
\apar{2.3-4}
Because of the way processors are defined, it is always possible for a
processor to be activated at a scheduling step\@. Thus, without any
restrictions on the scheduler, it could simply activate the same
processor continuously, and avoid stabilization\@. One kind of
restriction is that the scheduler be \emph{fair}, as in
\cite{cj96-05-09}\@. A fair scheduler must ensure that no processor has
a monopoly on activations, nor is any processor excluded from
activations\@.
%
\apar{2.3-5}
Instead of worrying about the fairness of the scheduler, we will
require that any activations by the scheduler must, if possible, make
progress with nonzero probability\@. Intuitively this means that the
scheduler can be as unfair as it wants, so long as it does not
simply keep activating one processor in a busy-wait cycle\@. This
serves to abstract out any busy-wait cycles from the execution of the
protocol, and enables us to mechanically explore the state space\@.
%
\begin{alabsubtext}{Definition}{2.1}{}
%
The read/write scheduling demon makes \emph{progress} if it
activates a processor that:
%
\begin{enumerate}
%
\item[1.] executes a write, or
%
\item[2.] executes a read that obtains a value different from the current
contents of the input port,~or
%
\item[3.] executes a read that obtains the same value as the current
contents of the input port, but for which the next write, with nonzero
probability, would result in a change in the writing processor's
state\@.
%
\end{enumerate}
%
\end{alabsubtext}
%
\apar{2.3-6}
These conditions prevent the scheduler from activating a processor in
the two-step cycle that first does a read that obtains no new data,
and then does a write that writes no new data, leaving the processor
back in its original state\@. Such a sequence is completely invisible to
the other processors, and so could continue forever\@.
%
\asubsection{2.4}{State Space Exploration}
%
\apar{2.4-1}
Reasoning about the behavior of a supposed self-stabilizing system is
complicated by the fact that the system can be in any possible state,
yet must eventually enter one of the target states\@. The reasoning is
further complicated by the ability of processors to make random
choices and the fact that the scheduler is an adversary that is
working to defeat the protocol\@.
%
\apar{2.4-2}
Let \(Q\) be the state space of the system\@. We assume that the
designer has specified a target set \(T\subset Q\) which contains the
states that characterize the desirable behavior of the system\@. We
also assume that \(T\) is \emph{closed}, that is, once the system
enters set \(T\), all future states remain in~\(T\)\@.
%
\begin{alabsubtext}{Definition}{2.2}{}
%
Let target set \(T\) be a subset of the state space \(Q\) of a system\@.
We say that the system is \emph{self-stabilizing} iff for any initial
state from \(Q\), with probability 1 the state of the system
eventually enters and remains in the set~\(T\)\@.
%
\end{alabsubtext}
%
In any state \(q\), the scheduler can choose to activate any processor
it desires so long as it makes progress according to Definition~2.1\@. Our
state-exploration approach views the scheduler as an adversary that
attempts to deter self-stabilization as long as possible\@.
%
\apar{2.4-3}
Let \(\Gamma(q)\) denote the set of possible scheduling choices for
state \(q\)\@. For each possible scheduling choice
\(\gamma\in\Gamma(q)\) that the scheduler can make, there is a set of
possible states \(\delta(q,\gamma)\subseteq Q\) that the system can
enter\@. Each state of \(\delta(q,\gamma)\) may or may not make
progress relative to \(q\)\@. The precise content of
\(\delta(q,\gamma)\) is determined by the random choices that the
activated processors make, and by other details of the model (for
example, how read/write conflicts are resolved)\@.
%
\apar{2.4-4}
Let \(\statepredicate{progress}[q,q']\) be the predicate that is
satisfied iff state \(q'\) results from the activation in state \(q\)
of exactly one processor that satisfies the three conditions of
Definition~2.1\@.
%
\apar{2.4-5}
Suppose that the scheduler makes scheduling decision \(\gamma\)\@. Then
the scheduler makes progress according to Definition~2.1 iff there is at
least one state \(q'\) in \(\delta(q,\gamma)\) such that
\(\statepredicate{progress}[q,q']\)\@.
%
\apar{2.4-6}
Let
%
\[\sigma(q)=\setof{\delta(q,\gamma)}%
{\text{scheduling decision }\gamma\in\Gamma(q)\text{ makes progress}}\]
%
that is, \(\sigma(q)\) contains all the possible next-state sets that
result from taking a progress-making scheduling choice from
\(\Gamma(q)\) at state \(q\)\@. Note that the elements of \(\sigma(q)\)
are sets of states\@.
%
\begin{alabsubtext}{Definition}{2.3}{}
%
A state \(q\in Q\) is \emph{deadlocked} iff \(\sigma(q)\) is empty\@.
%
\end{alabsubtext}
%
\apar{2.4-7}
Now we consider the behavior of the scheduler\@. Its goal is to maximize
its probability of avoiding the target set\@.
%
\begin{alabsubtext}{Definition}{2.4}{}
%
Let \(T\) be the target set of the system\@. We say that a scheduler can
\emph{avoid \(T\) from \(q\) for at least \(i\) steps} iff starting at
state \(q\) the scheduler in the next \(i\) steps can keep the system
outside \(T\), or can force the system into a deadlocked state outside
\(T\)\@. For each scheduler, the \emph{avoidance probability}
\(p_{i}(q)\) of a state \(q\) is the probability that it can avoid \(T\)
when starting from \(q\) for at least \(i\) steps\@.
%
\end{alabsubtext}
%
This definition is convoluted, because the notion of a scheduling step
makes sense only if the current state is not deadlocked, that is, the
scheduler can actually make progress\@.
%
\apar{2.4-8}
What kind of scheduling strategy delivers the ``best'' avoidance
probabilities\@? Given that the scheduler cannot predict the results of
the independent equiprobable random choices made by the system, the
best it can do is to maximize its probability of avoiding the target
set\@.
%
\begin{alabsubtext}{Proposition}{2.5}{}
%
The following avoidance probabilities are tight upper bounds for the
avoidance probabilities of all schedulers\@. Let \(q\in Q\)\@. For
\(i=0\), or \(\sigma(q)\) being empty:
%
\[
p_i(q)=\condval{%
0 & \text{if } q\in T\\
1 & \text{if } q\notin T\\
}
\]
\sentence
%
For \(\sigma(q)\) being nonempty:
%
\[
p_{i+1}(q)=\max_{S\in\sigma(q)}
\left(\frac{1}{\setsize{S}}\sum_{q'\in S}p_i(q')\right)
\]
\sentence
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Proposition}{2.5}{}
\prooffontshape
%
The scheduler is attempting to stay away from the target set as long
as possible\@. If it can make progress, then to avoid the target set
\(T\) for at least \(i+1\) steps, it should make a choice from
\(\Gamma(q)\) that leads to a set of next states \(S\in\sigma(q)\)
that has maximum probability of avoiding the target set for at least
\(i\) steps, or that leads to a deadlock state not in~\(T\)\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Proposition 2.5}}
\end{alabsubtext}
%
\apar{2.4-9}
We can now give a sufficient condition for nonstabilization\@.
%
\begin{alabsubtext}{Proposition}{2.6}{}
%
Let \(n=\setsize{Q}\)\@. Let \(q\in Q\) be such that \(p_{n}(q)=1\)\@. Then
\(p_{i}(q)=1\) for all \(i\ge 0\), and the scheduler can prevent the
system from stabilizing when started in state~\(q\)\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Proposition}{2.6}{}
\prooffontshape
%
\apar{Prove Prop 2.6-1}
Consider how the computation proceeds from state \(q\) for \(n\)
activations\@. The scheduler makes choices of which processors to
activate\@. There may be many possible successor states resulting from
the choice, so the possible future of the computation for the next
\(n\) steps can be described by a tree of depth~\({\le n}\)\@.
%
\apar{Prove Prop 2.6-2}
Consider any path from root \(q\) to a leaf \(l\)\@. Either this path
ends in a deadlocked state, or it has length \(n\)\@. If the path ends
in a deadlocked state, that state must not be in \(T\), because
\(p_{n}(q)=1\)\@. If the path does not end in a deadlocked state, then it
has length \(n\), and so it contains \(n+1\) states\@. By the
``pigeon-hole principle,'' there must be two identical states \(q'\)
on the path\@. Break the transition to the deeper instance of \(q'\),
and connect it to the one closer to the root\@. This transformation can
be performed for each leaf in the tree, and the result is a graph
whose only leaf nodes are deadlocked states not in \(T\)\@. All future
state changes remain in the graph\@.
%
\apar{Prove Prop 2.6-3}
Thus the scheduler can avoid \(T\) forever starting in state \(q\),
and thus \(p_{i}(q)=1\) for all \({i\ge 0}\)\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Proposition 2.6}}
\end{alabsubtext}
%
\begin{alabsubtext}{Lemma}{2.7}{}
%
For all \(q\in Q\), \(i\ge 0\), we have \(0\le p_{i}(q)\le 1\), and
\({p_{i}(q)\ge p_{i+1}(q)}\)\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Lemma}{2.7}{}
\prooffontshape
%
By induction on \(i\)\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Lemma 2.7}}
\end{alabsubtext}
%
\begin{alabsubtext}{Lemma}{2.8}{}
%
Suppose that there exists an \(n>0\) such that for every \(q\in Q\),
\(p_{n}(q)<1\)\@. Let \(p=\max_{q\in Q}({p_{n}(q)})\)\@. Then for all
\(q\in Q\), \(i>0\), \({p_{ni}(q)\le p^{i}}\)\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Lemma}{2.8}{}
\prooffontshape
%
Consider avoiding \(T\) for segments of \(n\) steps\@. Now the
probability of avoiding \(T\) for \(\ge n\) steps is \(\le p\)\@. Given
that \(T\) has been avoided for \(n\) steps, by
Lemma~2.7, avoiding \(T\) for another \(n\) steps or
more has probability at most \(p\)\@. Thus, the probability of avoiding
\(T\) for \(\ge 2n\) steps is \(\le p^{2}\), and by induction, the
probability of avoiding \(T\) for \(\ge ni\) steps is~\({\le p^{i}}\)\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Lemma 2.8}}
\end{alabsubtext}
%
\apar{2.4-10}
We can now give a sufficient condition for self-stabilization\@.
%
\begin{alabsubtext}{Proposition}{2.9}{}
%
Let \(Q\) be the state space of a system with target set \(T\)\@.
Suppose \(T\) is closed\@. If there exists an \(n\),
\(0\le n\le\setsize{Q}\), such that for every \(q\in Q\),
\(p_{n}(q)<1\), then the system is self-stabilizing\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Proposition}{2.9}{}
\prooffontshape
%
If \(n=0\), then \(Q=T\) and the system is trivially self-stabilizing\@.
Suppose that there exists an \(n>0\) such that for every \(q\in Q\),
\(p_{n}(q)<1\)\@. By Proposition~2.6, if such an \(n\)
exists, it must be \(\le\setsize{Q}\)\@. Then by Lemma~2.8 and
Lemma~2.7, for every \({q\in Q}\),
%
\[\lim_{i\rightarrow\infty}p_{i}(q)=0\]
\sentence
%
Thus, the probability of reaching the target set \(T\) from any state
is 1, and the system is self-stabilizing\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Proposition 2.9}}
\end{alabsubtext}
%
\apar{2.4-11}
Propositions~2.6 and~2.9 combine to give a necessary and sufficient
condition for self-stabilization\@.
%
\begin{alabsubtext}{Theorem}{2.10}{}
%
Let \(Q\) be the state space of a system with target set \(T\)\@.
Suppose \(T\) is closed\@. There exists an \(n\), \(0\le n\le\setsize{Q}\),
such that for every \(q\in Q\), \(p_{n}(q)<1\) iff the system is
self-stabilizing\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Theorem}{2.10}{}
\prooffontshape
%
Observe that the negation of the conditions for Proposition~2.6 implies
the conditions for Proposition~2.9, and vice versa\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Theorem 2.10}}
\end{alabsubtext}
%
\asubsection{2.5}{Expected Time to Self-Stabilization}
%
\apar{2.5-1}
We now consider how to estimate the expected time that the scheduler
can avoid the target set when starting at some state \(q\)\@. The
expected path length before intersecting the target set \(T\)~is
%
\[
l(q)=\sum_{i=0}^{\infty}ip_{i}(q)(1-p_{i+1}(q))
\]
\sentence
%
This comes from observing that the probability that \(T\) can be
avoided for exactly \(i\) steps is the probability that it can be
avoided for at least \(i\) steps times the probability that it cannot
be avoided for at least \(i+1\) steps\@.
%
\apar{2.5-2}
By directly computing the \(p_{i}(q)\), \(0\le i\le n\), we can obtain
a value for the initial part of the summation\@.
%
\[
\sum_{i=0}^{n-1}ip_{i}(q)(1-p_{i+1}(q))
\]
And we need an estimate for the tail
\[
e_{n}(q)=\sum_{i=n}^{\infty}ip_{i}(q)(1-p_{i+1}(q))
\]
\sentence
%
\begin{alabsubtext}{Proposition}{2.11}{}
%
Let \(n\) be such that for all \(q\), \(p_{n}(q)<1\)\@. Let
\(p=\max_{q\in Q}{p_{n}(q)}\)\@. Then
%
\[e_{n}(q)=\sum_{i=n}^{\infty}ip_{i}(q)(1-p_{i+1}(q))\le
\frac{np(3n+p-1-np)}{2(1-p)^{2}}\]
\sentence
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Proposition}{2.11}{}
\prooffontshape
%
We have
%
\[e_{n}(q)\le\sum_{i=n}^{\infty}ip_{i}(q)\]
\sentence
%
By Lemma~2.8, the probability of avoiding \(T\) for
\(\ge i\) steps is \(\le p^{\floor{\frac{i}{n}}}\)\@. Thus
%
\begin{align*}
%
e_{n}(q) &\le\sum_{i=n}^{\infty}ip^{\floor{\frac{i}{n}}}\\
%
&=\sum_{i=1}^{\infty}\sum_{j=0}^{n-1}(ni+j)p^{\floor{\frac{ni+j}{n}}}\\
%
&=\sum_{i=1}^{\infty}\sum_{j=0}^{n-1}(ni+j)p^{i}\\
%
&=\sum_{i=1}^{\infty}p^{i}\sum_{j=0}^{n-1}(ni+j)\\
%
&=\sum_{i=1}^{\infty}p^{i}(n^{2}i+n(n-1)/2)\\
%
&=n^{2}(\sum_{i=1}^{\infty}ip^{i})+n(n-1)/2(\sum_{i=1}^{\infty}p^{i})\\
%
&=\frac{n^{2}p}{(1-p)^{2}}+\frac{n(n-1)p}{2(1-p)}\\
%
&=\frac{np(3n+p-1-np)}{2(1-p)^{2}}
%
\end{align*}
%
as required\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Proposition 2.11}}
\end{alabsubtext}
%
\asubsection{2.6}{The Scheduler-Luck Game}
%
\apar{2.6-1}
Another approach to reasoning about self-stabilization is with the
scheduler-luck game of Dolev, Israeli, and Moran \cite{cj96-05-10}\@.
Self-stabilization is viewed as a game in which the players, scheduler
and luck, take turns\@. The scheduler has control over which processor
is activated, while luck can control the outcomes of the random coin
tosses used by processors\@. The scheduler must be fair, but is
otherwise unrestricted\@. Luck need not intervene to affect the outcome
of a coin toss\@.
%
\apar{2.6-2}
The scheduler is attempting to prevent entry into the target set,
while luck is attempting to force entry by intervening to avoid
undesirable random state transitions\@. Self-stabilization is
established by finding a winning strategy for luck\@. That is, luck has
to be able to force a path from any member of the initial-state set to
some member of the target set\@. Luck is said to have an
\((f,r)\)-strategy iff for every initial state and every scheduler,
the system reaches the target set in expected number of at most \(r\)
rounds, and at most \(f\) interventions of luck\@. The scheduler-luck
game provides an upper bound on the time to stabilize: if an
\((f,r)\)-strategy exists, then the system will stabilize from that
initial state in at most \(r2^{f}\) expected number of rounds\@.
%
\apar{2.6-3}
In the usual application of the scheduler-luck model, one determines a
strategy for luck and then proves that it is a winning one\@. This can
be quite difficult, since the difference between a system that is
obviously self-stabilizing and one that is not is often quite subtle
(see Section~3.1 for an example)\@.
%
\apar{2.6-4}
On the other hand, one could take a state-space-exploration approach
to the scheduler-luck game\@. It would require a search to show that
luck, from every initial state, could intervene to force the path to
reach the target set\@. It is not obvious how to determine the minimum
number of interventions by luck in order to minimize the upper bound
on time to stabilize\@.
%
\asection{3}{Token Passing in the Standard Model}
%
\apar{3-1}
The token-passing problem is to find a protocol for a pair of
processors such that exactly one token is continuously passed between
the processors\@. We will apply the state-exploration method of
Section~2.4 to show that token passing has a uniform
self-stabilizing solution under the read/write demon\@. Although this
result stands on its own, it will be key to the ultimate
implementation of the orientation protocol of Section~6\@.
%
\apar{3-2}
This particular algorithm, in minor variations, appears in
\cite{cj96-05-16} and in \cite{cj96-05-15}, but this is the first
complete proof for the case of the read/write demon\@. A nonuniform
version of this problem appears in \cite{cj96-05-09}\@. In that proof,
the processors run different programs, but the system is also
self-stabilizing under the read/write demon\@.
%
\apar{3-3}
Consider a pair of processors, executing the protocol of
Figure~1\@. The port alphabet is \(\{W,P,R\}\),
with the intuition behind the symbols as follows: at an input port, a
\(W\) means that the generating processor is waiting to receive the
token; a \(P\) means that it is in the process of passing the token to
the other processor; and an \(R\) means that it has received the token
and is willing to pass it back\@.
%
\begin{figure*}
\fbox{\parbox[t]{\textwidth}{%
%
\begin{algorithm}{informal}{}
%
\begin{progstmtlist}
%
\item \(\idr{mode}\oftype\{R,W,P\}\) \comment{Current state of protocol}
%
\item \(\idr{inport}\oftype\{R,W,P\}\) \comment{Input port}
%
\item \(\idr{outport}\oftype\{R,W,P\}\) \comment{Output port}
%
\end{progstmtlist}
%
\begin{progstmtlist}
%
\item \kwd{do forever} \{
%
\begin{progstmtlist}
%
\item \comment{Read Phase}
%
\begin{progstmtlist}
%
\item \kwd{read} \(\idr{inport}\);
%
\end{progstmtlist}
%
\item \comment{Write Phase}
%
\item \comment{Compute next mode from the table below}
%
{\renewcommand{\multirowsetup}{\relax}
\newlength{\lablntha}\settowidth{\lablntha}{Next Mode}
\begin{tabular}{|rr|ccc|}
%
\hline
\multicolumn{2}{|c|}{\multirow{2}{\lablntha}{Next Mode}} &
\multicolumn{3}{c|}{Input}\\
%
& & \(W\) & \(P\) & \(R\)\\
\hline
%
\multirow{3}{4em}{Current Mode} & \(W\) & \((W,P,R)\) & \(R\) & \(W\)\\
%
& \(P\) & \(P\) & \((W,P,R)\) & \(W\)\\
%
& \(R\) & \(P\) & \(R\) & \((W,P,R)\)\\
\hline
%
\end{tabular}
}
%
\begin{progstmtlist}
%
\item \kwd{if} \((\idr{mode}\equtest\idr{inport})\) \{
%
\begin{progstmtlist}
%
\item \(\idr{mode}\assigned\progfunc{random}\{W,P,R\}\);
%
\end{progstmtlist}
%
\item \} \kwd{else if}
\((\idr{mode}\equtest W\progand\idr{inport}\equtest P)\) \{
%
\begin{progstmtlist}
%
\item \(\idr{mode}\assigned R\);
%
\end{progstmtlist}
%
\item \} \kwd{else if}
\((\idr{mode}\equtest P\progand\idr{inport}\equtest R)\) \{
%
\begin{progstmtlist}
%
\item \(\idr{mode}\assigned W\);
%
\end{progstmtlist}
%
\item \} \kwd{else if}
\((\idr{mode}\equtest R\progand\idr{inport}\equtest W)\) \{
%
\begin{progstmtlist}
%
\item \(\idr{mode}\assigned P\);
%
\end{progstmtlist}
%
\item \}
%
\item \kwd{write} \(\idr{mode}\) \kwd{to} \(\idr{outport}\);
%
\end{progstmtlist}
%
\end{progstmtlist}
%
\item \}
%
\end{progstmtlist}
%
\end{algorithm}
}}
%
\afigcap{1}{Pair token-passing protocol}
%
\end{figure*}
%
The intent is that two processors executing this protocol will, when
stabilized, go through the cycle of modes:
\[WP,RP,RW,PW,PR,WR,WP,\dots\]
\sentence
%
\apar{3-4}
We now analyze the behavior of the scheduling demon in terms of this
pair of processors\@. First, we have to identify the state space\@. Each
processor has a mode, the contents of its port registers, and a
position in the program\@. We examine where the processor actually
interacts with its neighbor, and consider the program position as two
phases\@. The read phase is the single instruction that reads the input
port\@. The write phase consists of the sequence of instructions that
computes the next value of \algo{informal}{}{\(\idr{mode}\)} and writes
it to the output port\@. For the purposes of the scheduling demon, we
can consider the phases to be atomic operations, as each phase
contains exactly one information exchange with the neighboring
processor\@. Note that the read and write phases are serializable with
respect to processor activations\@.
%
\apar{3-5}
The state of an individual processor can then be captured as a triple
%
\[\procstate{\algo{informal}{}{\idr{phase}},
\algo{informal}{}{\idr{inport}},
\algo{informal}{}{\idr{mode}}}\]
%
where \(\algo{informal}{}{\idr{phase}}\in \{r,w\}\) indicates the
phase that is next to be executed on activation,
\(\algo{informal}{}{\idr{inport}}\in\{R,W,P\}\) is the current
contents of the input port, and
\(\algo{informal}{}{\idr{mode}}\in\{R,W,P\}\) is the current value of
the mode\@.
%
\apar{3-6}
The state of the system is the pair of states of the processors\@. The
state space \(Q\) is all possible such pairs of processor states\@. The
\(\sigma\) function of Section~2.4 is then defined
under the assumption that scheduling is being done by a read/write
demon\@.
%
\begin{sloppypar}
\begin{alabsubtext}{Proposition}{3.1}{}
%
The token-passing algorithm is self-stabilizing under the read/write
scheduling demon\@. The expected number of activations before
stabilization is at most 166, and the average over all states is at
most~81\@.
%
\end{alabsubtext}
\end{sloppypar}
%
\begin{alabsubtext}{Proof of Proposition}{3.1}{}
\prooffontshape
%
\apar{Prove Prop 3.1-1}
We prove self-stabilization by a computation that establishes the
conditions of Theorem~2.10\@. This is performed by the C code of
\filename{explore.c}\@. The results of running this code, given in
\filename{explore.out}, substantiate the claim\@. These files, plus
supporting materials, are provided alongside this article in the
journal archives (see the Appendix)\@.
%
\apar{Prove Prop 3.1-2}
What remains to be argued is the correctness of the program\@. This
issue has two components: the correctness of the overall algorithm,
and the correctness of the specific portions related to the
token-passing problem\@.
%
\apar{Prove Prop 3.1-3}
The overall algorithm is quite simple:
%
\begin{enumerate}
%
\item[1.] Define the problem-specific state-space data, \(Q\) and \(\sigma\)\@.
%
\item[2.] Verify that there are no deadlocks (as per Definition~2.3)\@.
%
\item[3.] Define the problem-specific target set, \(T\)\@.
%
\item[4.] Compute avoidance probabilities (as per Proposition~2.5), and
expected times to stabilization (as per Proposition~2.11)\@.
%
\end{enumerate}
%
The implementation of the overall algorithm (i.e., \filename{explore.h},
\filename{explore.c}) requires problem-specific information, that is, \(Q\),
\(\sigma\), and \(T\)\@. Assuming the correctness of this data, verifying
that the main exploration algorithm is correct is a matter of a
straightforward code walk-through of the simple computations in
steps 2 and~4\@.
%
\apar{Prove Prop 3.1-4}
The C code for generating the problem-specific data, given in
\filename{problem.h}, \filename{problem.c}, is not particularly subtle
in design or implementation\@. Its verification involves the following
main components:
%
\begin{enumerate}
%
\item[1.] The state of an individual processor is defined, and then the
state of the system is defined as a pair of processor states\@. One
must ensure that the states of the system can be mapped 1-1 onto
indexes in the range \(0\dots\setsize{Q}-1\)\@. The conversions between
states and indexes are simple modular arithmetic\@.
%
\item[2.] It must be checked that the function
\algo{C}{}{\idr{next\_proc\_state}} properly captures
the result of activating one processor executing the program of
Figure~1\@.
%
\item[3.] The \algo{C}{}{\idr{progress}} function must be checked for a
proper implementation of the \(\statepredicate{progress}\) predicate\@.
%
\item[4.] The function \algo{C}{}{\idr{sigma\_fn}} must be checked for
a proper implementation of the \(\sigma\) function\@.
%
\item[5.] One must verify that the function
\algo{C}{}{\idr{compute\_target}} follows a cycle in the state space\@.
%
\end{enumerate}
\sentence
%
All of these functions have simple structures and trivial invariants\@.
%
\apar{Prove Prop 3.1-5}
The final step is to verify that the target set determined (and
output) by \algo{C}{}{\idr{compute\_target}} really is that of the pair
token-passing protocol\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Proposition 3.1}}
\end{alabsubtext}
%
\asubsection{3.1}{Remarks on Computational Verification}
%
\apar{3.1-1}
First, we note that if instead of randomly choosing from three modes,
we select from \(\{R,P\}\) (or any other two of the three), then the
resulting protocol is still self-stabilizing, but the maximum expected
stabilization time increases to 350, and the average is 142 steps\@.
%
\apar{3.1-2}
The subtlety of the token-passing problem and the advantage of
mechanical state exploration are illustrated by one of our initial
attempts at a protocol:
%
\begin{center}
%
{\renewcommand{\multirowsetup}{\relax}
\newlength{\lablnthb}\settowidth{\lablnthb}{Next Mode}
\begin{tabular}{|rr|ccc|}
%
\hline
\multicolumn{2}{|c|}{\multirow{2}{\lablnthb}{Next Mode}} &
\multicolumn{3}{c|}{Input}\\
%
& & \(W\) & \(P\) & \(R\)\\
\hline
%
\multirow{3}{4em}{Current Mode} & \(W\) & \((W,R)\) & \(R\) & \(W\)\\
%
& \(P\) & \(P\) & \(W\) & \(W\)\\
%
& \(R\) & \(P\) & \(R\) & \(W\)\\
\hline
%
\end{tabular}
}
%
\end{center}
\sentence
%
The intuition here is that arbitrations only occur in one state\@. This
protocol was proven correct by hand, but, in fact, it does not
self-stabilize\@. An instrumented version of the state-space exploration
program found the counter-example strategy of Figure~2\@. This protocol
does work if the random choice from \(\{W,R\}\) is replaced by one
from \(\{W,R,P\}\)\@. In this case, the maximum expected stabilization
time is 597 activations, and the average over all starting states
is~296\@.
%
\begin{figure*}
\fbox{\parbox[t]{\textwidth}{%
%
\begin{center}
%
\begin{tabular}{|rlll|}
\hline
%
Label & \multicolumn{2}{c}{State} & Action \\
& \multicolumn{1}{c}{\(A\)} & \multicolumn{1}{c}{\(B\)} & \\
\hline
%
174: & \(wRR\) & \(wWR\) & Activate \(B\) \\
%
167: & \(wRR\) & \(rWP\) & Activate \(B\)\\
%
173: & \(wRR\) & \(wRP\) & Activate \(A\)\\
%
29: & \(rRW\) & \(wRP\) & Activate \(A\)\\
%
299: & \(wPW\) & \(wRP\) & Activate \(B\)\\
%
289: & \(wPW\) & \(rRW\) & Activate \(B\)\\
%
301: & \(wPW\) & \(wWW\) & Activate \(B\), randomly goto 291 or 292\\
%
292: & \(wPW\) & \(rWW\) & Activate \(B\), goto 301\\
%
291: & \(wPW\) & \(rWR\) & Activate \(A\)\\
%
300: & \(wPW\) & \(wWR\) & Activate \(A\)\\
%
120: & \(rPR\) & \(wWR\) & Activate \(A\), goto 174\\
\hline
%
\end{tabular}
%
\end{center}
%
The current state is for a pair of processors, \(A\) and~\(B\)\@.
The triple in the state of the processor indicates the
\(\procstate{\algo{informal}{}{\idr{phase}},\algo{informal}{}{\idr{inport}},
\algo{informal}{}{\idr{mode}}}\) as described above\@. All
transitions go to the next row of the table unless indicated by a
goto\@. The state labels are the same ones as generated by the
exploration code\@.
}}
%
\afigcap{2}{Counterexample for bad pair token-passing protocol}
%
\end{figure*}
%
\apar{3.1-3}
One concern of the exploration method is that state spaces grow
exponentially, and so exploring them quickly becomes infeasible\@. But
large problems are usually solved on a custom abstract machine with
primitive operations tailored to the task at hand\@. The custom machine
simplifies the design of, and reasoning about, the algorithm that
solves the problem\@. It is typically not nearly as rich in terms of
scheduling possibilities as the standard model, and so is easier to
reason about\@. Of course, the custom machine then must be implemented
correctly on that standard model of computation\@. For example, in this
paper, token passing is used to implement the edge-arbitration
primitive of the custom machine for the orientation protocol\@. By
building a custom model of computation, full state-space exploration
is then reserved for the task of verifying the implementation of the
custom machine's primitives on the standard model\@. In these cases, the
state space is much smaller and more tractable to explore\@.
%
\apar{3.1-4}
Also, from our limited observations, if a system does stabilize, the
conditions for Theorem~2.10 are satisfied for small values of \(n\)
relative to the size of the state space\@.
%
\apar{3.1-5}
As a final note, one could raise the issue of the correctness of the
program used in this kind of proof\@. But in practice, it is no more
difficult to check the correctness of a program than it is to check
the correctness of a proof\@. Like programs, proofs that are well
structured with simple components are easy to check, while baroque,
convoluted ones are not\@. Once verified, a program for testing
self-stabilization can be used on many different instances of the
problem\@.
%
\asection{4}{Overview of Unicyclic Network Orientation}
%
\apar{4-1}
The notion of orienting a ring can be generalized to orienting any
unicyclic graph, that is, a ring with attached trees\@. Consider a
processor network with vertices of possibly different degree\@. To each
vertex \(v\) of degree \(k\) is assigned an \emph{orientation}
\(\vertorientation(v)\in\{0,\dots,k-1\}\)\@. It is convenient to
visualize each vertex as having an internal pointer whose head is
directed toward port \(\vertorientation(v)\)\@. The orientation of a
vertex \(v\) is generally a function of the state of the processor
\(P_{v}\)\@. The orientation of the vertices at the ends of an edge
induces a state for that edge\@. We say that an edge is \emph{oriented}
when exactly one of the vertices at the ends of each edge is oriented
toward that edge\@.
%
\apar{4-2}
We say that a network is \emph{oriented} iff every edge is oriented\@.
There must be exactly as many vertices as edges for a network to be
orientable, so such a network consists of a single ring with trees
attached\@. There are precisely two ways to orient the ring, and it is
easy to show that each edge outside of the ring must be oriented to
point toward the ring\@. Such an oriented network is illustrated in
Figure~3\@.
%
\begin{figure*}
\fbox{\parbox[t]{\textwidth}{%
%
\begin{center}
\setlength{\unitlength}{2.8ex}
%
\begin{picture}(30,10)(0,3)
%
\put(6,4){\circle{2}}
\put(6.5,5.0){\(0\)}
\put(5.75,3.75){\vector(1,1){.7}}
%
\put(6,12){\circle{2}}
\put(6.5,10.5){\(0\)}
\put(5.75,12.25){\vector(1,-1){.7}}
%
\put(10,8){\circle{2}}
\put(8.5,8.5){\(0\)}
\put(8.5,7.0){\(1\)}
\put(11.2,8.2){\(2\)}
\put(9.75,8){\vector(1,0){.9}}
%
\put(15,8){\circle{2}}
\put(13.5,8.2){\(1\)}
\put(16.2,8.5){\(0\)}
\put(16.2,7.0){\(2\)}
\put(14.75,7.75){\vector(1,1){.7}}
%
\put(19,12){\circle{2}}
\put(18.3,10.3){\(1\)}
\put(19.5,10.3){\(0\)}
\put(18.75,12.25){\vector(1,-1){.7}}
%
\put(19,4){\circle{2}}
\put(18.3,5.3){\(0\)}
\put(19.5,5.3){\(1\)}
\put(19.25,3.75){\vector(-1,1){.7}}
%
\put(23,8){\circle{2}}
\put(21.5,8.5){\(0\)}
\put(21.5,7.0){\(1\)}
\put(23.25,8.25){\vector(-1,-1){.7}}
%
\put(6.7,4.7){\line(1,1){2.6}}
\put(6.7,11.3){\line(1,-1){2.6}}
\put(11,8){\line(1,0){3}}
\put(15.7,8.7){\line(1,1){2.6}}
\put(15.7,7.3){\line(1,-1){2.6}}
\put(19.7,11.3){\line(1,-1){2.6}}
\put(19.7,4.7){\line(1,1){2.6}}
%
\end{picture}
%
\end{center}
}}
%
\afigcap{3}{An oriented network}
%
\end{figure*}
%
\apar{4-3}
In general terms, the \emph{orientation problem} is to specify a
protocol whose execution, given an initial configuration of
orientations of vertices, eventually results in an oriented network\@.
We say that an orientation protocol is \emph{correct} under a given
computation model if it orients every permissible initial
configuration of every permissible network topology\@.
%
\apar{4-4}
There are obvious scenarios in which the orientation problem is solved
by the simple expedient of orienting the network at construction time\@.
We are not interested in these\@. Nor are we interested in situations
that use an elected leader to specify the orientation\@.
%
\apar{4-5}
We are interested in \emph{uniform} orientation protocols in which the
processors have no IDs and no idea of the global network topology\@.
That is, the protocol computation by the processor at a vertex is a
function only of the degree of the vertex\@. But we achieve an even more
restrictive notion of uniformity\@. Although different-degree processors
are clearly executing different programs, they do not communicate this
fact to their neighbors\@. That is, by observing an edge, it is not
possible to determine the degree of either processor at the end of the
edge\@.
%
\apar{4-6}
It is the fact that such a simple protocol works for the
generalization of rings to unicyclic networks, and does so under an
even more restricted notion of uniformity, that makes this problem
interesting\@.
%
\apar{4-7}
We will present our orientation protocol first as a high-level
protocol executed on a custom abstract machine\@. Then we will show how
to implement the custom machine under our standard model with its
read/write scheduling demon\@.
%
\apar{4-8}
The custom machine for the high-level protocol will be a network of
finite-state machines in which the state of an edge will cause its
pair of adjacent machines to make simultaneous state transitions\@. We
will show that this protocol is correct and has the claimed
performance\@. The correctness of the high-level protocol will be
established by standard proof methods\@.
%
\apar{4-9}
We do not explicitly use randomness in the protocol on the custom
machine\@. Instead, we assume that there is a primitive operation that
arbitrates a certain kind of conflict between adjacent processors\@. Of
course, the fact that randomness is required eventually is due to the
deterministic impossibility result for rings of
\cite{cj96-05-17}\@. Section~6 will show how to implement the custom
machine on the standard model by using the token-passing protocol of
Section~3\@.
%
\asection{5}{\protect\raggedright The High-Level Protocol and Custom
\nohyphenl{Machine}}
%
\apar{5-1}
The high-level protocol will be described using a simpler computation
model than is used for token passing\@. In this custom model of
computation, the processors in the network are finite-state machines
with a somewhat unusual scheduling demon\@. At each step of the
computation, the scheduler examines the \emph{edges} of the network,
and selects a \emph{pair} of adjacent processors that satisfy the
conditions of the protocol (described in detail in
Figure~5)\@. It then simultaneously changes the states of
both processors\@. Each application of the protocol to a current
configuration of two adjacent processors which produces a next
configuration of the two processors is termed an \emph{interaction}\@.
If no interactions are possible on any edge, then the high-level
protocol is said to be \emph{deadlocked}\@.
%
\apar{5-2}
We now describe the processors appearing at each vertex\@. In addition
to an orientation as described above, each processor has a \emph{mode}
that is either \(P\) or \(W\)\@. We denote the mode of the vertex \(v\) by
\(\vertmode(v)\)\@. The mode \(P\) stands for \emph{passing mode}, which can be
thought of as the processor being in possession of a token and wanting
to pass it to the neighbor it is oriented toward\@. The mode \(W\)
stands for \emph{waiting mode}, which can be thought of as the
processor waiting for a token to appear\@.
%
\apar{5-3}
This induces a condition for each edge described by the orientation and
mode of the processors at its ends\@. Figure~4 defines the various
states of an edge\@. An edge has two ways of being oriented, and two ways
of being disoriented\@.
%
\begin{figure*}
\fbox{\parbox[t]{\textwidth}{%
%
\begin{center}
%
\begin{tabular}{|l|c|r|}
\hline
Edge Name & Condition & \multicolumn{1}{c|}{Pictograph}\\
\hline
\hline
Properly Oriented Edge &
%
\(\begin{aligned}
%
\vertorientation(v) &=i\\
%
\vertmode(v) &=W\\
%
\vertorientation(w) &\ne j
%
\end{aligned}\)
%
& \(\rightportstate{W}\orientededge{}\rightportstate{}\)\\
\hline
%
Improperly Oriented Edge &
%
\(\begin{aligned}
%
\vertorientation(v) &=i\\
%
\vertmode(v) &=P\\
%
\vertorientation(w) &\ne j
%
\end{aligned}\)
%
& \(\rightportstate{P}\orientededge{}\rightportstate{}\) \\
\hline
Properly Disoriented Edge &
%
\(\begin{aligned}
%
\vertorientation(v) &=i\\
%
\vertmode(v) &\ne\vertmode(w)\\
%
\vertorientation(w) &=j
%
\end{aligned}\)
%
& \(\rightportstate{P} \orientededge{} \leftportstate{W}\)\\
\hline
%
Improperly Disoriented Edge &
%
\(\begin{aligned}
%
\vertorientation(v) &=i\\
%
\vertmode(v) &=\vertmode(w)\\
%
\vertorientation(w) &=j
%
\end{aligned}\)
%
&
%
\(\begin{gathered}
%
\rightportstate{P}\orientededge{}\leftportstate{P}\\
%
\text{or}\\
%
\rightportstate{W}\orientededge{}\leftportstate{W}
%
\end{gathered}\)
%
\\
\hline
%
Ignored Edge &
%
\(\begin{aligned}
%
\vertorientation(v) &\ne i\\
%
\vertorientation(w) &\ne j
%
\end{aligned}\)
& \(\leftportstate{} \orientededge{} \rightportstate{}\) \\
\hline
\end{tabular}
%
\end{center}
}}
%
\afigcap{4}{States of an edge \protect\(e=((v,i),(w,j))\protect\)}
%
\end{figure*}
%
The protocol is designed so that processors will only interact with
the neighbor they are oriented toward, and thus the rules for the
protocol are very simple: \emph{all progress toward orientation occurs
at disoriented edges}\@.
%
\apar{5-4}
Suppose that \(e=((v,i),(w,j))\) is a disoriented edge\@. The protocol
at \(e\) is described by Figure~5\@. Note that the protocol applies even
when one of the vertices is a leaf\@.
%
\begin{figure*}
\fbox{\parbox[t]{\textwidth}{%
%
\begin{center}
%
\begin{tabular}{|c|c|}
%
\hline
Current Configuration & Next Configuration\\
\hline
\hline
%
\(\begin{gathered}
%
\vertorientation(v)=i\text{, }\vertmode(v)=P\\
%
\vertorientation(w)=j\text{, }\vertmode(w)=W
%
\end{gathered}\)
&
%
\(\begin{gathered}
%
\vertorientation(v)=i\text{, }\vertmode(v)=W\\
%
\vertorientation(w)=(j+1)\bmod\deg(w)\text{, }\vertmode(w)=P
%
\end{gathered}\)
\\[5ex]
%
\(\rightportstate{P}\orientededge{}\leftportstate{W}\) &
\(\rightportstate{W}\orientededge{}\rightportstate{P}\)\\
%
\(\rightportstate{W}\orientededge{}\leftportstate{P}\) &
\(\leftportstate{P}\orientededge{}\leftportstate{W}\)\\
\hline
%
\(\begin{gathered}
%
\vertorientation(v)=i\text{, }\vertorientation(w)=j\\
%
\vertmode(v)=\vertmode(w)
%
\end{gathered}\)
&
\(\begin{gathered}
%
\vertorientation(v)=i\text{, }\vertorientation(w)=j\\
%
\vertmode(v)\ne\vertmode(w)\text{ (arbitrarily)}
%
\end{gathered}\)
\\[5ex]
%
\(\rightportstate{W}\orientededge{}\leftportstate{P}\) or
\(\rightportstate{P}\orientededge{}\leftportstate{W}\) &
\(\rightportstate{P}\orientededge{}\leftportstate{P}\) or
\(\rightportstate{W}\orientededge{}\leftportstate{W}\)\\
\hline
%
\end{tabular}
%
\end{center}
}}
%
\afigcap{5}{The high-level protocol at edge
\protect\(e=((v,i),(w,j))\protect\)}
%
\end{figure*}
%
\apar{5-5}
The behavior of the protocol depends on whether the edge is properly
or improperly disoriented\@. For a properly disoriented edge, the passing
processor drops into waiting mode, and the waiting processor reorients
itself to its next port and enters passing mode\@. On the other hand,
for an improperly disoriented edge, the mode conflicts must first be
\emph{arbitrated}, converting the edge into a properly disoriented one\@.
We assume only that some arbitration mechanism exists\@. It need not be
fair\@. (In the implementation, this arbitration will be done
randomly\@.)
%
\apar{5-6}
The following is a direct consequence of the definition of the
high-level protocol\@.
%
\begin{alabsubtext}{Proposition}{5.1}{}
%
A network executing the high-level protocol is deadlocked if and only
if it is oriented\@.
%
\end{alabsubtext}
%
\apar{5-7}
We must now prove that every possible configuration of the network
eventually deadlocks\@. How does the protocol make progress\@? Consider a
possible execution of the protocol on a path as described by
Figure~6\@. For clarity, disoriented edges have been
indicated by \(\disorientededge{}\), and the edge marked with a
\(\interactionmark\) at each step is the interaction\@.
%
\begin{figure*}
\fbox{\parbox[t]{\textwidth}{%
\setlength{\tabcolsep}{\customtabcolsep}
%
\begin{tabular}{rc}
%
0: &
\(\rightportstate{P}\disorientededge{e_{1}\;\interactionmark}
\leftportstate{W}\orientededge{e_{2}}
\leftportstate{W}\orientededge{e_{3}}
\rightportstate{W}\disorientededge{e_{4}}
\leftportstate{W}\orientededge{e_{5}}
\rightportstate{W}\orientededge{e_{6}}
\rightportstate{W}\disorientededge{e_{7}}\leftportstate{P}\)
\\
\rule{0pt}{3ex}
1: &
\(\rightportstate{W}\orientededge{e_{1}}
\rightportstate{P}\disorientededge{e_{2}\;\interactionmark}
\leftportstate{W}\orientededge{e_{3}}
\rightportstate{W}\disorientededge{e_{4}}
\leftportstate{W}\orientededge{e_{5}}
\rightportstate{W}\orientededge{e_{6}}
\rightportstate{W}\disorientededge{e_{7}}\leftportstate{P}\)
\\
\rule{0pt}{3ex}
2: &
\(\rightportstate{W}\orientededge{e_{1}}
\rightportstate{W}\orientededge{e_{2}}
\rightportstate{P}\orientededge{e_{3}}
\rightportstate{W}\disorientededge{e_{4}}
\leftportstate{W}\orientededge{e_{5}}
\rightportstate{W}\orientededge{e_{6}}
\rightportstate{W}\disorientededge{e_{7}\;\interactionmark}\leftportstate{P}\)
\\
\rule{0pt}{3ex}
3: &
\(\rightportstate{W}\orientededge{e_{1}}
\rightportstate{W}\orientededge{e_{2}}
\rightportstate{P}\orientededge{e_{3}}
\rightportstate{W}\disorientededge{e_{4}}
\leftportstate{W}\orientededge{e_{5}}
\rightportstate{W}\disorientededge{e_{6}\;\interactionmark}
\leftportstate{P}\orientededge{e_{7}}\leftportstate{W}\)
\\
\rule{0pt}{3ex}
4: &
\(\rightportstate{W}\orientededge{e_{1}}
\rightportstate{W}\orientededge{e_{2}}
\rightportstate{P}\orientededge{e_{3}}
\rightportstate{W}\disorientededge{e_{4}\;\interactionmark}
\leftportstate{W}\orientededge{e_{5}}
\leftportstate{P}\orientededge{e_{6}}
\leftportstate{W}\orientededge{e_{7}}\leftportstate{W}\)
\\
\rule{0pt}{3ex}
5: &
\(\rightportstate{W}\orientededge{e_{1}}
\rightportstate{W}\orientededge{e_{2}}
\rightportstate{P}\orientededge{e_{3}}
\rightportstate{P}\disorientededge{e_{4}\;\interactionmark}
\leftportstate{W}\orientededge{e_{5}}
\leftportstate{P}\orientededge{e_{6}}
\leftportstate{W}\orientededge{e_{7}}\leftportstate{W}\)
\\
\rule{0pt}{3ex}
6: &
\(\rightportstate{W}\orientededge{e_{1}}
\rightportstate{W}\orientededge{e_{2}}
\rightportstate{P}\orientededge{e_{3}}
\rightportstate{W}\disorientededge{e_{4}\;\interactionmark}
\rightportstate{P}\orientededge{e_{5}}
\leftportstate{P}\orientededge{e_{6}}
\leftportstate{W}\orientededge{e_{7}}\leftportstate{W}\)
\\
\rule{0pt}{3ex}
7: &
\(\rightportstate{W}\orientededge{e_{1}}
\rightportstate{W}\orientededge{e_{2}}
\rightportstate{P}\orientededge{e_{3}}
\rightportstate{W}\orientededge{e_{4}}
\rightportstate{P}\disorientededge{e_{5}\;\interactionmark}
\leftportstate{P}\orientededge{e_{6}}
\leftportstate{W}\orientededge{e_{7}}\leftportstate{W}\)
\\
\rule{0pt}{3ex}
8: &
\(\rightportstate{W}\orientededge{e_{1}}
\rightportstate{W}\orientededge{e_{2}}
\rightportstate{P}\orientededge{e_{3}}
\rightportstate{W}\orientededge{e_{4}}
\rightportstate{P}\disorientededge{e_{5}\;\interactionmark}
\leftportstate{W}\orientededge{e_{6}}
\leftportstate{W}\orientededge{e_{7}}\leftportstate{W}\)
\\
\rule{0pt}{3ex}
9: &
\(\rightportstate{W}\orientededge{e_{1}}
\rightportstate{W}\orientededge{e_{2}}
\rightportstate{P}\orientededge{e_{3}}
\rightportstate{W}\orientededge{e_{4}}
\rightportstate{W}\orientededge{e_{5}}
\rightportstate{P}\disorientededge{e_{6}\;\interactionmark}
\leftportstate{W}\orientededge{e_{7}}\leftportstate{W}\)
\\
\rule{0pt}{3ex}
10: &
\(\rightportstate{W}\orientededge{e_{1}}
\rightportstate{W}\orientededge{e_{2}}
\rightportstate{P}\orientededge{e_{3}}
\rightportstate{W}\orientededge{e_{4}}
\rightportstate{W}\orientededge{e_{5}}
\rightportstate{W}\orientededge{e_{6}}
\rightportstate{P}\disorientededge{e_{7}\;\interactionmark}\leftportstate{W}\)
\\
\rule{0pt}{3ex}
11: &
\(\rightportstate{W}\orientededge{e_{1}}
\rightportstate{W}\orientededge{e_{2}}
\rightportstate{P}\orientededge{e_{3}}
\rightportstate{W}\orientededge{e_{4}}
\rightportstate{W}\orientededge{e_{5}}
\rightportstate{W}\orientededge{e_{6}}
\rightportstate{W}\orientededge{e_{7}}\rightportstate{P}\)
%
\end{tabular}
}}
%
\afigcap{6}{Example protocol execution}
%
\end{figure*}
%
\apar{5-8}
We can think of the protocol as transferring the disorientation of an
edge (e.g., \(e_{1}\) at Step~0) to an adjacent edge (e.g., \(e_{2}\)
at Step~1), leaving the first edge oriented\@. The disorientation state
keeps moving in its original direction until it either collides with
an ignored edge and is absorbed (e.g., \(e_{2}\) absorbed by \(e_{3}\)
when moving from Step~1 to Step~2), or reflects off of an improperly
disoriented edge (e.g., the disorientation initially at \(e_{7}\)
reflects off of the improper disorientation initially at \(e_{4}\))\@.
A collision with an ignored edge reduces the number of unoriented
edges, and so the protocol makes progress\@. Reflection off of an
improperly disoriented edge sometimes makes progress in orientation,
and always ensures that arbitration is never required again at that
edge\@.
%
\apar{5-9}
In addition to improperly oriented edges that may be present at the
beginning of execution, the reorientation of processors during
execution can create improperly oriented edges (e.g., \(e_{3}\) at
Step~2)\@. However, the definition of the protocol ensures the
following\@.
%
\begin{alabsubtext}{Lemma}{5.2}{}
%
During the execution of the protocol, at most one arbitration can
occur at each edge\@. Furthermore, the execution of the protocol cannot
generate any ignored edges\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Lemma}{5.2}{}
\prooffontshape
%
Arbitration occurs only at
\(\rightportstate{P}\orientededge{}\leftportstate{P}\) or
\(\rightportstate{W}\orientededge{}\leftportstate{W}\) edges\@. In
either case, the arbitration results in either a
\(\rightportstate{P}\orientededge{}\leftportstate{W}\) or
\(\rightportstate{W}\orientededge{}\leftportstate{P}\) edge\@. Now the
state of an edge changes only when both processors are oriented toward
the edge\@. Assuming neither processor is a leaf, the next state of
\(\rightportstate{P}\orientededge{}\leftportstate{W}\) is
\(\rightportstate{W}\orientededge{}\rightportstate{P}\), and the next
state of
\(\rightportstate{W}\orientededge{}\leftportstate{P}\) is
\(\leftportstate{P}\orientededge{}\leftportstate{W}\)\@. The protocol
rules determine that a processor turns away from an edge only when it
is in \(W\) mode, and must enter into \(P\) mode after turning
away\@. Thus, the next state of the edge
\(\rightportstate{W}\orientededge{}\rightportstate{P}\) must be
\(\rightportstate{W}\orientededge{}\leftportstate{P}\), and the next
state of the edge
\(\leftportstate{P}\orientededge{}\leftportstate{W}\) must be
\(\rightportstate{P}\orientededge{}\leftportstate{W}\)\@. Thus, the
cases that result in an arbitration at the edge can never re-occur\@. We
have a slightly different behavior in the event that one processor is
a leaf (both cannot be)\@. Suppose the left processor is a leaf\@. Then
the next state of
\(\rightportstate{W}\orientededge{}\leftportstate{P}\) is
\(\rightportstate{P}\orientededge{}\rightportstate{W}\), and the same
argument against arbitration applies\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Lemma 5.2}}
\end{alabsubtext}
%
\apar{5-10}
Since arbitrations and reflections occur only at improperly
disoriented edges, there is an upper bound on the number of
reflections that can occur at an edge\@. Ignored edges are also
important, as they are points at which orientation conflicts are
resolved\@.
%
\apar{5-11}
It is convenient to reason about the orientation protocol's behavior
on a tree\@. An \emph{edge-rooted tree} \(T\) with root vertex \(v\) and
root edge \(e\) is constructed by taking a tree with root \(v\) and edge
\(e\) incident on \(v\), and deleting the vertex at the other end of \(e\)\@.
All of our trees will be edge-rooted, so we simply use the term
\emph{tree}\@. Figure~7 illustrates a prototypical tree
used in the proofs that follow\@.
%
\begin{figure*}
\fbox{\parbox[t]{\textwidth}{%
%
\begin{center}
\setlength{\unitlength}{.2in}
%
\begin{picture}(20,7)(0,3)
%
\put(9.8,9.9){\(v\)}
\put(13,10.5){\(e\)}
\put(4.8,4.9){\(v_{1}\)}
\put(14.3,4.9){\(v_{k-1}\)}
\put(6,7){\(e_{1}\)}
\put(13.5,7){\(e_{k-1}\)}
\put(9,4){{\Huge{\(\dots\)}}}
\put(4.5,3){\(T_{1}\)}
\put(14.5,3){\(T_{k-1}\)}
\put(11,10){\line(1,0){5}}
\put(10,10){\circle{2}}
\put(5,5){\circle{2}}
\put(15,5){\circle{2}}
\put(5.7,5.7){\line(1,1){3.6}}
\put(14.3,5.7){\line(-1,1){3.6}}
\put(2,2){\line(1,1){2.3}}
\put(2,2){\line(1,0){6}}
\put(8,2){\line(-1,1){2.3}}
\put(12,2){\line(1,1){2.3}}
\put(12,2){\line(1,0){6}}
\put(18,2){\line(-1,1){2.3}}
%
\end{picture}
%
\end{center}
}}
\afigcap{7}{A typical edge-rooted tree}
%
\end{figure*}
%
\apar{5-12}
The next two lemmas show that ignored and disoriented edges are
balanced in a network\@. Let \(\numberignored(G)\) and
\(\numberdisoriented(G)\) denote, respectively, the number of ignored
and disoriented edges in the network \(G\) (either proper or
improper)\@. When applied to a tree \(T\), the root edge of \(T\) is not
counted\@.
%
\begin{alabsubtext}{Lemma}{5.3}{}
%
Let \(T\) be a tree with root vertex \(v\) and root edge \(e\)\@. If
\(v\) is oriented toward \(e\), then
\(\numberignored(T)=\numberdisoriented(T)\)\@. If \(v\) is oriented
away from \(e\), then \(\numberignored(T)={\numberdisoriented(T)-1}\)\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Lemma}{5.3}{}
\prooffontshape
%
\apar{Prove Lemma 5.3-1}
We proceed by induction on the size of \(T\)\@. For the case of \(T\)
being a single vertex (i.e., degree \(1\)), \(v\) must be oriented
toward \(e\), and we have
\(\numberignored(T)={\numberdisoriented(T)=0}\)\@.
%
\apar{Prove Lemma 5.3-2}
Suppose that vertex \(v\) has degree \(k>1\), and edges
\(e_{1},\dots,e_{k-1}\) in addition to \(e\)\@. Then \(T\) looks like
the tree of Figure~7\@.
%
\apar{Prove Lemma 5.3-3}
When \(v\) is oriented toward \(e\), each of the edges \(e_{l}\),
\(1\le l< k\), is either ignored or oriented\@. If \(e_{l}\) is oriented,
then \(v_{l}\) is oriented toward \(e_{l}\), and by induction
\(\numberignored(T_{l})=\numberdisoriented(T_{l})\)\@. If \(e_{l}\) is
ignored, then \(v_{l}\) is oriented away from \(e_{l}\), and by
induction
\(\numberignored(T_{l})=\numberdisoriented(T_{l})-1\)\@. Adding the
ignored edge \(e_{l}\) maintains the balance between ignored and
disoriented edges\@.
%
\apar{Prove Lemma 5.3-4}
When \(v\) is oriented away from \(e\), then it is oriented toward exactly
one edge \(e_{l}\), which is either oriented or disoriented\@. Balance is
maintained for the other subtrees as above\@. If \(e_{l}\) is oriented,
then \(v_{l}\) is oriented away from \(e_{l}\), and by induction
\(\numberignored(T_{l})=\numberdisoriented(T_{l})-1\), and so
\(\numberignored(T)=\numberdisoriented(T)-1\)\@. If \(e_{l}\) is
disoriented, then \(v_{l}\) is oriented toward \(e_{l}\), and by
induction
\(\numberignored(T_{l})=\numberdisoriented(T_{l})\)\@. Accounting for
the disoriented \(e_{l}\) we have
\(\numberignored(T)={\numberdisoriented(T)-1}\)\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Lemma 5.3}}
\end{alabsubtext}
%
\begin{alabsubtext}{Lemma}{5.4}{}
%
Let \(G\) be a unicyclic network\@. Then
\({\numberignored(G)=\numberdisoriented(G)}\)\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Lemma}{5.4}{}
\prooffontshape
%
\apar{Prove Lemma 5.4-1}
If \(G\) is oriented, then
\(\numberignored(G)=\numberdisoriented(G)=0\)\@. Suppose that \(G\) is
not oriented\@. Pick any edge \(e=((v,i),(w,j))\) on the ring of \(G\) and
cut it, attaching a leaf \(u\) to vertex \(w\) with the edge
\(f=((u,0),(w,j))\)\@. The net result is a tree \(T\) with root vertex
\(v\) and root edge~\(e\)\@.
%
\begin{sloppypar}
\apar{Prove Lemma 5.4-2}
If \(e\) was originally disoriented, then edge \(f\) will be disoriented,
and \(v\) will be directed toward \(e\)\@. That is, edge
\({e:\rightportstate{}\orientededge{}\leftportstate{}}\) becomes the
edge \({f:\rightportstate{}\orientededge{}\leftportstate{}}\), and
\(e\) becomes the root edge
\({\rightportstate{}\orientededge{}}\)\@. Applying
Lemma~5.3 to \(T\) we have
\(\numberignored(T)=\numberdisoriented(T)\)\@. Since the disoriented
edge \(f\) in \(T\) accounts for the originally disoriented edge
\(e\), we have balance for~\(G\)\@.
%
\apar{Prove Lemma 5.4-3}
If \(e\) was originally ignored, then edge \(f\) will be oriented, and
\(v\) will be directed away from \(e\)\@. That is, edge
\({e:~\leftportstate{}\orientededge{}\rightportstate{}}\) becomes the
edge \({f:~\rightportstate{}\orientededge{}\rightportstate{}}\), and
\(e\) becomes the root edge
\({\leftportstate{}\orientededge{}}\)\@. Applying Lemma~5.3 to \(T\),
we have \(\numberignored(T)=\numberdisoriented(T)-1\)\@. The oriented
edge \(f\) is not counted in \(T\), nor is the originally ignored edge
\(e\)\@. Accounting for \(e\) we have
\(\numberignored(G)=\numberdisoriented(G)\), and so we have balance
for~\(G\)\@.
\end{sloppypar}
%
\apar{Prove Lemma 5.4-4}
If \(e\) was originally oriented, we have one of the above cases\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Lemma 5.4}}
\end{alabsubtext}
%
\apar{5-13}
Since progress is made as disoriented edges move about the tree, we
need to know how they can interact\@. In a tree \(T\), we say that an edge
\(e\) is \emph{between} edges \(e_{1}\) and \(e_{2}\) if \(e\) lies on
a path between \(e_{1}\) and \(e_{2}\)\@. We say that a disoriented edge
\(e_{1}\) in tree \(T\) is \emph{covered} if there exists an ignored
edge between \(e_{1}\) and the root edge of~\(T\)\@.
%
\begin{alabsubtext}{Lemma}{5.5}{}
%
Let \(T\) be a tree with root vertex \(v\) and root edge \(e\)\@. If
\(v\) is oriented toward \(e\), then every disoriented edge in \(T\)
is covered\@. If \(v\) is oriented away from \(e\), then all but one
disoriented edge in \(T\) is covered\@. Furthermore, every ignored edge
covers some disoriented edge\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Lemma}{5.5}{}
\prooffontshape
%
\apar{Prove Lemma 5.5-1}
We proceed by induction on the size of \(T\)\@. Assume that the lemma is
true for all trees of smaller size than \(T\)\@. For the case of \(T\)
being a single vertex (i.e., degree \(1\)), we have no disoriented edges\@.
%
\apar{Prove Lemma 5.5-2}
Suppose that vertex \(v\) has degree \(k>1\), and edges
\(e_{1},\dots,e_{k-1}\) in addition to \(e\)\@. Then \(T\) looks like
the tree of Figure~7\@.
%
\apar{Prove Lemma 5.5-3}
When \(v\) is oriented toward \(e\), each of the edges \(e_{l}\),
\(1\le l<k\), is either ignored or oriented\@.
% NOTE(review): text appears to be missing at this point -- the remainder
% of the proof of Lemma 5.5 and the statement of Lemma 5.6 are absent from
% this source; what follows (see the end marker below) belongs to the
% proof of Lemma 5.6. Restore the missing passage from the published text.
Since \(v\) is
oriented away from \(e\), by Lemma~5.5 there is exactly one
disoriented edge \(f\) that is not covered\@. That is, \(f\) lies in the
subtree rooted at \(e\) obtained by cutting \(T\) at the ignored edges
nearest \(v\) on any path from \(v\) to a leaf\@. Call this tree
\(T_{f}\)\@. Suppose that \(k\) ignored edges were cut, resulting in
subtrees \(T_{1},\dots,T_{k}\), each of whose root edges was an
ignored edge of \(T\)\@. Now \(T_{f}\) does not contain any ignored
edges, and \(k\) ignored edges were destroyed by the cutting process,
so \(\numberignored(T)=k+\sum_{i=1}^{k}\numberignored(T_{i})\)\@. By
induction, the trees \(T_{i}\) can be partitioned into
\({\sum_{i=1}^{k}(1+\numberignored(T_{i}))}=\numberignored(T)\) subtrees
with the required properties\@. In particular, the root edge of each
subtree is an ignored edge in \(T\)\@. Since the root edge of \(T_{f}\)
is \(e\), and \(T_{f}\) contains the disoriented edge \(f\), we have
the required partition into \(1+\numberignored(T)\) subtrees\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Lemma 5.6}}
\end{alabsubtext}
%
\apar{5-15}
Now consider how a single proper disorientation moves about a tree of
otherwise properly oriented edges\@. By Lemma~5.3, there
is one disoriented edge and no ignored edges, thus the root vertex is
oriented away from the root edge\@. The protocol forces the disoriented
edge to move about the tree in a depth-first order that is induced by
the port numbers at each vertex\@. For the typical tree (Figure~7),
suppose that \(e_{1}\) is properly disoriented with \(v\) in passing
mode \(P\)\@. The disorientation moves from \(e_{1}\) into subtree
\(T_{1}\), moves about \(T_{1}\) in depth-first order, and returns to
\(e_{1}\) with \(v_{1}\) in mode \(P\) and \(v\) in waiting mode
\(W\)\@. The disorientation then passes to \(e_{2}\)\@. This process
continues until \(v\) is oriented toward \(e\) in mode \(P\)\@. That is,
the disorientation has moved out of \(T\)\@. We call the sequence of
edges that a disorientation follows as it depth-first searches the
tree a \emph{trip}\@.
%
\apar{5-16}
Two things can affect the trip that a disorientation takes in an
arbitrary tree\@. One is encountering an ignored edge\@. When this
happens, the protocol replaces the ignored and disoriented edges with
properly oriented ones, and the trip terminates\@. The other possibility
is that the disorientation will encounter an improperly oriented edge
(e.g., \(e_{4}\) of Figure~6)\@. In this case, it is possible for the
resulting arbitration to make the disoriented edge bounce, causing the
subtree below to be either skipped (when approaching from above), or
traversed again (when approaching from below)\@. A bounce, since it
requires an arbitration, can occur at most once at each particular
edge\@. Call an edge that has not yet participated in an arbitration an
\emph{unarbitrated edge}\@.
%
\apar{5-17}
Thus, we can measure progress in the protocol by observing the
decrease in the number of ignored and unarbitrated edges\@.
%
\begin{alabsubtext}{Lemma}{5.7}{}
%
Let \(T\) be a tree with root vertex \(v\) and root edge~\(e\)\@. Then:
%
\begin{enumerate}
%
\item[1.] \(T\) contains only oriented edges; or
%
\item[2.] every oriented edge in \(T\) is properly oriented, and there is
exactly one disoriented edge; or
%
\item[3.] in at most \(2\treesize(T)\) interactions between processors of
\(T\), the total number of ignored plus unarbitrated edges in \(T\) will
decrease by \(1\)\@.
%
\end{enumerate}
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Lemma}{5.7}{}
\prooffontshape
%
\apar{Prove Lemma 5.7-1}
In order for the protocol to be active, \(T\) must contain at least one
disoriented edge, so we assume that (1) does not hold\@. If there are
no ignored edges in \(T\), then arbitrations will only occur if some
edges are improperly oriented, so we also assume that (2) does not
hold\@. Then \(T\) can contain exactly one improperly disoriented edge;
or exactly one properly disoriented edge and some improperly oriented
edges; or some ignored or some disoriented edges\@.
%
\apar{Prove Lemma 5.7-2}
All interactions that occur in \(T\) are at disoriented edges, and these
cause each such edge to progress along its depth-first trip through
\(T\)\@.
%
\apar{Prove Lemma 5.7-3}
If there is exactly one disoriented edge, and it is improper, then an
arbitration will occur at the edge to turn it into a properly
disoriented one, thus decreasing the number of unarbitrated edges by
one\@.
%
\apar{Prove Lemma 5.7-4}
If there is exactly one properly disoriented edge, then in at most
\(2\treesize(T)\) interactions the disorientation must encounter an
improperly oriented edge and cause an arbitration\@. Note that the
disorientations could move out of \(T\) and a new one could
enter---interactions are all that is important\@.
%
\apar{Prove Lemma 5.7-5}
The final case occurs when there is more than one disoriented edge\@.
By Lemma~5.3, there must be at most one more disoriented edge than
ignored edge in \(T\)\@. By Lemma~5.6, the motions of the disoriented
edges are
occurring in disjoint portions of \(T\) connected by ignored
edges\@. The only way that a disorientation can miss an ignored edge is
for it to bounce off of an unarbitrated, improperly oriented edge,
which results in an arbitration\@. If this does not happen, at most
\(2\treesize(T)\) interactions are required before one of the
disoriented edges cancels with an ignored edge\@.
%
\apar{Prove Lemma 5.7-6}
In all cases, the number of ignored plus unarbitrated edges is reduced
by one\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Lemma 5.7}}
\end{alabsubtext}
%
\begin{alabsubtext}{Lemma}{5.8}{}
%
In at most \(\orderof{\treesize(T)^2}\) interactions within tree \(T\)
with root vertex oriented away from the root edge, exactly one edge is
properly disoriented and all other edges are properly oriented\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Lemma}{5.8}{}
\prooffontshape
%
By Lemma~5.2, no ignored edges are ever created\@. Since resolving an
ignored edge can require an arbitration, the number of ignored plus
unarbitrated edges is bounded by \(2\treesize(T)\)\@. By
Lemma~5.7, after every \(2\treesize(T)\) interactions this
number is reduced by one\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Lemma 5.8}}
\end{alabsubtext}
%
\begin{alabsubtext}{Lemma}{5.9}{}
%
Let \(G\) be a unicyclic network\@. Then the protocol deadlocks on~\(G\)\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Lemma}{5.9}{}
\prooffontshape
%
\apar{Prove Lemma 5.9-1}
We proceed by induction on the size of \(G\), and suppose that the claim
holds for all networks of smaller size\@.
%
\apar{Prove Lemma 5.9-2}
Suppose for contradiction that some particular execution of the
protocol does not deadlock on \(G\)\@. Then there is at least one
disoriented edge in \(G\), and by Lemma~5.4, there is an equal number of
ignored edges\@.
%
\apar{Prove Lemma 5.9-3}
Since ignored edges are never created, there must be an ignored edge
\(e=((v,i),(w,j))\) that existed at the beginning of the execution and
that will exist forever\@. So the processors at both ends of \(e\) never
orient toward~\(e\)\@.
%
\apar{Prove Lemma 5.9-4}
Suppose that the edge \(e\) is on the ring of \(G\)\@. We cut the network
at edge \(e\), and add a leaf vertex \(u\) with edge \(((u,0),(w,j))\) to
create a tree \(T\) with root vertex \(v\) and root edge \(e\)\@. Since
neither \(v\) nor \(w\) orient toward edge \(e\), the particular execution
of the protocol must also fail to deadlock when projected onto~\(T\)\@.
%
\apar{Prove Lemma 5.9-5}
But, since vertex \(v\) is oriented away from \(e\), by
Lemma~5.7, eventually \(T\) will contain one properly
disoriented edge, and all others will be properly oriented\@. This
properly disoriented edge must eventually move toward \(e\), and so \(e\)
cannot remain ignored\@. This contradicts the choice of~\(e\)\@.
%
\apar{Prove Lemma 5.9-6}
Thus, \(e\) must be inside a tree\@. It must connect a subtree
\(T_{w}\) of size at least 2 to the rest of \(G\)\@. (\(T_{w}\) cannot
be a leaf, because then \(e\) would not be ignored\@.) So we can cut the
network at edge \(e\), and add a leaf vertex \(u\) with edge
\(((u,0),(w,j))\) to create a new network \(G'\)\@. Vertex \(w\) never
orients toward this new edge, so the particular execution of the
protocol behaves the same when projected onto \(G'\), and so must not
deadlock\@. But \(G'\) is smaller than \(G\), and so this contradicts
the inductive assumption\@.
%
\apar{Prove Lemma 5.9-7}
Thus, the protocol always deadlocks on \(G\)\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Lemma 5.9}}
\end{alabsubtext}
%
\begin{alabsubtext}{Lemma}{5.10}{}
%
Let \(G\) be a unicyclic network\@. Then in at most
\(\orderof{\treesize(G)^{2}}\) interactions between processors of
\(G\), the protocol deadlocks\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Lemma}{5.10}{}
\prooffontshape
%
\apar{Prove Lemma 5.10-1}
Consider a possible serialization of a protocol execution on \(G\),
and consider the edge \(e\) of the ring of \(G\) that remained
ignored for the longest time\@. So long as \(e\) is ignored, the
protocol is behaving as if it is on a tree\@. So by
Lemma~5.8, this \(e\) could have remained ignored for
at most \(\orderof{\treesize(G)^{2}}\) interactions\@. After these
interactions, no edges of the cycle are ignored, and since there are
exactly as many processors as edges on the cycle, the cycle is
oriented\@.
%
\apar{Prove Lemma 5.10-2}
Any remaining ignored edges occur in subtrees of \(G\), and further
interactions cannot involve processors on the ring of \(G\), so these
interactions are confined to subtrees\@.
%
\apar{Prove Lemma 5.10-3}
Consider a subtree \(T\), and its ignored edge \(e\) closest to the
root\@. By Lemma~5.8, for the subtree \(S\) with root edge \(e\), in
\(\orderof{\treesize(S)^{2}}\) interactions within \(S\), every edge
of \(S\) is properly oriented except for one properly disoriented edge
\(f\)\@. By Lemma~5.5, edge \(f\) is covered by the ignored edge \(e\),
and in \(\orderof{\treesize(T)}\) interactions they will cancel\@.
%
\apar{Prove Lemma 5.10-4}
Thus at most \(\orderof{\treesize(G)^{2}}\) interactions occur before
the protocol \nohyphenl{deadlocks}\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Lemma 5.10}}
\end{alabsubtext}
%
\apar{5-18}
Thus, by Proposition~5.1 and Lemma~5.10 we have:
%
\begin{alabsubtext}{Proposition}{5.11}{}
%
If the unicyclic network \(G\) is started in any state, then in
\(\orderof{\treesize(G)^{2}}\) interactions, the protocol deadlocks
with the network oriented\@.
%
\end{alabsubtext}
%
\asection{6}{\protect\raggedright The Orientation Protocol on the
\nohyphenl{Standard}~Model}
%
\apar{6-1}
We now show how to implement the orientation protocol under the
standard model with its read/write scheduling demon\@. Each processor
runs the program of Figure~8\@. The program has an integer constant,
\algo{informal}{}{\(\idr{degree}\)}, which specifies the degree of the
processor\@.
%
\begin{figure*}
\fbox{\parbox[t]{\textwidth}{%
%
\begin{algorithm}{informal}{}
%
\begin{progstmtlist}
%
\item \(\idr{degree}\oftype\text{integer constant}\)
\comment{Degree of this processor}
%
\item \(\idr{focus}\oftype\{0,1,\dots,\idr{degree}-1\}\)
\comment{Current focus of attention}
%
\item \(\idr{mode}\oftype\{R,W,P\}\)
\comment{Current mode of protocol at focus of attention}
%
\item \(\idr{inport}\oftype
\text{\kwd{array} }0..\idr{degree}-1\text{ \kwd{of} }\{R,W,P\}\)
%
\item \(\idr{outport}\oftype
\text{\kwd{array} }0..\idr{degree}-1\text{ \kwd{of} }\{R,W,P\}\)
%
\end{progstmtlist}
%
\begin{progstmtlist}
%
\item \kwd{do forever} \{
%
\begin{progstmtlist}
%
\item \comment{Read Phase}
%
\begin{progstmtlist}
%
\item \kwd{read} \(\idr{inport}[\idr{focus}]\);
%
\end{progstmtlist}
%
\item \comment{Write Phase}
%
\item \comment{Compute mode and focus transition from the table below}
%
%{\renewcommand{\multirowsetup}{\raggedleft}
{\renewcommand{\multirowsetup}{\relax}
\newlength{\lablnthc}\settowidth{\lablnthc}{Next Mode}
\begin{tabular}{|rr|ccc|}
%
\hline
%\multicolumn{2}{|r|}{\multirow{2}{7em}{Next Mode}} &
\multicolumn{2}{|c|}{\multirow{2}{\lablnthc}{Next Mode}} &
\multicolumn{3}{c|}{Input}\\
%
& & \(W\) & \(P\) & \(R\)\\
\hline
%
\multirow{3}{4em}{Current Mode} & \(W\) & \((W,P,R)\) & \(R\) & \(W\)\\
%
& \(P\) & \(P\) & \((W,P,R)\) & \(W\)\\
%
& \(R\) &
%
{\renewcommand{\arraystretch}{0.5}
\(\left[\begin{array}{c}
%
P\\
%
\text{increment }\idr{focus}
%
\end{array}\right]\)
}
%
& \(R\) & \((W,P,R)\)\\[1.3ex]
\hline
%
\end{tabular}
}
%
\begin{progstmtlist}
%
\item \kwd{if} \((\idr{mode}\equtest\idr{inport}[\idr{focus}])\) \{
%
\begin{progstmtlist}
%
\item \(\idr{mode}\assigned\progfunc{random}\{W,P,R\}\);
%
\end{progstmtlist}
%
\item \} \kwd{else if}
\((\idr{mode}\equtest W\progand\idr{inport}[\idr{focus}]\equtest P)\) \{
%
\begin{progstmtlist}
%
\item \(\idr{mode}\assigned R\);
%
\end{progstmtlist}
%
\item \} \kwd{else if}
\((\idr{mode}\equtest P\progand\idr{inport}[\idr{focus}]\equtest R)\) \{
%
\begin{progstmtlist}
%
\item \(\idr{mode}\assigned W\);
%
\end{progstmtlist}
%
\item \} \kwd{else if}
\((\idr{mode}\equtest R\progand\idr{inport}[\idr{focus}]\equtest W)\) \{
%
\begin{progstmtlist}
%
\item \(\idr{mode}\assigned P\);
%
\item \(\idr{focus}\assigned\idr{focus}+1\bmod\idr{degree}\);
%
\end{progstmtlist}
%
\item \}
%
\item \kwd{write} \(\idr{mode}\) \kwd{to} \(\idr{outport}\);
%
\end{progstmtlist}
%
\end{progstmtlist}
%
\item \}
%
\end{progstmtlist}
%
\end{algorithm}
}}
%
\afigcap{8}{Low-level orientation protocol}
%
\end{figure*}
%
\apar{6-2}
This protocol is running the token-passing protocol of
Section~3 on each edge\@. But the processor only interacts
with the single processor that is its focus of attention\@. It suspends
its side of the token-passing protocol that is being executed with its
other neighbors\@. This suspension is invisible to the other side of the
edge, as the processor goes through exactly the same sequence of mode
changes (from \(R\) to \(P\)) that the token-passing protocol does\@.
%
\apar{6-3}
Now the fact that the processor only gives attention to one neighbor
at a time does not affect the fact that the token-passing protocol on the
edge will, if given enough attention, eventually self-stabilize in
\(\orderof{1}\) steps\@. Once stabilized, the modes at the ends of an
edge between processors \(A\) and \(B\) will go through the following
cycle:
%
\[
%
\text{\(WP\), \(RP\), \(RW\), (\(A\) suspends), \(PW\), \(PR\), \(WR\),
(\(B\) suspends), \(WP\), \ltwoemm\ldots\ltwoemm}
%
\]
%
where the possibility exists that the protocol can be suspended by a
processor turning its focus elsewhere\@. Note that while the focus is
elsewhere, the processor at the other end of the edge cannot advance
in the token-passing protocol and so must wait for focus to return\@.
%
\apar{6-4}
We now map the states of an edge under the token-passing protocol onto
the states of an edge under the high-level protocol\@. This is done by
mapping \(P\) at the low level to \(P\) at the high level, and \(W\),
\(R\) at the low level to \(W\) at the high level\@. The focus of the
processor at the low level corresponds to the orientation of the
processor at the high level\@.
%
\apar{6-5}
For the stabilized token-passing protocol, we get the following
behavior at an edge\@.
%
\[\begin{array}{r|ccccccc}
%
\text{Level}\\
\hline
%
\text{Low} & WP & RP & RW & PW & PR & WR & WP\\[1ex]
%
\text{High} & \rightportstate{W}\leftportstate{P} &
\rightportstate{W}\leftportstate{P} & \leftportstate{P}\leftportstate{W} &
\rightportstate{P}\leftportstate{W} & \rightportstate{P}\leftportstate{W} &
\rightportstate{W}\rightportstate{P} & \rightportstate{W}\leftportstate{P}
%
\end{array}\]
%
Stabilization at the low level exactly corresponds to arbitration of
improperly disoriented edges at the high level\@.
%
\apar{6-6}
Observe that when the network is oriented, so is every edge: therefore
the token-passing protocol is suspended at each edge, and it is
impossible for the scheduler to make any progress\@. In the high-level
protocol, orientation may be achieved even when an edge is improperly
oriented\@. Similarly, at the low level, orientation may be achieved
even before the token-passing protocol has stabilized at an edge\@.
Thus:
%
\begin{alabsubtext}{Theorem}{6.1}{}
%
Under the read/write scheduling demon, there exists a uniform
orientation protocol that will self-stabilize on any unicyclic network
\(G\) into an oriented configuration within
\(\orderof{\treesize(G)^{2}}\) expected number of progress-making
processor activations\@.
%
\end{alabsubtext}
\sentence
%
\asection{7}{Acknowledgments}
%
\apar{7-1}
We wish to thank Amos Israeli for showing us that the problem was
interesting, Joe Culberson for much stimulating discussion and for
catching subtle bugs in our earlier versions, and the referees for
their extraordinarily thorough work and excellent suggestions\@.
%
\asection{Appendix:}{Supplementary Code}
%
\apar{A-1}
The journal archive containing this article also contains files
providing an executable program to explore the state space of the
token-passing algorithm in Figure~8, verifying that the system is
self-stabilizing\@.
%
\begin{itemize}
%
\item \filename{readme.txt} is a brief description of the files below\@.
%
\item \filename{explore.c}, \filename{explore.h} implement a
general-purpose state-exploration algorithm\@.
%
\item \filename{problem.h} provides the functional interface to the
state transitions for a given problem\@.
%
\item \filename{problem.c} implements the state transitions for the
particular token-passing algorithm of Figure~8\@.
%
\item \filename{Makefile} compiles the programs above, producing an
executable file called \filename{explore}\@. On most UNIX systems you
need only type \emph{make}\@.
%
\item \filename{explore.out} is the output of \filename{explore}\@.
%
\end{itemize}
%
\end{articletext}
%
\begin{articlebib}
%
\bibliographystyle{\the\rbibstyle}
\bibliography{cj96-05}
%
\end{articlebib}
%
\end{document}