% The Chicago Journal of Theoretical Computer Science, Volume 1998, Article 2
% Published by MIT Press, Cambridge, Massachusetts USA
% Copyright 1998 by Massachusetts Institute of Technology
%
\newtoks\rstyle
\rstyle={cjlook}
%
\documentclass[v1.1,published]{cjstruct}
%
% Local style resources for this article.
%
\usestyleresource{amstext}
\usestyleresource{eepic}
%
% Local macro definitions for this article.
%
\catcode`\@=11
% Define each \nopagebreak... helper only if it is not already provided
% by the document class.  \undeftex is itself never defined, so
% \ifx<macro>\undeftex is true exactly when <macro> is still undefined.
% The @-catcode is temporarily set to "letter" so the kernel penalty
% registers (\@beginparpenalty etc.) can be accessed.
\ifx\nopagebreakbeginpar\undeftex
% Forbid a page break just after the first line of a paragraph/list.
\newcommand{\nopagebreakbeginpar}{\@beginparpenalty10000}
\fi
\ifx\nopagebreakendpar\undeftex
% Forbid a page break just before the last line of a paragraph/list.
\newcommand{\nopagebreakendpar}{\@endparpenalty10000}
\fi
\ifx\nopagebreakinteritem\undeftex
% Forbid a page break between consecutive list items.
\newcommand{\nopagebreakinteritem}{\@itempenalty10000}
\fi
\catcode`\@=12
%
% Select an upright (normal) font shape, used inside proof environments.
\newcommand{\prooffontshape}{\fontshape{n}\selectfont}
%
% \setof{x}{P}: set-builder notation {x : P} with auto-sized braces
% and thin spaces against the braces.
\newcommand{\setof}[2]{\left\{\,#1:#2\,\right\}}
% \goto: arrow denoting a (trace-preserving) translation between
% classes of transition systems (see Section 2.1).
\newcommand{\goto}{\rightarrow}
% \actlsr: the logic "forall-CTL*" (universal fragment of CTL*),
% followed by an explicit trailing space for use in running text.
\newcommand{\actlsr}{\(\forall\)CTL\(^\star\) }
% \zug{x}: angle-bracket tuple <x>; \mbox keeps it unbroken across lines.
\newcommand{\zug}[1]{\mbox {\(\langle #1 \rangle \)}}
% \stam{...}: swallows its argument — used to comment out long passages.
\newcommand{\stam}[1]{}
% \bft / \bff: the boldface keywords "true" / "false".
\newcommand{\bft}{\mbox{\emph{\textbf{true}}}}
\newcommand{\bff}{\mbox{\emph{\textbf{false}}}}
% \squarebox{dim}: an empty square box with side length #1.
\def\squarebox#1{\hbox to #1{\hfill\vbox to #1{\vfill}}}
% \registered: superscript registered-trademark sign, built by
% overlaying a tiny "R" on a circle (\mathhexbox20D).
\def\registered{\({}^{\ooalign%
{\hfil\raise.07ex\hbox{{\tiny\textup{R}}}%
\hfil\crcr\scriptsize\mathhexbox20D\cr}}\)}
%
% end of local macro definitions for this article
%
\begin{document}
%
\begin{articleinfo}
%
\publisher{The MIT Press}
%
\publishercomment{%
{\Large
\begin{center}
Volume 1998, Article 2\\
\textit{16 March 1998}
\end{center}
}
\par\noindent
ISSN 1073-0486\@. MIT Press Journals, Five Cambridge Center, Cambridge, MA
02142-1493 USA; (617)253-2889;
\emph{journals-orders@mit.edu, journals-info@mit.edu}\@.
Published one article at a time in \LaTeX\ source form on the
Internet\@. Pagination varies from copy to copy\@. For more
information and other articles see:
\begin{itemize}
\item \emph{http://mitpress.mit.edu/CJTCS/}
\item \emph{http://www.cs.uchicago.edu/publications/cjtcs/}
\item \emph{ftp://mitpress.mit.edu/pub/CJTCS}
\item \emph{ftp://cs.uchicago.edu/pub/publications/cjtcs}
\end{itemize}
\sentence
\par\noindent
The \emph{Chicago Journal of Theoretical Computer Science} is
abstracted or indexed in \emph{Research Alert,\registered
SciSearch,\registered Current Contents\registered/Engineering
Computing \& Technology}, and \emph{CompuMath Citation Index\@.\registered}
}
%
\copyrightstmt{%
\copyright 1998 The Massachusetts Institute of Technology\@.
Subscribers are licensed to use journal articles in a variety of
ways, limited only as required to insure fair attribution to authors
and the journal, and to prohibit use in a competing commercial
product\@. See the journal's World Wide Web site for further
details\@. Address inquiries to the Subsidiary Rights Manager, MIT
Press Journals; (617)253-2864;
\emph{journals-rights@mit.edu}\@.\pagebreak[2]
}
%
\journal{Chicago\nolinebreak[1] Journal of Theoretical Computer\nolinebreak[1] Science}
\journalcomment{%
The \emph{Chicago Journal of Theoretical Computer Science}
is a peer-reviewed scholarly journal in theoretical computer
science\@. The journal is committed to providing a forum for
significant results on theoretical aspects of all topics in computer
science\@.
\par
\begin{trivlist}
\item[\emph{Editor in chief:}] Janos Simon
\item[\emph{Consulting editors:}]
Joseph Halpern, Stuart A.\ Kurtz, Raimund Seidel
\item[\emph{Editors:}]
\begin{tabular}[t]{lll}
Martin Abadi & Greg Frederickson & John Mitchell \\
Pankaj Agarwal & Andrew Goldberg & Ketan Mulmuley \\
Eric Allender & Georg Gottlob & Gil Neiger \\
Tetsuo Asano & Vassos Hadzilacos & David Peleg \\
Laszl\'o Babai & Juris Hartmanis & Andrew Pitts \\
Eric Bach & Maurice Herlihy & James Royer \\
Stephen Brookes & Ted Herman & Alan Selman \\
Jin-Yi Cai & Stephen Homer & Nir Shavit \\
Anne Condon & Neil Immerman & Eva Tardos \\
Cynthia Dwork & Howard Karloff & Sam Toueg \\
David Eppstein & Philip Klein & Moshe Vardi \\
Ronald Fagin & Phokion Kolaitis & Jennifer Welch \\
Lance Fortnow & Stephen Mahaney & Pierre Wolper \\
Steven Fortune & Michael Merritt
\end{tabular}
\vspace{1ex}
\item[\emph{Managing editor:}] Michael J.\ O'Donnell
\vspace{1ex}
\item[\emph{Electronic mail:}] \emph{chicago-journal@cs.uchicago.edu}
\end{trivlist}
\sentence
}
%
\bannerfile{cjtcs-banner.tex}
%
\volume{1998}
%
\articleid{2}
%
\selfcitation{
@article{cj98-02,
author={Orna Kupferman and Moshe Y. Vardi},
title={Verification of Fair Transition Systems},
journal={Chicago Journal of Theoretical Computer Science},
volume={1998},
number={2},
publisher={MIT Press},
month={March},
year={1998}
}
}
%
\begin{retrievalinfo}
\end{retrievalinfo}
%
\title{Verification of Fair Transition Systems}
\shorttitle{Verification of Systems}
%
\begin{authorinfo}
%
% Correction to error in cjstruct.sty version 1.1.
%
% Apply the override only when the style file reports exactly the buggy
% version (1.1); newer versions of cjstruct.sty carry the fix already.
\def\fixversion{1.1}
\ifx\cjversion\fixversion
% Corrected \printname: store the author's printed name globally and,
% when the info flag is on, also emit it as an item in the info list.
\renewcommand{\printname}[1]{
\global\authorstring={#1}
\global\authorstrue
\ifinfo\item Printed name: #1\fi
}
\fi
%
% End of correction.
%
\printname{Orna Kupferman}
\collname{}{Kupferman, Orna}
\begin{institutioninfo}
\instname{University of California at Berkeley}
\department{Department of EECS}
\address{}{Berkeley}{CA}{94720-1770}{U.S.A.}
\end{institutioninfo}
\email{orna@eecs.berkeley.edu}
% \url{http://www-cad.eecs.berkeley.edu/\(^\sim\)orna}
\end{authorinfo}
\begin{authorinfo}
\printname{Moshe Y. Vardi}
\collname{}{Vardi, Moshe Y.}
\begin{institutioninfo}
\instname{Rice University}
\department{Department of Computer Science}
\address{}{Houston}{TX}{77005-1892}{U.S.A.}
\end{institutioninfo}
\email{vardi@cs.rice.edu}
% \url{http://www.cs.rice.edu/\(^\sim\)vardi}
\end{authorinfo}
%
\shortauthors{Kupferman and Vardi}
%
\support{
Part of this work was done in Bell Laboratories during the DIMACS
Special Year on Logic and Algorithms.
Orna Kupferman's work was supported in part
by the Office of Naval Research Young Investigator Award
N00014-95-1-0520,
by the National Science Foundation CAREER Award CCR-9501708,
by the National Science Foundation grant CCR-9504469,
by the Air Force Office of Scientific Research contract F49620-93-1-0056,
by the Army Research Office MURI grant DAAH-04-96-1-0341,
by the Advanced Research Projects Agency grant NAG2-892,
and
by the Semiconductor Research Corporation contract 95-DC-324.036.
Moshe Y. Vardi's work was
supported in part by the National Science Foundation
grants CCR-9628400 and
CCR-9700061, and by a grant from the Intel Corporation.
}
%
\begin{editinfo}
\submitted{11}{02}{1997}\reported{22}{08}{1997}
\submitted{28}{08}{1997}\reported{28}{10}{1997}
\submitted{18}{11}{1997}\reported{24}{11}{1997}
\submitted{26}{11}{1997}\reported{26}{11}{1997}\published{16}{03}{1998}
\end{editinfo}
%
\end{articleinfo}
%
\begin{articletext}
\tolerance=9000
\begin{abstract}
\apar{Abstract-1}
In program verification, we check that an
implementation meets its specification\@. Both the
specification and the implementation describe the possible behaviors
of the program, although at different levels of abstraction\@.
We distinguish between two approaches
to implementation of specifications\@. The first approach is
trace-based implementation, where we require every computation of
the implementation to correlate to some computation of the
specification\@. The second approach is tree-based
implementation, where we require every computation tree embodied in
the implementation to correlate to some computation tree embodied in
the specification\@.
The two approaches to implementation are strongly related
to the linear-time versus branching-time dichotomy in temporal logic\@.
%
\apar{Abstract-2}
In this work, we examine the trace-based and the tree-based approaches
from a complexity-theoretic point of view\@. We consider and
compare the complexity of verification of fair transition
systems, modeling both the implementation and the specification in
the two approaches\@. We consider unconditional, weak, and
strong fairnesses\@.
For the trace-based approach, the corresponding problem is
fair containment\@. For the tree-based approach, the corresponding
problem is fair simulation\@.
We show that while both problems are PSPACE-complete,
their complexities in terms of the size of the implementation do not
coincide, and the trace-based approach is easier\@. As the
implementation is normally much bigger than the specification, we see
this as an advantage of the trace-based approach\@. Our results are at
variance with the known results for the case of transition systems
with no fairness, where no approach is evidently advantageous\@.
\end{abstract}
\asection{1}{Introduction}
%
\apar{1-1}
In \emph{program verification\/}, we check that an {\em
implementation\/} meets its \emph{specification\/}\@. Both the
specification and the implementation describe the possible behaviors
of the program,
but the implementation is more concrete than the specification,
or, equivalently, the specification is more abstract than the
implementation \cite{cj98-02-01}\@.
This basic notion of verification suggests a top-down method for
design development\@. Starting with a highly abstract specification,
we can construct a sequence of ``behavior descriptions\@.'' Each
description refers to its predecessor as a specification,
so it is less abstract than its predecessor\@.
The last description contains no abstractions, and constitutes the
implementation\@. Hence the name \emph{hierarchical refinement\/}
for this methodology \cite{cj98-02-28,cj98-02-29,cj98-02-25}\@.
%
\apar{1-2}
We distinguish between two approaches
to implementation of specifications\@. The first approach is
\emph{trace-based implementation},
where we require every computation of
the implementation to correlate to some computation of the
specification\@. The second approach is
\emph{tree-based implementation\/}, where we
require every computation tree
embodied in the implementation to correlate to some computation tree
embodied in the specification\@.
The exact notion of correct implementation then depends on how we
interpret correlation\@. We can, for example, interpret correlation as
identity\@.
Then, a correct trace-based implementation is one in which
every computation is also a computation of the specification, and
a correct tree-based implementation is one in which every embodied
computation tree is also embodied in the specification\@.
Numerous interpretations of correlation are suggested and studied in
the literature \cite{cj98-02-20,cj98-02-32,cj98-02-01}\@.
Here we consider a simple definition of correlation,
and interpret it as equivalence with respect to the
variables joint to the implementation and the specification,
as the implementation is typically defined over a wider set of
variables
(reflecting the fact that it is more concrete than the specification)\@.
%
\apar{1-3}
The tree-based approach is stronger
in the following sense\@. If \({\cal I}\) is a correct tree-based
implementation of the specification \({\cal S}\), then \({\cal I}\) is
also a correct trace-based implementation of \({\cal S}\)\@. As shown by
Milner \cite{cj98-02-31}, the opposite situation is not true\@.
The two approaches to implementation are strongly related
to the linear-time versus branching-time dichotomy in temporal logic
\cite{cj98-02-37}\@.
The temporal-logic analogy to the strength of the tree-based approach
is the expressiveness superiority of \(\forall\)CTL\(^\star\), the
universal fragment of CTL\(^\star\), over LTL \cite{cj98-02-08}\@. Indeed, while
a correct trace-based implementation is guaranteed to satisfy all the
LTL formulas satisfied in the specification, a correct tree-based
implementation is guaranteed to satisfy all the \(\forall\)CTL\(^\star\)
formulas satisfied in the specification \cite{cj98-02-17}\@.
%
\apar{1-4}
In this work, we examine the trace-based and tree-based approaches
from a complexity-theoretic point of view\@. More precisely, we
consider and compare the complexity of the problem of determining
whether \({\cal I}\) is a correct trace-based implementation of \({\cal
S}\), and the problem of determining whether \({\cal I}\) is a correct
tree-based implementation of \({\cal S}\)\@. The different levels of
abstraction in the implementation and the specification are reflected
in their sizes\@. The implementation is typically much larger than the
specification, and it is the implementation's size that is the computational
bottleneck\@. Therefore, of particular interest to us is the
\emph{implementation complexity} of these problems; i.e., their complexity
in terms of \({\cal I}\), assuming \({\cal S}\) is fixed\@.\footnote{
The distinction between the relative sizes of the implementation and
the specification is less significant in the context of hierarchical
refinement\@. There, each internal behavior description is
both an implementation (of some step \(i\) in the chain of successive
refinements) and a specification (of step \(i+1\) in the chain)\@. Thus,
the distinction
is crucial only in the last refinement step, which handles the final,
and hence largest, implementation\@.}
%
\apar{1-5}
We model specifications and implementations by \emph{transition
systems} \cite{cj98-02-23}\@.
The systems are defined
over the sets \(AP_{\cal I}\) and \(AP_{\cal S}\) of \emph{atomic
propositions\/} used in the implementation and specification,
respectively\@. Thus, the alphabets of the systems are
\(2^{AP_{\cal I}}\) and \(2^{AP_{\cal S}}\)\@. Recall that usually the
implementation has more variables than the specification; hence,
\(AP_{\cal I} \supseteq AP_{\cal S}\)\@. We therefore interpret
correlation as equivalence with respect to \(AP_{\cal S}\)\@.
In other words, associating behaviors of the implementation with those of
the specification, we first project the behaviors onto \(AP_{\cal S}\)\@.
Within this framework, correct trace-based implementation corresponds
to \emph{containment}, and correct tree-based implementation
corresponds to \emph{simulation} \cite{cj98-02-30}\@.
%
\apar{1-6}
We start by reviewing and examining transition systems with no
fairness conditions\@. It is well known that simulation can be checked in
polynomial time \cite{cj98-02-31,cj98-02-06,cj98-02-03,cj98-02-19},
whereas the containment problem is in PSPACE \cite{cj98-02-42}\@.
We show that the latter problem is PSPACE-hard; thus the tree-based
approach is easier than the trace-based approach\@. Yet, once we turn to
consider the implementation complexity of simulation and containment,
the trace-based approach is easier than the tree-based approach\@.
Indeed, we show that the implementation complexity of simulation is
PTIME-complete, which is most likely harder than the
NLOGSPACE-complete bound for the implementation
complexity of containment\@. For these reasons, when we consider transition systems
with no fairness, there is no clear advantageous approach\@.
This is reminiscent of the computational
relations of branching-time model checking and linear-time model
checking: while model checking is easier
for the branching paradigm, the implementation complexities of model
checking in the two paradigms coincide \cite{cj98-02-26,cj98-02-10,cj98-02-44,cj98-02-07}\@.
%
\apar{1-7}
Often, we want our implementations and specifications to
describe behaviors that satisfy both liveness and
safety properties\@. Then, transition systems with no fairness
condition are too weak, and we need the framework of \emph{fair
transition systems}\@. We consider unconditional, weak,
and strong fairness (also known as impartiality,
justice, and compassion, respectively)
\cite{cj98-02-27,cj98-02-14,cj98-02-34}\@.
Within this framework, correct trace-based implementation corresponds
to \emph{fair containment}, and correct tree-based
implementation corresponds to \emph{fair simulation}
\cite{cj98-02-04,cj98-02-02,cj98-02-17}\@.
Hence, it is the complexity of these
problems that should be examined when we compare the trace-based and
the tree-based approaches\@.
%
\apar{1-8}
We present a uniform method and a simple algorithm for solving the
fair-containment problem for all three types of fairness
conditions\@. Unlike \cite{cj98-02-09}, we consider the case where
both the specification and the implementation are nondeterministic,
as is appropriate in a hierarchical refinement framework\@.
We prove
that the problem is PSPACE-complete for all three types\@. For the
case where the implementation uses the unconditional or weak fairness
conditions,
our nondeterministic algorithm requires space logarithmic
in the size of the implementation (regardless of the type of fairness
condition used in the specification)\@. For the case where the
implementation uses the strong fairness condition, we suggest an
alternative algorithm that runs in time polynomial in the size of the
implementation\@. We show that these algorithms are optimal; the
implementation complexity of fair containment is
NLOGSPACE-complete for implementations that use the unconditional
or weak fairness conditions \cite{cj98-02-45} and is PTIME-complete for
implementations that use the strong fairness condition\@.
To prove the latter, we show that
the nonemptiness problem for fair transition systems with a strong
fairness condition is PTIME-hard\@.
%
\apar{1-9}
We also present a uniform method and a simple algorithm for solving
the fair-simulation problem for the three types of fairness
conditions\@. Our algorithm uses the fair-containment algorithm as a
subroutine\@. We prove that the problem is PSPACE-complete for all
three types\@.
Like Milner's algorithm for checking simulation \cite{cj98-02-33}, our
algorithm can be implemented as a calculation of a fixed-point
expression\@.
The running time of our algorithm is polynomial in the size of the
implementation\@. We show that this is optimal; thus, the implementation
complexity of fair simulation is PTIME-complete for all types of
fairness conditions\@. In fact, in proving the latter, we prove that the
implementation complexity of simulation (without fairness conditions)
is PTIME-hard as well\@.
%
\apar{1-10}
Our results (summarized in Table~1) show that when we model
the specification and the implementation by fair transition systems,
or we consider the implementation complexity of verification, the
computational advantage of the tree-based approach disappears\@.
Furthermore, when we consider the implementation complexity,
checking implementations that use unconditional or weak fairness
conditions is easier in the trace-based approach\@.
\asection{2}{Preliminaries}
\asubsection{2.1}{Fair Transition Systems}
%
\apar{2.1-1}
A fair transition system (transition system, for short)
\(S=\zug{\Sigma,W,R,W_0,L,\alpha}\)
consists of an alphabet \(\Sigma\), a finite set \(W\) of states,
a total transition relation \(R \subseteq W \times W\)
(i.e., for every \(w \in W\) there exists \(w' \in W\) such that
\(R(w,w')\)), a set \(W_0\) of initial states, a labeling function
\(L:W \rightarrow \Sigma\), and a fairness condition \(\alpha\)\@.
We will define three types of fairness conditions shortly\@.
A computation of \(S\) is a sequence \(\pi=w_0,w_1,w_2,\dots\) of
states such that for every \(i \geq 0\), we have \(R(w_i,w_{i+1})\)\@.
For an alphabet \(\Sigma\), we use \(\Sigma^*\) and \(\Sigma^\omega\)
to denote the sets of all finite and infinite words over \(\Sigma\),
respectively\@. For two words \(\rho_1 \in \Sigma^*\) and \(\rho_2 \in
\Sigma^* \cup \Sigma^\omega\), we use \(\rho_1 \cdot \rho_2\) to denote
the concatenation of \(\rho_1\) and \(\rho_2\)\@.
Each computation \(\pi=w_0,w_1,w_2,\dots\) induces the word
\(L(\pi)=L(w_0) \cdot L(w_1) \cdot L(w_2)\dots \in \Sigma^\omega\)\@.
%
\apar{2.1-2}
To determine whether a computation is fair, we refer
to the set \(\mbox{\emph{Inf\/}}(\pi)\) of states that \(\pi\) visits
infinitely often\@. Formally,
\[\mbox{\emph{Inf\/}}(\pi) = \{ w \in W \: :\mbox{ for
infinitely many }i \geq 0, \mbox{ we have } w_i=w \}\]
The way we refer to \(\mbox{\emph{Inf\/}}(\pi)\) depends upon the fairness
condition of \(S\)\@. Several types of fairness conditions are studied in
the literature\@. Here we consider three:
\begin{itemize}
\item
Unconditional fairness (or impartiality), where \(\alpha
\subseteq W\), and \(\pi\) is fair
if and only if \(\mbox{\emph{Inf\/}}(\pi) \cap \alpha \neq \emptyset\)\@.
\item
Weak fairness (or justice), where \(\alpha \subseteq 2^W
\times 2^W\), and \(\pi\) is fair if and only if for every pair \(\zug{B,G} \in
\alpha\), we have that \(\mbox{\emph{Inf\/}}(\pi) \cap (W \setminus B) =
\emptyset\) implies \(\mbox{\emph{Inf\/}}(\pi) \cap G \neq \emptyset\)\@.
\item
Strong fairness (or fairness), where \(\alpha \subseteq 2^W
\times 2^W\), and \(\pi\) is fair if and only if for every pair \(\zug{B,G} \in
\alpha\), we have that \(\mbox{\emph{Inf\/}}(\pi) \cap B \neq \emptyset\)
implies \(\mbox{\emph{Inf\/}}(\pi) \cap G \neq \emptyset\)\@.
\end{itemize}
%
\apar{2.1-3}
In addition, we consider \emph{nonfair transition systems};
i.e., transition systems in which all the computations are fair\@.
%
\apar{2.1-4}
It is easy to see that fair transition systems are essentially
notational variants of automata on infinite words \cite{cj98-02-43}\@.
In particular, the unconditional and the strong fairness conditions
correspond to the B\"uchi and Streett acceptance conditions\@.
This correspondence motivates our noncanonical definition of fairness,
where the unconditional fairness condition consists of a single
constraint, whereas the weak and strong fairness conditions
consist of a conjunction of constraints\@. In the sequel, we use results
from the theory of B\"uchi and Streett automata in the context of fair
transition systems\@.
%
\apar{2.1-5}
Defining the unconditional
fairness condition as a conjunction of constraints (that is, having
\(\alpha \subseteq 2^W\)) corresponds to the
generalized B\"uchi condition from automata theory\@. Note that such
generalized unconditional fairness conditions are very similar to weak
fairness conditions; in fact, they can be viewed as a
special case of weak fairness conditions, with \(B=W\) in all pairs\@.
Moreover, as we state formally in Lemma~1, weak fairness
conditions can be easily translated to generalized unconditional
fairness conditions\@. Indeed, a pair
\(\zug{B,G}\) in a weak fairness condition is equivalent to a set
\((W \setminus B) \cup G\) in a generalized unconditional fairness
condition\@.
%
\apar{2.1-6}
For a state \(w\), a \(w\)-computation is a computation
\(w_0,w_1,w_2,\dots\) with \(w_0=w\)\@.
We use \({\cal T}(S^w)\) to denote the set of all traces
\(\sigma_0 \cdot \sigma_1 \dots \in \Sigma^\omega\) for which there
exists a fair \(w\)-computation \(w_0,w_1,\dots\) in \(S\) with
\(L(w_i)=\sigma_i\) for all \(i \geq 0\)\@. The \emph{trace set}
\({\cal T}(S)\) of \(S\) is then defined as \(\bigcup_{w \in W_0}{\cal
T}(S^w)\)\@.
Thus, each transition system defines a subset of \(\Sigma^{\omega}\)\@.
We say that a transition system \(S\) is \emph{empty\/} if and only if \({\cal
T}(S)=\emptyset\); i.e., \(S\) has no fair computation\@.
We sometimes say that \(S\) accepts a trace \(\rho\), meaning that
\(\rho \in {\cal T}(S)\)\@.
Note that for a nonfair transition system \(S\), the set \({\cal T}(S)\)
contains all traces \(\rho \in \Sigma^\omega\) for which there exists a
computation \(\pi\) with \(L(\pi)=\rho\)\@.
%
\apar{2.1-7}
The size of a transition system and its fairness condition determine
the complexity of solving questions about it\@.
We define classes of transition systems according to these
two characteristics\@. We use \({\cal U}, {\cal W}\), and
\({\cal S}\) to distinguish between the unconditional, weak, and strong
fairness conditions, respectively\@.
We measure the size of a transition system by the number of its states
(the number of edges is at most quadratic in the number of states)
and, in the case of weak and strong fairness, also by the number of
pairs in
its fairness condition\@.
For example, the set of unconditionally fair transition systems with
\(n\) states
is denoted \({\cal U}(n)\)\@. We also use a line over the transition
system to denote the complementary transition system
(one that accepts the complementary trace set)\@. For example, the
set of transition systems that complements strongly fair transition
systems with \(n\) states and \(m\) pairs is denoted by
\(\overline{{\cal S}(n,m)}\)\@. By \cite{cj98-02-38,cj98-02-39}, every fair
transition system indeed has a complementary transition system\@.
We use \(\goto\) to denote a possible translation (that preserves the
trace sets of the systems) between various types
of transition systems\@. For example,
\({\cal W}(n,m) \goto {\cal U}(nm)\) means that each weakly fair
transition system \(S\) with \(n\) states and \(m\) pairs can be translated to
an unconditionally fair transition system \(S'\) with \(nm\) states such
that \({\cal T}(S)={\cal T}(S')\)\@.
%
\stam{
Finally, we sometimes speak about the \emph{union} and \emph{product} of
two fair transition systems\@. Given \(S\) and \(S'\), the union of \(S\) and
\(S'\) is a transition system \(S\) with \({\cal T}(S)={\cal T}(S_1) \cup
{\cal T}(S_2)\)\@. Their product is a transition system \(S''\) with \({\cal
T}(S'')={\cal T}(S_1) \cap {\cal T}(S_2)\)\@. By \cite{cj98-02-11}, every two
fair transition systems indeed have a union and a product transition
systems\@.}
\asubsection{2.2}{Trace-Based and Tree-Based Implementations}
%
\apar{2.2-1}
In this section, we formalize what it means for an implementation \(S\)
to correctly implement a specification \(S'\), in both the trace-based
and the tree-based approaches\@.
Recall that \(S\) and \(S'\) are given as fair transition
systems over the alphabets \(2^{AP}\) and \(2^{AP'}\),
respectively, with \(AP \supseteq AP'\)\@.
For technical convenience, we assume that \(AP = AP'\);
thus, the implementation and the specification are defined over the
same alphabet\@. By taking, for each \(\sigma \in 2^{AP}\), the
letter \(\sigma \cap AP'\) instead of the letter \(\sigma\), all of our
algorithms and results are also valid for the case \(AP \supset AP'\)\@.
%
\apar{2.2-2}
Recall that nonfair transition systems are of special interest\@. The
problems that formalize correct trace-based and tree-based
implementations in this framework are containment and
simulation\@.
Once we add fairness to the systems, the corresponding problems are
fair containment and fair simulation\@.
Below we define the four problems\@. All problems are defined with
respect to two transition systems: \(S=\zug{\Sigma,W,R,W_0,L,\alpha}\),
and \(S'=\zug{\Sigma,W',R',W'_0,L',\alpha'}\)\@.
\asubsubsection{2.2.1}{Containment and Fair Containment}
%
\apar{2.2.1-1}
The fair-containment problem of \(S\) and \(S'\) is to determine
whether \({\cal T}(S) \subseteq {\cal T}(S')\); that is, whether every
trace accepted by \(S\) is also accepted by \(S'\)\@. When \(S\) and \(S'\) are
nonfair, we call the problem containment\@.
%
\apar{2.2.1-2}
While containment and fair containment refer only to the set of
computations of \(S\) and \(S'\), simulation and fair simulation also refer
to the branching structure of the transition systems\@.
\asubsubsection{2.2.2}{Simulation}
%
\apar{2.2.2-1}
Let \(w\) and \(w'\) be states in \(W\) and \(W'\), respectively\@. A relation
\(H \subseteq W \times W'\) is a simulation relation from
\(\zug{S,w}\) to \(\zug{S',w'}\) if and only if the following conditions hold
\cite{cj98-02-30}:
\begin{enumerate}
\item \(H(w,w')\)\@.
\item For all \(t\) and \(t'\) with \(H(t,t')\), we have \(L(t)= L(t')\)\@.
\item
For all \(t\) and \(t'\) with \(H(t,t')\) and for all \(s \in W\) such that
\(R(t,s)\), there exists \(s' \in W'\) such that \(R'(t',s')\) and \(H(s,s')\)\@.
\end{enumerate}
%
\apar{2.2.2-2}
A simulation relation \(H\) is a simulation from \(S\) to \(S'\) if and only if
for every \(w \in W_0\) there exists \(w' \in W'_0\) such that
\(H(w,w')\)\@. If there exists a simulation from \(S\) to \(S'\), we
say that \(S\) simulates \(S'\), and we write \(S \leq S'\)\@.
Intuitively, this means that the transition system \(S'\) has more
behaviors than the transition system \(S\)\@. In fact, every \actlsr
formula that is satisfied in \(S'\) is satisfied also in \(S\)
\cite{cj98-02-05}\@.
Given \(S\) and \(S'\), the simulation problem is to determine
whether \(S \leq S'\)\@.
%
\apar{2.2.2-3}
We also mention here the \emph{bisimulation problem} \cite{cj98-02-30}\@.
Two transition systems are bisimilar if and only if they have exactly the same
behavior\@. Formally, a relation \(H \subseteq W \times W'\) from
\(\zug{S,w}\) to \(\zug{S',w'}\) is a bisimulation if, in addition to
conditions 1--3 above, the following condition also holds:
\begin{enumerate}
\item[4.]
For all \(t\) and \(t'\) with \(H(t,t')\) and for all \(s' \in W'\) such that
\(R'(t',s')\), there exists \(s \in W\) such that \(R(t,s)\) and \(H(s,s')\)\@.
\end{enumerate}
\asubsubsection{2.2.3}{Fair Simulation}
%
\apar{2.2.3-1}
Let \(H \subseteq W \times W'\) be a relation over the states of \(S\) and
\(S'\)\@. It is convenient to extend \(H\) to also relate infinite
computations of \(S\) and \(S'\)\@. For two computations
\(\pi=w_0,w_1,\dots\) in \(S\), and \(\pi'=w'_0,w'_1,\dots\) in \(S'\), we
say that \(H(\pi,\pi')\) holds if and only if \(H(w_i,w'_i)\) holds for all \(i \geq
0\)\@.
For a pair \(\zug{w,w'} \in W \times W'\), we say that \(\zug{w,w'}\) is
\emph{good in} \(H\) if and only if for every fair
\(w\)-computation \(\pi\) in \(S\), there exists a fair \(w'\)-computation
\(\pi'\) in \(S'\), such that \(H(\pi,\pi')\)\@.
%
\apar{2.2.3-2}
Let \(w\) and \(w'\) be states in \(W\) and \(W'\), respectively\@. A relation
\(H \subseteq W \times W'\) is a fair-simulation relation from
\(\zug{S,w}\) to \(\zug{S',w'}\) if and only if the following conditions hold \cite{cj98-02-17}:
\begin{enumerate}
\item \(H(w,w')\)\@.
\item For all \(t\) and \(t'\) with \(H(t,t')\), we have \(L(t)= L(t')\)\@.
\item For all \(t\) and \(t'\) with \(H(t,t')\), the pair \(\zug{t,t'}\) is
good in \(H\)\@.
\end{enumerate}
%
\apar{2.2.3-3}
A fair-simulation relation \(H\) is a fair simulation from \(S\) to
\(S'\) if and only if for every \(w \in W_0\) there exists \(w' \in W'_0\) such that
\(H(w,w')\)\@. If there exists a fair simulation from \(S\) to \(S'\), we
say that \(S\) \emph{fair simulates} \(S'\), and we write \(S \leq S'\)\@.
Intuitively, this means that the transition system \(S'\) has more
fair behaviors than the transition system \(S\)\@. In fact, every
fair-\actlsr formula (that is, every \actlsr formula with path
quantification ranging only over fair computations \cite{cj98-02-13}) that
is satisfied
in \(S'\) is also satisfied in \(S\) \cite{cj98-02-17}\@.
The fair-simulation problem is, given \(S\) and \(S'\), to determine
whether \(S \leq S'\)\@. Note that when they relate nonfair
transition systems, fair simulation and simulation coincide\@.
%
\apar{2.2.3-4}
We also mention here the \emph{fair-bisimulation problem} \cite{cj98-02-17}\@.
Two transition systems are fair bisimilar if and only if they have exactly the same
fair behavior\@. Formally, a relation \(H \subseteq W \times W'\) from
\(\zug{S,w}\) to \(\zug{S',w'}\) is a
fair bisimulation if conditions 1--3 hold with the following
symmetric definition of when a pair is good in \(H\)\@. In fair
bisimulation, a pair \(\zug{w,w'} \in W \times W'\) is good in \(H\)
if and only if for every fair \(w\)-computation \(\pi\) in \(S\), there exists a fair
\(w'\)-computation \(\pi'\) in \(S'\) such that \(H(\pi,\pi')\), and
for every fair \(w'\)-computation \(\pi'\) in \(S'\), there exists a fair
\(w\)-computation \(\pi\) in \(S\) such that \(H(\pi,\pi')\)\@.
%
\apar{2.2.3-5}
It is easy to see that simulation implies containment, and
that fair simulation implies fair containment;
that is, if \(S \leq S'\), then \({\cal T}(S) \subseteq {\cal T}(S')\)\@.
The opposite, however, is not true\@. Consider the
two transition systems \(S\) and \(S'\) presented in Figure~1\@.
Note that while the system \(S\) has two initial states, one labeled
by \(a\) and one labeled by \(b\), the system \(S'\)
has three initial states, two labeled by \(a\) and one labeled by \(b\)\@.
The trace sets of both
transition systems are \((a+b)^\omega\)\@. As such, \({\cal T}(S) \subseteq
{\cal T}(S')\), but still, \(S\) does not simulate \(S'\)\@. Indeed, no
initial state of \(S'\) can be paired, by any \(H\), to the initial state
labeled \(a\) of \(S\)\@.
%
\begin{figure}[htb]
\begin{center}
\setlength{\unitlength}{0.00625000in}
\begingroup\makeatletter\ifx\SetFigFont\undefined
\def\x#1#2#3#4#5#6#7\relax{\def\x{#1#2#3#4#5#6}}%
\expandafter\x\fmtname xxxxxx\relax \def\y{splain}%
\ifx\x\y % LaTeX or SliTeX?
\gdef\SetFigFont#1#2#3{%
\ifnum #1<17\tiny\else \ifnum #1<20\small\else
\ifnum #1<24\normalsize\else \ifnum #1<29\large\else
\ifnum #1<34\Large\else \ifnum #1<41\LARGE\else
\huge\fi\fi\fi\fi\fi\fi
\csname #3\endcsname}%
\else
\gdef\SetFigFont#1#2#3{\begingroup
\count@#1\relax \ifnum 25<\count@\count@25\fi
\def\x{\endgroup\@setsize\SetFigFont{#2pt}}%
\expandafter\x
\csname \romannumeral\the\count@ pt\expandafter\endcsname
\csname @\romannumeral\the\count@ pt\endcsname
\csname #3\endcsname}%
\fi
\fi\endgroup
\begin{picture}(457,139)(0,-10)
\path(70,77) (63.359,68.891)
(58.125,63.875)
(50.000,62.000)
%
\path(50,62) (45.500,69.797)
(44.375,75.799)
(44.000,82.375)
(44.375,88.904)
(45.500,94.766)
(50.000,102.000)
%
\path(50,102) (58.125,100.500)
(63.359,95.531)
(70.000,87.000)
%
\path(63.723,92.347)(70.000,87.000)(66.978,94.672)
\path(190,87) (196.641,95.531)
(201.875,100.500)
(210.000,102.000)
%
\path(210,102) (214.500,94.766)
(215.625,88.904)
(216.000,82.375)
(215.625,75.799)
(214.500,69.797)
(210.000,62.000)
%
\path(210,62) (201.875,63.875)
(196.641,68.891)
(190.000,77.000)
%
\path(196.463,71.878)(190.000,77.000)(193.292,69.440)
\path(116.062,100.509)(111.000,94.000)(118.530,97.361)
\path(111,94) (117.756,98.938)
(123.029,102.000)
(131.000,104.000)
%
\path(131,104) (138.971,102.000)
(144.244,98.938)
(151.000,94.000)
%
\path(110,72) (116.756,67.484)
(122.029,64.375)
(130.000,62.000)
%
\path(130,62) (137.971,64.375)
(143.244,67.484)
(150.000,72.000)
%
\path(144.534,65.826)(150.000,72.000)(142.271,69.124)
\path(310,17) (303.359,8.891)
(298.125,3.875)
(290.000,2.000)
%
\path(290,2) (285.500,9.797)
(284.375,15.799)
(284.000,22.375)
(284.375,28.904)
(285.500,34.766)
(290.000,42.000)
%
\path(290,42) (298.125,40.500)
(303.359,35.531)
(310.000,27.000)
%
\path(303.723,32.347)(310.000,27.000)(306.978,34.672)
\path(430,27) (436.641,35.531)
(441.875,40.500)
(450.000,42.000)
%
\path(450,42) (454.500,34.766)
(455.625,28.904)
(456.000,22.375)
(455.625,15.799)
(454.500,9.797)
(450.000,2.000)
%
\path(450,2) (441.875,3.875)
(436.641,8.891)
(430.000,17.000)
%
\path(436.463,11.878)(430.000,17.000)(433.292,9.440)
\put(90,77){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}\(a\)}}}}}
\put(260,102){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}\(S':\)}}}}}
\put(15,102){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}\(S:\)}}}}}
\put(410,17){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}\(b\)}}}}}
\put(330,17){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}\(a\)}}}}}
\put(410,77){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}\(a\)}}}}}
\put(330,77){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}\(a\)}}}}}
\put(170,77){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}\(b\)}}}}}
\path(356.062,40.509)(351.000,34.000)(358.530,37.361)
\path(351,34) (357.756,38.938)
(363.029,42.000)
(371.000,44.000)
%
\path(371,44) (378.971,42.000)
(384.244,38.938)
(391.000,34.000)
%
\put(90,82){\ellipse{42}{42}}
\put(170,82){\ellipse{42}{42}}
\put(330,82){\ellipse{42}{42}}
\put(410,82){\ellipse{42}{42}}
\put(330,22){\ellipse{42}{42}}
\put(410,22){\ellipse{42}{42}}
\path(330,62)(330,42)
\path(328.000,50.000)(330.000,42.000)(332.000,50.000)
\path(350,12) (356.756,7.484)
(362.029,4.375)
(370.000,2.000)
%
\path(370,2) (377.971,4.375)
(383.244,7.484)
(390.000,12.000)
%
\path(384.534,5.826)(390.000,12.000)(382.271,9.124)
\path(170,122)(170,102)
\path(168.000,110.000)(170.000,102.000)(172.000,110.000)
\path(90,122)(90,102)
\path(88.000,110.000)(90.000,102.000)(92.000,110.000)
\path(430,52)(425,37)
\path(425.632,45.222)(425.000,37.000)(429.427,43.957)
\path(410,122)(410,102)
\path(408.000,110.000)(410.000,102.000)(412.000,110.000)
\path(330,122)(330,102)
\path(328.000,110.000)(330.000,102.000)(332.000,110.000)
\path(410,62)(410,42)
\path(408.000,50.000)(410.000,42.000)(412.000,50.000)
\end{picture}
%
\afigcap{1}{\({\cal T}(S) \subseteq {\cal T}(S')\) but \(S \not \leq S'\)}
\label{nosim}
\end{center}
\end{figure}
\asubsection{2.3}{Complexity Measures}
%
\apar{2.3-1}
In the rest of this paper, we examine the traced-based and the
tree-based approaches from a complexity-theoretic point of
view\@. We consider and compare the complexity of the four
problems\@. The different levels of abstraction in the implementation
and the specification are reflected in their sizes\@. The implementation
is typically much larger than the specification, and it is the implementation's size
that is the computational bottleneck\@. Therefore, of particular
interest to us is the implementation complexity of these
problems; i.e., the complexity of checking whether
\({\cal T}(S) \subseteq {\cal T}(S')\) and \(S \leq
S'\), in terms of the size of \(S\), assuming \(S'\) is fixed\@.
%
\apar{2.3-2}
We refer to three complexity classes: PSPACE, PTIME, and NLOGSPACE,
briefly described as follows (for a full definition, see \cite{cj98-02-16})\@.
We say that a problem is \emph{in} PSPACE (PTIME) if there
exists a deterministic algorithm for solving the problem such that the
working space (time, respectively) required for executing the
algorithm is polynomial in the size of its input\@. The problem is in
NLOGSPACE if there exists a nondeterministic algorithm for
solving the problem such that the working space required for the
algorithm is logarithmic in the size of its input\@.
A problem \({\cal P}\) is complete for a complexity class \(C\)
if and only if \({\cal P}\) is in \(C\) and is also \(C\)-hard\@.
%
\apar{2.3-3}
Some of the results we study, mainly those that consider nonfair
transition systems, are known; therefore, we only review them\@.
\asection{3}{Verification of Nonfair Transition Systems}
%
\apar{3-1}
In this section, we study the complexity of the containment and
simulation problems\@. We show that while the
containment problem is harder, its implementation complexity is
lower than that of the simulation problem\@.
\asubsection{3.1}{The Containment Problem}
%
\begin{alabsubtext}{Theorem}{1}{}
\label{tc c}
The containment problem is PSPACE-complete\@.
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Theorem}{1}{}
\prooffontshape
\apar{Proof of Theorem 1-1}
The upper bound follows from the known PSPACE bound for the
fair-containment problem for unconditionally fair
transition systems (\cite{cj98-02-42}, see also Theorem~5)\@.
%
\apar{Proof of Theorem 1-2}
We can regard an unconditionally fair system
\(S=\zug{\Sigma,W,R,W_0,L,\alpha}\), with \(\alpha \subseteq W\), as a
\emph{finite-acceptance} transition system\@. The traces of a finite-acceptance
transition system are finite words over the alphabet
\(\Sigma\)\@. A finite
computation \(\pi=w_0,\dots,w_k\) is ``fair'' in \(S\) (that is, \(L(\pi)\)
is accepted) if and only if \(w_k \in \alpha\)\@. We call the states in \(\alpha\)
\emph{final} states\@. A finite-acceptance transition system \(S\) is
universal if and only if \({\cal T}(S)=\Sigma^*\)\@.
In \cite{cj98-02-35}, Meyer and Stockmeyer prove a PSPACE lower bound for
the problem of determining whether a finite-acceptance transition
system \(S\) is universal (the framework in \cite{cj98-02-35} is of regular
expressions, yet regular expressions can be linearly translated to
finite-acceptance transition systems)\@.
%
\apar{Proof of Theorem 1-3}
We reduce the universality problem for finite-acceptance transition
systems to the containment problem\@.
Consider a finite-acceptance transition system
\(S=\zug{\Sigma,W,R,W_0,L,\alpha}\)\@. Let \[S'=\zug{\Sigma
\cup \{\#\},W \cup
\{w_{\#}\}, R \cup R_{\#}, W_0, L', W \cup \{w_{\#}\}}\] be a nonfair
transition system, where:
\begin{itemize}
\item
For every \(w \in (\alpha \cup \{w_{\#}\})\), we have \(R_{\#}(w,w_{\#})\)\@.
Thus \(S'\) has the transitions of \(S\) augmented with transitions from
all the final states to the state \(w_{\#}\), which has a self loop\@.
\item
For every state \(w \in W\), we have \(L'(w)=L(w)\)\@. In addition,
\(L'(w_{\#})=\{\#\}\)\@.
\end{itemize}
%
\apar{Proof of Theorem 1-4}
We prove that \(\Sigma^* \subseteq {\cal T}(S)\) if and only if \(\Sigma^\omega \cup
(\Sigma^* \cdot \{\#\}^\omega) \subseteq {\cal T}(S')\)\@.
Assume first that \(\Sigma^\omega \cup (\Sigma^* \cdot \{\#\}^\omega)
\subseteq {\cal T}(S')\), and assume, by way of contradiction, that
there exists \(\rho \in \Sigma^*\) such that \(\rho \not \in {\cal T}(S)\)\@.
Let \(\pi=w_0,w_1,\dots\) be an accepting computation of \(S'\) on \(\rho
\cdot \#^\omega\)\@. By the definition of \(L'\), we have that
\(\pi \in W^* \cdot w_{\#}^\omega\)\@. Let \(k\) be such that \(w_k \neq
w_{\#}\) and \(w_{k+1} = w_{\#}\)\@. As \(R_{\#}(w_k, w_{k+1})\), it must be
that \(w_k \in \alpha\)\@. In addition, for all \(0 \leq i < k\), we have
\(R(w_i,w_{i+1})\)\@. Hence \(w_0,\dots,w_{k}\) is an accepting
computation of \(S\) on \(\rho\), and we reach a contradiction\@.
%
\apar{Proof of Theorem 1-5}
Assume now that \(\Sigma^* \subseteq {\cal T}(S)\)\@. Consider a trace
\(\rho \in \Sigma^*\)\@. Let \(w_0,w_1,\dots,w_k\) be an accepting
computation of \(S\) on \(\rho\)\@. Clearly,
\(w_0,w_1,\dots,w_k,w_{\#}^\omega\) is an accepting computation of \(S'\)
on \(\rho \cdot \#^\omega\)\@. Hence, \(\Sigma^* \cdot
\{\#\}^\omega \subseteq {\cal T}(S')\)\@. Next, consider a trace
\(\rho= \sigma_0,\sigma_1,\dots \in \Sigma^\omega\)\@. We define a tree
that embodies all the possible runs of \(S\) on finite prefixes of
\(\rho\)\@. The tree has a root labeled \(\epsilon\)\@. The nodes of level \(1\)
(that is, nodes that have a path of length 1 from the root) are
states \(w_0\) in \(W_0\) for which \(L(w_0)=\sigma_0\)\@. For \(l \geq 0\), a
node \(w\) of level \(l+1\) has as successors nodes \(w'\) for which \(R(w,w')\)
and \(L(w')=\sigma_{l+1}\)\@. Because \(\Sigma^* \subseteq {\cal T}(S)\), the
tree
embodies accepting runs for all the (infinitely many) finite prefixes of
\(\rho\), and is therefore infinite\@. Hence, by K\"onig's lemma, we can
pick a sequence \(\pi=\epsilon,w_0,w_1,\dots\)\ such that \(w_0 \in W_0\),
and for all \(j \geq 0\), we have that \(L(w_j)=\sigma_j\) and
\(R(w_j,w_{j+1})\)\@. As such, \(\pi\) is an accepting run of \(S'\) on
\(\rho\)\@.
As we can easily construct a nonfair transition system for
\(\Sigma^\omega \cup (\Sigma^* \cdot \{\#\}^\omega)\), we are done\@.
{\nopagebreakbeginpar\markendlst{Proof of Theorem 1}}
\end{alabsubtext}
%
\begin{alabsubtext}{Theorem}{2}{}
\label{tc ic}
The implementation complexity of the containment problem is
NLOGSPACE-complete\@.
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Theorem}{2}{}
\prooffontshape
In Theorem~6, we prove an NLOGSPACE upper bound
for the implementation complexity of the more general
fair-containment problem for unconditionally fair transition systems\@.
The lower bound follows easily by a reduction from the
nonreachability problem in a directed graph, proved to be
NLOGSPACE-complete in \cite{cj98-02-22}\@.\footnote{The proof
in \cite{cj98-02-22} is for the reachability
problem\@. Yet, for every problem \(P\), we have that \(P\) is
NLOGSPACE-complete if and only if \(P\) is co-NLOGSPACE-complete (see
\cite{cj98-02-21,cj98-02-41} for NLOGSPACE = co-NLOGSPACE; the argument for the
completeness is easy)\@.}
Given a directed graph \(G\) with two designated nodes \(s\) and
\(t\), we can view \(G\) as a nonfair transition system \(S_G\) over
\(\Sigma=\{\sigma,s,t\}\) in which all states are initial states, all
states except \(s\) and \(t\) are labeled \(\sigma\), and the states \(s\) and
\(t\) are labeled with \(s\) and \(t\), respectively\@. It is
easy to see that the node \(t\) is not reachable from \(s\) if and only if
\({\cal T}(S_G) \subseteq \{\sigma,t\}^* \cdot \{\sigma,s\}^\omega \cup
\{\sigma,t\}^\omega\)\@. Since we can specify the expression on the
right with a fixed nonfair transition system, we are done\@.
{\nopagebreakbeginpar\markendlst{Proof of Theorem 2}}
\end{alabsubtext}
\asubsection{3.2}{The Simulation Problem}
%
\begin{alabsubtext}{Theorem}{3}{}
\label{simulation c}
The simulation problem is PTIME-complete\@.
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Theorem}{3}{}
\prooffontshape
The upper bound is given in \cite{cj98-02-31}\@.
The lower bound follows from the reduction in \cite{cj98-02-06} (the
reduction there proves PTIME-hardness for bisimulation, but it is
valid also for simulation)\@. An additional proof of the lower bound
is given in Theorem~4\@.
{\nopagebreakbeginpar\markendlst{Proof of Theorem 3}}
\end{alabsubtext}
%
\begin{alabsubtext}{Theorem}{4}{}
\label{simulation ic}
The implementation complexity of the simulation problem is
PTIME-complete\@.
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Theorem}{4}{}
\prooffontshape
\apar{Proof of Theorem 4-1}
Membership in PTIME follows from Theorem~3\@.
%
\apar{Proof of Theorem 4-2}
We prove hardness in PTIME by reducing the NAND circuit value
problem (NANDCV), proved to be PTIME-complete in \cite{cj98-02-18,cj98-02-15},
to the problem of determining whether a transition system \(S\)
simulates a fixed transition system \(S'\), both with no fairness\@.
In the NANDCV problem, we are given a Boolean circuit \(\beta\)
constructed solely of NAND gates of fanout \(2\), and a vector
\(\zug{x_1,\dots,x_n}\) of Boolean input values\@. The problem is to
determine whether the output of \(\beta\) on \(\zug{x_1,\dots,x_n}\)
is~\(1\)\@. Formally, we denote a Boolean circuit of NAND gates by a tuple
\(\beta=\zug{G,G_{in},g_{out},T_l,T_r}\), where \(G\) is the set of internal
gates, \(G_{in}\) is the set of input gates (identified as
\(g_1,\dots,g_n\)), \(g_{out} \in G \cup G_{in}\) is the output gate,
\(T_l: G \rightarrow G \cup G_{in}\) maps each internal gate to its left
input, and \(T_r: G \rightarrow G \cup G_{in}\) maps each internal gate
to its right input\@. Note that since there are no circular dependencies
in \(\beta\), the graph induced by \(T_l\) and \(T_r\) is acyclic\@.
%
\apar{Proof of Theorem 4-3}
The idea of the reduction is as follows\@.
We define a fixed transition system \(S'\) that embodies all the NAND
circuits \(\beta\) and input vectors \(\vec{x}\) for which the value of
\(\beta\) on \(\vec{x}\) is~\(1\)\@. Then, given a circuit \(\beta\) and an
input vector \(\vec{x}\), we translate them to a transition system \(S\)
such that \(S \leq S'\) if and only if the value of \(\beta\) on \(\vec{x}\) is~\(1\)\@.
%
\begin{figure}[htb]
\begin{center}
\setlength{\unitlength}{0.00875000in}
\begingroup\makeatletter\ifx\SetFigFont\undefined
\def\x#1#2#3#4#5#6#7\relax{\def\x{#1#2#3#4#5#6}}%
\expandafter\x\fmtname xxxxxx\relax \def\y{splain}%
\ifx\x\y % LaTeX or SliTeX?
\gdef\SetFigFont#1#2#3{%
\ifnum #1<17\tiny\else \ifnum #1<20\small\else
\ifnum #1<24\normalsize\else \ifnum #1<29\large\else
\ifnum #1<34\Large\else \ifnum #1<41\LARGE\else
\huge\fi\fi\fi\fi\fi\fi
\csname #3\endcsname}%
\else
\gdef\SetFigFont#1#2#3{\begingroup
\count@#1\relax \ifnum 25<\count@\count@25\fi
\def\x{\endgroup\@setsize\SetFigFont{#2pt}}%
\expandafter\x
\csname \romannumeral\the\count@ pt\expandafter\endcsname
\csname @\romannumeral\the\count@ pt\endcsname
\csname #3\endcsname}%
\fi
\fi\endgroup
\begin{picture}(532,339)(0,-10)
\path(169,189)(301,2)(486,52)
\path(478.799,47.982)(486.000,52.000)(477.755,51.843)
\path(364,189)(236,2)(47,52)
\path(55.245,51.887)(47.000,52.000)(54.222,48.020)
\path(171,212)(301,252)
\path(293.942,247.736)(301.000,252.000)(292.766,251.559)
\path(231.216,240.613)(226.000,247.000)(227.597,238.910)
\path(226,247)(306,77)
\path(300.784,83.387)(306.000,77.000)(304.403,85.090)
\path(326,77)(371,187)
\path(369.822,178.838)(371.000,187.000)(366.120,180.353)
\path(209,247)(164,137)
\path(165.178,145.162)(164.000,137.000)(168.880,143.647)
\path(221,47) (225.295,44.141)
(228.810,41.651)
(233.680,37.602)
(236.000,32.000)
%
\path(236,32) (233.488,29.448)
(228.844,27.625)
(222.777,26.531)
(216.000,26.166)
(209.223,26.531)
(203.156,27.625)
(198.512,29.448)
(196.000,32.000)
%
\path(196,32) (198.320,37.602)
(203.190,41.651)
(206.705,44.141)
(211.000,47.000)
%
\path(205.386,40.960)(211.000,47.000)(203.204,44.312)
\path(170,135)(296,322)(487,272)
\path(478.754,272.091)(487.000,272.000)(479.767,275.961)
\path(336,62)(481,62)
\path(473.000,60.000)(481.000,62.000)(473.000,64.000)
\path(336,267)(376,287)(486,72)
\path(480.576,78.211)(486.000,72.000)(484.137,80.033)
\path(391,212)(481,257)
\path(474.739,251.633)(481.000,257.000)(472.950,255.211)
\path(336,262)(481,262)
\path(473.000,260.000)(481.000,262.000)(473.000,264.000)
\path(363,135)(236,322)(46,272)
\path(53.228,275.970)(46.000,272.000)(54.246,272.102)
\path(196,67)(156,107)
\path(163.071,102.757)(156.000,107.000)(160.243,99.929)
\path(336,257)(376,217)
\path(368.929,221.243)(376.000,217.000)(371.757,224.071)
\path(336,57)(376,37)(486,252)
\path(484.137,243.967)(486.000,252.000)(480.576,245.789)
\path(321,277) (325.295,279.859)
(328.810,282.349)
(333.679,286.398)
(336.000,292.000)
%
\path(336,292) (333.488,294.552)
(328.844,296.375)
(322.777,297.469)
(316.000,297.834)
(309.223,297.469)
(303.156,296.375)
(298.512,294.552)
(296.000,292.000)
%
\path(296,292) (298.321,286.398)
(303.190,282.349)
(306.705,279.859)
(311.000,277.000)
%
\path(303.204,279.688)(311.000,277.000)(305.386,283.040)
\put(316,257){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}101\(\swarrow\)}}}}}
\put(376,197){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}001\(\swarrow\)}}}}}
\put(156,197){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}110\(\swarrow\)}}}}}
\put(156,117){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}001\(\searrow\)}}}}}
\put(216,57){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}011\(\searrow\)}}}}}
\put(316,57){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}101\(\searrow\)}}}}}
\put(376,117){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}110\(\searrow\)}}}}}
\put(216,257){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}011\(\swarrow\)}}}}}
\path(21,57) (18.223,52.869)
(15.844,49.453)
(12.000,44.625)
(6.000,42.000)
%
\path(6,42) (3.703,44.703)
(2.062,49.375)
(1.078,55.359)
(0.750,62.000)
(1.078,68.641)
(2.062,74.625)
(3.703,79.297)
(6.000,82.000)
%
\path(6,82) (12.000,79.375)
(15.844,74.547)
(18.223,71.131)
(21.000,67.000)
%
\path(14.898,72.547)(21.000,67.000)(18.226,74.766)
\path(21,257) (18.223,252.869)
(15.844,249.453)
(12.000,244.625)
(6.000,242.000)
%
\path(6,242) (3.703,244.703)
(2.062,249.375)
(1.078,255.359)
(0.750,262.000)
(1.078,268.641)
(2.062,274.625)
(3.703,279.297)
(6.000,282.000)
%
\path(6,282) (12.000,279.375)
(15.844,274.547)
(18.223,271.131)
(21.000,267.000)
%
\path(14.898,272.547)(21.000,267.000)(18.226,274.766)
\path(511,267) (513.777,271.131)
(516.156,274.547)
(520.000,279.375)
(526.000,282.000)
%
\path(526,282) (528.297,279.297)
(529.937,274.625)
(530.922,268.641)
(531.250,262.000)
(530.922,255.359)
(529.937,249.375)
(528.297,244.703)
(526.000,242.000)
%
\path(526,242) (520.000,244.625)
(516.156,249.453)
(513.777,252.869)
(511.000,257.000)
%
\path(517.102,251.453)(511.000,257.000)(513.774,249.234)
\path(511,67) (513.777,71.131)
(516.156,74.547)
(520.000,79.375)
(526.000,82.000)
%
\path(526,82) (528.297,79.297)
(529.937,74.625)
(530.922,68.641)
(531.250,62.000)
(530.922,55.359)
(529.937,49.375)
(528.297,44.703)
(526.000,42.000)
%
\path(526,42) (520.000,44.625)
(516.156,49.453)
(513.777,52.869)
(511.000,57.000)
%
\path(517.102,51.453)(511.000,57.000)(513.774,49.234)
\put(496,57){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}1\(\swarrow\)}}}}}
\put(496,257){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}0\(\searrow\)}}}}}
\put(36,257){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}1\(\searrow\)}}}}}
\put(36,57){\makebox(0,0)[b]{\smash{{{\SetFigFont{8}{9.6}{rm}0\(\swarrow\)}}}}}
\path(391,112)(481,67)
\path(472.950,68.789)(481.000,67.000)(474.739,72.367)
\put(156,202){\ellipse{40}{30}}
\put(36,62){\ellipse{28}{28}}
\put(36,262){\ellipse{28}{28}}
\path(296,262)(236,262)
\path(244.000,264.000)(236.000,262.000)(244.000,260.000)
\path(236,62)(296,62)
\path(288.000,60.000)(296.000,62.000)(288.000,64.000)
\path(361,112)(231,72)
\path(238.058,76.264)(231.000,72.000)(239.234,72.441)
\path(316,77)(316,247)
\path(318.000,239.000)(316.000,247.000)(314.000,239.000)
\put(376,202){\ellipse{40}{30}}
\put(496,62){\ellipse{28}{28}}
\put(496,262){\ellipse{28}{28}}
\put(216,62){\ellipse{40}{30}}
\put(316,62){\ellipse{40}{30}}
\put(316,262){\ellipse{40}{30}}
\put(216,262){\ellipse{40}{30}}
\put(156,122){\ellipse{40}{30}}
\put(376,122){\ellipse{40}{30}}
\path(216,247)(216,77)
\path(214.000,85.000)(216.000,77.000)(218.000,85.000)
\path(168.614,179.165)(164.000,186.000)(164.854,177.798)
\path(164,186)(204,76)
\path(199.386,82.835)(204.000,76.000)(203.146,84.202)
\path(196,62)(51,62)
\path(59.000,64.000)(51.000,62.000)(59.000,60.000)
\path(141,112)(51,67)
\path(57.261,72.367)(51.000,67.000)(59.050,68.789)
\path(196,267)(156,287)(46,72)
\path(47.863,80.033)(46.000,72.000)(51.424,78.211)
\path(141,212)(51,257)
\path(59.050,255.211)(51.000,257.000)(57.261,251.633)
\path(196,262)(51,262)
\path(59.000,264.000)(51.000,262.000)(59.000,260.000)
\path(196,57)(156,37)(46,252)
\path(51.424,245.789)(46.000,252.000)(47.863,243.967)
\path(330.614,240.165)(326.000,247.000)(326.854,238.798)
\path(326,247)(366,137)
\path(361.386,143.835)(366.000,137.000)(365.146,145.202)
\path(171,192)(301,72)
\path(293.765,75.957)(301.000,72.000)(296.478,78.896)
\path(237.235,247.043)(230.000,251.000)(234.522,244.104)
\path(230,251)(360,131)
\path(378.000,179.000)(376.000,187.000)(374.000,179.000)
\path(376,187)(376,137)
\path(374.000,145.000)(376.000,137.000)(378.000,145.000)
\path(191.757,249.929)(196.000,257.000)(188.929,252.757)
\path(196,257)(156,217)
\path(160.243,224.071)(156.000,217.000)(163.071,221.243)
\path(348.000,200.000)(356.000,202.000)(348.000,204.000)
\path(356,202)(176,202)
\path(184.000,204.000)(176.000,202.000)(184.000,200.000)
\path(184.000,124.000)(176.000,122.000)(184.000,120.000)
\path(176,122)(356,122)
\path(348.000,120.000)(356.000,122.000)(348.000,124.000)
\path(158.000,179.000)(156.000,187.000)(154.000,179.000)
\path(156,187)(156,137)
\path(154.000,145.000)(156.000,137.000)(158.000,145.000)
\path(340.243,74.071)(336.000,67.000)(343.071,71.243)
\path(336,67)(376,107)
\path(371.757,99.929)(376.000,107.000)(368.929,102.757)
\end{picture}
%
\afigcap{2}{A fixed transition system that embodies all NAND circuits}
\label{nand}
\end{center}
\end{figure}
%
\apar{Proof of Theorem 4-4}
The transition system \(S'\) (see Figure~2) has 12 states\@.
Eight states correspond to internal gates\@. Each of these states
is an entry in the truth table of the operator NAND, attributed
with a direction, either \(\swarrow\) or \(\searrow\)\@. Thus, the ``internal
states'' of \(S'\) are \(\zug{001\swarrow}\), \(\zug{011\swarrow}\),
\(\zug{101\swarrow}\), \(\zug{110\swarrow}\), \(\zug{001\searrow}\),
\(\zug{011\searrow}\), \(\zug{101\searrow}\), and \(\zug{110\searrow}\)\@.
Four more states correspond to the input
gates of the circuit\@. Each of these states is a Boolean value,
attributed with a direction; thus the ``input states'' are
\(\zug{0\swarrow}\), \(\zug{1\swarrow}\), \(\zug{0\searrow}\), and \(\zug{1\searrow}\)\@.
The intuition is that an internal state \(\zug{l,r,val,d}\) corresponds
to a NAND gate that has the value \(l\) in its left input, has the value
\(r\) in its right input, and whose output \(val\) can be only a \(d\)-input of
other gates\@. Similarly, an input state \(\zug{val,d}\) corresponds to an
input gate with output \(val\) that can only be a \(d\) input of other gates\@.
%
\apar{Proof of Theorem 4-5}
Accordingly, the transitions from an internal state \(\zug{l,r,val,d}\)
correspond to the possible ways of having \(l\) and \(r\) as left and
right inputs, respectively\@. Thus, we have transitions from this state
to all (internal or input) states with either \(val=l\) and \(d=\swarrow\) or
\(val=r\) and \(d=\searrow\)\@. For example, the internal state
\(\zug{101\swarrow}\) has transitions to the states
\(\zug{001\swarrow}, \zug{011\swarrow}, \zug{101\swarrow},
\zug{110\searrow}, \zug{1\swarrow}\),
and \(\zug{0\searrow}\)\@. It has transitions from all states
\(\zug{l,r,val,d}\) with \(l=1\)\@. In addition, the input states
have self loops\@.
%
\apar{Proof of Theorem 4-6}
We label an internal state by either \(\swarrow\) or \(\searrow\), according
to its directional element\@. For example, the node \(\zug{101\swarrow}\) is
labeled \(\{\swarrow\}\)\@. We label an input state by both its value and
direction\@. For example, the node \(\zug{1\searrow}\) is labeled
\(\{1, \searrow\}\)\@.
We define the initial states of \(S'\) to be those with \(val=1\), and we
impose no fairness condition\@.
Clearly, the size of \(S'\) is fixed\@.
%
\apar{Proof of Theorem 4-7}
Given a circuit \(\beta=\zug{G,G_{in},g_{out},T_l,T_r}\) and an
input vector \(\vec{x}\), the transition system \(S\) is simply \(\beta\),
with attributions of directions, and labeling of input gates according
to \(\vec{x}\)\@.
More precisely, we duplicate all gates and inputs of \(\beta\) so that
the output of each gate is either always a left input of other gates, in
which case we label it with \(\swarrow\), or always a right input of other
gates, in which case we label it with \(\searrow\)\@. In addition, we add self
loops to the input gates and label them with their values\@.
As the set of initial states, we take both attributions of \(g_{out}\)\@.
Formally,
{\footnotesize
\(S=\zug{\{\swarrow,\searrow\} \cup \{0,1\} \times
\{\swarrow,\searrow\},(G \cup G_{in}) \times
\{\swarrow,\searrow\}, T, \{g_{out}\} \times \{\swarrow,
\searrow\}, L}\)},
where:
\begin{itemize}
\item
\(T(\zug{g,d},\zug{g',d'})\) if and only if one of the following holds:
\begin{enumerate}
\item
\(g \in G\), \(d'=\swarrow\), and \(T_l(g)=g'\),
\item
\(g \in G\), \(d'=\searrow\), and \(T_r(g)=g'\), or
\item
\(g \in G_{in}\), \(g=g'\), and \(d=d'\)\@.
\end{enumerate}
\item
For \(g \in G\), we have \(L(\zug{g,d})=d\)\@. For \(g_i \in G_{in}\), we
have \(L(\zug{g_i,d})=\zug{x_i,d}\)\@.
\end{itemize}
%
\apar{Proof of Theorem 4-8}
We define the \emph{depth} of a gate as the length of the longest
path from it to an input gate\@. Note that the only cycles in \(S\) are
the self loops in the input gates, and the path cannot use them\@.
Thus, for \(g \in G_{in}\), we have
\(depth(g)=0\), and for \(g \in G\), we have \(depth(g)=1+ \max
\{depth(T_l(g)),depth(T_r(g))\}\)\@.
%
\apar{Proof of Theorem 4-9}
We first prove that for a simulation relation \(H\) from \(S\) to
\(S'\) and for every pair \(\zug{\zug{g,d},\zug{val,d'}}\) or
\(\zug{\zug{g,d},\zug{l,r,val,d'}}\) in \(H\), the output of the gate \(g\)
on the vector \(\vec{x}\) is \(val\)\@.
The proof proceeds by induction on \(depth(g)\)\@.
If \(depth(g)=0\), then \(g \in G_{in}\)\@. Let \(g=g_i\)\@. By the definition of
\(L\), the state \(\zug{g_i,d}\) is labeled with \(\zug{x_i,d}\) and can
therefore be related by \(H\) only to states \(\zug{val,d'}\) for which
\(val=x_i\)\@. Assume that the claim holds for all gates of depth at
most \(i\)\@. Let \(g\) be such that \(depth(g)=i+1\)\@.
Then, \(g \in G\), and \(\zug{g,d}\) is mapped to some internal state
\(t=\zug{l,r,v,d'}\)\@. By the definition of simulation, for every
successor \(\zug{g',d'}\) of \(\zug{g,d}\) there exists a successor \(t'\)
of \(t\) such that \(H(g',t')\)\@. We know that \(\zug{g,d}\) has two
successors, \(\zug{T_l(g),\swarrow}\) and
\(\zug{T_r(g),\searrow}\)\@. By the definition of \(S'\), all the successors
of \(t\) that are labeled with \(\swarrow\) have value \(l\)\@.
Therefore, by the induction hypothesis, the output of \(T_l(g)\) is
\(l\)\@. Similarly, the output of \(T_r(g)\) is \(r\)\@.
Thus, the output of \(g\) is \(\mbox{NAND}(l,r)\)\@. By the definition of
\(S'\), we
also have \(v=\mbox{NAND}(l,r)\), and we are done\@.
%
\apar{Proof of Theorem 4-10}
We now prove that the output of \(\beta\) on \(\vec{x}\) is \(1\) if and only if \(S\)
simulates \(S'\)\@. Assume first that \(S\) simulates \(S'\)\@. Let \(H\) be the
simulation relation from \(S\) to \(S'\)\@. When we take as the initial
set of \(S'\) the states with \(val=1\), both states \(\zug{g_{out},\swarrow}\)
and \(\zug{g_{out},\searrow}\) of \(S\) are related by \(H\) to states with
\(val=1\)\@. Hence, by the above claim, the output of \(\beta\) on
\(\vec{x}\) is \(1\)\@.
%
\apar{Proof of Theorem 4-11}
Assume that the output of \(\beta\) on \(\vec{x}\) is \(1\)\@.
Consider a relation \(H\) from the states of \(S\) to the states of \(S'\)
in which \(H({\zug{g,d},\zug{l,r,val,d'}})\) for an internal gate \(g\) if and only if
\(l\) is the value of the left input to \(g\),
\(r\) is the value of the right input to \(g\),
\(val\) is the output of \(g\) on \(\vec{x}\), and \(d=d'\);
and \(H(\zug{g,d},\zug{val,d'})\) for an input gate \(g\) if and only if \(val\) is the
output of \(g\) on \(\vec{x}\) and \(d=d'\)\@.
We show that \(H\) is a simulation relation from \(S\) to \(S'\)\@.
Consider a state \(w_1\) in \(S\) with \(H(w_1,w'_1)\)\@. We have to show that
for every successor \(w_2\) of \(w_1\) there exists a successor \(w'_2\) of
\(w'_1\) such that \(H(w_2,w'_2)\)\@. Consider first the case where
\(w_1=\zug{g,d}\) for \(g \in G_{in}\)\@. By the definition of
\(S\), the state \(w_1\) has a single successor \(w_2\) with \(w_2=w_1\)\@. Let
\(w'_1\) be such that \(H(w_1,w'_1)\)\@. By the definition of \(H\), we have
\(w'_1=\zug{val,d}\), where \(val\) is the output of \(g\) on \(\vec{x}\)\@.
By the definition of \(S'\), the state \(w'_1\) has a single
successor \(w'_2\) with \(w'_2=w'_1\); hence \(H(w_2,w'_2)\)\@.
Consider now the case where \(w_1=\zug{g,d}\) for \(g \in G\)\@.
Let \(w'_1\) be such that \(H(w_1,w'_1)\)\@. By the definition of
\(H\), we have \(w'_1=\zug{l_1,r_1,val_1,d}\), where \(l_1\) is the value of the
left input to \(g\), \(r_1\) is the value of the right input to \(g\), and
\(val_1\) is the output of \(g\) on \(\vec{x}\)\@.
By the definition of \(S\), the state \(w_1\) has as successors the two
states \(\zug{T_l(g),\swarrow}\) and \(\zug{T_r(g),\searrow}\)\@.
Consider the state \(w_2=\zug{T_l(g),\swarrow}\)\@.
Let \(w'_2=\zug{l_2,r_2,val_2,\swarrow}\) be a successor of \(w'_1\) for
which \(l_2\) is the value of the left input to \(T_l(g)\),
\(r_2\) is the value of the right input to \(T_l(g)\), and
\(val_2\) is the output of \(T_l(g)\) on \(\vec{x}\)\@.
By the definition of \(S'\), such a successor \(w'_2\) exists\@.
Also, by the definition of \(H\), we have \(H(w_2,w'_2)\)\@.
The proof for the state \(\zug{T_r(g),\searrow}\) is similar\@.
{\nopagebreakbeginpar\markendlst{Proof of Theorem 4}}
\end{alabsubtext}
\asection{4}{Verification of Fair Transition Systems}
%
\apar{4-1}
In this section, we study the complexity of the fair-containment
and the fair-simulation problems\@. We show that both problems are
PSPACE-complete, and that the implementation complexity of both problems
is significantly lower\@.
\asubsection{4.1}{The Fair-Containment Problem}
%
\apar{4.1-1}
We have seen that the containment problem is PSPACE-complete, and
that its implementation complexity is NLOGSPACE-complete\@. In this
section, we check what happens to the complexity when we augment the
transition systems with fairness conditions\@.
We start with some auxiliary results\@.
%
\begin{alabsubtext}{Lemma}{1}{}
\label{w to u}
\({\cal W}(n,m) \goto {\cal U}(nm)\)\@.
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Lemma}{1}{}
\prooffontshape
Consider a weakly fair transition system
\(S=\zug{\Sigma, W, R, W_0, L, \alpha}\), with fairness condition
\(\alpha=\{\zug{B_1,G_1},\dots,\zug{B_m,G_m}\}\)\@.
For every \(W' \subseteq W\) and pair \(\zug{B_i,G_i}\), we have that
\(W'\ \cap\ (W \setminus B_i) = \emptyset\) implies that \(W'\ \cap\ G_i \neq
\emptyset\), if and only if \(W'\ \cap\ ((W \setminus B_i) \cup G_i) \neq \emptyset\)\@.
Hence, a computation \(\pi\) is fair in \(S\) if and only if it is fair in all the
unconditionally fair transition systems
\(S_i=\zug{\Sigma,W,R,W_0,L,(W \setminus B_i) \cup G_i}\)\@.
The result then follows from the known bound on the size of the
product of unconditionally fair transition systems \cite{cj98-02-11}\@.
{\nopagebreakbeginpar\markendlst{Proof of Lemma 1}}
\end{alabsubtext}
%
\begin{alabsubtext}{Lemma}{2}{}
\label{fo}
Given a transition system \(S \in {\cal U}(n)\) with a state space \(W\)
and a set \(B \subseteq W\), we can construct a transition system
\(S' \in {\cal U}(2n)\) such that a trace \(\rho\) is accepted by \(S'\) if and only if
there exists a fair computation \(\pi\) in \(S\) such that
\(\mbox{\emph{Inf\/}}(\pi) \cap B = \emptyset\) and \(L(\pi)=\rho\)\@.
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Lemma}{2}{}
\prooffontshape
\apar{Proof of Lemma 2-1}
The idea, as suggested in \cite{cj98-02-24}, is that
the transition system \(S'\) guesses a position in each of
its computations from which no state of \(B\) can be visited\@.
For that, it maintains two copies of \(S\)\@. The first copy allows visits
in states in \(B\)\@. The second copy does not allow visits in states in
\(B\)\@. Each computation starts in the first copy, and should eventually move
to the second copy\@. Formally, for
\(S=\zug{\Sigma,W,R,W_0,L,\alpha}\), we define
\(S'=\zug{\Sigma,W \times \{1,2\},R',W_0 \times \{1\},L',\alpha \times
\{2\}}\), where:
\begin{itemize}
\item
For every \(w\) and \(w'\) in \(W\) and \(i\) and \(i'\) in \(\{1,2\}\), we have
that \\
\(R'(\zug{w,i},\zug{w',i'})\) if and only if \(R(w,w')\) and either:
\begin{itemize}
\item
\(w' \not \in B\) and \(i \leq i'\), or
\item
\(w' \in B\) and \(i = i' = 1\)\@.
\end{itemize}
\item
For every \(w \in W\) and \(i \in \{1,2\}\), we have \(L'(\zug{w,i})=
L(w)\)\@.
\end{itemize}
%
\apar{Proof of Lemma 2-2}
We prove the correctness of our construction\@.
Consider a trace \(\rho \in \Sigma^\omega\), and assume there exists a
fair computation \(\pi=w_0,w_1,\dots\ \) in \(S\) such that
\(\mbox{\emph{Inf\/}}(\pi) \cap B = \emptyset\) and \(L(\pi)=\rho\)\@.
Let \(w_i\) be such that for all \(j > i\), we have \(w_j \not \in B\)\@.
The computation \(\pi' = \zug{w_0,1}, \zug{w_1,1}, \dots,
\zug{w_i,1}, \zug{w_{i+1},2}, \zug{w_{i+2},2},\dots\ \)
is then fair in \(S'\), and \(\rho\) is accepted by \(S'\)\@.
Assume now that \(\rho\) is accepted by \(S'\); thus, there exists a
computation \(\pi' = \zug{w_0,i_0}, \zug{w_1,i_1}, \dots\ \) in
\(S'\) such that \(L(\pi')=\rho\) and \(\mbox{\emph{Inf\/}}(\pi') \cap
(\alpha \times \{2\}) \neq \emptyset\)\@. As no transitions
from states in \(W \times \{2\}\) to states in \(W \times \{1\}\) are
possible, the computation \(\pi'\) eventually gets trapped in states in
\(W \times \{2\}\)\@. Therefore, as no transitions to states in
\(B \times \{2\}\) are possible, the computation \(\pi'\) visits states in
\(B\) only finitely often\@. Finally, as \(R'(\zug{w,i},\zug{w',i'})\)
only if \(R(w,w')\), the computation \(\pi=w_0, w_1,\dots\) exists and
is fair in \(S\), and we are done\@.
Note that \(S'\) is not necessarily total\@. For that, we restrict
\(S'\) to states that have at least one \(R'\)-successor\@. Clearly,
this does not affect the traces of \(S'\)\@.
{\nopagebreakbeginpar\markendlst{Proof of Lemma 2}}
\end{alabsubtext}
%
\begin{alabsubtext}{Lemma}{3}{}
\label{s to u}
\({\cal S}(n,m) \goto {\cal U}(n 2^{O(m)})\)\@.
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Lemma}{3}{}
\prooffontshape
\apar{Proof of Lemma 3-1}
Consider a strongly fair transition system
\(S=\zug{\Sigma,W,R,L,W_0,\alpha}\) with fairness condition
\(\alpha=\{\zug{B_1,G_1},\dots,\zug{B_m,G_m}\}\)\@.
With every \(I \subseteq \{1,\dots,m\}\), we associate an
unconditionally fair transition system \(S_I\) that accepts the traces
\(L(\pi)\) of \(S\) for which \(\mbox{\emph{Inf\/}}(\pi) \cap B_i \neq
\emptyset\) and \(\mbox{\emph{Inf\/}}(\pi) \cap G_i \neq \emptyset\) for
all \(i \in I\), and \(\mbox{\emph{Inf\/}}(\pi) \cap B_i = \emptyset\) for
all \(i \not \in I\)\@.
For that, we first define a weakly fair transition system
\(S'_I=\zug{\Sigma,W,R,W_0,L,\alpha_I}\) that accepts the traces \(L(\pi)\)
of \(S\) for which \(\mbox{\emph{Inf\/}}(\pi) \cap B_i \neq
\emptyset\) and \(\mbox{\emph{Inf\/}}(\pi) \cap G_i \neq \emptyset\) for
all \(i \in I\)\@. This is done by defining \(\alpha_I = \bigcup_{i \in I}
\{\zug{W, B_i}, \zug{W,G_i}\}\)\@. If \(S \in {\cal
S}(n,m)\), then \(S'_I \in {\cal W}(n,2m)\) and hence, by Lemma~1,
we can translate it to \(S''_I \in {\cal U}(2nm)\)\@.
Let \(B=\bigcup_{i \not \in I} B_i\)\@. Clearly, for every computation
\(\pi\) of \(S''_I\), we have that \(\mbox{\emph{Inf\/}}(\pi)
\cap B_i = \emptyset\) for all \(i \not \in I\) if and only if
\(\mbox{\emph{Inf\/}}(\pi) \cap B = \emptyset\)\@.
Hence, according to Lemma~2, we can construct the transition
system \(S_I\) with \(4nm\) states\@.
%
\apar{Proof of Lemma 3-2}
We prove that \({\cal T}(S)= \bigcup_{I \subseteq \{1,2,\dots,m\}}
{\cal T}(S_I)\)\@. A computation \(\pi\) is fair in \(S\) if for every pair
\(\zug{B_i,G_i}\), either \(\mbox{\emph{Inf\/}}(\pi) \cap B_i = \emptyset\),
or both \(\mbox{\emph{Inf\/}}(\pi) \cap B_i \neq \emptyset\) and
\(\mbox{\emph{Inf\/}}(\pi) \cap G_i \neq \emptyset\)\@. Let \(f(\pi)
\subseteq \{1,\dots,m\}\) be such that \(\mbox{\emph{Inf\/}}(\pi) \cap
B_i \neq \emptyset\) if and only if \(i \in f(\pi)\)\@. It is easy to see that
\(\pi\) is fair in \(S\) if and only if \(\pi\) is fair in \(S_{f(\pi)}\)\@.
The lemma now follows from the fact that union is easy for transition
systems (e.g., by defining the initial set as union of the initial
sets of the underlying systems)\@.
{\nopagebreakbeginpar\markendlst{Proof of Lemma 3}}
\end{alabsubtext}
%
\begin{alabsubtext}{Theorem}{5}{}
\label{cont comp}
The fair-containment problem \({\cal T}(S) \subseteq {\cal T}(S')\) for
\(S \in \bigcup\{{\cal U,W,S}\}\) and \(S' \in \bigcup\{{\cal U,W,S}\}\) is
PSPACE-complete\@.
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Theorem}{5}{}
\prooffontshape
\apar{Proof of Theorem 5-1}
As there are three possible types for the transition system \(S\) and
three possible types for the transition system \(S'\), we have nine
containment problems to solve to prove a PSPACE upper bound\@.
We solve them all using the same method:
\begin{enumerate}
\item
Translate the transition system \(S\) to an unconditionally fair
transition system \(S_U\)\@.
\item
Construct an unconditionally fair transition system \(\overline{S'_U}\)
that complements the transition system \(S'\)\@.
\item
Check \({\cal T}(S_U) \cap {\cal T}(\overline{S'_U})\) for
emptiness\@.
\end{enumerate}
%
\apar{Proof of Theorem 5-2}
This is how we perform step 1 for the three possible types of \(S\):
\begin{enumerate}
\item \({\cal U}(n) \goto {\cal U}(n)\)\@.
\item \({\cal W}(n,m) \goto {\cal U}(nm)\) [Lemma~1]\@.
\item \({\cal S}(n,m) \goto {\cal U}(n 2^{O(m)})\) [Lemma~3]\@.
\end{enumerate}
%
\apar{Proof of Theorem 5-3}
This is how we perform step 2 for the three possible types of \(S'\):
\begin{enumerate}
\item
\(\overline{{\cal U}(n)} \goto {\cal U}(2^{O(n \log n)})\) \cite{cj98-02-38}\@.
\item
\(\overline{{\cal W}(n,m)} \goto \overline{{\cal U}(nm)} \goto
{\cal U}(2^{O(nm \log (nm))})\) \cite{cj98-02-38}\@.
\item
\(\overline{{\cal S}(n,m)} \goto {\cal S}(2^{O(nm
\log(nm))},nm) \goto {\cal U}(2^{O(nm
\log(nm))})\) \cite{cj98-02-39}\@.
\end{enumerate}
%
\apar{Proof of Theorem 5-4}
For all three types of \(S\), going to \(S_U\) involves an at-most
exponential blowup\@. Similarly, for all three types of \(S'\),
going to \(\overline{S'_U}\) involves an at-most exponential blowup\@.
Thus, the size of the product of \(S_U\) and \(\overline{S'_U}\) is
exponential in the sizes of \(S\) and \(S'\) \cite{cj98-02-11}\@.
By \cite{cj98-02-45}, the nonemptiness problem for unconditionally fair
transition systems is in NLOGSPACE\@. Hence, as NLOGSPACE = co-NLOGSPACE,
checking the product of \(S_U\) and \(\overline{S'_U}\) for emptiness
can be done in space polynomial in their sizes\@.
%
\apar{Proof of Theorem 5-5}
Hardness in PSPACE follows from the PSPACE lower bound for trace
containment (Theorem~1)\@. Indeed, we can easily define a
fairness condition for which all the computations are fair (\(\alpha=W\)
for unconditional fairness and \(\alpha = \emptyset\) for weak and
strong fairness)\@.
{\nopagebreakbeginpar\markendlst{Proof of Theorem 5}}
\end{alabsubtext}
\asubsubsection{4.1.1}{Implementation Complexity}
%
\apar{4.1.1-1}
Recall that our main concern is the complexity in terms of the
(much larger) implementation\@.
We now turn to consider the implementation complexity of fair
containment\@.
%
\begin{alabsubtext}{Theorem}{6}{}
\label{cont imp comp}
The implementation complexity of checking \({\cal T}(S) \subseteq {\cal
T}(S')\) for \(S \in \bigcup\{{\cal U,W}\}\) and
\(S' \in \bigcup\{{\cal U,W,S}\}\) is NLOGSPACE-complete\@.
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Theorem}{6}{}
\prooffontshape
In the case where \(S \in \bigcup\{{\cal U,W}\}\), the translation of
\(S\) to \(S_U\) involves only a polynomial blowup\@. Thus, in this case,
fixing the size of \(S'\), the nondeterministic algorithm
described in the proof of Theorem~5, requires space
logarithmic in the size of \(S\)\@.
Hardness in NLOGSPACE follows from the NLOGSPACE lower bound for the
implementation complexity of containment (Theorem~2)\@.
{\nopagebreakbeginpar\markendlst{Proof of Theorem 6}}
\end{alabsubtext}
%
\apar{4.1.1-2}
So, for the case where the implementation does not use the strong
fairness condition, our fair-containment algorithm requires space
that is only polylogarithmic in the size of the implementation\@.
Clearly, this is not the case when the implementation does use the
strong fairness condition\@. Then, our algorithm requires space that is
polynomial in the size of the implementation,
and time that is exponential in the size of the implementation\@.
We suggest an alternative algorithm that requires time that is only
polynomial in the size of the implementation\@. The price is larger
complexity in terms of the size of the specification\@.
We first need the following lemma\@.
%
\begin{alabsubtext}{Lemma}{4}{}
\label{sw intersection}
For \(S_1 \in {\cal S}(n_1,m)\) and \(S_2 \in {\cal U}(n_2)\), there exists
\(S \in {\cal S}(n_1 n_2,m+1)\) such that \({\cal T}(S)={\cal T}(S_1) \cap
{\cal T}(S_2)\)\@.
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Lemma}{4}{}
\prooffontshape
Given a strongly fair transition system
\(S_1=\zug{\Sigma,W_1,R_1,W_1^0,L_1,\alpha}\)
and an unconditionally fair transition system
\(S_2=\zug{\Sigma,W_2,R_2,W_2^0,L_2,\beta}\),
consider the strongly fair transition system
\(S=\zug{\Sigma,W,R,W_0,L,\gamma}\), where:
\begin{itemize}
\item
\(W=\{\zug{w_1,w_2}\): \(w_1 \in W_1, w_2 \in W_2\), and
\(L_1(w_1)=L_2(w_2)\}\),
\item
\(R(\zug{w_1,w_2}, \zug{w'_1,w'_2})\) if and only if \(R_1(w_1,w'_1)\) and
\(R_2(w_2,w'_2)\),
\item
\(W_0=(W_1^0 \times W_2^0) \cap W\),
\item
for every \(\zug{w_1,w_2} \in W\), we have \(L(\zug{w_1,w_2})= L_1(w_1)\), and
\item
\(\gamma \subseteq 2^W \times 2^W\) is such
that \(\zug{G,B} \in \gamma\) if and only if either there exists \(\zug{G',B'} \in
\alpha\) for which \(G=(G' \times W_2) \cap W\) and \(B=(B' \times W_2)
\cap W\), or \(G=W\) and \(B=(W_1 \times \beta) \cap W\)\@.
\end{itemize}
It is easy to see that \(S\) accepts an input trace if and only if both \(S_1\) and
\(S_2\) accept it, and that its size is as required\@.
{\nopagebreakbeginpar\markendlst{Proof of Lemma 4}}
\end{alabsubtext}
%
\begin{alabsubtext}{Theorem}{7}{}
\label{Streett imp ub}
The implementation complexity of checking \({\cal T}(S) \subseteq {\cal
T}(S')\) for \(S \in {\cal S}\)
and \(S' \in \bigcup\{{\cal U,W,S}\}\) is in PTIME\@.
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Theorem}{7}{}
\prooffontshape
Given \(S\) and \(S'\), we construct, as in the proof of
Theorem~5, the unconditionally fair transition system
\(\overline{S'_U}\)\@. Unlike the algorithm there, we do not translate the
transition system \(S\) to an unconditionally fair system\@. Rather, we
check the nonemptiness of \({\cal T}(S) \cap {\cal
T}(\overline{S'_U})\)\@. The nonemptiness problem for strongly fair
transition systems can be solved in polynomial time \cite{cj98-02-12}\@.
Hence, by Lemma~4, we can check the nonemptiness
of the intersection in time polynomial in the size of \(S\)\@.
{\nopagebreakbeginpar\markendlst{Proof of Theorem 7}}
\end{alabsubtext}
%
\apar{4.1.1-3}
Note that the algorithm presented in the proof of
Theorem~7
uses time and space exponential in the size of the specification, in
contrast to
the algorithm in the proof of Theorem~5 that uses
space polynomial in the size of the specification\@.
Nevertheless, as \(S'\) is usually much smaller than \(S\),
the algorithm in the proof of Theorem~7
may work better in practice\@.
Can we do better and get the NLOGSPACE complexity
we have for implementations that use the unconditional or weak fairness
conditions?
As we now show, the answer to this question is most likely negative\@.
To see this, we first need the following theorem\@.
%
\begin{alabsubtext}{Theorem}{8}{}
\label{Streett nonemptiness lb}
The nonemptiness problem for strongly fair transition systems
is PTIME-hard\@.
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Theorem}{8}{}
\prooffontshape
\apar{Proof of Theorem 8-1}
We do a reduction from propositional anti-Horn satisfiability (PAHS)\@.
Propositional anti-Horn clauses are obtained from propositional Horn
clauses by replacing each proposition \(p\) with \(\neg p\)\@.
Thus, a propositional anti-Horn clause is either of the form
\(p \rightarrow q_1 \vee \dots \vee q_n\)
(an empty disjunction is
equivalent to false) or of the form \(q_1 \vee \dots \vee q_n\)\@.
As propositional Horn satisfiability is PTIME-complete \cite{cj98-02-36},
then clearly, so is PAHS\@.
%
\apar{Proof of Theorem 8-2}
Given an instance \(I\) of PAHS, we construct the transition system:
\[S_I=\zug{\{a\},W ,W \times W,W,L,\alpha}\]
where:
\begin{itemize}
\item
\(W=Q \cup \{s\}\), where \(Q\) is
the set of all the propositions in \(I\), and \(s \not \in Q\),
\item
\(L\) maps all states to \(a\), and
\item
\(\alpha\) is the strongly fair condition defined as follows:
\begin{itemize}
\item
for a clause \(p \rightarrow q_1 \vee \dots \vee q_n\) in \(I\), we have
\(\zug{\{p\}, \{q_1,\dots,q_n\}}\) in \(\alpha\), and
\item
for a clause \(q_1 \vee \dots \vee q_n\) in \(I\), we have
\(\zug{W, \{q_1, \dots, q_n\}}\) in \(\alpha\)\@.
\end{itemize}
\end{itemize}
%
\apar{Proof of Theorem 8-3}
Each computation of \(S_I\) induces an assignment to the
propositions in \(I\)\@. A proposition is assigned \emph{true} if and only if the
computation visits it infinitely often\@. In addition, for each
assignment to the propositions in \(I\), there exists a computation
of \(S_I\) that induces it (the state \(s\) guarantees that the above
also holds for the assignment in which all propositions are assigned
\emph{false})\@.
The definition of \(\alpha\) thus guarantees that \(I\) is
satisfiable if and only if \(S_I\) is nonempty\@.
{\nopagebreakbeginpar\markendlst{Proof of Theorem 8}}
\end{alabsubtext}
%
\apar{4.1.1-4}
We note that the nonemptiness problem for strongly fair transition systems
is already PTIME-hard for systems with pairs of a constant size\@. In
3-PAHS, all the clauses are of the form \(p \rightarrow q_1 \vee q_2\),
except possibly one clause, which has the form \(p\)\@. It is easy to see
that by introducing polynomially many new propositions, every instance
\(I\) of PAHS can be reduced to an instance \(I'\) of
3-PAHS\@. Given such \(I\), we can construct a strongly fair transition
system in which every pair has at most three states in its two
sets\@. The construction is the same as the one suggested in the proof,
only that we handle the clause \(p\) by \(|W|-1\) pairs of the form
\(\zug{\{q\},\{p\}}\), one for each \(q \in W \setminus \{p\}\)\@.
%
\apar{4.1.1-5}
Therefore, unlike unconditionally or weakly fair transition systems, for
which the
nonemptiness problem is NLOGSPACE-complete, testing strongly fair
transition systems for nonemptiness
is PTIME-complete\@. Theorems~7 and~8 imply the following theorem\@.
%
\begin{alabsubtext}{Theorem}{9}{}
\label{Streett imp tb}
The implementation complexity of checking \({\cal T}(S) \subseteq {\cal
T}(S')\) for \(S \in {\cal S}\) and \(S' \in \bigcup\{{\cal U,W,S}\}\) is
PTIME-complete\@.
\end{alabsubtext}
\asubsection{4.2}{The Fair-Simulation Problem}
\asubsubsection{4.2.1}{The Upper Bound}
%
\begin{alabsubtext}{Theorem}{10}{}
\label{fs ub}
The fair-simulation problem \(S \leq S'\) for
\(S \in \bigcup\{{\cal U,W,S}\}\) and \(S' \in \bigcup\{{\cal U,W,S}\}\)
is in PSPACE\@.
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Theorem}{10}{}
\prooffontshape
\apar{Proof of Theorem 10-1}
Given \(S=\zug{\Sigma,W,R,W_0,L,\alpha}\) and
\(S'=\zug{\Sigma,W',R',W'_0,L',\alpha'}\),
we show how to check in polynomial space that a candidate relation \(H\)
is a simulation from \(S\) to \(S'\)\@.
The claim then follows, since we can enumerate
all candidate relations using polynomial space\@.
First, we check (easily) that for every \(w \in W_0\) there exists \(w'
\in W'_0\) such that \(H(w,w')\)\@. We then check (also easily) that for
all \(\zug{w,w'} \in H\), we have \(L(w) = L(w')\)\@.
It is left to check that for all \(\zug{w,w'} \in H\), the pair
\(\zug{w,w'}\) is good in \(H\)\@. To do this, we define, for every
\(\zug{w,w'} \in H\), two transition systems\@. The alphabet of both
systems is \(W\)\@. The first transition system, \(A_w\), accepts all the fair
\(w\)-computations in \(S\)\@. The second transition system, \(U_{w'}\),
accepts all the sequences \(\pi\) in \(W^\omega\) for which there exists a
fair \(w'\)-computation \(\pi'\) in \(S'\) such that \(H(\pi,\pi')\)\@. Clearly,
the pair \(\zug{w,w'}\) is good in \(H\) if and only if \({\cal T}(A_w) \subseteq
{\cal T}(U_{w'})\)\@.
%
\apar{Proof of Theorem 10-2}
We define \(A_w\) and \(U_{w'}\) as follows\@. The transition system \(A_w\) does
nothing but trace the \(w\)-computations of \(S\), accepting those that
satisfy \(S\)'s acceptance condition\@.
Formally, \(A_w=\zug{W,W,R,\{w\},L'',\alpha}\), where for all \(u \in W\),
we have \(L''(u)=u\)\@.
%
\apar{Proof of Theorem 10-3}
The transition system \(U_{w'}\) has members of \(H\) as its set of
states\@. Thus, each state has two elements\@. The second element of each
state in \(U_{w'}\)
is a state in \(W'\), and according to \(R'\), it induces the
transitions\@. The first element in each state of \(U_{w'}\)
is a state in \(W\), and it
induces the labeling\@. This combination guarantees that a computation
\(\pi'' \in H^\omega\), whose \(W'\) elements form the
computation \(\pi' \in (W')^{\omega}\) and whose states are labeled with
\(\pi \in W^\omega\), satisfies \(H(\pi,\pi')\)\@. Formally,
\(U_{w'}=\zug{W,H,R'',W''_0,L''',\alpha''}\), where:
\begin{itemize}
\item
\(R''\) is adjusted to the new state space; i.e.,
\(R''(\zug{t,t'},\zug{q,q'})\) if and only if \(R'(t',q')\),
\item
\(W''_0=(W \times \{w'\}) \cap H\),
\item
for every
\(\zug{t,t'} \in H\), we have \(L'''(\zug{t,t'})=t\), and
\item
\(\alpha''\) is also adjusted to the new state space; i.e.,
each set \(B \subseteq W'\) in \(\alpha'\) is replaced by the set
\((W \times B) \cap H\) in \(\alpha''\)\@.
\end{itemize}
Note that \(R''\) is not necessarily total\@. For that, we restrict
\(U_{w'}\) to states that have at least one \(R''\) successor\@. Clearly,
this does not affect the traces of \(U_{w'}\)\@.
%
\apar{Proof of Theorem 10-4}
According to Theorem~5, checking that
\({\cal T}(A_w) \subseteq {\cal T}(U_{w'})\) can be done in space
polynomial in the sizes of \(A_w\) and \(U_{w'}\), thus polynomial in
the sizes of \(S\) and \(S'\)\@.
{\nopagebreakbeginpar\markendlst{Proof of Theorem 10}}
\end{alabsubtext}
%
\apar{Proof of Theorem 10-5}
We note that our algorithm can be easily adjusted to check \(S\) and
\(S'\) for fair bisimulation\@.
\asubsubsection{4.2.2}{The Lower Bound}
%
\apar{4.2.2-1}
For a transition system \(S=\zug{\Sigma,W,R,W_0,L,\alpha}\), we say that
\(S\) is \emph{universal\/} if and only if \({\cal T}(S)=\Sigma^\omega\)\@. The
\emph{universality problem} is to determine whether a given transition
system is universal\@. As we have already mentioned in the proof of
Theorem~1, Meyer and Stockmeyer prove a PSPACE lower bound for
the problem of determining whether a finite-acceptance transition
system \(S\) is universal \cite{cj98-02-35}\@. Our PSPACE lower bound for the
fair-simulation problem follows the lines of their proofs, and we
first give its details, which are easily adjusted to infinite traces\@.
%
\begin{alabsubtext}{Theorem}{11}{}
The universality problem, \({\cal T}(S)=\Sigma^\omega\) for \(S \in
\bigcup\{{\cal U,W,S}\}\), is PSPACE-hard\@.
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Theorem}{11}{}
\prooffontshape
\apar{Proof of Theorem 11-1}
We do a reduction from polynomial-space Turing machines\@.
Given a Turing machine \(T\) of space complexity \(s(n)\), we construct a
transition system \(S_T\) of size linear in \(T\) and \(s(n)\), such that
\(S_T\) is universal if and only if \(T\) does not accept the empty tape\@.
We assume, without loss of generality,
that once \(T\) reaches a final state, it loops there forever\@.
The system \(S_T\) accepts a trace \(w\) if and only if \(w\) is not an
encoding of a legal computation of \(T\) over the empty tape, or if \(w\)
is an encoding of a legal yet rejecting computation of \(T\) over the
empty tape\@. Thus, \(S_T\) rejects a trace \(w\) if and only if it encodes a legal and
accepting computation of \(T\) over the empty tape\@. Hence \(S_T\) is
universal if and only if \(T\) does not accept the empty tape\@.
%
\apar{Proof of Theorem 11-2}
We now give the details of the construction of \(S_T\)\@.
Let \(T=\zug{\Gamma,Q,\rightarrow,q_0,F}\), where \(\Gamma\) is the
alphabet, \(Q\) is the set of states, \(\rightarrow \subseteq Q \times
\Gamma \times Q \times \Gamma \times \{L,R\}\) is the transition
relation (we use \((q,a) \rightarrow (q',b,\Delta)\) to indicate that
when \(T\) is in state \(q\) and it reads the input \(a\) in the current
tape cell, it moves to state \(q'\), writes \(b\) in the current tape
cell, and its reading head moves one cell to the left/right, according
to \(\Delta\)), \(q_0\) is the initial state, and \(F \subseteq Q\) is the
set of accepting states\@.
We encode a configuration of \(T\) by a string
\(\# \gamma_1 \gamma_2 \dots (q,\gamma_i) \dots \gamma_{s(n)}\)\@.
That is, a configuration starts with \(\#\), and all its other letters
are in \(\Gamma\), except for one letter in \(Q \times \Gamma\)\@.
The meaning of such a configuration is that the \(j^\mathrm{th}\) cell in \(T\), for
\(1 \leq j \leq s(n)\), is labeled \(\gamma_j\), the reading head points
at cell \(i\), and \(T\) is in state \(q\)\@. For example, the initial
configuration of \(T\) is
\(\# (q_0,b) b \dots b\) (with \(s(n)-1\) occurrences of \(b\)s) where \(b\)
stands for an empty cell\@.
We can then encode a computation of \(T\) by a sequence of configurations\@.
%
\apar{Proof of Theorem 11-3}
Let \(\Sigma=\{\#\} \cup \Gamma \cup (Q \times \Gamma)\) and let
\(\# \sigma_1 \dots \sigma_{s(n)} \# \sigma'_1 \dots \sigma'_{s(n)}\)
be two successive configurations of \(T\)\@. We also set
\(\sigma_0\), \(\sigma'_0\), and \(\sigma_{s(n)+1}\) to \(\#\)\@.
For each triple \(\zug{\sigma_{i-1},\sigma_i,\sigma_{i+1}}\) with \(1
\leq i \leq s(n)\), we know by the transition relation of \(T\) what
\(\sigma'_i\) should be\@. In addition, the letter \(\#\) should repeat
exactly every \(s(n)+1\) letters\@.
Let \(\mathit{next}(\zug{\sigma_{i-1},\sigma_i,\sigma_{i+1}})\) denote our
expectation for \(\sigma'_i\)\@. That is:
\begin{itemize}
\item
\(\mathit{next}(\zug{\gamma_{i-1},\gamma_i,\gamma_{i+1}})=
\mathit{next}(\zug{\#,\gamma_i,\gamma_{i+1}})=
\mathit{next}(\zug{\gamma_{i-1},\gamma_i,\#})=
\gamma_i\)\@.
\item
\(\mathit{next}(\zug{(q,\gamma_{i-1}),\gamma_i,\gamma_{i+1}})=
\mathit{next}(\zug{(q,\gamma_{i-1}),\gamma_i,\#})=\)
\begin{center}
\(\left\{
\begin{array}{ll}
\gamma_i & \mbox{if \((q,\gamma_{i-1}) \rightarrow
(q',\gamma'_{i-1},L)\)}\\
(q',\gamma_i) & \mbox{if \((q,\gamma_{i-1}) \rightarrow
(q',\gamma'_{i-1},R)\)}
\end{array}
\right.\)
\end{center}
\item
\(\mathit{next}(\zug{\gamma_{i-1},(q,\gamma_{i}),\gamma_{i+1}})=
\mathit{next}(\zug{\#,(q,\gamma_{i}),\gamma_{i+1}})=\)\\
\(\mathit{next}(\zug{\gamma_{i-1},(q,\gamma_{i}),\#})=
\gamma'_i\) where \((q,\gamma_{i}) \rightarrow (q',\gamma'_{i},\Delta)\)\@.\footnote{
We assume that the reading head of \(T\) does not ``fall'' from the
right or the left boundaries of the tape\@. Thus, the case where
\((i=1)\) and \((q,\gamma_{i}) \rightarrow (q',\gamma'_{i},L)\) and the
dual case where \((i=s(n))\) and \((q,\gamma_{i}) \rightarrow
(q',\gamma'_{i},R)\) are not possible\@.}
\item
\(\mathit{next}(\zug{\gamma_{i-1},\gamma_i,(q,\gamma_{i+1})})=
\mathit{next}(\zug{\#,\gamma_i,(q,\gamma_{i+1})})=\)
\begin{center}
\(\left\{
\begin{array}{ll}
\gamma_i & \mbox{if \((q,\gamma_{i+1}) \rightarrow
(q',\gamma'_{i+1},R)\)}\\
(q',\gamma_i) & \mbox{if \((q,\gamma_{i+1}) \rightarrow
(q',\gamma'_{i+1},L)\)}
\end{array}
\right.\)
\end{center}
\item
\(\mathit{next}(\zug{\sigma_{s(n)},\#,\sigma'_1})=\#\)\@.
\end{itemize}
%
Consistency with \(\mathit{next}\) gives us a necessary condition for a trace
to encode a legal computation\@. In addition, the computation should
start with the initial configuration\@.
%
\apar{Proof of Theorem 11-4}
To check consistency with \(\mathit{next}\), \(S_T\) can use its
nondeterminism and guess when there is a violation of \(\mathit{next}\)\@. Thus,
\(S_T\) guesses \(\zug{\sigma_{i-1},\sigma_i,\sigma_{i+1}} \in \Sigma^3\),
guesses a position in the trace, checks whether the three letters
to be read starting at this position are \(\sigma_{i-1},\sigma_i\), and
\(\sigma_{i+1}\), and checks whether
\(\mathit{next}(\zug{\sigma_{i-1},\sigma_i,\sigma_{i+1}})\) is not the
letter to come \(s(n)+1\) letters later\@.
Once \(S_T\) sees such a violation, it goes to an accepting sink\@.
To check that the first configuration is not the initial
configuration, \(S_T\) simply compares the first \(s(n)+1\) letters with
\(\# (q_0,b) b \dots b\)\@.
Finally, checking whether a legal computation is rejecting is also
easy; the final configuration has to be rejecting (one with \(q \not
\in F\))\@.
{\nopagebreakbeginpar\markendlst{Proof of Theorem 11}}
\end{alabsubtext}
%
\apar{4.2.2-2}
We would like to do a similar reduction to prove that the
fair-simulation problem is PSPACE-hard\@.
For every alphabet \(\Sigma\), let \(S_\Sigma\) be the transition system
\(\zug{\Sigma,\Sigma,\Sigma \times \Sigma,\Sigma,L_\Sigma,\alpha}\),
where \(L_\Sigma(\sigma)=\sigma\) and \(\alpha\) is such that all the
computations of \(S_\Sigma\) are fair\@. That is, \(S_\Sigma\) is a
universal transition system in which each state is associated with a letter
\(\sigma \in \Sigma\) and \({\cal T}(S_\Sigma^\sigma)=\sigma \cdot
\Sigma^\omega\)\@. For example, \(S_{\{a,b\}}\) is the transition system \(S\) in
Figure~1\@.
It is easy to see that a transition system \(S\) over \(\Sigma\) is universal if and only if
\({\cal T}(S_\Sigma) \subseteq {\cal T}(S)\)\@. It is not true, however,
that \(S\) is universal if and only if \(S_\Sigma \leq S\)\@.
For example, the transition system \(S'\) in Figure~1 is universal,
yet \(S_{\{a,b\}} \not \leq S'\)\@.
Our reduction overcomes this difficulty by defining \(S_T\) in such a
way that if \(S_T\) is universal, then for each of its states \(w\), we
have \({\cal T}(S_T^w)=L(w) \cdot \Sigma^\omega\)\@.
For such \(S_T\), we do have that \(S_T\) is universal if and only if
\(S_\Sigma \leq S_T\)\@. Indeed, a relation that maps a state \(\sigma\) of
\(S_\Sigma\) to all the states of \(S_T\) that are labeled with \(\sigma\)
is a fair simulation\@.
%
\begin{alabsubtext}{Theorem}{12}{}
\label{fs lb}
The fair-simulation problem \(S \leq S'\) for
\(S \in \bigcup\{{\cal U,W,S}\}\) and \(S' \in \bigcup\{{\cal U,W,S}\}\)
is PSPACE-hard\@.
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Theorem}{12}{}
\prooffontshape
\apar{Proof of Theorem 12-1}
We prove hardness for the case where \(S'\) is a strongly fair transition
system with three pairs\@. The other cases then follow from the linear
translation of \(S'\) to any \(S' \in \bigcup\{{\cal U,W,S}\}\)\@.
As in the previous proof, we do a reduction from polynomial space
Turing machines\@. Given the Turing machine \(T\), let \(T'\) be as follows\@.
Whenever \(T\) reaches an accepting configuration, \(T'\) ``cleans'' the tape
and starts from the beginning
(i.e., empty tape and initial state at the left end of the tape)\@.
Thus, \(T\) accepts the empty tape if and only if \(T'\) has an infinite computation,
in which case it visits the initial configuration infinitely often\@.
%
\apar{Proof of Theorem 12-2}
We now define a strongly fair transition system \(S_T\) as the union of two
strongly fair transition systems, \(S_T^1\) and \(S_T^2\), with the
following behaviors\@.
Reading a trace \(\rho\), the fair transition system \(S_T^1\) checks for a
violation of the transition relation of \(T'\) in \(\rho\) (by
guessing a violation of \(\mathit{next}\))\@. If \(S_T^1\) sees a violation, it goes to an
accepting sink\@. Therefore, the acceptance condition of \(S_T^1\) is a single
pair \(\zug{W_1,G}\), where \(W_1\) is the set of states in \(S_T^1\)
and \(G\) is a clique of \(|\Sigma|\) states, each labeled with a different
letter, which \(S_T^1\) enters once it sees a violation of \(\mathit{next}\)\@.
Reading a trace \(\rho\), the fair transition system
\(S_T^2\) checks for the occurrence of the initial configuration in
\(\rho\)\@. Because the initial configuration starts with \(\#\) and has no other
\(\#\) in it, it is easy to check its occurrence\@.
If \(S_T^2\) sees the initial configuration, it goes to a rejecting sink\@.
The acceptance condition of \(S_T^2\) is therefore a single pair
\(\zug{B,\emptyset}\), where \(B\) is a clique of \(|\Sigma|\) states, each
labeled with a different letter, which \(S_T^2\) enters once it sees an
occurrence of the initial configuration\@.
Assuming the state spaces of \(S_T^1\) and \(S_T^2\) are disjoint, the
fair transition system \(S_T\) simply has one copy of \(S_T^1\), and one copy
of \(S_T^2\); its initial set is the union of the initial sets of
\(S_T^1\) and \(S_T^2\); and its fairness condition has the two pairs
\(\zug{W_1,G}\) and \(\zug{B,\emptyset}\)\@.
%
\apar{Proof of Theorem 12-3}
It follows that the fair transition system \(S_T\) accepts a trace \(\rho\)
if \(\rho\) violates \(\mathit{next}\) or never visits the initial configuration\@.
Thus, \(S_T\) does not accept a trace \(\rho\) if and only if \(\rho\) does not
violate \(\mathit{next}\) and it visits the initial configuration of \(T\)\@.
Therefore, \(S_T\) is universal if and only if \(T\) does not accept the empty tape\@.
Indeed, in both cases there exists no accepting computation of \(T\) on
the empty tape\@.
%
\apar{Proof of Theorem 12-4}
We want, however, more than a universality test\@. We want to define
\(S_T\) in such a way that if
it is indeed universal, then for each of its states \(w\), we have
\({\cal T}(S_T^w)=L(w) \cdot \Sigma^\omega\)\@.
Let \(S_T=\zug{\Sigma,W,R,W_0,L,\alpha}\)\@.
We assume that \(R \cap (W \times W_0)=\emptyset\)\@. Thus, no computation
of \(S_T\) visits states from \(W_0\) more than once (this can be easily
achieved by duplicating states in \(W_0\) that are visited more than
once)\@.
We define the transition
system \(S'_T\) by adding to \(S_T\) the transitions from all states to all of
the initial states, with the requirement that these transitions can be
used only finitely often\@. Accordingly,
\(S'_T=\zug{\Sigma,W,R \cup (W \times W_0),W_0,L,\alpha \cup
\{\zug{W_0,\emptyset}\}}\)\@.
We claim the following:
\begin{alabsubtext}{Claim}{1}{}
\(S'_T\) is universal if and only if for each \(\sigma \in \Sigma\) we have \(w_0 \in
W_0\) with \(L(w_0)=\sigma\), and for each \(w \in W\), we have
\({\cal T}(S_T^w)=L(w) \cdot \Sigma^\omega\)\@.
\end{alabsubtext}
%
\begin{alabsubtext}{Claim}{2}{}
\(S_T\) is universal if and only if \(S'_T\) is universal\@.
\end{alabsubtext}
%
\apar{Proof of Theorem 12-5}
Claim~1 is immediate, and we prove here Claim~2\@.
Clearly, every computation \(\pi\) of \(S_T\) is a computation in \(S'_T\)\@.
Since no computation in \(S_T\) visits \(W_0\) more than once, adding the
pair \(\zug{W_0,\emptyset}\) to the acceptance condition \(\alpha\), we
still have that if \(\pi\) is fair in \(S_T\), then it is also fair in
\(S'_T\)\@. Hence \({\cal T}(S_T) \subseteq {\cal T}(S'_T)\); thus, if \(S_T\)
is universal, so is \(S'_T\)\@.
Assume now that \(S_T\) is not universal\@.
Consider a trace \(\rho\) not accepted by \(S_T\)\@. Recall that \(\rho\) does not
violate \(\mathit{next}\), and it visits the initial configuration of \(T\)\@.
In other words, \(\rho\) is of the form \(yx\) where \(y\) is a prefix not
violating \(\mathit{next}\) and \(x\) is an infinite computation of \(T'\) (the
initial configuration of \(T\) is the first configuration in \(x\))\@. An
infinite computation of \(T'\) visits the initial configuration
infinitely often\@. Therefore, all the suffixes of \(\rho\) are of that
special form! Hence, if \(\rho\) is not accepted by \(S_T\), all its suffixes
are also not accepted by \(S_T\)\@. We show that this implies that \(\rho\) is
not accepted by \(S'_T\) too\@. Assume, by way of contradiction, that \(\rho\)
is accepted by \(S'_T\)\@. Let \(\pi=w_0,w_1,\dots\ \) be a fair computation
in \(S'_T\), with \(L(\pi)=\rho\)\@. By the acceptance condition of \(S'_T\), there
exists \(i \geq 0\) such that \(w_i \in W_0\), and for all \(j > i\), we have
\(w_j \not \in W_0\)\@. Hence, for all \(j \geq i\), we have
\(R(w_j,w_{j+1})\)\@. Therefore, the computation
\(\pi^i=w_i,w_{i+1},\dots\) is a fair computation in \(S_T\), and the
trace \(L(\pi^i)\) is accepted by \(S_T\), contradicting the fact it is a
suffix of a trace not accepted by \(S_T\)\@.
%
\apar{Proof of Theorem 12-6}
As discussed above, Claims~1 and~2 now imply that \(S_\Sigma
\leq S'_T\) if and only if \(S_T\) is universal; thus \(S_\Sigma \leq S'_T\) if and only if \(T\)
does not accept the empty tape\@. Because the fairness condition of
\(S_\Sigma\) can be specified in terms of either unconditional, weak, or
strong fairness, we are done\@.
{\nopagebreakbeginpar\markendlst{Proof of Theorem 12}}
\end{alabsubtext}
%
\apar{4.2.2-3}
Theorems~10 and~12 together imply the following\@.
%
\begin{alabsubtext}{Theorem}{13}{}
\label{fs c}
The fair-simulation problem \(S \leq S'\) for
\(S \in \bigcup\{{\cal U,W,S}\}\) and \(S' \in \bigcup\{{\cal U,W,S}\}\)
is PSPACE-complete\@.
\end{alabsubtext}
\asubsubsection{4.2.3}{Implementation Complexity}
%
\apar{4.2.3-1}
As our discussions thus far show, fair simulation and fair containment have the same complexity\@.
In Theorem~14, we
show that when we consider the implementation complexity of fair
simulation, the picture is different\@. Here, checking implementations
that use the unconditional or weak fairness conditions is not easier
than checking implementations that use the strong fairness condition;
hence fair simulation is most likely harder than fair containment, and
the trace-based approach is more efficient\@.
%
\begin{alabsubtext}{Theorem}{14}{}
\label{imp fs tb}
The implementation complexity of checking \(S \leq S'\) for \(S \in
\bigcup\{{\cal U,W,S}\}\) and \(S' \in \bigcup\{{\cal U,W,S}\}\) is
PTIME-complete\@.
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Theorem}{14}{}
\prooffontshape
\apar{Proof of Theorem 14-1}
We start with the upper bound\@. Consider the algorithm presented in the
proof of Theorem~10\@. It checks whether a candidate relation
\(H\) is a simulation\@. Once we fix \(S'\),
then, by Theorems~6 and~9,
the complexity of checking each pair in the candidate relation is
NLOGSPACE for \(S \in \bigcup\{{\cal U,W}\}\), and is PTIME for \(S \in
{\cal S}\)\@.
Once we fix \(S'\), the number of pairs in each candidate relation is
linear in the size of \(S\)\@. Thus, fixing \(S'\), the problem of
checking a candidate relation \(H\) is in PTIME\@.
Instead of guessing a relation \(H\) and checking
it, we do a fixed-point computation as follows \cite{cj98-02-33}\@.
Let
\[H_0=\{\zug{w,w'}: w \in W, w' \in W', \mbox{ and } L(w) =L(w')\}\]
Thus, \(H_0\) is the maximal relation that satisfies condition 1 of
fair simulation\@.
Consider the monotonic function \(f: 2^{W \times W'} \rightarrow 2^{W
\times W'}\), where
\[f(H)=H \cap \{\zug{w,w'}: \zug{w,w'} \mbox{ is good in }H\}\]
Thus, \(f(H)\) contains all the pairs in \(H\) that are good with respect
to the relation \(H\)\@.
Let \(H^\star\) be the greatest fixed point of \(f\)
when restricted to pairs in \(H_0\)\@.
That is, \(H^\star= \nu z. H_0 \cap f(z)\)\@.
%
\apar{Proof of Theorem 14-2}
We now prove that \(S \leq S'\) if and only if for every \(w \in W_0\), we have
\((\{w\} \times W'_0)\ \cap\ H^\star \neq \emptyset\)\@.
First, as \(H^\star\) is a fair-simulation relation, the direction from
right to left is immediate from the definition of fair simulation\@.
Assume that \(S \leq S'\)\@. Then, there exists a fair-simulation
relation \(H'\) such that for every \(w \in W_0\), we have
\((\{w\} \times W'_0) \cap H' \neq \emptyset\)\@.
Let \(H_i=f^i(H_0)\)\@. We show that for every \(i \geq 0\) we have \(H'
\subseteq H_i\)\@. Thus, in particular, \(H' \subseteq H^\star\), and
we are
done\@. The proof proceeds by induction on \(i\)\@.
First, since \(H'\) satisfies condition 2 of fair-simulation
relations, clearly \(H' \subseteq H_0\)\@.
Assume that \(H' \subseteq H_i\), and assume by way of
contradiction that \(H' \not \subseteq H_{i+1}\)\@. Then there exists
\(\zug{w,w'} \in H' \setminus H_{i+1}\)\@. Since \(H' \subseteq H_i\),
it follows that the pair \(\zug{w,w'}\) is not good in \(H_i\), which
implies, again by the containment of \(H'\) in \(H_i\), that it is also
not good in \(H'\)\@. Then, however, \(H'\) does not satisfy condition 3
of fair simulation, and we reach a contradiction\@.
%
\apar{Proof of Theorem 14-3}
We now consider the complexity of calculating \(H^\star\)\@.
Since \(W \times W'\) is finite, we can calculate \(H^\star\) iteratively,
starting with \(H_0\) until we reach a fixed point\@.
Because \(f\) is monotonic, we must iterate it at most polynomially
many times\@. Hence, out of the \(2^{|W \times W'|}\) candidate relations
for simulation, we actually check at most
\(|W \times W'|\) relations\@.
Recall that if \(S'\) is fixed,
the problem of checking a candidate relation
is in PTIME\@.
Also, if \(S'\) is fixed, we have only linearly many candidate
relations to check\@.
The problem is therefore in PTIME\@.
%
\apar{Proof of Theorem 14-4}
Hardness in PTIME follows from the lower bound in
Theorem~4\@.
{\nopagebreakbeginpar\markendlst{Proof of Theorem 14}}
\end{alabsubtext}
\asection{5}{Discussion}
%
\apar{5-1}
We have examined the trace-based and the tree-based approaches to
implementation from a complexity-theoretic point of view\@.
Our results show that when we model the specification and the
implementation by fair transition systems, the complexity of checking
the correctness of a trace-based implementation coincides with that of
checking the correctness of a tree-based implementation\@.
Furthermore, when we consider the implementation complexity,
then checking implementations that
use the unconditional or weak fairness condition is easier in the
trace-based approach\@.
Overall, it seems that the trace-based approach is advantageous\@.
%
\apar{5-2}
It is interesting to compare our results with the known complexities
of LTL and \actlsr model checking\@. Trace-based implementations
are part of the linear-time paradigm, and correspond to LTL model
checking\@. Tree-based implementations are part of the branching-time
paradigm, and correspond to \actlsr model checking\@.
All four problems are PSPACE-complete \cite{cj98-02-40,cj98-02-12}\@.
The model-checking algorithm of \actlsr uses as a subroutine
the model-checking algorithm of LTL \cite{cj98-02-12}\@. In a similar manner,
our fair-simulation algorithm uses as a subroutine the
fair-containment algorithm\@. Clearly, the implementation dichotomy and
the temporal-logic dichotomy have a lot in common\@.
When we turn to consider the program complexity of model checking,
which is the analog to our implementation complexity, this is no
longer true\@. The program complexity of model checking for both LTL and
\actlsr is NLOGSPACE-complete \cite{cj98-02-44,cj98-02-07}\@. In contrast, we see
here that implementation is easier in the trace-based approach\@.
%
\apar{5-3}
Our results are summarized in Table~1\@. All the
complexities in the table denote tight bounds\@.
%
\begin{table}[htb]
\small
\begin{center}
\begin{tabular}{l||l|l|l|l|}
& & Implementation & & Implementation \\
& & Complexity & & Complexity \\
& Fair & of Fair & Fair & of Fair \\
& Containment & Containment & Simulation & Simulation \\ \hline \hline
\(S\) and \(S'\) with & PSPACE & NLOGSPACE & PTIME & PTIME \\
No Fairness & [Theorem 1] & [Theorem 2] &
[Theorem 3] & [Theorem 4] \\ \hline
\(S \in \bigcup\{{\cal U,W}\}\) and & PSPACE & NLOGSPACE & PSPACE & PTIME \\
\(S' \in \bigcup\{{\cal U,W,S}\}\) & [Theorem 5] &
[Theorem 6] & [Theorem 13] & [Theorem 14]
\\ \hline
\(S \in {\cal S}\) and & PSPACE & PTIME & PSPACE & PTIME \\
\(S' \in \bigcup\{{\cal U,W,S}\}\) & [Theorem 5] &
[Theorem 9] & [Theorem 13] & [Theorem 14]
\\ \hline
\end{tabular}
\end{center}
\normalsize
\caption{Is \(S\) a correct implementation of \(S'\)?}
\label{table}
\end{table}
%
\end{articletext}
%
\bibliographystyle{alpha}
\bibliography{cj98-02}
%
\end{document}