\documentclass[11pt,twoside]{article} % Fontsize 11 point
\usepackage{latexsym,colordvi,makeidx} % load symbols like \Box
% colordvi provides the \Black / \Brown colour commands used below;
% makeidx supports the (currently disabled) index.
\bibliographystyle{plain} % actually the vG-style is used
\textwidth 6.5 in % paper 8.5x11in
\textheight 9 in % text 6.5x 9in
\oddsidemargin 0 pt % 1 inch margin comes for free
\evensidemargin 0 pt % 1 inch margin comes for free
\topmargin 0 pt % 4 margins of 1in each
\headheight 0 pt % default 12pt = 0.42cm
\headsep 0 pt % default 25pt = 0.88cm
% (headheight/headsep are deliberately zero for the title page only; they
% are restored to 12pt/18pt in the document body to make room for the
% running headers.)
%\makeindex
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%% Pagestyle (headers and footers) %%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\makeatletter
% Page style `robheadings': slanted running heads with the page number at
% the outer edge and empty footers. Even pages show \leftheader (the paper
% title, defined just before \title below); odd pages show the current
% section title, fed in via \sectionmark.
\def\ps@robheadings{%
\let\@oddfoot\@empty\let\@evenfoot\@empty
\def\@evenhead{\Black{\thepage\hfil\slshape\leftmark}}%
\def\@oddhead{\Black{{\slshape\rightmark}\hfil\thepage}}%
\let\@mkboth\markboth
\def\subsectionmark##1{}
\def\sectionmark##1{\markboth{\leftheader}{##1}}}
% Compact variant of article.cls's \@maketitle: bold title, reduced
% vertical space around the author block, and no \date.
\def\@maketitle{%
\newpage
\null
%\vskip 2em% a bit of space removed (< 2em)
\begin{center}%
\let \footnote \thanks
{\LARGE\bf \@title \par}% \bf added
\vskip 2em% was: 1.5em
{\large
\lineskip .5em%
\begin{tabular}[t]{c}%
\@author
\end{tabular}}%
\vskip .3em% \date and extra space removed
\end{center}%
\par}
% thebibliography as in article.cls, except that the running head is set
% to \leftheader/\refname so it matches ps@robheadings.
\renewenvironment{thebibliography}[1]
{\section*{\refname
\@mkboth{\leftheader}{\refname}}%
\list{\@biblabel{\@arabic\c@enumiv}}%
{\settowidth\labelwidth{\@biblabel{#1}}%
% \itemsep 3pt
\leftmargin\labelwidth
\advance\leftmargin\labelsep
\@openbib@code
\usecounter{enumiv}%
\let\p@enumiv\@empty
\renewcommand\theenumiv{\@arabic\c@enumiv}}%
\sloppy\clubpenalty4000\widowpenalty4000%
\sfcode`\.\@m}
{\def\@noitemerr
{\@latex@warning{Empty `thebibliography' environment}}%
\endlist}
% theindex as in article.cls, except for the raised bold heading and the
% running head set to \leftheader/\indexname.
\renewenvironment{theindex}
{\if@twocolumn
\@restonecolfalse
\else
\@restonecoltrue
\fi
\columnseprule \z@
\columnsep 35\p@
\twocolumn[\section*{\raisebox{5pt}[0pt][0pt]{\Large\bf \indexname}}]%
\@mkboth{\leftheader}{\indexname}%
\parindent\z@
\parskip\z@ \@plus .3\p@\relax
\let\item\@idxitem}
{\if@restonecol\onecolumn\else\clearpage\fi}
\makeatother
\pagestyle{robheadings}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%% Hyperlinks %%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% HTML hyperlink support via "html:" \special's, for DVI-to-HTML/PS
% translators (latex2html-style).
% NOTE(review): in this copy the angle-bracket payloads of the \special's
% were missing (apparently stripped as HTML tags somewhere upstream), so
% \hname and \href emitted empty specials and were effectively no-ops.
% Reconstructed below from the standard "html:<a ...>" special convention;
% confirm against the original source. (### in the \def body yields a
% literal # followed by argument 1.)
\def\hname#1{\special{html:<a name="#1">}\special{html:</a>}}
\def\href#1#2{\special{html:<a href="###1">}{#2}\special{html:</a>}}
% Convenience wrappers: anchor+label, index entry+anchor, and hyperlinked
% \ref / \cite.
\newcommand{\hlabel}[1]{\hname{#1}\label{#1}}
\newcommand{\hindex}[1]{\index{#1}\hname{#1}}
\newcommand{\hhref}[1]{\href{#1}{\ref{#1}}}
\newcommand{\hcite}[1]{\href{#1}{\cite{#1}}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%% Theorem-like environments %%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Theorem-like environments; all except `theo' number within sections.
\newtheorem{defi}{Definition}[section]
\newtheorem{theo}{Theorem}
\newtheorem{prop}{Proposition}[section]
\newtheorem{lemm}{Lemma}[section]
\newtheorem{coro}{Corollary}[section]
% Wrappers taking a key #1: each sets a hyperlink anchor plus \label with a
% type prefix (df-, thm-, pr-, lem-, cor-) for the reference macros defined
% below, and switches the body to upright type (\rm) instead of the default
% theorem italics. Variants closing with \hfill$\Box$ append an
% end-of-proof box (for results stated without a separate proof);
% `propositioni' is `proposition' without the box.
\newenvironment{definition}[1]{\begin{defi} \rm \hlabel{df-#1} }
{\end{defi}}
\newenvironment{theorem}[1]{\begin{theo} \rm \hlabel{thm-#1} }
{\end{theo}}
\newenvironment{proposition}[1] {\begin{prop} \rm \hlabel{pr-#1} }
{\hfill$\Box$\end{prop}}
\newenvironment{propositioni}[1]{\begin{prop} \rm \hlabel{pr-#1} }
{\end{prop}}
\newenvironment{lemma}[1]{\begin{lemm} \rm \hlabel{lem-#1} }
{\hfill$\Box$\end{lemm}}
\newenvironment{corollary}[1]{\begin{coro} \rm \hlabel{cor-#1} }
{\hfill$\Box$\end{coro}}
% Proof environment: bold "Proof:" label, ends with an end-of-proof box.
\newenvironment{proof}{\begin{trivlist} \item[\hspace{\labelsep}\bf Proof:]}
{\hfill$\Box$\end{trivlist}}
% Hyperlinked reference macros: \df{key} expands to "Definition~N" linking
% to the df-key anchor set by the `definition' environment, and similarly
% for the other kinds. \ctr/\fig/\tab/\sect take the bare label key.
\newcommand{\df}[1]{\href{df-#1}{Definition~\ref{df-#1}}}
\newcommand{\thm}[1]{\href{thm-#1}{Theorem~\ref{thm-#1}}}
\newcommand{\pr}[1]{\href{pr-#1}{Proposition~\ref{pr-#1}}}
\newcommand{\lem}[1]{\href{lem-#1}{Lemma~\ref{lem-#1}}}
\newcommand{\cor}[1]{\href{cor-#1}{Corollary~\ref{cor-#1}}}
\newcommand{\ctr}[1]{\href{#1}{Counterexample~\ref{#1}}}
\newcommand{\fig}[1]{\href{#1}{Figure~\ref{#1}}}
\newcommand{\tab}[1]{\href{#1}{Table~\ref{#1}}}
\newcommand{\sect}[1]{\href{#1}{Section~\ref{#1}}}
% Compact bulleted list (tighter spacing than the standard itemize).
\newenvironment{itemise}{\begin{list}{$\bullet$}{\leftmargin 18pt
\labelwidth\leftmargini\advance\labelwidth-\labelsep
\topsep 4pt \itemsep 2pt \parsep 2pt}}{\end{list}}
% Abstract re-set as a small-type list with symmetric margins.
\renewenvironment{abstract}{\begin{list}{}
{\rightmargin\leftmargin
\listparindent 1.5em
\parsep 0pt plus 1pt}
\small\item}{\end{list}}
\makeatletter
% `counterexample': a new float type built directly on LaTeX's internal
% float machinery (\@float / \end@float), with a single-column and a
% double-column (starred) form.
\newcounter{counterexample}
\def\fps@counterexample{htb}
\def\ftype@counterexample{1}
\def\ext@counterexample{lof}
\def\fnum@counterexample{\counterexamplename~\thecounterexample}
% NOTE(review): \ftype@counterexample{1} equals the float-type bit of
% figures, and \ext@counterexample{lof} writes caption entries to the list
% of figures -- presumably intentional (counterexamples ordered together
% with figures); confirm before reusing this pattern.
\newenvironment{counterexample}
{\@float{counterexample}}
{\end@float}
\newenvironment{counterexample*}
{\@dblfloat{counterexample}}
{\end@dblfloat}
\newcommand\counterexamplename{Counterexample}
\makeatother
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%% New Commands %%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Notational macros. Several symbols are hand-built from picture mode,
% \raisebox and negative \hspace; the dimensions are manually tuned, so
% edit with care.
\newcommand{\phrase}[1]{\hindex{#1}{\em #1}} % new concept
\newcommand{\pf}{\noindent {\bf Proof:\ }} % beginning of proof
\newcommand{\dcup}{\stackrel{\mbox{\huge .}}{\cup}} % disjoint union
\newcommand{\concatenate}{;\!} % relation composition
% \rest draws the restriction symbol (vertical bar with a small backslash)
% in picture mode.
\newcommand{\rest}{ % restriction operator
\begin{picture}(2.16,3.2)
\thinlines
\put(1.12,-0.48){\line(0,1){3.52}}
\put(0.8,1.6){\tiny $\backslash$}
\end{picture} }
\newcommand{\dl}[1]{\mbox{\rm I\hspace{-0.75mm}#1}} % openface letter
\newcommand{\dc}[1]{\mbox{\rm {\raisebox{.4ex}{\makebox % openface character
[0pt][l]{\hspace{.2em}\scriptsize $\mid$}}}#1}}
\newcommand{\IT}{\mbox{\sf T\hspace{-5.5pt}T}} % openface T (terms)
\newcommand{\plat}[1]{\raisebox{0pt}[0pt][0pt]{$#1$}} % no vertical space
\newcommand{\Id}[1]{[\hspace{-1.4pt}[#1]\hspace{-1.2pt}]} % denotation
% \In{x} and \rec{x}: double resp. single slanted "chevron" brackets
% around x, assembled from stacked tiny slashes/backslashes.
\newcommand{\In}[1]{\plat{ % notation
\stackrel{\mbox{\tiny $/\hspace{-2.1pt}/$}}
{\raisebox{-.3ex}[.3ex]{\tiny $\backslash
\hspace{-2.1pt}\backslash$}}\!\!#1\!\!
\stackrel{\mbox{\tiny $\backslash\hspace{-1.8pt}\backslash$}}
{\raisebox{-.3ex}[.3ex]{\tiny $/\hspace{-1.8pt}/$}} }}
\newcommand{\rec}[1]{\plat{ % recursion
\stackrel{\mbox{\tiny $/$}}
{\raisebox{-.3ex}[.3ex]{\tiny $\backslash$}}
\!\!#1\!\!
\stackrel{\mbox{\tiny $\backslash$}}
{\raisebox{-.3ex}[.3ex]{\tiny $/$}} }}
% Labelled-transition arrows: \goto{a} is an arrow labelled a, \gonotto{a}
% its negation, \gort{a} the ready-trace arrow with a crossed-plus shaft.
\newcommand{\goto}[1]{\stackrel{#1}{\longrightarrow}} % transition
\newcommand{\gonotto}[1]{\hspace{4pt}\not\hspace{-4pt} % no transition
\stackrel{#1\ }{\longrightarrow}}
\newcommand{\gort}[1]{\stackrel{#1}{~\makebox[0pt][c] % ready trace
{$\times$}\makebox[0pt][c]{$+$}\!\!\longrightarrow}} % transition
% \bis{x}: the bisimulation symbol (underlined double-headed arrow) with
% subscript x; \nobis{x} is its negated form.
\newcommand{\bis}[1]{ \; % bisimulation
\raisebox{.3ex}{$\underline{\makebox[.7em]{$\leftrightarrow$}}$}
\,_{#1}\,}
\newcommand{\nobis}[1]{\mbox{$\,\not\hspace{-2.5pt} % no bisimulation
\raisebox{.3ex}{$\underline{\makebox[.7em]{$\leftrightarrow$}}$}
\,_{#1}\,$}}
\newcommand{\si}{\stackrel{\rightarrow}{\raisebox{0pt}
[1pt][0pt]{$\scriptstyle \leftarrow$}}} % simulation
\newcommand{\sii}{\stackrel{\subset~}{\raisebox{0pt}[1pt][0pt]
{$\scriptstyle \rightarrow$}}} % simulation inclusion
% Small-caps names for process-graph components.
\newcommand{\nd}{\mbox{\sc nodes}}
\newcommand{\ed}{\mbox{\sc edges}}
\newcommand{\rt}{\mbox{\sc root}}
\newcommand{\pd}{\mbox{\sc paths}}
% Use the "var" glyph shapes for phi and epsilon throughout the document.
\renewcommand{\phi}{\varphi}
\renewcommand{\epsilon}{\varepsilon}
\newcommand{\weg}[1]{} % remove: \weg{...} discards (comments out) its argument
%\newfont{\fsc}{eusm10} % frenchscript letters
%\newcommand{\pow}{\mbox{\fsc P}} % french P (powerset)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%% Abbreviations %%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Openface (blackboard-style, via \dl/\dc above) and calligraphic
% abbreviations for the domains and semantics used in the paper.
\newcommand{\IN}{\dl{N}} % natural numbers
\newcommand{\IG}{\dc{G}} % graphs
\newcommand{\IP}{\dl{P}} % transition space
\newcommand{\IO}{\dc{O}} % observations
\newcommand{\pow}{{\cal P}} % powerset
\newcommand{\fO}{{\cal O}} % a semantics
\newcommand{\fN}{{\cal N}} % a semantics
\newcommand{\fL}{{\cal L}} % modal language
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\def\leftheader{\hfill The linear time -- branching time spectrum I}
\title{The Linear Time -- Branching Time Spectrum I%
\thanks{This is an extension of \hcite{vG90a-full}. The research
reported in this paper has been initiated at CWI in Amsterdam,
continued at the Technical University of Munich, and finalized at
Stanford University. It has been supported by Sonderforschungsbereich
342 of the TU M\"unchen and by ONR under grant number N00014-92-J-1974.
Part of it was carried out in the preparation of a course Comparative
Concurrency Semantics, given at the University of Amsterdam, spring 1988.
A coloured version of this paper is available at
\tt http://boole.stanford.edu/pub/spectrum1.ps.gz.}\\
\Large The Semantics of Concrete, Sequential Processes}
\author{R.J. van Glabbeek\\[-3pt]
\footnotesize Computer Science Department, Stanford University\\[-3pt]
\footnotesize Stanford, CA 94305-9045, USA\\[-3pt]
\footnotesize \tt rvg@cs.stanford.edu\\[-3pt]
\footnotesize \tt http://theory.stanford.edu/\~{}rvg}
\begin{document}
\maketitle
\thispagestyle{empty}
\begin{abstract}
In this paper various semantics in the linear time -- branching time
spectrum are presented in a uniform, model-independent way. Restricted
to the class of finitely branching, concrete, sequential processes,
only fifteen of them turn out to be different, and
most semantics found in the literature that can be defined uniformly
in terms of action relations coincide with one of these fifteen.
Several testing scenarios, motivating these semantics, are presented,
phrased in terms of `button pushing experiments' on generative and
reactive machines. Finally twelve of these semantics are applied to a
simple language for finite, concrete, sequential, nondeterministic
processes, and for each of them a complete axiomatization is provided.
\\[8pt]\footnotesize
{\bf Keywords}: Concurrency, Labelled transition systems,
Nondeterminism, Semantic equivalences, Linear time, Branching time,
Generative and reactive systems, Modal logic, Trace semantics, Failures
semantics, Failure trace, Ready trace, Simulation, Ready simulation,
Bisimulation, Complete axiomatizations. %, Deadlock, Termination.
\end{abstract}
\noindent
{\large\sc Table of contents}
\contentsline {subsection}{\href{Introduction}{Introduction}}{2}
\contentsline {subsection}{\numberline {1}\href{LTS}{Labelled transition systems and process graphs}}{6}
\contentsline {subsection}{\numberline {2}\href{trace}{Trace semantics}}{9}
\contentsline {subsection}{\numberline {3}\href{completed trace}{Completed trace semantics}}{11}
\contentsline {subsection}{\numberline {4}\href{failures}{Failures semantics}}{14}
\contentsline {subsection}{\numberline {5}\href{failure trace}{Failure trace semantics}}{18}
\contentsline {subsection}{\numberline {6}\href{ready trace}{Ready trace semantics}}{22}
\contentsline {subsection}{\numberline {7}\href{readiness}{Readiness semantics and possible-futures semantics}}{25}
\contentsline {subsection}{\numberline {8}\href{simulation}{Simulation semantics}}{29}
\contentsline {subsection}{\numberline {9}\href{ready simulation}{Ready simulation semantics}}{33}
\contentsline {subsection}{\numberline {10}\href{reactive}{Reactive versus generative testing scenarios}}{36}
\contentsline {subsection}{\numberline {11}\href{2-nested simulation}{2-nested simulation semantics}}{38}
\contentsline {subsection}{\numberline {12}\href{bisimulation}{Bisimulation semantics}}{39}
\contentsline {subsection}{\numberline {13}\href{tree}{Tree semantics}}{47}
\contentsline {subsection}{\numberline {14}\href{possible worlds}{Possible worlds semantics}}{48}
\contentsline {subsection}{\numberline {15}\href{summary}{Summary}}{50}
\contentsline {subsection}{\numberline {16}\href{determinism}{Deterministic and saturated processes}}{54}
\contentsline {subsection}{\numberline {17}\href{axiomatizations}{Complete axiomatizations}}{59}
\contentsline {subsection}{\numberline {18}\href{criteria}{Criteria for selecting a semantics for particular applications}}{72}
\contentsline {subsection}{\numberline {19}\href{termination}{Distinguishing deadlock and successful termination}}{77}
\contentsline {subsection}{\href{Concluding remarks}{Concluding remarks}}{80}
\advance\textheight -30 pt % make space for header
\section*{Introduction}\hname{Introduction}
\markboth{\leftheader}{Introduction}
\addcontentsline{toc}{section}{Introduction}
\paragraph{Process theory}\hindex{process theory}
A \phrase{process} is the behaviour of a system. The system can be a
machine, an elementary particle, a communication protocol, a network
of falling dominoes, a chess player, or any other system. {\em
Process theory} is the study of processes. Two main activities of
process theory are \phrase{modelling} and \phrase{verification}.
Modelling is the activity of representing processes, mostly by
mathematical structures or by expressions in a system description
language. Verification is the activity of proving statements about
processes, for instance that the actual behaviour of a system is equal
to its intended behaviour. Of course, this is only possible if a
criterion has been defined, determining whether or not two processes
are equal, i.e.\ two systems behave similarly. Such a criterion
constitutes the \phrase{semantics} of a process theory. (To be
precise, it constitutes the semantics of the equality concept employed
in a process theory.) Which aspects of the behaviour of a system are
of importance to a certain user depends on the environment in which
the system will be running, and on the interests of the particular
user. Therefore it is not a task of process theory to find the `true'
semantics of processes, but rather to determine which process
semantics is suitable for which applications.
\headheight 12 pt % default 12pt = 0.42cm
\headsep 18 pt % default 25pt = 0.88cm
\paragraph{Comparative concurrency semantics}
This paper aims at the classification of process se\-man\-tics.\footnote{%
This field of research is called \hindex{comparative concurrency semantics}%
{\em comparative concurrency\footnotemark semantics}, a terminology
first used by {\sc Meyer} in \hcite{Mey85}.}\footnotetext{Here
\phrase{concurrency} is taken to be synonymous with process theory,
although strictly speaking it is only the study of \phrase{parallel}
(as opposed to \phrase{sequential}) processes. These are the
behaviours of systems capable of performing different actions at the
same time. In this paper the term concurrency is considered to include
sequential process theory. This may be justified since much
work on sequential processes is intended to facilitate later studies
involving parallelism.} The set of possible process semantics can be
partially ordered by the relation `makes strictly more identifications
on processes than', thereby becoming a complete lattice\footnote{The
supremum of a set of process semantics is the semantics identifying
two processes whenever they are identified by every semantics in this
set.}. Now the classification of some useful process semantics can be
facilitated by drawing parts of this lattice and locating the
positions of some interesting process semantics, found in the
literature. Furthermore the ideas involved in the construction of
these semantics can be unravelled and combined in new compositions,
thereby creating an abundance of new process semantics. These
semantics will, by their intermediate positions in the semantic
lattice, shed light on the differences and similarities of the
established ones. Sometimes they also turn out to be interesting in
their own right. Finally the semantic lattice serves as a map on
which it can be indicated which semantics satisfy certain desirable
properties, and are suited for a particular class of applications.
Most semantic notions encountered in contemporary process theory can
be classified along four different lines, corresponding with four
different kinds of identifications. First there is the dichotomy of
linear time versus branching time: to what extent should one identify
processes differing only in the branching structure of their
execution paths? Secondly there is the dichotomy of
interleaving semantics versus partial order semantics: to what extent
should one identify processes differing only in the causal dependencies
between their actions (while agreeing on the possible orders of
execution)? Thirdly one encounters different treatments of abstraction
from internal actions in a process: to what extent should one identify
processes differing only in their internal or silent actions? And
fourthly there are different approaches to infinity: to what extent
should one identify processes differing only in their infinite
behaviour? These considerations give rise to a four dimensional
representation of the proposed semantic lattice.
However, at least three more dimensions can be distinguished.
In this paper, stochastic and real-time aspects of processes
are completely neglected.
Furthermore it deals with \phrase{uniform concurrency}\footnote{%
The term uniform concurrency is employed by {\sc De Bakker et al}
\hcite{BKMOZ86}.} only. This means that processes are studied,
performing actions\footnote{ Strictly speaking processes do not
perform actions, but systems do. However, for reasons of convenience,
this paper sometimes uses the word process, when actually referring to
a system of which the process is the behaviour.} $a,b,c,...$ which are
not subject to further investigations. So it remains unspecified if
these actions are in fact assignments to variables or the falling of
dominoes or other actions. If also the options are considered of
modelling (to a certain degree) the stochastic and real-time aspects
of processes and the operational behaviour of the elementary actions,
three more parameters in the classification emerge.
\paragraph{Process domains}
In order to be able to reason about processes in a mathematical way,
it is common practice to represent processes as elements of a
mathematical domain\footnote{I use the word \phrase{domain} in the
sense of \phrase{universal algebra}; it can be any class of
mathematical objects---typically the first component of an
\phrase{algebra}; the other component being a collection of operators
defined on this domain. Without further adjectives I do not refer to
the more restrictive domains employed in \phrase{domain theory}.}.
Such a domain is called a \phrase{process domain}.
The relation between the domain and the world of real processes
is mostly stated informally. The semantics of a process theory can
be modelled as an equivalence on a process domain, called a
\phrase{semantic equivalence}. In the literature one finds among others:
\begin{itemise}
\item
\phrase{graph domains}, in which a process is represented as a
{\em process graph}, or \phrase{state transition diagram},
\item
{\em net domains}, in which a process is represented as a (labelled)
\phrase{Petri net},
\item
{\em event structure domains}, in which a process is represented as a
(labelled) \phrase{event structure},
\item
\phrase{explicit domains}, where a process is represented as a
mathematically coded set of its properties,
\item
{\em projective limit domains}, which are obtained as projective
limits of series of finite term domains,
\item
and \phrase{term domains}, in which a process is represented as a term in a
system description language.
\end{itemise}
\paragraph{Action relations}
Write $p \goto{a} q$ if the process $p$ can evolve into the process
$q$, while performing the action $a$. The binary predicates
$\goto{a}$ are called \phrase{action relations}. The semantic
equivalences which are treated in this paper will be defined entirely
in terms of action relations. Hence these definitions apply to any
process domain on which action relations are defined. Such a domain
is called a \index{labelled transition system}{\em labelled transition
system}. Furthermore they will be defined \phrase{uniformly} in terms
of action relations, meaning that all actions are treated in the same
way. For reasons of convenience, even the usual distinction between
internal and external actions is dropped in this paper.
\paragraph{Finitely branching, concrete, sequential processes}
Being a first step, this paper limits itself to a very simple class of
processes. First of all only \phrase{sequential} processes are
investigated: processes capable of performing at most one action at a
time. Furthermore, instead of dropping the usual distinction between
internal and external actions, one can equivalently maintain to study
\phrase{concrete} processes: processes in which no internal actions occur.
% (and also no internal choices as in CSP \hcite{Ho85}).
For this simple class of processes the announced
semantic lattice collapses in two out of four dimensions and covers
only the {\em infinitary} \phrase{linear time -- branching time spectrum}.
Moreover, the main interest is in \phrase{finitely branching}
processes: processes having in each state only finitely many possible
ways to proceed. \Brown{The material pertaining to infinitely
branching processes---coloured brown in the electronic version of this
paper---can easily be omitted in first reading.}
\paragraph{Literature}
In the literature on uniform concurrency 12 semantics can be found,
which are uniformly definable in terms of action relations and different
on the domain of finitely branching, sequential processes (see Figure
\ref{fig-spectrum}).
\begin{figure}[htb]\small
.PS
scale = 3.5
boxwid = 0.6; boxht = 0.6
B1: box invis "\href{trace}{\it trace~semantics}" at (0,0) width 2.5
B2: box invis "~~\href{completed trace}{\it completed~trace~semantics}" at (0,2)
B3: box invis "\href{failures}{\it failures~semantics}" at (0,4)
B4: box invis "~~~~~~~\href{readiness}{\it readiness~semantics}" at (2,6)
B5: box invis "\href{failure trace}{\it failure~trace~semantics}~~~~~~~~~" at (-2,6)
B6: box invis "\href{ready trace}{\it ready~trace~semantics}" at (0,8)
PW: box invis "\href{possible worlds}{\it possible worlds semantics}" at (0,10)
B7: box invis "\href{ready simulation}{\it ready~simulation~semantics}" at (0,12) width 4
B8: box invis "\href{2-nested simulation}{\it 2-nested~simulation~semantics}" at (0,14) width 2.5
B9: box invis "\href{bisimulation}{\it bisimulation~semantics}" at (0,16)
S1: box invis "\href{simulation}{\it simulation~semantics}" at (-7,5) width .5
PF: box invis "\href{possible-futures}{\it possible-futures~semantics}" at (5,11)
B10: box invis "(\href{tree}{\it tree~semantics})" at (0,18)
arrow from B2.s to B1.n
arrow from B3.s to B2.n
arrow from B4.sw to B3.ne
arrow from B5.se to B3.nw
arrow from B6.se to B4.nw
arrow from B6.sw to B5.ne
arrow from B7.s to PW.n +(0,.1)
arrow from PW.s to B6.n
arrow from B8.s to B7.n
arrow from B9.s to B8.n
arrow from B10.s to B9.n
arrow from B7.sw to S1.ne
arrow from S1.se to B1.nw
arrow from B8.se to PF.nw
arrow from PF.sw to B4.ne
.PE
\centerline{\box\graph}
\caption{The linear time -- branching time spectrum\hlabel{fig-spectrum}}
\end{figure}
The coarsest one (i.e.\ the semantics making the most identifications)
is \index{trace semantics}{\em trace semantics}, as presented in {\sc
Hoare} \hcite{Ho80}. In trace semantics only \phrase{partial traces}
are employed. The finest one (making fewer identifications than any of
the others) is \index{bisimulation semantics}{\em bisimulation
semantics}, as presented in {\sc Milner} \hcite{Mi83}. Bisimulation
semantics is the standard semantics for the system description
language CCS ({\sc Milner} \hcite{Mi80}). The notion of bisimulation
was introduced in {\sc Park} \hcite{Pa81}. Bisimulation equivalence
is a refinement of \index{observational equivalence}{\em observational
equivalence}, as introduced by {\sc Hennessy \& Milner} in
\hcite{HM80-85}. On the domain of finitely branching, concrete,
sequential processes, both equivalences coincide. Also the semantics
of {\sc De Bakker \& Zucker}, presented in \hcite{BZ82}, coincides
with bisimulation semantics on this domain. Then there are ten
semantics in between. First of all a variant of trace semantics can
be obtained by using {\em complete traces} besides partial ones. In
this paper it is called \index{completed trace semantics}{\em
completed trace semantics}. {\em Failures semantics}\index{failures
semantics} is introduced in {\sc Brookes, Hoare \& Roscoe}
\hcite{BHR84}, and used in the construction of a model for the system
description language CSP ({\sc Hoare} \href{Ho78}{\cite{Ho78,Ho85}}).
It is finer than completed trace semantics. The semantics based on
\phrase{testing equivalences}, as developed in {\sc De Nicola \&
Hennessy} \hcite{DH84}, coincides with failures semantics on the
domain of finitely branching, concrete, sequential processes, as do
the semantics of {\sc Kennaway} \hcite{Ke81} and {\sc Darondeau}
\hcite{Da82}. This has been established in {\sc De Nicola}
\hcite{DN87}. In {\sc Olderog \& Hoare} \hcite{OH86} \index{readiness
semantics}{\em readiness semantics} is presented, which is slightly
finer than failures semantics. Between readiness and bisimulation
semantics one finds \index{ready trace semantics}{\em ready trace
semantics}, as introduced independently in {\sc Pnueli} \hcite{Pn85}
(there called \phrase{barbed semantics}), {\sc Baeten, Bergstra \&
Klop} \hcite{BBK87b} and {\sc Pomello} \hcite{Pm86} (under the name
\phrase{exhibited behaviour semantics}). The natural completion of
the square, suggested by failures, readiness and ready trace semantics
yields \index{failure trace semantics}{\em failure trace semantics}.
For finitely branching processes this is the same as \phrase{refusal
semantics}, introduced in {\sc Phillips} \hcite{Ph87}.
\index{simulation semantics}{\em Simulation semantics}, based on the
classical notion of \index{simulation}{\em simulation} (see e.g.\ {\sc
Park} \hcite{Pa81}), is independent of the last five semantics.
\index{ready simulation semantics}{\em Ready simulation semantics} was
introduced in {\sc Bloom, Istrail \& Meyer} \hcite{BIM95} under the
name \index{GSOS trace congruence}{\em GSOS trace congruence}. It is
finer than ready trace as well as simulation semantics. In {\sc
Larsen \& Skou} \hcite{LS91} a more operational characterization of
this equivalence was given under the name
\index{$\frac{2}{3}$-bisimulation equivalence}{\em
$\frac{2}{3}$-bisimulation equivalence}. The (denotational) notion of
\index{possible worlds semantics}{\em possible worlds semantics} of
{\sc Veglioni \& De Nicola} \hcite{VD98} fits between ready trace and
ready simulation semantics. Finally \index{2-nested simulation
semantics}{\em 2-nested simulation semantics}, introduced in {\sc
Groote \& Vaandrager} \hcite{GrV92}, is located between ready
simulation and bisimulation semantics, and \index{possible-futures
semantics}{\em possible-futures semantics}, as proposed in {\sc Rounds
\& Brookes} \hcite{RB81}, can be positioned between 2-nested
simulation and readiness semantics.
{\em Tree semantics}\index{tree semantics}, employed in {\sc Winskel}
\hcite{Wi84b}, is even finer than bisimulation semantics. However, a
proper treatment requires more than mere action relations.
\vspace{-5pt}
\paragraph{About the contents}
The first section of this paper introduces labelled transition systems
and process graphs. A labelled transition system is any process domain
that is equipped with action relations. The domain of {\em process graphs}
or {\em state transition diagrams} is one of the most popular labelled
transition systems. In Sections \ref{trace}--\ref{possible worlds} all
semantic equivalences mentioned above are defined on arbitrary labelled
transition systems. In particular these definitions apply to the domain
of process graphs. Most of the equivalences can be motivated by the
observable behaviour of processes, according to some testing scenario.
(Two processes are equivalent if they allow the same set of possible
observations, possibly in response to certain experiments.) I will try
to capture these motivations in terms of \phrase{button pushing
experiments} (cf.\ {\sc Milner} \hcite{Mi80}, pp.\ 10--12). Furthermore
the semantics will be partially ordered by the relation `makes at
least as many identifications as'. This yields the linear time --
branching time spectrum. Counterexamples are provided, showing that
on the graph domain this ordering cannot be further expanded. However,
for deterministic processes the spectrum collapses, as was first
observed by {\sc Park} \hcite{Pa81}. \sect{determinism}
describes various other classes of processes on which parts of the
spectrum collapse. In \sect{axiomatizations}, the
semantics are applied to a simple language for finite, concrete,
sequential, nondeterministic processes, and for twelve of them a
complete axiomatization is provided. \sect{criteria}
applies a few criteria indicating which semantics are suitable for
which applications. Finally, in \sect{termination} the work of
this paper is extended to labelled transition systems that distinguish
between deadlock and successful termination.
With each of the semantic equivalences treated in this paper (except
for tree semantics) a preorder is associated that may serve as an
implementation relation between processes. The results obtained for
the equivalences are extended to the associated preorders as well.
\vspace{-5pt}
\paragraph{Acknowledgment}
My thanks to Tony Hoare for suggesting that the axioms of \tab{axioms}
could be simplified along the lines of \tab{axioms BCSP}.
\pagebreak[3]
\section{Labelled transition systems and process graphs}
\subsection{Labelled transition systems}\hlabel{LTS}
In this paper processes will be investigated that are capable of
performing actions from a given set $Act$. By an \phrase{action} any
activity is understood that is considered as a conceptual entity on a
chosen level of abstraction. Actions may be instantaneous or durational
and are not required to terminate, but in a finite time only finitely
many actions can be carried out. Any activity of an investigated
process should be part of some action $a \in Act$ performed by the
process. Different activities that are indistinguishable on the chosen
level of abstraction are interpreted as occurrences of the same action
$a \in Act$.
A process is \phrase{sequential} if it can perform at most one action
at the same time. In this paper only sequential processes will be
considered. A class of sequential processes can often be conveniently
represented as a labelled transition system. This is a domain $\IP$
on which infix written binary predicates $\goto{a}$ are defined for
each action $a \in Act$. The elements of $\IP$ represent processes,
and $p \goto{a} q$ means that $p$ can start performing the action $a$
and after completion of this action reach a state where $q$ is its
remaining behaviour. In a labelled transition system it may happen
that $p \goto{a} q$ and \plat{p \goto{b} r} for different actions $a$
and $b$ or different processes $q$ and $r$. This phenomenon is called
\phrase{branching}. It need not be specified how the choice between
the alternatives is made, or whether a probability distribution can be
attached to it.
Certain actions may be synchronizations of a process with its
environment, or the receipt of a signal sent by the environment.
Naturally, these actions can only occur if the environment
cooperates. In the labelled transition system representation of
processes all these potential actions are included, so $p \goto{a} q$
merely means that there is an environment in which the action $a$ can
occur.\\[10pt]
{\bf Notation:} For any alphabet $\Sigma$, let $\Sigma^\ast$ be the
set of finite sequences and $\Sigma^\infty$ the set of infinite
sequences over $\Sigma$. $\Sigma^\omega := \Sigma^\ast \cup
\Sigma^\infty$. Write $\epsilon$ for the empty sequence, $\sigma
\rho$ for the concatenation of $\sigma \in \Sigma^\ast$ and $\rho \in
\Sigma^\omega$, and $a$ for the sequence consisting of the single
symbol $a \in \Sigma$.
\begin{definition}{LTS} A \phrase{labelled transition system} is a
pair $( \IP , \rightarrow )$ with $\IP$ a class and $\rightarrow\;
\subseteq \IP \times Act \times \IP$, such that for $p \in \IP$ and $a
\in Act$ the class $\{q \in \IP \mid (p,a,q) \in\; \rightarrow\}$ is a set.
\end{definition}
Most of this paper should be read in the context of a given labelled
transition system $( \IP , \rightarrow )$, ranged over by $p,q,r,...$.
Write $p \goto{a} q$ for $(p,a,q) \in \; \rightarrow$. The binary
predicates $\goto{a}$ are called \phrase{action relations}.
\begin{definition}{action relations}
(Remark that the following concepts are defined in terms of
action relations only)
\begin{itemise}
\item
The \phrase{generalized action relations} $\goto{\sigma}$ for $\sigma
\in Act^\ast$ are defined recursively by:
\begin{enumerate}
\item
$p \goto{\epsilon} p$, for any process $p$.
\item
$(p,a,q) \in \; \rightarrow$ with $a \in Act$ implies $p \goto{a} q$
with $a \in Act^\ast$.
\item
\plat{p \goto{\sigma} q \goto\rho r} implies \plat{p \goto{\sigma\rho} r}.
\end{enumerate}
In words: the generalized action relations $\goto{\sigma}$ are the
reflexive and transitive closure of the ordinary action relations
$\goto{a}$.
$p \goto{\sigma} q$ means that $p$ can evolve into $q$, while performing
the sequence $\sigma$ of actions.
Remark that the overloading of the notation $p \goto{a} q$ is quite harmless.
\item
A process $q \in \IP$ is \phrase{reachable} from $p \in \IP$ if $p
\goto{\sigma} q$ for some $\sigma \in Act^*$.
\item
The set of \phrase{initial actions} of a process $p$ is defined by:
$I(p)=\{a \in Act \mid \exists q: ~ p \goto{a} q\}$.
\item
A process $p \in \IP$ is \phrase{finite} if the set $\{(\sigma,q) \in
(Act^* \times \IP) \mid p \goto{\sigma} q\}$ is finite.
\item
$p$ is \phrase{image finite} if for each $\sigma \in
Act^*$ the set $\{q \in \IP \mid p \goto{\sigma} q\}$ is finite.
\item
$p$ is \phrase{deterministic} if $p \goto{\sigma} q \wedge
p \goto{\sigma} r \Rightarrow q=r$.
\item
$p$ is \phrase{well-founded} if there is no infinite
sequence $p \goto{a_1} p_1 \goto{a_2} p_2 \goto{a_3} \cdots$.
\item
$p$ is \phrase{finitely branching} if for each $q$ reachable from $p$, the set
$\{(a,r) \!\in\! Act \times \IP \mid q \goto{a} r\}$ is finite.
\end{itemise}
\end{definition}
Note that a process $p \in \IP$ is image finite iff for each $q \in
\IP$ reachable from $p$ and each $a \in Act$, the set $\{r \in \IP
\mid q \goto{a} r\}$ is finite. Hence finitely branching processes
are image finite. Moreover, by K\"onig's lemma a process is finite
iff it is well-founded and finitely branching.
\subsection{Process graphs}
\begin{definition}{process graph} A \phrase{process graph} over an
alphabet $Act$ is a rooted, directed graph whose edges are
labelled by elements of $Act$. Formally, a process graph $g$ is a
triple $(\nd(g), \rt(g), \ed(g))$, where
\begin{itemise}
\item
$\nd (g)$ is a set, of which the elements are called the \phrase{nodes}
or \phrase{states} of $g$,
\item
$\rt (g) \in \nd (g)$ is a special node: the \phrase{root} or \phrase{initial
state} of $g$,
\item
and $\ed (g) \subseteq \nd (g) \times Act \times \nd (g)$ is a set of triples
$(s,a,t)$ with $s,t \in \nd (g)$ and $a \in Act$: the \phrase{edges} or
\phrase{transitions} of $g$.
\end{itemise}
\end{definition}
If $e=(s,a,t) \in \ed (g)$, one says that $e$ {\em goes from $s$ to $t$}.
A (finite) \phrase{path} $\pi$ in a process graph is an alternating sequence
of nodes and edges, starting and ending with a node, such that each edge
goes from the node before it to the node after it. If
$\pi = s_0 (s_0 , a_1 ,s_1 ) s_1 (s_1 , a_2 ,
s_2 ) \cdots (s_{n-1} , a_n , s_n ) s_n$, also denoted as
$\pi : s_0 \goto{a_1} s_1 \goto{a_2} \cdots \goto{a_n} s_n$, one says that
$\pi$ {\em goes from $s_0$ to $s_n$}; it {\em starts} in $s_0$ and
{\em ends} in $end( \pi )= s_n$. Let $\pd (g)$ be the set of
paths in $g$ starting from the root. If $s$ and $t$ are nodes in a
process graph then {\em $t$ can be reached from $s$} if there is a path
going from $s$ to $t$. A process graph is said to be \phrase{connected} if
all its nodes can be reached from the root; it is a \phrase{tree} if each
node can be reached from the root by exactly one path. Let $\IG$ be
the domain of connected process graphs over a given alphabet $Act$.
\begin{definition}{isomorphism}
Let $g,h \in \IG$. A \phrase{graph isomorphism} between $g$ and $h$ is a
bijective function $f: \nd (g) \rightarrow \nd (h)$ satisfying
\begin{itemise}
\item
$f( \rt (g))= \rt (h)$ and
\item
$(s,a,t) \in \ed (g) ~~ \Leftrightarrow ~~ (f(s),a,f(t)) \in \ed (h)$.
\end{itemise}
Graphs $g$ and $h$ are \phrase{isomorphic}, notation $g \cong h$, if there
exists a graph isomorphism between them.
\end{definition}
In this case $g$ and $h$ differ only in the identity of their nodes.
Remark that graph isomorphism is an equivalence relation on $\IG$.
Connected process graphs can be pictured by using
open dots ($\circ$) to denote nodes, and labelled arrows to denote edges,
as can be seen further on. There is no need to mark the
root of such a process graph if it can be recognized as the unique node
without incoming edges, as is the case in all my examples. These
pictures determine process graphs only up to graph isomorphism,
but usually this suffices since it is virtually never needed
to distinguish between isomorphic graphs.
\begin{definition}{subgraph} For $g \in \IG$ and $s \in \nd (g)$, let
$g_s$ be the process graph defined by
\begin{itemise}
\item
$\nd (g_s ) = \{t \in \nd (g)\mid \mbox{there is a path going from $s$ to
$t$}\}$,
\item
$\rt (g_s ) = s \in \nd (g_s )$,
\item
and $(t,a,u) \in \ed (g_s )$ iff $t,u \in \nd (g_s )$
and $(t,a,u) \in \ed (g)$.
\end{itemise}
\end{definition}
Of course $g_s \in \IG$. Note that $g_{\rt (g)} = g$.
Now on $\IG$ action relations $\goto{a}$ for $a \in Act$ are defined by
$g \goto{a} h$ iff $( \rt (g),a,s) \in \ed (g)$ and $h=g_s$.
This makes $\IG$ into a labelled transition system.
\subsection[Embedding labelled transition systems]{Embedding labelled transition systems in \IG}
Let $(\IP,\rightarrow)$ be an arbitrary labelled transition system and
let $p \in \IP$. The \phrase{canonical graph} $G(p)$ of $p$ is defined
as follows:
\begin{itemise}
\item
$\nd (G(p))=\{q \in \IP \mid \exists \sigma \in Act^* : ~ p \goto{\sigma} q\}$,
\item
$\rt (G(p))=p \in \nd (G(p))$,
\item
and $(q,a,r) \in \ed (G(p))$ iff $q,r \in \nd (G(p))$ and $q \goto{a} r$.
\end{itemise}
Of course $G(p) \in \IG$. This means $G$ is a function from $\IP$ to $\IG$.
\begin{proposition}{embedding}
$G: \IP \rightarrow \IG$ is injective and satisfies,
for $a \in Act$: $G(p) \goto{a} G(q) \Leftrightarrow p \goto{a} q$.
Moreover, $G(p) \goto{a} h$ only if $h$ has the form $G(q)$ for some
$q \in \IP$ (with $p \goto{a} q$).\\
\pf Trivial.
\end{proposition}
\pr{embedding} says that $G$ is an \phrase{embedding} of $\IP$ in
$\IG$. It implies that any labelled transition system over $Act$
can be represented as a subclass $G( \IP )=\{G(p) \in \IG \mid
p \in \IP \}$ of $\IG$.
Since $\IG$ is also a labelled transition system, $G$ can be applied
to $\IG$ itself. The following proposition says that the function $G:
\IG \rightarrow \IG$ leaves its arguments intact up to graph isomorphism.
\begin{proposition}{selfembedding}
For $g \in \IG$, $G(g) \cong g$.
\\\pf
Remark that $\nd (G(g))=\{g_s \mid s \in \nd (g)\}$.\\ Now the function
$f: \nd (G(g)) \rightarrow \nd (g)$ defined by $f(g_s)=s$ is a
graph isomorphism.
\end{proposition}
\subsection{Equivalence relations and preorders on labelled
transition systems}\hlabel{equivalences}
This paper studies semantics on labelled transition systems. Each of
the semantics examined here (except for tree semantics) is defined or
characterized in terms of a
function $\fO$ that associates with every process $p \in \IP$ a set
${\fO}(p)$. In most cases the elements of ${\fO}(p)$ can be
regarded as the possible observations one could make while interacting
with the process $p$ in the context of a particular testing
scenario. The set ${\fO}(p)$ then constitutes the observable
behaviour of $p$. For every such $\fO$, the equivalence relation
$=_{\fO} \;\subseteq \IP \times \IP$ is given by $p =_{\fO} q
\Leftrightarrow {\fO}(p) = {\fO}(q)$, and the preorder
$\sqsubseteq_{\fO} \;\subseteq \IP \times \IP$ by $p \sqsubseteq_{\fO}
q \Leftrightarrow {\fO}(p) \subseteq {\fO}(q)$. Obviously $p
=_{\fO} q \Leftrightarrow p \sqsubseteq_{\fO} q \wedge q
\sqsubseteq_{\fO} p$. The semantic equivalence $=_{\fO}$
partitions $\IP$ into equivalence classes of processes that
are indistinguishable by observation (using observations of type
$\fO$). The preorder $\sqsubseteq_{\fO}$ moreover provides a
partial order between these equivalence classes; one that could be
taken to constitute an ``implementation'' relation. The associated
\phrase{semantics}, also called $\fO$, is the criterion that
identifies two processes whenever they are $\fO$-equivalent. Two
semantics are considered the same if the associated equivalence
relations are the same.
As the definitions of $\fO$ are given entirely in terms of action
relations, they apply to any labelled transition system
$\IP$. Moreover, the definitions of ${\fO}(p)$ involve only action
relations between processes reachable from $p$. Thus \pr{embedding}
implies that ${\fO}(G(p)) = {\fO} (p)$. This in turn yields
\begin{corollary}{embedding}
$p \sqsubseteq_{\fO} q$ iff $G(p) \sqsubseteq_{\fO} G(q)$ and
$p =_{\fO} q$ iff $G(p) =_{\fO} G(q)$.
\end{corollary}
Write ${\fO} \preceq_{\IP} {\fN}$ if semantics ${\fO}$
makes at least as many identifications as semantics ${\fN}$. This
is the case if the equivalence corresponding with ${\fO}$ is equal
to or coarser than the one corresponding with ${\fN}$, i.e.\ if $p
=_{\fN} q \Rightarrow p =_{\fO} q$ for all $p,q \in \IP$. Let
$\preceq$ abbreviate $\preceq_{\IG}$. The following is then immediate
by \cor{embedding}.
\begin{corollary}{coarser}
${\fO} \preceq {\fN}$ iff ${\fO} \preceq_{\IP} {\fN}$ for
{\em each} labelled transition system $\IP$.\\
On the other hand, ${\fO} \not\preceq {\fN}$ iff ${\fO}
\not\preceq_{\IP} {\fN}$ for {\em some} labelled transition
system $\IP$.
\end{corollary}
Write ${\fO} \preceq_{\IP}^* {\fN}$ if $p \sqsubseteq_{\fN} q
\Rightarrow p \sqsubseteq_{\fO} q$ for all $p,q \in \IP$, and let
$\preceq^*$ abbreviate $\preceq^*_{\IG}$. By definition $\fO \preceq^*
\fN \Rightarrow \fO \preceq \fN$ for all semantics $\fO$ and $\fN$.
The reverse does not hold by definition, but it will be shown to hold
for all semantics discussed in this paper (cf.\ \sect{summary}).
\subsection{Initial nondeterminism}\hlabel{initial nondeterminism}
In a process graph it need not be determined in which state one ends
after performing a nonempty sequence of actions. This phenomenon is
called \phrase{nondeterminism}. However, process graphs as defined
above are not capable of modelling \phrase{initial nondeterminism}, as
there is only one initial state. This can be rectified by considering
\phrase{process graphs with multiple roots}, in which $\mbox{\sc
roots}(g)$ may be any nonempty subset of $\nd(g)$---let $\IG^{\it mr}$
be the class of such connected process graphs. A process graph with
multiple roots can also be regarded as a nonempty set of process
graphs with single roots. More generally, initial nondeterminism can
be modelled in any labelled transition system $\IP$ by regarding the
nonempty subsets of $\IP$ (rather than merely its elements) to be
processes. The elements of a process $P \subseteq \IP$ then represent
the possible initial states of $P$.
Now any notion of observability $\fO$ on $\IP$ extends to processes
with initial nondeterminism by defining $\fO(P) = \bigcup_{p \in
P}\fO(p)$ for $P \subseteq \IP$. Thus also the equivalences $=_\fO$
and preorders $\sqsubseteq_\fO$ are defined on such processes.
Write ${\fO} \preceq'_{\IP} {\fN}$ if $P =_{\fN} Q \Rightarrow P
=_{\fO} Q$ for all nonempty $P,Q \subseteq \IP$, and let $\preceq'$
abbreviate $\preceq'_{\IG}$. Clearly, one has $\fO \preceq'
\fN \Rightarrow \fO \preceq \fN$ for all semantics $\fO$ and $\fN$.
Let $g$ be a process graph over $Act$ with multiple roots. Let $i$ be
an action ({\em initialize}) which is not in $Act$. Define $\rho(g)$
as the process graph over $Act \cup \{i\}$ obtained from $g$ by adding
a new state $*$, which will be the root of $\rho(g)$, and adding a
transition $(*,i,r)$ for every $r \in \mbox{\sc roots}(g)$. Now for every
semantics $\fO$ to be discussed in this paper it will be the case that $g
\sqsubseteq_\fO h \Leftrightarrow \rho(g) \sqsubseteq_\fO \rho(h)$, as
the reader may easily verify for each such $\fO$. From this it follows
that we have in fact $\fO \preceq' \fN \Leftrightarrow \fO \preceq
\fN$ for all semantics $\fO$ and $\fN$ treated in this paper. This
justifies focusing henceforth on process graphs with single roots and
processes as mere elements of labelled transition systems.
\section{Trace semantics}\hlabel{trace}
\paragraph{\Purple{Definition \ref{trace}}}
$\sigma \in Act^\ast$ is a \phrase{trace} of a process $p$ if there is a
process $q$ such that $p \goto{\sigma} q$. Let $T(p)$ denote the set
of traces of $p$. Two processes $p$ and $q$ are \phrase{trace
equivalent}, notation $p =_T q$, if $T(p)=T(q)$. In \phrase{trace semantics}
($T$) two processes are identified iff they are trace equivalent.
\paragraph{\Purple{Testing scenario}}
Trace semantics is based on the idea that two processes are to be
identified if they allow the same set of observations, where an
observation simply consists of a sequence of actions performed by the
process in succession.
\paragraph{\Purple{Modal characterization}}
\begin{definition}{modal trace}
The set $\fL_T$ of \phrase{trace formulas} over $Act$ is defined
recursively by:
\begin{itemise}
\item
$\top \in \fL_T$.
\item
If $\phi \in \fL_T$ and $a \in Act$ then $a \phi \in \fL_T$.
\end{itemise}
The \phrase{satisfaction relation} $\models \; \subseteq \IP \times
\fL_T$ is defined recursively by:
\begin{itemise}
\item
$p \models \top$ for all $p \in \IP$.
\item
$p \models a \phi$ if for some $q \in \IP$: $p \goto{a} q$ and
$q \models \phi$.
\end{itemise}
\end{definition}
Note that a trace formula satisfied by a process $p$ represents
nothing more or less than a trace of $p$. Hence one has
\begin{proposition}{modal trace}
$p =_T q ~\Leftrightarrow~ \forall \phi \in \fL_T ( p \models \phi
\Leftrightarrow q \models \phi )$.
\end{proposition}
\paragraph{\Purple{Process graph characterization}}
Let $g \in \IG^{\it mr}$ and $\pi : ~ s_0 \goto{a_1} s_1 \goto{a_2} \cdots
\goto{a_n} s_n \in \pd(g)$. Then $T(\pi) := a_1 a_2 \cdots a_n \in
Act^\ast$ is the \phrase{trace} of $\pi$. As $\IG$ is a labelled
transition system, $T(g)$ is defined above. Alternatively, it could be
defined as the set of traces of paths of $g$. It is easy to see that
these definitions are equivalent:
\begin{proposition}{trace paths}
$T(g) = \{T(\pi) \mid \pi \in \pd(g)\}$.
\end{proposition}
\paragraph{\Purple{Explicit model}}
In trace semantics a process can be represented by a trace equivalence
class of process graphs, or equivalently by the set of its
traces. Such a \phrase{trace set} is always nonempty and
prefix-closed. The next proposition shows that the domain $\IT$
of trace sets is in bijective correspondence with the domain
$\IG/_{=_T}$ of process graphs modulo trace equivalence, as well as
with the domain $\IG^{\it mr}/_{=_T}$ of process graphs with multiple
roots modulo trace equivalence. Models of concurrency like $\IT$, in
which a process is not represented as an equivalence class but rather
as a mathematically coded set of its properties, are sometimes
referred to as \phrase{explicit models}.
\begin{definition}{explicit trace}
The \phrase{trace domain} $\IT$ is the set of subsets ${\rm T}$ of
$Act^\ast$ satisfying
\begin{center}
\begin{tabular}{ll}
T1 & $\epsilon \in {\rm T}$, \\
T2 & $\sigma\rho \in {\rm T} ~\Rightarrow~ \sigma \in {\rm T}$.
\end{tabular}
\end{center}
\end{definition}
\begin{proposition}{explicit trace}
${\rm T} \in \IT \Leftrightarrow \exists g \in \IG: T(g)={\rm T}
\Leftrightarrow \exists g \in \IG^{\it mr}: T(g)={\rm T}$.\\ \pf Let
${\rm T} \in \IT$. Define the \phrase{canonical graph} $G(\mbox{T})$
of T by $\nd(G(\mbox{T}))={\rm T}$, $\rt(G(\mbox{T}))=\epsilon$ and
$(\sigma,a,\rho) \in \ed(G(\mbox{T}))$ iff $\rho = \sigma a$. As T
satisfies T2, $G(\mbox{T})$ is connected, i.e.\ $G(\mbox{T})\in \IG$.
In fact, $G(\mbox{T})$ is a tree. Moreover, for every path $\pi \in
\pd(G(\mbox{T}))$ one has $T(\pi)=end(\pi)$. Hence, using \pr{trace
paths}, $T(G(\mbox{T}))={\rm T}$.
For the remaining two implications, note that $\IG \subseteq \IG^{\it
mr}$, and the trace set $T(g)$ of any graph $g \in \IG^{\it mr}$
satisfies T1 and T2.
\end{proposition}
$\IT$ was used as a model of concurrency in {\sc Hoare} \hcite{Ho80}.
\paragraph{\Purple{Infinite processes}}\textBrown
For infinite processes one distinguishes two variants of trace
semantics: {\em (finitary) trace semantics} as defined above, and
\phrase{infinitary trace semantics} ($T^\infty$), obtained by taking
infinite runs into account.
\begin{definition}{infinitary trace}\mbox{}
$a_1 a_2 \cdots \in Act^\infty$ is an \phrase{infinite
trace} of a process $p \in \IP$ if there are processes $p_1 ,
p_2 , ...$ such that $p \goto{a_1} p_1 \goto{a_2} \cdots$.
Let $T^\infty (p)$ denote the set of infinite traces of $p$.
Two processes $p$ and $q$ are \phrase{infinitary trace equivalent},
notation $p =_T^\infty q$, if $T(p)=T(q)$ and $T^\infty (p)=T^\infty (q)$.
\end{definition}
Clearly $p =_T^\infty q \Rightarrow p =_T q$. That on $\IG$ the
reverse does not hold follows from \ctr{infinitary}:%
\begin{counterexample}[htb]
\Black{
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
LC1: circle at (-4,2)
LC2: circle at (-6,1)
LC3: circle at (-5,1)
LC4: circle at (-4,1)
LC5: circle at (-3,1)
LC3b: circle at (-5,0)
LC4b: circle at (-4,0)
LC5b: circle at (-3,0)
LC4c: circle at (-4,-1)
LC5c: circle at (-3,-1)
LC5d: circle at (-3,-2)
LB1: box invis "$a$" at (-4.61,1.54)
LB2: box invis "$a$" at (-3.32,1.50)
LB1: box invis "$a$" at (-5.04,1.68)
LB2: box invis "$a$" at (-4.15,1.5)
LB3: box invis "$a$" at (-5.2,.5)
LB3: box invis "$a$" at (-4.2,.5)
LB3: box invis "$a$" at (-3.2,.5)
LB4: box invis "$\cdots$" at (-2,1)
LB3: box invis "$a$" at (-4.2,-.5)
LB3: box invis "$a$" at (-3.2,-.5)
LB3: box invis "$a$" at (-3.2,-1.5)
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC1 to LC3 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
arrow from LC1 to LC5 chop circlerad chop circlerad
arrow from LC3 to LC3b chop circlerad chop circlerad
arrow from LC4 to LC4b chop circlerad chop circlerad
arrow from LC5 to LC5b chop circlerad chop circlerad
arrow from LC4b to LC4c chop circlerad chop circlerad
arrow from LC5b to LC5c chop circlerad chop circlerad
arrow from LC5c to LC5d chop circlerad chop circlerad
RC1: circle at (4,2)
RC2: circle at (2,1)
RC3: circle at (3,1)
RC4: circle at (4,1)
RC5: circle at (5,1)
RC7: circle at (7,1)
RC3b: circle at (3,0)
RC4b: circle at (4,0)
RC5b: circle at (5,0)
RC7b: circle at (7,0)
RC4c: circle at (4,-1)
RC5c: circle at (5,-1)
RC7c: circle at (7,-1)
RC5d: circle at (5,-2)
LB1: box invis "$a$" at (2.96,1.68)
LB1: box invis "$a$" at (4.68,1.50)
LB2: box invis "$a$" at (3.39,1.55)
LB2: box invis "$a$" at (3.85,1.5)
LB3: box invis "$a$" at (2.8,.5)
LB3: box invis "$a$" at (3.8,.5)
LB3: box invis "$a$" at (4.8,.5)
LB4: box invis "$\cdots$" at (6,1)
LB5: box invis "$a$" at (6,1.5)
LB3: box invis "$a$" at (3.8,-.5)
LB3: box invis "$a$" at (4.8,-.5)
LB3: box invis "$a$" at (6.8,.5)
LB3: box invis "$a$" at (4.8,-1.5)
arrow from RC1 to RC2 chop circlerad chop circlerad
arrow from RC1 to RC3 chop circlerad chop circlerad
arrow from RC1 to RC4 chop circlerad chop circlerad
arrow from RC1 to RC5 chop circlerad chop circlerad
arrow from RC1 to RC7 chop
arrow from RC3 to RC3b chop circlerad chop circlerad
arrow from RC4 to RC4b chop circlerad chop circlerad
arrow from RC5 to RC5b chop circlerad chop circlerad
arrow from RC7 to RC7b chop circlerad chop circlerad
arrow from RC4b to RC4c chop circlerad chop circlerad
arrow from RC5b to RC5c chop circlerad chop circlerad
arrow from RC5c to RC5d chop circlerad chop circlerad
arrow from RC7b to RC7c dashed chop circlerad chop circlerad
arrow from RC7c to (7,-2) dotted chop circlerad chop circlerad
B3: box invis "$=_{B}^\omega ~$" at (0,1)
B3: box invis "$\neq_T^\infty \,$" at (0,0)
B3: box invis "$\neq_{\it PF}$" at (0,-1)
.PE
\centerline{\raise 1em\box\graph}}
\caption{Finitary equivalent but not infinitary equivalent\hlabel{infinitary}}
\end{counterexample}
one has $T({\it left}) = T({\it right}) = \{a^n \mid n \in \IN\}$, but
$T^\infty({\it left}) \neq T^\infty({\it right})$, as only the graph
at the right has an infinite trace.
However, with K\"onig's lemma one easily proves that for image finite
processes finitary and infinitary trace equivalence coincide:
\begin{proposition}{koenig}
Let $p$ and $q$ be image finite processes with $p =_T q$. Then $p
=_T^\infty q$.\\
\pf It is sufficient to show that $T^\infty(p)$ can be expressed in
terms of $T(p)$ for any image finite process $p$. In fact,
$T^\infty(p)$ consists of all those infinite traces for which all
finite prefixes are in $T(p)$. One direction of this statement is
trivial: if $\sigma \in T^\infty(p)$, all finite prefixes of $\sigma$
must be in $T(p)$. For the other direction suppose that, for $i \in
\IN$, $a_i \in Act$ and $a_1 a_2 \cdots a_i \in T(p)$. With induction on
$i \in \IN$ one can show that there exist processes $p_i$ such that
$i=0$ and $p_0=p$, or $p_{i-1} \goto{a_i} p_i$, and for every $j\geq i$
one has $a_{i+1} a_{i+2} \cdots a_j \in T(p_i)$. The existence of
these $p_i$'s immediately entails that $a_1a_2a_3 \cdots \in
T^\infty(p)$. The base case ($i=0$) is trivial. Suppose the claim
holds for certain $i$. For every $j \geq i+1$ there must be a process
$q$ with \mbox{$p_i \;-\hspace{-11pt}\stackrel{\raisebox{-2pt}[0pt][0pt]
{$\scriptstyle a_{i+1}$}}{-}\hspace{-7pt}\rightarrow q$} and
$a_{i+2}a_{i+3}\cdots a_j \in T(q)$. As there are only finitely many
processes $q$ with
\mbox{$p_i \;-\hspace{-11pt}\stackrel{\raisebox{-2pt}[0pt][0pt]{$\scriptstyle
a_{i+1}$}}{-}\hspace{-7pt} \rightarrow q$}, there must be one choice
of $q$ for which $a_{i+2}a_{i+3}\cdots a_j \in T(q)$ for infinitely
many values of $j$. Take this $q$ to be $p_{i+1}$. As $T(p_{i+1})$ is
prefix-closed, one has $a_{i+2}a_{i+3}\cdots a_j \in T(p_{i+1})$ for
{\em all} $j \geq i+1$.
\end{proposition}
An explicit representation of infinitary trace semantics is obtained
by taking the subsets T of $Act^\omega$ satisfying T1 and T2.
\textBlack
\section{{Completed trace semantics}}\hlabel{completed trace}
\paragraph{\Purple{Definition \ref{completed trace}}}
$\sigma \in Act^\ast$ is a \phrase{complete trace} of a process $p$,
if there is a process $q$ such that $p \goto{\sigma} q$ and $I(q)=
\emptyset$. Let $CT(p)$ denote the set of complete traces of $p$.
Two processes $p$ and $q$ are \phrase{completed trace equivalent},
notation $p =_{CT} q$, if $T(p)=T(q)$ and $CT(p)=CT(q)$. In
\phrase{completed trace semantics} ($CT$) two processes are identified
iff they are completed trace equivalent.
\paragraph{\Purple{Testing scenario}}
Completed trace semantics can be explained with the following (rather
trivial) \phrase{completed trace machine}.
\begin{figure}[htb]
.PS
scale = 3.54
boxwid = 1.5; boxht = 1.5
circlerad = 0.25
box wid 8 height 4 at (4,2) fill
box "\Blue{\LARGE $b$}" at (6,2) fill 0
.PE
\centerline{\raise 1em\box\graph}
\caption{The completed trace machine}
\end{figure}
The process is modelled as a black box that contains as its interface
to the outside world a display on which the name of the action is
shown that is currently carried out by the process. The process
autonomously chooses an execution path that is consistent with its
position in the labelled transition system $( \IP , \rightarrow )$.
During this execution always an action name is visible on the display.
As soon as no further action can be carried out, the process reaches a
state of deadlock and the display becomes empty. Now the existence of
an observer is assumed that watches the display and records the
sequence of actions displayed during a run of the process, possibly
followed by deadlock. It is assumed that an observation takes only a
finite amount of time and may be terminated before the process
stagnates. Hence the observer records either a sequence of actions
performed in succession---a trace of the process---or such a sequence
followed by deadlock---a completed trace. Two processes are identified
if they allow the same set of observations in this sense.
The \phrase{trace machine} can be regarded as a simpler version of the
completed trace machine, where the last action name remains visible in
the display if deadlock occurs (unless deadlock occurs in the beginning
already). On this machine traces can be recorded, but stagnation
can not be detected, since in case of deadlock the observer may
think that the last action is still continuing.
\paragraph{\Purple{Modal characterization}}
\begin{definition}{modal completed trace}
The set $\fL_{CT}$ of \phrase{completed trace formulas}
over $Act$ is defined recursively by:
\begin{itemise}
\item
$\top \in \fL_{CT}$.
\item
$0 \in \fL_{CT}$.
\item
If $\phi \in \fL_{CT}$ and $a \in Act$ then $a \phi \in \fL_{CT}$.
\end{itemise}
The \phrase{satisfaction relation} $\models \; \subseteq \IP \times
\fL_{CT}$ is defined recursively by:
\begin{itemise}
\item
$p \models \top$ for all $p \in \IP$.
\item
$p \models 0$ if $I(p)=\emptyset$.
\item
$p \models a \phi$ if for some $q \in \IP$: $p \goto{a} q$ and
$q \models \phi$.
\end{itemise}
\end{definition}
Note that a completed trace formula satisfied by a process $p$
represents either a trace (if it has the form $a_1 a_2 \cdots a_n
\top$) or a completed trace (if it has the form $a_1 a_2 \cdots a_n
0$). Hence one has
\begin{proposition}{modal completed trace}
$p =_{CT} q ~\Leftrightarrow~ \forall \phi \in \fL_{CT} ( p
\models \phi \Leftrightarrow q \models \phi )$.
\end{proposition}
Also note the close link between the constructors of the modal formulas
(corresponding to the three clauses in \df{modal completed trace}) and
the types of observations according to the testing scenario: $\top$
represents the act of the observer of terminating the observation,
regardless of whether the observed process has terminated, $0$
represents the observation of deadlock (the display becomes empty),
and $a\phi$ represents the observation of $a$ being displayed,
followed by the observation $\phi$.
\paragraph{\Purple{Process graph characterization}}
Let $g \in \IG^{\it mr}$ and $s \in \nd(g)$. Then
$I(s) := \{a \in Act \mid \exists t : (s,a,t) \in \ed (g) \}$
is the \phrase{menu} of $s$. $CT(g)$ can now be characterized as follows.
\begin{proposition}{completed trace paths}
$CT(g) = \{T(\pi) \mid \pi \in \pd(g) \wedge I(end(\pi))=\emptyset\}$.
\end{proposition}
\paragraph{\Purple{Classification}}
Trivially $T \preceq CT$ (as in \fig{fig-spectrum}).%
\begin{counterexample}[htb]
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
LC1: circle at (-4,2)
LC2: circle at (-5,1)
LC3: circle at (-5,0)
LC4: circle at (-3,1)
LB1: box invis "$a$" at (-4.65,1.65)
LB2: box invis "$a$" at (-3.35,1.65)
LB3: box invis "$b$" at (-5.2,.5)
LB: box invis "$ab+a$" at (-4,-1)
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC2 to LC3 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
box invis "$=_T~$" at (0,1.7)
box invis "$\neq_{CT}$" at (0,1.2)
box invis "$=_S~$" at (0,.7)
box invis "$\neq_F^1~$" at (0,.2)
RC1: circle at (4,2)
RC2: circle at (4,1)
RC3: circle at (4,0)
RB1: box invis "$a$" at (3.8,1.5)
RB2: box invis "$b$" at (3.8,.5)
RB3: box invis at (5.2,0)
RB: box invis "$ab$" at (4,-1)
arrow from RC1 to RC2 chop circlerad chop circlerad
arrow from RC2 to RC3 chop circlerad chop circlerad
.PE
\centerline{\raise 1em\box\graph}
\caption{Trace and simulation equivalent, but not completed trace
equivalent\hlabel{TvsCT}}
\end{counterexample}
\ctr{TvsCT} shows that the reverse does not hold:
one has $T({\it left})=T({\it right})=\{\epsilon ,~ a, ~ ab\}$, whereas $CT({\it left})
\neq CT({\it right})$ (since $a \in CT({\it left})-CT({\it right})$). Hence the two
process graphs are identified in trace semantics but distinguished in
completed trace semantics. Thus $T \prec CT$: on $\IG$ completed trace
semantics makes strictly fewer identifications than trace semantics.
\paragraph{\Purple{Explicit model}}
In completed trace semantics a process can be represented by a
completed trace equivalence class of process graphs, or equivalently
by the pair $(\mbox{T},\mbox{CT})$ of its sets of traces and complete
traces. The next proposition gives an explicit characterization of
the domain $\dc{C}\IT$ of pairs of sets of traces and complete
traces of process graphs with multiple roots.
\begin{definition}{explicit completed trace}
The \phrase{completed trace domain} $\dc{C}\IT$ is the set of
pairs $(\mbox{T},\mbox{CT}) \in \pow(Act^\ast) \times \pow(Act^\ast)$ satisfying
\begin{center}
$\mbox{T} \in \IT$ and $\mbox{CT} \subseteq \mbox{T}$,\\
$\sigma \in {\rm T}-{\rm CT} ~\Rightarrow~ \exists a \in Act: \sigma a
\in {\rm T}$.
\end{center}
\end{definition}
\begin{proposition}{explicit completed trace}
$({\rm T},{\rm CT}) \in \dc{C}\IT \Leftrightarrow \exists g \in
\IG^{\it mr}: T(g)={\rm T} \wedge CT(g)={\rm CT}$.\\ \pf Let $({\rm
T},{\rm CT}) \in \dc{C}\IT$. Define the \phrase{canonical graph}
$G(\mbox{T},\mbox{CT})$ of $(\mbox{T},\mbox{CT})$ by
\begin{itemise}
\item
$\nd(G(\mbox{T},\mbox{CT}))={\rm T} \cup \{\sigma\delta \mid \sigma
\in \mbox{CT}\}$,
\item
$\mbox{\sc roots}(G(\mbox{T},\mbox{CT}))=\{\epsilon\}
\cup \{\delta \mid \epsilon \in \mbox{CT}\}$ and
\item $(\sigma,a,\rho) \in \ed(G(\mbox{T},\mbox{CT}))$ iff $\rho = \sigma a \vee
\rho = \sigma a \delta$.
\end{itemise}
As T satisfies T2, $G(\mbox{T},\mbox{CT})$ is connected, i.e.\
$G(\mbox{T},\mbox{CT})\in \IG^{\it mr}$. In fact,
$G(\mbox{T},\mbox{CT})$ is a tree, except that it may have two
roots. Using Propositions \href{pr-trace paths}{\ref{pr-trace paths}}
and \ref{pr-completed trace paths} it is easy to see that
$T(G(\mbox{T},\mbox{CT}))={\rm T}$ and $CT(G(\mbox{T},\mbox{CT}))={\rm CT}$.
\end{proposition}
The pairs obtained from process graphs with single roots are the ones
moreover satisfying \[\epsilon \in CT \Leftrightarrow T=\{\epsilon\}.\]
\paragraph{\Purple{Infinite processes}}\textBrown
Also for completed trace semantics one can distinguish a finitary and
an infinitary variant. In terms of the testing scenario, the latter
($CT^\infty$) postulates that observations may take an infinite amount
of time.
\begin{definition}{infinitary completed trace}
Two processes $p$ and $q$ are \phrase{infinitary completed trace
equivalent}, notation $p =_{CT}^\infty q$, if
$CT(p)=CT(q)$ and $T^\infty (p)=T^\infty (q)$.
Note that in this case also $T(p)=T(q)$.
\end{definition}
\pr{koenig} implies that for image finite processes
$CT$ and $CT^\infty$ coincide, whereas Counterexample \hhref{infinitary}
shows that in general the two are different. In fact, $T \prec
T^\infty \prec CT^\infty$ and $T \prec CT \prec CT^\infty$, and
the two preceding counterexamples show that there are no further inclusions.
\textBlack
\section{{Failures semantics}}\hlabel{failures}
\paragraph{\Purple{Testing scenario}}
The \phrase{failures machine} contains as its interface to the outside
world not only the display of the completed trace machine, but also
a switch for each action $a \in Act$ (as in \fig{failure trace
machine}).
\begin{figure}[htb]
.PS
scale = 2.54
boxwid = 1.5; boxht = 1.5
circlerad = 0.25
define switch
X
circle rad 0.1 at $1,$2
\textRed
circle invis "\raisebox{-3.5pt}[0pt][0pt]{\Huge $\cdot$}" at (last circle.w.x -.2,last circle.c.y +.15)
line from 2nd last circle.w +(-.05,0) to last circle.c
\textBlack
X
box invis at (-.3,0)
box wid 9 height 4 at (4.5,2) fill .3
circle invis "$a$" at (1,1)
switch(1,2)
circle invis "$b$" at (2,1)
switch(2,2)
circle invis "$\cdots$" at (3,2)
circle invis "$z$" at (4,1)
switch(4,2)
box "\Blue{\LARGE $a$}" at (6,2) fill 0
.PE
\centerline{\box\graph}\vspace{-1em}
\caption{The failure trace machine\hlabel{failure trace machine}}
\end{figure}
By means of these switches the observer may determine which actions
are \phrase{free} and which are \phrase{blocked}. This situation may be
changed any time during a run of the process. As before, the process
autonomously chooses an execution path that fits with its position in
$( \IP , \rightarrow )$, but this time the process may only start the execution
of free actions. If the process reaches a state where all initial
actions of its remaining behaviour are blocked, it can not proceed and
the machine stagnates, which can be recognized from the empty display.
In this case the observer may record that after a certain sequence of
actions $\sigma$, the set $X$ of free actions is refused by the process.
$X$ is therefore called a \phrase{refusal set} and $\rec{ \sigma , X }$ a
\phrase{failure pair}. The set of all failure pairs of a process is called
its \phrase{failure set}, and constitutes its observable behaviour.
\paragraph{\Purple{Definition \ref{failures}}}
$\rec{ \sigma , X} \in Act^\ast \times \pow (Act)$ is a
\phrase{failure pair} of a process $p$ if there is a process $q$
such that $p \goto{\sigma} q$ and $I(q) \cap X = \emptyset$. Let
$F(p)$ denote the set of failure pairs of $p$. Two processes $p$ and
$q$ are \phrase{failures equivalent}, notation $p =_F q$, if
$F(p)=F(q)$. In \phrase{failures semantics} ($F$) two processes are
identified iff they are failures equivalent.\\[2ex]
Note that $T(p)$ can be expressed in terms of $F(p)$: $T(p) = \{\sigma
\in Act^\ast \mid \rec{\sigma,\emptyset} \in F(p)\}$; hence $p =_F q$
implies $T(p) = T(q)$.
\begin{definition}{cont}
For $p \in \IP$ and $\sigma \in T(p)$, let
$Cont_p( \sigma ) = \{a \in Act \mid \sigma a \in T(p)\}$, the set of
possible \phrase{continuations} of $\sigma$.
\end{definition}
The following proposition says that the failure set $F(p)$ of a
process $p$ is completely determined by the set of failure pairs
$\rec{\sigma,X}$ with $X \subseteq Cont_p(\sigma)$.
\begin{proposition}{cont}
Let $p \!\in\! \IP$, $\sigma \!\in\! T(p)$ and $X \!\subseteq\! Act$.
Then $\rec{ \sigma , X } \!\in\! F(p) \Leftrightarrow
\rec{ \sigma , X \cap Cont_p( \sigma ) } \!\in\! F(p)$.
\\\pf If $p \goto{\sigma} q$ then $I(q) \subseteq Cont_p(\sigma)$.
\end{proposition}
\paragraph{\Purple{Modal characterization}}
\begin{definition}{modal failure}
The set $\fL_{F}$ of \phrase{failure formulas}
over $Act$ is defined recursively by:
\begin{itemise}
\item
$\top \in \fL_{F}$.
\item
$\widetilde{X} \in \fL_{F}$ for $X \subseteq Act$.
\item
If $\phi \in \fL_{F}$ and $a \in Act$ then $a \phi \in \fL_{F}$.
\end{itemise}
The \phrase{satisfaction relation} $\models \; \subseteq \IP \times
\fL_{F}$ is defined recursively by:
\begin{itemise}
\item
$p \models \top$ for all $p \in \IP$.
\item
$p \models \widetilde{X}$ if $I(p) \cap X =\emptyset$.
\item
$p \models a \phi$ if for some $q \in \IP$: $p \goto{a} q$ and
$q \models \phi$.
\end{itemise}
\end{definition}
$\widetilde{X}$ represents the observation that the process refuses
the set of actions $X$, i.e.\ that stagnation occurs in a situation
where $X$ is the set of actions allowed by the environment. Note that
a failure formula satisfied by a process $p$ represents either a
trace (if it has the form $a_1 a_2 \cdots a_n \top$) or a failure pair
(if it has the form $a_1 a_2 \cdots a_n \widetilde{X}$). Hence one has
\begin{proposition}{modal failure}
$p =_{F} q ~\Leftrightarrow~ \forall \phi \in \fL_{F} ( p
\models \phi \Leftrightarrow q \models \phi )$.
\end{proposition}
\paragraph{\Purple{Process graph characterization}}
Let $g \in \IG^{\it mr}$ and $\pi \in \pd(g)$. Then
$$F(\pi) := \{\rec{T(\pi),X} \mid I(end(\pi)) \cap X = \emptyset \}$$
is the \phrase{failure set} of $\pi$. $F(g)$ can now be characterized
as follows.
\begin{proposition}{failure paths}
$F(g) = \bigcup_{\pi \in \pd(g)} F(\pi)$.
\end{proposition}
\paragraph{\Purple{Classification}}
%\begin{proposition}{CTvsF}
$CT \prec F$.
\\
\pf For ``$CT \preceq F$'' it suffices to show that also $CT(p)$ can be
expressed in terms of $F(p)$:
$$
CT(p)=\{ \sigma \in Act^\ast \mid \rec{ \sigma , Act } \in F(p)\}.
$$
It also suffices to show that the modal language ${\fL}_{CT}$ is a sublanguage of $\fL_F$: $p \models 0
\Leftrightarrow p \models \widetilde{Act}$.\\
``$CT \not\succeq F$'' follows from \ctr{CTvsF}: one
has $CT({\it left})=CT({\it right})= \{ ab,~ac \}$, whereas $F({\it left}) \neq
F({\it right})$ (since $\rec{a,\{c\}} \in F({\it left})-F({\it right})$).
\hfill $\Box$%
%\end{proposition}
\begin{counterexample}[htb]
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
LC1: circle at (-4,2)
LC2: circle at (-5,1)
LC3: circle at (-5,0)
LC4: circle at (-3,1)
LC5: circle at (-4,0)
LC6: circle at (-2,0)
LB1: box invis "$a$" at (-4.65,1.65)
LB2: box invis "$a$" at (-3.35,1.65)
LB3: box invis "$b$" at (-5.2,.5)
LB4: box invis "$b$" at (-3.65,0.65)
LB5: box invis "$c$" at (-2.35,0.65)
circle invis "$ab+a(b+c)$" at (-3.5,-1)
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC2 to LC3 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
arrow from LC4 to LC5 chop circlerad chop circlerad
arrow from LC4 to LC6 chop circlerad chop circlerad
box invis "$=_{CT}$" at (0,1.7)
box invis "$\neq_F~$" at (0,1.2)
box invis "$=_{CS}$" at (0,.7)
box invis "$\neq_F^1~$" at (0,.2)
RC1: circle at (4,2)
RC2: circle at (4,1)
RC3: circle at (3,0)
RC4: circle at (5,0)
RB1: box invis "$a$" at (3.8,1.5)
RB2: box invis "$b$" at (3.35,.65)
RB3: box invis "$c$" at (4.65,.65)
RB4: box invis at (5.2,0)
circle invis "$a(b+c)$" at (4,-1)
arrow from RC1 to RC2 chop circlerad chop circlerad
arrow from RC2 to RC3 chop circlerad chop circlerad
arrow from RC2 to RC4 chop circlerad chop circlerad
.PE
\centerline{\raise 1em\box\graph}
\caption{Completed trace and completed simulation equivalent, but not
failures equivalent or even singleton-failures equivalent\hlabel{CTvsF}}
\end{counterexample}
\paragraph{\Purple{Explicit model}}
In failures semantics a process can be represented by a failures
equivalence class of process graphs, or equivalently by its failure
set. The next proposition gives an explicit characterization of
the domain $\dl{F}$ of failure sets of process graphs with multiple
roots.
\begin{definition}{explicit failures}
The \phrase{failures domain} $\dl{F}$ is the set of subsets ${\rm F}$ of
$Act^\ast \times \pow(Act)$ satisfying
\begin{center}
\begin{tabular}{ll}
F1 & $\rec{\epsilon,\emptyset} \in {\rm F}$, \\
F2 & $\rec{\sigma\rho,\emptyset} \in {\rm F} ~\Rightarrow~
\rec{\sigma,\emptyset} \in {\rm F}$,\\
F3 & $\rec{\sigma,Y} \in {\rm F} \wedge X \subseteq Y ~\Rightarrow~
\rec{\sigma,X} \in {\rm F}$,\\
F4 & $\rec{\sigma,X} \in {\rm F} \wedge \forall a \in Y (\rec{\sigma
a,\emptyset} \not\in {\rm F}) ~\Rightarrow~ \rec{\sigma,X \cup Y} \in
{\rm F}$.
% ,\\
% F5 & $\rec{\epsilon,X} \in {\rm F} ~\Rightarrow~ \forall a \in X:
% \rec{a,\emptyset} \not\in \rm F$.
\end{tabular}
\end{center}
\end{definition}
\begin{proposition}{explicit failures}
${\rm F} \in \dl{F} \Leftrightarrow \exists g \in \IG^{\it mr}:
F(g)={\rm F}$.\\
\pf ``$\Leftarrow$'': F1 and F2 follow from T1 and T2 in \sect{trace},
as one has $\rec{\sigma,\emptyset} \in F(g) \Leftrightarrow \sigma \in
T(g)$.\\
F3 follows immediately from the definitions, as $I(q) \cap Y =
\emptyset \wedge X \subseteq Y \Rightarrow I(q) \cap X = \emptyset$.\\
F4 follows immediately from \pr{cont}, as $\forall a \in Y (\rec{\sigma
a,\emptyset} \not\in F(g))$ iff $Y \cap Cont_g(\sigma) = \emptyset$.
% \\
% F5 follows immediately from the observation that
% $I(\rt(g)) = \{a \in Act \mid \rec{a,\emptyset} \in F(g)\}$.
For ``$\Rightarrow$'' let ${\rm F} \in \dl{F}$.
For $\sigma \in Act^*$ write $Cont_{\rm F}(\sigma)$ for $\{a
\in Act \mid \rec{\sigma a,\emptyset} \in {\rm F}\}$.\\
Define the \phrase{canonical graph} $G(\mbox{F})$ of F by
\begin{itemise}
\item $\nd(G(\mbox{F}))=\{\rec{\sigma,X} \in {\rm F} \mid
X \subseteq Cont_{\rm F}(\sigma)\}$,
\item $\mbox{\sc roots}(G(\mbox{F}))=\{\rec{\epsilon,X}\mid
\rec{\epsilon,X}\in \rm F\}$,
\item $\ed(G(\mbox{F}))=\{(\rec{\sigma,X},a,\rec{\sigma a,Y}) \mid
\rec{\sigma,X},\rec{\sigma a,Y} \in \nd(G(\mbox{F})) \wedge a \not\in X\}$.
\end{itemise}
By F1, $\mbox{\sc roots}(G(\mbox{F})) \neq \emptyset$. Using F3 and
F2, any node $s=\rec{a_1 \cdots a_n,X}$ of $G(\mbox{F})$ is reachable
from a root by the path $\pi_s: \rec{\epsilon,\emptyset} \goto{a_1}
\rec{a_1,\emptyset} \goto{a_2} \cdots \goto{a_{n-1}} \rec{a_1 \cdots
a_{n-1},\emptyset} \goto{a_n} \rec{a_1 \cdots a_n,X}$; hence $G(\mbox{F})$ is
connected. So $G(\mbox{F}) \in \IG^{\it mr}$. I have to show that
$F(G(\mbox{F}))=\rm F$.
``$\supseteq$'':
Suppose $\rec{\sigma,X} \in \rm F$. Then, by F3,
$s:=\rec{\sigma, X \cap Cont_{\rm F}(\sigma)} \in \nd(G(\mbox{F}))$.
By construction one has $T(\pi_s)=\sigma$ and $I(s) \cap X = \emptyset$.
Hence $\rec{\sigma,X} \in F(\pi_s) \subseteq F(G(\mbox{F}))$.
``$\subseteq$'':
With induction on the length of paths, it follows immediately from the
definition of $G(\mbox{F})$ that for $\pi \in \pd(G(\mbox{F}))$, if
$end(\pi)=\rec{\rho,Y}$ then $\rho = T(\pi)$ and
$I(end(\pi))=Cont_{\rm F}(\rho)-Y$.\hfill (*)\\
Suppose $\rec{\sigma,X} \in F(G(\mbox{F}))$. Then, by \pr{failure paths},
there must be a path $\pi \in \pd(G(\mbox{F}))$ with $\rec{\sigma,X} \in
F(\pi)$. So $T(\pi)=\sigma$ and $I(end(\pi)) \cap X = \emptyset$.
Let $end(\pi):=\rec{\rho,Y} \in \rm F$. By (*), $\rho=\sigma$ and $X
\cap Cont_{\rm F}(\sigma) \subseteq Y$. By F3 it follows that
$\rec{\sigma,X\cap Cont_{\rm F}(\sigma)} \in \rm F$, and F4 yields
$\rec{\sigma,X} \in \rm F$.
\end{proposition}
A variant of $\dl{F}$ was used as a model of concurrency in {\sc
Hoare} \hcite{Ho85}.\footnote{There a process is given as a triple
$(A,F,D)$ with $A \subseteq Act$ a set of actions that may occur in
the process, $F \in \dl{F}$ and $D$ a set of so-called
{\em divergencies}, traces that can lead along a state where an
infinite sequence of internal actions is possible. As this paper
considers only concrete, and hence divergence-free, processes, $D$ is
always empty here.}
If $\mbox{\sc roots}(g)$ were allowed to be empty, a characterization
is obtained by dropping requirement F1.
A characterization of the domain of failure sets of process graphs
with single roots is given by adding to F1--4 the requirement
\begin{center}
F5~~~$\rec{\epsilon,X} \in {\rm F} ~\Rightarrow~ \forall a \in X:
\rec{a,\emptyset} \not\in \rm F$.
\end{center}
That F5 holds follows from the observation that $I(\rt(g)) = \{a \in
Act \mid \rec{a,\emptyset} \in F(g)\}$ for $g \in \IG$.
\paragraph{\Purple{Alternative characterizations}}
In {\sc De Nicola} \hcite{DN87} several equivalences, that were proposed in
{\sc Kennaway} \hcite{Ke81}, {\sc Darondeau} \hcite{Da82} and
{\sc De Nicola \& Hennessy} \hcite{DH84},
are shown to coincide with failures semantics on the domain of finitely
branching transition systems without internal moves. For this purpose
he uses the following alternative characterization of failures
equivalence.
\begin{definition}{must} Write $p~{\it after}~ \sigma ~{\it MUST}~X$
if for each $q \in \IP$ with $p \goto{\sigma} q$ there is an $a \in I(q)$
with $a \in X$. Put $p \simeq q$ if for
all $\sigma \in Act^\ast$ and $X \subseteq Act$: $p~{\it after}~ \sigma
~{\it MUST}~X~ \Leftrightarrow ~q~{\it after}~ \sigma ~{\it MUST}~X$.
\end{definition}
\begin{proposition}{must}
Let $p,q \in \IP$. Then $p \simeq q ~ \Leftrightarrow ~ p =_F q$.
\\
\pf $p~{\it after}~ \sigma ~{\it MUST}~X~ \Leftrightarrow ~ \rec{ \sigma
,X} \not\in F(p)$ \hcite{DN87}.
\end{proposition}
Instead of the complement of the failure set of a process $p$, one can
also take the complement $Cont_p(\sigma)-X$ of every refusal set $X$
within a failure pair $\rec{\sigma,X}$ of $p$. In view of \pr{cont},
the same information stored in $F(p)$ is given by the set of all pairs
$\rec{ \sigma , X} \in Act^\ast \times \pow (Act)$ for which there is
a process $q$ such that $p \goto{\sigma} q$ and $I(q) \subseteq X
\subseteq Cont_p(\sigma)$. In {\sc Hennessy} \hcite{He85}, a model for
nondeterministic behaviours is proposed in which a process is
represented as an \phrase{acceptance tree}. An acceptance tree of a
finitely branching process without internal moves is essentially the
set of pairs described above, conveniently represented as a finitely
branching, deterministic process tree, of which the nodes are labelled
by collections of sets of actions. Thus acceptance trees constitute an
explicit model of failures semantics.
\paragraph{\Purple{Infinite processes}}\textBrown
$\!$For infinite processes, three versions of failures
semantics can be distinguished.
\begin{definition}{infinitary failures} Two processes
$p$ and $q$ are {\em (finitary) failures equivalent} if $F(p)=F(q)$.
$p$ and $q$ are \phrase{infinitary failures equivalent}, notation
$p =_F^\infty q$, if $F(p)=F(q)$ and $T^\infty (p)=T^\infty (q)$.
They are \phrase{finite-failures equivalent}, notation $p =_F^- q$, if
$F^-(p)=F^-(q)$, where $F^-(p)$ denotes the set of failure pairs
$\rec{\sigma,X}$ of $p$ with $X$ finite.
\end{definition}
The original failures semantics of {\sc Brookes, Hoare \& Roscoe}
\hcite{BHR84} is $F^-$, i.e.\ what I call \phrase{finite-failures semantics}.
They ``adopt this view of distinguishability because [they] consider a
{\em realistic} environment to be one that is at any time capable of
performing only a finite number of events.'' In terms of the failures
machine this means that at any time only finitely many switches
can be set on free.
Finitary failures semantics is the default version introduced at the
beginning of this section. This can be regarded to be the semantics
employed in {\sc Brookes \& Roscoe} \hcite{BR85} and {\sc Hoare}
\hcite{Ho85}. Infinitary failures semantics was first discussed in
{\sc Bergstra, Klop \& Olderog} \hcite{BKO86}; it was proposed as a
semantics for CSP in {\sc Roscoe} \hcite{Ro93}. The difference between
the testing scenarios for $F$ and $F^\infty$ is that only the
latter allows observations of infinite duration. Obviously, $F^-
\preceq F \preceq F^\infty$. That the latter inclusion is strict
follows from \ctr{infinitary};
\ctr{finite} shows that also the former is strict:
\begin{counterexample}[htb]
\Black{
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
LC1: circle at (-4,2)
LC2: circle at (-6,1)
LC3: circle at (-5,1)
LC4: circle at (-4,1)
LC5: circle at (-3,1)
LC6: circle at (-2,1)
LC3b: circle at (-5,0)
LC4b: circle at (-4,0)
LC5b: circle at (-3,0)
LC6b: circle at (-2,0)
LB1: box invis "$a$" at (-4.61,1.54)
LB2: box invis "$a$" at (-3.32,1.50)
LB1: box invis "$a$" at (-5.04,1.68)
LB2: box invis "$a$" at (-4.15,1.5)
LB3: box invis "$b_1$" at (-5.2,.6)
LB3: box invis "$b_2$" at (-4.2,.6)
LB3: box invis "$b_3$" at (-3.2,.6)
LB4: box invis "$...$" at (-2,1.5)
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC1 to LC3 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
arrow from LC1 to LC5 chop circlerad chop circlerad
arrow from LC1 to LC6 dashed chop circlerad chop circlerad
arrow from LC3 to LC3b chop circlerad chop circlerad
arrow from LC4 to LC4b chop circlerad chop circlerad
arrow from LC5 to LC5b chop circlerad chop circlerad
arrow from LC6 to LC6b dashed chop circlerad chop circlerad
RC1: circle at (3,2)
RC3: circle at (2,1)
RC4: circle at (3,1)
RC5: circle at (4,1)
RC6: circle at (5,1)
RC3b: circle at (2,0)
RC4b: circle at (3,0)
RC5b: circle at (4,0)
RC6b: circle at (5,0)
LB1: box invis "$a$" at (3.68,1.50)
LB2: box invis "$a$" at (2.39,1.55)
LB2: box invis "$a$" at (2.85,1.5)
LB3: box invis "$b_1$" at (1.8,.6)
LB3: box invis "$b_2$" at (2.8,.6)
LB3: box invis "$b_3$" at (3.8,.6)
LB4: box invis "$...$" at (5,1.5)
arrow from RC1 to RC3 chop circlerad chop circlerad
arrow from RC1 to RC4 chop circlerad chop circlerad
arrow from RC1 to RC5 chop circlerad chop circlerad
arrow from RC1 to RC6 dashed chop circlerad chop circlerad
arrow from RC3 to RC3b chop circlerad chop circlerad
arrow from RC4 to RC4b chop circlerad chop circlerad
arrow from RC5 to RC5b chop circlerad chop circlerad
arrow from RC6 to RC6b dashed chop circlerad chop circlerad
B3: box invis "$=_B^-$" at (0,1.5)
B3: box invis "$\neq_{CT}$" at (0,.5)
.PE
\centerline{\raise 1em\box\graph}}
\caption{HML- and finite-failures equivalent, but not completed trace
equivalent\hlabel{finite}}
\end{counterexample}
one has $F^-({\it left})=F^-({\it right})$, whereas $F({\it left})
\neq F({\it right})$. In fact even $CT({\it left}) \neq CT({\it
right})$, as $a \in CT({\it left})-CT({\it right})$.
Thus, although $T \prec F^-$, $CT \prec F$ and $CT^\infty \prec F^\infty$,
$CT$ and $F^-$ are independent, as are $CT^\infty$ and $F$.
In addition to the three variants of \df{infinitary failures} one could
also define a version of failures semantics based on infinite traces
and finite refusal sets. Such a semantics would distinguish the two
graphs of \ctr{infinitary}, but identify the ones of
\ctr{finite}. As this semantics does not occur in the
literature, and has no clear advantages over the other variants, I
will not further consider it here.
\begin{proposition}{image finite failures}
Let $p$ and $q$ be image finite processes. Then $p =_F^- q
\Leftrightarrow p =_F q \Leftrightarrow p =_F^\infty q$.
\\\pf ``$\Leftarrow$'' has been established for all processes, and
the second ``$\Rightarrow$'' follows immediately from \pr{koenig}
(as $p =_F q \Rightarrow p =_T q \Rightarrow p =_T^\infty q$). So it
remains to show that $p \neq_F q \Rightarrow p \neq_F^- q$.
Suppose $F(p) \neq F(q)$, say there is a failure pair $\rec{\sigma ,
X} \in F(p)-F(q)$. By the image finiteness of $q$ there are only
finitely many processes $r_i$ with $q \goto{\sigma} r_i$, and for each
of them there is an action $a_i \in I(r_i ) \cap X$ (as otherwise
$\rec{\sigma , X}$ would be a failure pair of $q$). Let $Y$ be the
set of all those $a_i$'s. Then $Y$ is a finite subset of $X$, so
$\rec{\sigma ,Y} \in F^- (p)$. On the other hand, $a_i \in I(r_i )
\cap Y$ for all $r_i$, so $\rec{\sigma ,Y} \not\in F^- (q)$.
\end{proposition}
It is not hard to change the leftmost process in Counterexample
\ref{finite} to an image finite one with the same failure pairs.
Thus, in the first statement of \pr{image finite failures} it is
necessary that both processes are image finite. For the subclass of
finitely branching processes a stronger result can be obtained.
\begin{proposition}{finitely branching failures}
Let $p,q \in \IP$ with $p$ finitely branching. Then $p =_F^- q
\Leftrightarrow p =_F q$.
\\\pf Suppose $p =_F^- q$. As $p$ is finitely branching,
$Cont_p(\sigma)$ is finite for all $\sigma \in T(p)$. And as
$T(q)=T(p)$, $Cont_q(\sigma) = Cont_p(\sigma)$, which is finite, for
all $\sigma \in T(q)$. Now for processes $p$ with this property,
$F(p)$ is completely determined by $F^-(p)$, as follows from \pr{cont}.
\end{proposition}
The second statement of \pr{image finite failures} does not allow such
a strengthening, as will follow from \ctr{infinitary RS}.
\textBlack
\section{{Failure trace semantics}}\hlabel{failure trace}
\paragraph{\Purple{Testing scenario}}
The \phrase{failure trace machine} has the same layout as the failures
machine, but it does not stagnate permanently if the process cannot
proceed due to the circumstance that all actions it is prepared to
continue with are blocked by the observer. Instead it
idles---recognizable from the empty display---until the observer
changes its mind and allows one of the actions the process is ready to
perform. What can be observed are traces with idle periods in
between, and for each such period the set of actions that are not
blocked by the observer. Such observations can be coded as sequences
of members and subsets of $Act$.
{\bf Example}: The sequence $\{a,b\}cdb\{b,c\}\{b,c,d\}a(Act)$ is the
account of the following observation: At the beginning of the execution
of the process $p$, only the actions $a$ and $b$ were allowed by the
observer. Apparently, these actions were not on the menu of $p$, for
$p$ started with an idle period. Suddenly the observer canceled its
veto on $c$, and this resulted in the execution of $c$, followed by $d$
and $b$. Then again an idle period occurred, this time when $b$ and $c$
were the actions not being blocked by the observer. After a while the
observer decided to allow $d$ as well, but the process ignored this
gesture and remained idle. Only when the observer gave the green light
for the action $a$, it happened immediately. Finally, the process
became idle once more, but this time not even one action was blocked.
This made the observer realize that a state of eternal stagnation had
been reached, and disappointed he terminated the observation.
A set $X \subseteq Act$, occurring in such a sequence, can be regarded as an
offer from the environment, that is refused by the process. Therefore
such a set is called a \phrase{refusal set}. The occurrence of a refusal
set may be interpreted as a `failure' of the environment to create a
situation in which the process can proceed without being disturbed.
Hence a sequence over $Act \cup \pow (Act)$, resulting from an
observation of a process $p$ may be called a \phrase{failure trace} of $p$.
The observable behaviour of a process, according to this testing scenario,
is given by the set of its failure traces, its \phrase{failure trace set}.
The semantics in which processes are identified iff their failure trace
sets coincide, is called \phrase{failure trace semantics} ($FT$).
For image finite processes failure trace semantics is exactly
the equivalence that originates from {\sc Phillips}' notion of
\phrase{refusal testing} \hcite{Ph87}. (Image infinite processes
are not considered in \hcite{Ph87}.)
There it is called \phrase{refusal equivalence}.
\paragraph{\Purple{Definition \ref{failure trace}}}
%\begin{definition}{refusal relations}\mbox{}
\begin{itemise}
\item
The \phrase{refusal relations} $\goto{X}$ for $X \subseteq Act$ are defined by:
$p \goto{X} q$ iff $p=q$ and $I(p) \cap X= \emptyset$.
\\
$p \goto{X} q$ means that $p$ can evolve into $q$, while being idle during
a period in which $X$ is the set of actions allowed by the environment.
\item
The \phrase{failure trace relations} $\goto{\sigma}$ for $\sigma \in (Act \cup
\pow (Act))^\ast$ are defined as the reflexive and transitive closure
of both the action and the refusal relations. Again the overloading of
notation is harmless.
\item
$\sigma \in (Act \cup \pow (Act))^\ast$ is a \phrase{failure trace} of
a process $p$ if there is a process $q$ such that $p \goto{\sigma}
q$. Let $FT(p)$ denote the set of failure traces of $p$. Two
processes $p$ and $q$ are \phrase{failure trace equivalent}, notation
$p =_{FT} q$, if $FT(p)=FT(q)$.
\end{itemise}
%\end{definition}
\weg{
\paragraph{\Purple{Exercises}}
\begin{enumerate}
\setcounter{enumi}{\value{exer}}
\item
Explain why $a \{ a,b \} a$ can never be a failure trace of a process
$p \in \IP$.
\item
Can $\{ a \} b$ and $\{ b \} a$ be two failure traces of such a
process? And $a \{ a \} b$ and $a \{ b \} a$ ?
\item
$\{a,b\}cc, ~ \{a\}c\{b\}c, ~ \{b\}c\{a\}c, ~ c\{a,b\}c, ~
c\{a\}\{b\}c$ and $c$ are failure traces of a process
$p \in \IP$. Which selections from this series provide the
same information about $p$?
\setcounter{exer}{\value{enumi}}
\end{enumerate}
}
\paragraph{\Purple{Modal characterization}}
\begin{definition}{modal failure trace}
The set $\fL_{FT}$ of \phrase{failure trace formulas}
over $Act$ is defined recursively by:
\begin{itemise}
\item
$\top \in \fL_{FT}$.
\item
If $\phi \in \fL_{FT}$ and $X \subseteq Act$ then $\widetilde{X}
\phi \in \fL_{FT}$.
\item
If $\phi \in \fL_{FT}$ and $a \in Act$ then $a \phi \in \fL_{FT}$.
\end{itemise}
The \phrase{satisfaction relation} $\models \; \subseteq \IP \times
\fL_{FT}$ is defined recursively by:
\begin{itemise}
\item
$p \models \top$ for all $p \in \IP$.
\item
$p \models \widetilde{X}\phi$ if $I(p) \cap X =\emptyset$ and $p \models \phi$.
\item
$p \models a \phi$ if for some $q \in \IP$: $p \goto{a} q$ and
$q \models \phi$.
\end{itemise}
\end{definition}
$\widetilde{X}\phi$ represents the observation that the process refuses
the set of actions $X$, followed by the observation $\phi$. A modal
failure trace formula satisfied by a process $p$ represents exactly a
failure trace as defined above. Hence one has
\begin{proposition}{modal failure trace}
$p =_{FT} q ~\Leftrightarrow~ \forall \phi \in \fL_{FT} ( p
\models \phi \Leftrightarrow q \models \phi )$.
\end{proposition}
\paragraph{\Purple{Process graph characterization}}
Let $g \in \IG^{\it mr}$ and $\pi : ~ s_0 \goto{a_1} s_1 \goto{a_2}
\cdots \goto{a_n} s_n \in \pd(g)$. Then
the \phrase{failure trace set} of $\pi$, $FT( \pi )$, is the smallest subset
of $(Act \cup \pow (Act))^\ast$ satisfying
\begin{itemise}
\item
$(Act-I(s_0 )) a_1 (Act-I(s_1 )) a_2 \cdots a_n (Act-I(s_n )) \in FT ( \pi )$,
\item
$\sigma X \rho \in FT ( \pi ) \Rightarrow \sigma \rho \in FT ( \pi )$,
\item
$\sigma X \rho \in FT ( \pi ) \Rightarrow \sigma XX \rho \in FT ( \pi )$,
\item
$\sigma X \rho \in FT ( \pi ) \wedge Y \subset X \Rightarrow \sigma Y \rho \in
FT ( \pi )$.
\end{itemise}
$FT(g)$ can now be characterized as follows.
\begin{proposition}{failure trace paths}
$FT(g) = \bigcup_{\pi \in \pd(g)} FT(\pi)$.
\end{proposition}
\pr{failure trace paths} yields a technique for deciding that two
process graphs are failure trace equivalent, without calculating their
entire failure trace set. \\
Let $g,h \!\in\! \IG^{\it mr}\!\!$, $\pi \! : s_0 \goto{a_1} s_1
\goto{a_2} \cdots \goto{a_n} s_n \in \pd (g)$ and $\pi '\!\! : t_0
\goto{b_1} t_1 \goto{b_2} \cdots \goto{b_m} t_m \in \pd (h)$. Path
$\pi'$ is a \phrase{failure trace augmentation} of $\pi$, notation
$\pi \leq_{FT} \pi '$, if $FT( \pi ) \subseteq FT( \pi ' )$. This is
the case exactly when $n=m$, $a_i = b_i$ and $I(t_i ) \subseteq I(s_i
)$ for $i=1,...,n$. From this the following can be concluded.
\begin{corollary}{augmentation}
Two process graphs $g,h \in \IG^{\it mr}$ are failure trace equivalent iff
\begin{itemise}
\item
for any path $\pi \in \pd (g)$ in $g$ there is a
$\pi ' \in \pd (h)$ such that $\pi \leq_{FT} \pi '$
\item
and for any path $\pi \in \pd (h)$ in $h$ there is a
$\pi ' \in \pd (g)$ such that $\pi \leq_{FT} \pi '$.
\end{itemise}
If $g$ and $h$ are moreover without infinite paths, then it suffices
to check the requirements above for maximal paths.
\end{corollary}
\paragraph{\Purple{Classification}}
%\begin{proposition}{FvsFT}
$F \prec FT$.
\\
\pf For ``$F \preceq FT$'' it suffices to show that $F(p)$ can be
expressed in terms of $FT(p)$:
$$
\rec{ \sigma ,X} \in F(p)~~ \Leftrightarrow ~~ \sigma X \in FT(p).
$$
``$F \not\succeq FT$'' follows from \ctr{RvsFT}; see
\sect{readiness} for details.
\hfill $\Box$%
%\end{proposition}
\begin{counterexample}[htb]
\Black{
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
LC1: circle at (-4,3)
LC2: circle at (-5,2)
LC3: circle at (-5,1)
LC6: circle at (-5,0)
LC7: circle at (-6,1)
LC4: circle at (-3,2)
LC5: circle at (-3,1)
LC8: circle at (-3,0)
LC9: circle at (-2,1)
LB1: box invis "$a$" at (-4.65,2.65)
LB2: box invis "$a$" at (-3.35,2.65)
LB3: box invis "$c$" at (-4.8,1.5)
LB4: box invis "$c$" at (-3.2,1.5)
LB1: box invis "$b$" at (-5.65,1.65)
LB2: box invis "$f$" at (-2.35,1.65)
LB3: box invis "$d$" at (-4.8,.5)
LB4: box invis "$e$" at (-3.2,.5)
LB: box invis "$a(b+cd)+a(f+ce)$" at (-4,-1)
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC2 to LC3 chop circlerad chop circlerad
arrow from LC3 to LC6 chop circlerad chop circlerad
arrow from LC2 to LC7 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
arrow from LC4 to LC5 chop circlerad chop circlerad
arrow from LC5 to LC8 chop circlerad chop circlerad
arrow from LC4 to LC9 chop circlerad chop circlerad
B3: box invis "$=_F~$" at (0,2)
B3: box invis "$\neq_{FT}$" at (0,1.5)
B3: box invis "$=_R~$" at (0,1)
B3: box invis "$\neq_{RT}$" at (0,.5)
RC1: circle at (4,3)
RC2: circle at (5,2)
RC3: circle at (5,1)
RC6: circle at (5,0)
RC7: circle at (6,1)
RC4: circle at (3,2)
RC5: circle at (3,1)
RC8: circle at (3,0)
RC9: circle at (2,1)
LB1: box invis "$a$" at (4.65,2.65)
LB2: box invis "$a$" at (3.35,2.65)
LB3: box invis "$c$" at (4.8,1.5)
LB4: box invis "$c$" at (3.2,1.5)
LB1: box invis "$f$" at (5.65,1.65)
LB2: box invis "$b$" at (2.35,1.65)
LB3: box invis "$d$" at (4.8,.5)
LB4: box invis "$e$" at (3.2,.5)
LB: box invis "$a(b+ce)+a(f+cd)$" at (4,-1)
arrow from RC1 to RC2 chop circlerad chop circlerad
arrow from RC2 to RC3 chop circlerad chop circlerad
arrow from RC3 to RC6 chop circlerad chop circlerad
arrow from RC2 to RC7 chop circlerad chop circlerad
arrow from RC1 to RC4 chop circlerad chop circlerad
arrow from RC4 to RC5 chop circlerad chop circlerad
arrow from RC5 to RC8 chop circlerad chop circlerad
arrow from RC4 to RC9 chop circlerad chop circlerad
.PE
\centerline{\raise 1em\box\graph}
\caption{Failures and ready equivalent, but not failure trace or
ready trace equivalent\hlabel{RvsFT}}}
\end{counterexample}
\paragraph{\Purple{Infinite processes}}\textBrown
As for failures semantics, three variants of failure trace semantics
for infinite processes can be defined. Besides the default version
($FT$) there is an infinitary version ($FT^\infty$), motivated by
observations that may last forever, and a finite version ($FT^-$),
motivated by an observer that may only set finitely many switches on
free at any time.
\begin{definition}{infinitary failure trace}
$\sigma_1 \sigma_2 \cdots \in (Act \cup \pow(Act))^\infty$ is an
\phrase{infinite failure trace} of a process $p \in \IP$ if there are
processes $p_1 , p_2 , ...$ such that $p \goto{\sigma_1} p_1
\goto{\sigma_2} \cdots$. Let $FT^\infty (p)$ denote the set of
infinite failure traces of $p$. Two processes
$p$ and $q$ are \phrase{infinitary failure trace equivalent}, notation
$p =_{FT}^\infty q$, if $FT^\infty (p)=FT^\infty (q)$ and $FT (p)=FT (q)$.
They are \phrase{finite-failure trace equivalent}, notation $p
=_{FT}^- q$, if $FT^-(p)=FT^-(q)$, where $FT^-(p)$ denotes the set of
failure traces of $p$ in which all refusal sets are finite.
\end{definition}
Clearly, $FT^- \prec FT \prec FT^\infty$; Counterexamples
\href{infinitary}{\ref{infinitary}} and \href{finite}{\ref{finite}}
show that the inclusions are strict.
One also has $F^- \prec FT^-$, $F \prec FT$ and $F^\infty \prec
FT^\infty$; here strictness follows from \ctr{RvsFT}.
\begin{proposition}{image finite failure trace}
Let $p$ and $q$ be image finite processes. Then $p =_{FT}^- q
\Leftrightarrow p =_{FT} q \Leftrightarrow p =_{FT}^\infty q$.
\\\pf ``$p =_{FT}^- q \Leftarrow p =_{FT} q \Leftarrow p =_{FT}^\infty
q$'' holds for all processes.\\
Note that the definition of $FT(p)$ is exactly like the definition of
$T(p)$, except that the failure trace relations are used instead of
the generalized action relations; the same relation exists between
$FT^\infty(p)$ and $T^\infty(p)$. Moreover, a process $p \in \IP$ is
image finite in terms of the failure trace relations on $\IP$ iff it
is image finite in terms of the (generalized) action
relations on $\IP$, as defined in \df{action relations}. Hence
``$p =_{FT} q \Rightarrow p =_{FT}^\infty q$'' follows immediately from
\pr{koenig}.\\
``$p =_{FT}^- q \Rightarrow p =_{FT} q$'':
Suppose $FT(p) \neq FT(q)$, say $FT(p)-FT(q) \neq \emptyset$. Let
$\sigma$ be a failure trace in $FT(p)-FT(q)$ with at least one
infinite refusal set. I will show that there must be a failure trace
in $FT(p)-FT(q)$ with strictly fewer infinite refusal sets than
$\sigma$. By applying this result a finite number of times, a failure
trace $\rho \in FT(p)-FT(q)$ is found without infinite refusal sets,
showing that $FT^-(p) \neq FT^-(q)$.
So let $\sigma = \sigma_1 X \sigma_2 \in FT(p)-FT(q)$ with $X$ an
infinite refusal set. Clearly $\sigma_1 \sigma_2 \in FT(p)$.
By the image finiteness of $q$ there are only finitely many pairs of
processes $r_i, s_i$ with $q \goto{\sigma_1} r_i \goto{\sigma_2} s_i$,
and for each of them there is an action $a_i \in I(r_i ) \cap X$ (as
otherwise $\sigma_1 X \sigma_2$ would be a failure trace of $q$). Let
$Y$ be the set of all those $a_i$'s. Then $Y$ is finite. As $Y$ is a
subset of $X$, one has $\sigma_1 Y \sigma_2 \in FT (p)$. On the other
hand, $a_i \in I(r_i ) \cap Y$ for all $r_i$, so $\sigma_1 Y \sigma_2
\not\in FT (q)$.
\end{proposition}
Unlike the situation for failures semantics, in the first statement of
\pr{image finite failure trace} it is not necessary that {\em both}
processes are image finite.
\begin{proposition}{finitely branching failure trace}
Let $p,q \in \IP$ with $p$ image finite. Then $p =_{FT}^- q
\Leftrightarrow p =_{FT} q$.
\\\pf More difficult, and omitted here.
\end{proposition}
The second statement of \pr{image finite failure trace} does not allow
such a strengthening, as will follow from \ctr{infinitary RS}.
\textBlack
\section{{Ready trace semantics}}\hlabel{ready trace}
\paragraph{\Purple{Testing scenario}}
The \phrase{ready trace machine} is a variant of the failure trace machine
that is equipped with a lamp for each action $a \in Act$.
\begin{figure}[htb]
.PS
scale = 2.54
boxwid = 1.5; boxht = 1.5
circlerad = 0.25
define switch
X
circle rad 0.1 at $1,$2
\textRed
circle invis "\raisebox{-3.5pt}[0pt][0pt]{\Huge $\cdot$}" at (last circle.w.x -.15,last circle.c.y +.15)
line from 2nd last circle.w to last circle.c
\textBlack
X
define lamp
X
\textYellow
circle rad 0.05 thickness 3 at $1,$2
circle invis rad 0.2 at $1,$2
circle invis rad 0.3 at $1,$2
line from 2nd last circle.n to last circle.n
line from 2nd last circle.ne to last circle.ne
line from 2nd last circle.e to last circle.e
line from 2nd last circle.se to last circle.se
line from 2nd last circle.s to last circle.s
line from 2nd last circle.sw to last circle.sw
line from 2nd last circle.w to last circle.w
line from 2nd last circle.nw to last circle.nw
\textBlack
X
box invis at (-1,0)
box wid 9.6 height 4 at (4.8,2) fill .3
circle invis "$a$" at (1,1)
switch(1,2)
lamp(.6,3)
circle invis "$b$" at (2,1)
switch(2,2)
lamp(1.6,3)
circle invis "$\cdots$" at (2.6,2)
circle invis "$z$" at (4,1)
switch(4,2)
lamp(3.6,3)
box "\Blue{\LARGE $b$}" at (6,2) fill 0
.PE
\centerline{\box\graph}\vspace{-1em}
\caption{The ready trace machine}
\end{figure}
Each time the process idles, the lamps of all actions the process is
ready to engage in are lit. Of course all these actions are blocked
by the observer, otherwise the process wouldn't idle. Now the observer
can see which actions could be released in order to let the process
proceed. During the execution of an action no lamps are lit.
An observation now consists of a sequence of members and subsets of
$Act$, the actions representing information obtained from the display,
and the sets of actions representing information obtained from the
lights. Such a sequence is called a \phrase{ready trace} of the process,
and the subsets occurring in a ready trace are referred to as
{\em menus}\hindex{menu}.
The information about the free and blocked actions is now redundant.
The set of all ready traces of a process is called its \phrase{ready trace
set}, and constitutes its observable behaviour.
\paragraph{\Purple{Definition \ref{ready trace}}}
%\begin{definition}{ready trace relations}\mbox{}
\begin{itemize}
\item
The \phrase{ready trace relations} $\gort{\sigma}$ for $\sigma \in
(Act \cup \pow (Act))^\ast$ are defined recursively by:
\begin{enumerate}
\item
$p \gort{\epsilon} p$, for any process $p$.
\item
$p \goto{a} q$ implies $p \gort{a} q$.
\item
$p \gort{X} q$ with $X \subseteq Act$ whenever $p=q$ and $I(p)=X$.
\item
$p \gort{\sigma} q \gort{\rho} r$ implies $p \gort{\sigma\rho} r$.
\end{enumerate}
The special arrow $\gort{\sigma}$ had to be used, since further overloading of
$\goto{\sigma}$ would cause confusion with the failure trace relations.
\item
$\sigma \in (Act \cup \pow (Act))^\ast$ is a \phrase{ready trace} of a
process $p$ if there is a process $q$ such that $p \gort{\sigma} q$.
Let $RT(p)$ denote the set of ready traces of $p$. Two processes $p$
and $q$ are \phrase{ready trace equivalent}, notation $p =_{RT} q$, if
$RT(p)=RT(q)$. In \phrase{ready trace semantics} ($RT$) two processes are
identified iff they are ready trace equivalent.
\end{itemize}
%\end{definition}
In {\sc Baeten, Bergstra \& Klop} \hcite{BBK87b}, {\sc Pnueli} \hcite{Pn85}
and {\sc Pomello} \hcite{Pm86} ready trace semantics was defined
slightly differently. By \pr{barbed} below, their definition yields
the same equivalence as mine.
\begin{figure}[htb]\vspace{-2ex}
.PS
scale = 2.54
circlerad = 0.05
arrowhead = 7
circle at (0,0)
arrow from last circle to last circle +(.5,.7) chop circlerad
arrow from last circle to last circle +(.7,.4) chop circlerad
arrow from last circle to last circle +(.7,-.2) chop circlerad
arrow from last circle to last circle +(.6,-.5) chop circlerad
circle at (2,.3)
arrow from last circle to last circle +(.6,.5) chop circlerad
arrow from last circle to last circle +(.7,.1) chop circlerad
arrow from last circle to last circle +(.6,-.5) chop circlerad
arrow "$a$" above from 2nd last circle.c to last circle.c chop circlerad chop circlerad
circle at (4,-.2)
arrow from last circle to last circle +(.5,.7) chop circlerad
arrow from last circle to last circle +(.7,.4) chop circlerad
arrow from last circle to last circle +(.7,-.2) chop circlerad
arrow from last circle to last circle +(.6,-.5) chop circlerad
arrow "$b$" above from 2nd last circle.c to last circle.c chop circlerad chop circlerad
circle at (6,.1)
arrow from last circle to last circle +(.2,.7) chop circlerad
arrow from last circle to last circle +(.7,.4) chop circlerad
arrow from last circle to last circle +(.7,-.4) chop circlerad
arrow from last circle to last circle +(.3,-.6) chop circlerad
arrow "$a$" above from 2nd last circle.c to last circle.c chop circlerad chop circlerad
circle at (8,0)
arrow "$c$" above from 2nd last circle.c to last circle.c chop circlerad chop circlerad
arrow from last circle to last circle +(.3,.6) chop circlerad
arrow from last circle to last circle +(.7,.4) chop circlerad
arrow from last circle to last circle +(.8,0) chop circlerad
arrow from last circle to last circle +(.7,-.4) chop circlerad
arrow from last circle to last circle +(.2,-.6) chop circlerad
.PE
\centerline{\raise 1em\box\graph}\vspace{-2ex}
\end{figure}
\begin{definition}{ready trace} $X_0 a_1 X_1 a_2 \cdots a_n X_n \in
\pow (Act) \times (Act \times \pow (Act))^\ast$ is a \phrase{normal ready
trace} of a process $p$ if there are processes $p_1 , ... ,
p_n$ such that $p \goto{a_1} p_1 \goto{a_2} \cdots \goto{a_n} p_n$ and
$I(p)=X_0$ and $I(p_i )=X_i$ for $i= 1, ... ,n$.
Let $RT_N (p)$ denote the set of normal ready traces of $p$.
Two processes $p$ and $q$ are ready trace equivalent in the sense of
\href{BBK87b}{\cite{BBK87b,Pn85,Pm86}} if $RT_N (p) = RT_N (q)$.
\end{definition}
\begin{proposition}{barbed}
Let $p,q \in \IP$. Then $RT_N (p) = RT_N (q)
~ \Leftrightarrow ~ RT(p)=RT(q)$.
\\
\pf The normal ready traces of a process are just the ready traces
which are an alternating sequence of sets and actions, and vice versa
the set of all ready traces can be constructed from the set of normal
ready traces by means of doubling and leaving out menus.
\end{proposition}
\paragraph{\Purple{Modal characterization}}
\begin{definition}{modal ready trace}
The set $\fL_{RT}$ of \phrase{ready trace formulas}
over $Act$ is defined recursively by:
\begin{itemise}
\item
$\top \in \fL_{RT}$.
\item
If $\phi \in \fL_{RT}$ and $X \subseteq Act$ then
$X\phi \in \fL_{RT}$.
\item
If $\phi \in \fL_{RT}$ and $a \in Act$ then $a \phi \in \fL_{RT}$.
\end{itemise}
The \phrase{satisfaction relation} $\models \; \subseteq \IP \times
\fL_{RT}$ is defined recursively by:
\begin{itemise}
\item
$p \models \top$ for all $p \in \IP$.
\item
$p \models X\phi$ if $I(p) = X$ and $p \models \phi$.
\item
$p \models a \phi$ if for some $q \in \IP$: $p \goto{a} q$ and
$q \models \phi$.
\end{itemise}
\end{definition}
$X\phi$ represents the observation of a menu, followed by the
observation $\phi$. A ready trace formula satisfied by a process
$p$ represents exactly a ready trace in \href{ready trace}{Definition
\ref{ready trace}}. Hence one has
\begin{proposition}{modal ready trace}
$p =_{RT} q ~\Leftrightarrow~ \forall \phi \in \fL_{RT} ( p
\models \phi \Leftrightarrow q \models \phi )$.
\end{proposition}
\paragraph{\Purple{Process graph characterization}}
Let $g \in \IG^{\it mr}$ and $\pi : ~ s_0 \goto{a_1} s_1
\goto{a_2} \cdots \goto{a_n} s_n \in \pd(g)$. The \phrase{ready
trace} of $\pi$ is given by $RT_N(\pi) := I(s_0 ) a_1 I(s_1 ) a_2
\cdots a_n I(s_n )$.\\ $RT_N(g)$ can now be characterized by:
\begin{proposition}{ready trace paths}
$RT_N(g) = \{RT_N(\pi) \mid \pi \in \pd(g)\}$.
\end{proposition}
Moreover, $RT(g)$ is the smallest subset of $(Act \cup \pow
(Act))^\ast$ containing $RT_N(g)$ and satisfying $$\sigma X \rho \in RT
(g) \Rightarrow \sigma \rho \in RT (g) \wedge \sigma XX \rho \in RT
(g).$$
\paragraph{\Purple{Classification}}
%\begin{proposition}{RvsFTT}
$FT \prec RT$.
\\
\pf For ``$FT \preceq RT$'' it suffices to show that
$FT(p)$ can be expressed in terms of $RT(p)$:
\begin{quote}
$\sigma = \sigma_1 \sigma_2 \cdots \sigma_n \in FT(p) ~( \sigma_i \in Act
\cup \pow (Act)) ~ \Leftrightarrow ~\\ \exists \rho = \rho_1 \rho_2
\cdots \rho_n \in RT(p) ~( \rho_i \in Act \cup \pow (Act))$ such that
for $i=1,...,n$ either\\ $\sigma_i = \rho_i \in Act$ or $\sigma_i ,
\rho_i \subseteq Act$ and $\sigma_i \cap \rho_i = \emptyset$.
\end{quote}
``$FT \not\succeq RT$'' follows from \ctr{FTvsR}; see
\sect{readiness} for details.
\hfill $\Box$%
%\end{proposition}
\begin{counterexample}[htb]
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
LC1: circle at (-4,2)
LC2: circle at (-5,1)
LC3: circle at (-5,0)
LC4: circle at (-3,1)
LC5: circle at (-3,0)
LB1: box invis "$a$" at (-4.65,1.65)
LB2: box invis "$a$" at (-3.35,1.65)
LB3: box invis "$b$" at (-5.2,.5)
LB4: box invis "$c$" at (-2.8,.5)
circle invis "$ab+ac$" at (-4,-1)
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC2 to LC3 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
arrow from LC4 to LC5 chop circlerad chop circlerad
B3: box invis "$=_F~$" at (0,1.5)
B3: box invis "$\neq_R~$" at (0,1)
B3: box invis "$=_{FT}$" at (0,.5)
B3: box invis "$\neq_{RT}$" at (0,0)
RC1: circle at (4,2)
RC2: circle at (4,1)
RC3: circle at (3,0)
RC4: circle at (5,0)
RC5: circle at (3,1)
RC6: circle at (5,1)
RB1: box invis "$a$" at (3.85,1.5)
RB2: box invis "$b$" at (3.4,.6)
RB3: box invis "$c$" at (4.6,.6)
RB4: box invis "$a$" at (3.35,1.65)
RB5: box invis "$a$" at (4.65,1.65)
RB7: box invis "$b$" at (2.8,.5)
RB8: box invis "$c$" at (5.2,.5)
circle invis "$ab+a(b+c)+ac$" at (4,-1)
arrow from RC1 to RC2 chop circlerad chop circlerad
arrow from RC2 to RC3 chop circlerad chop circlerad
arrow from RC2 to RC4 chop circlerad chop circlerad
arrow from RC1 to RC5 chop circlerad chop circlerad
arrow from RC1 to RC6 chop circlerad chop circlerad
arrow from RC5 to RC3 chop circlerad chop circlerad
arrow from RC6 to RC4 chop circlerad chop circlerad
.PE
\centerline{\raise 1em\box\graph}
\caption{Failures and failure trace equivalent, but not ready or
ready trace equivalent\hlabel{FTvsR}}
\vspace{-8pt}
\end{counterexample}
\paragraph{\Purple{Explicit model}}
In ready trace semantics a process can be represented by a ready trace
equivalence class of process graphs, or equivalently by its ready
trace set, possibly in the normal form of \df{ready trace}. The next
proposition gives an explicit characterization of the domain
$\dl{R}\IT$ of ready trace sets in this form of process graphs with
multiple roots.
\begin{definition}{explicit ready trace}
The \phrase{ready trace domain} $\dl{R}\IT$ is the set of subsets
${\rm RT}$ of $\pow(Act) \times (Act \times \pow(Act))^*$ satisfying
$$\begin{array}{ll}
\mbox{RT1} & \exists X (X \in \rm RT),\\
\mbox{RT2} & \sigma X \in {\rm RT} \wedge a \in X \Leftrightarrow
\exists Y(\sigma X a Y \in \rm RT).
\end{array}$$
\end{definition}
\begin{proposition}{explicit ready trace}
${\rm RT} \in \dl{R}\IT \Leftrightarrow \exists g \in \IG^{\it mr}:
RT_N(g)={\rm RT}$.\\
\pf ``$\Leftarrow$'' is evident.
For ``$\Rightarrow$'' let ${\rm RT} \in \dl{R}\IT$.
Define the \phrase{canonical graph} $G(\mbox{RT})$ of RT by
\begin{itemise}
\item $\nd(G(\mbox{RT}))=\rm RT$,
\item $\mbox{\sc roots}(G(\mbox{RT}))=\{X \subseteq Act \mid X \in \rm RT\}$,
\item $\ed(G(\mbox{RT}))=\{(\sigma ,a, \sigma aY) \mid
\sigma, \sigma a Y \in \nd(G(\mbox{RT}))\}$.
\end{itemise}
By RT1, $\mbox{\sc roots}(G(\mbox{RT})) \neq \emptyset$. Using RT2,
$G(\mbox{RT})$ is connected. So $G(\mbox{RT}) \in \IG^{\it mr}$.
Moreover, for every path $\pi \in \pd(G(\mbox{RT}))$ one has
$RT_N(\pi)=end(\pi)$. Hence $RT_N(G(\mbox{RT}))=\rm RT$.
\end{proposition}
If $\mbox{\sc roots}(g)$ were allowed to be empty, a
characterization is obtained by dropping requirement RT1. A
characterization of the domain of ready trace sets of process graphs
with single roots is given by strengthening RT1 to $\exists! X (X \in
\rm RT)$, where $\exists! X$ means ``there is exactly one $X$ such
that''.
\paragraph{\Purple{Infinite processes}}\textBrown
An infinitary version of ready trace semantics ($\it RT^\infty$) is
defined analogously to infinitary failure trace semantics. A finite
version is not so straightforward; a definition will be proposed in
the next section.
\begin{definition}{infinitary ready trace}
$\sigma_1 \sigma_2 \cdots \in (Act \cup \pow(Act))^\infty$ is an
\phrase{infinite ready trace} of a process $p \in \IP$ if there are
processes $p_1 , p_2 , ...$ such that $p \gort{\sigma_1} p_1
\gort{\sigma_2} \cdots$. Let $RT^\infty (p)$ denote the set of
infinite ready traces of $p$. Two processes
$p$ and $q$ are \phrase{infinitary ready trace equivalent}, notation
$p =_{RT}^\infty q$, if $RT^\infty (p)=RT^\infty (q)$ and $RT (p)=RT (q)$.
\end{definition}
Clearly, $RT \prec RT^\infty$; \ctr{infinitary} shows
that the inclusion is strict. Moreover $FT^\infty \prec RT^\infty$.
\begin{proposition}{image finite ready trace}
Let $p$ and $q$ be image finite processes. Then
$p =_{RT} q \Leftrightarrow p =_{RT}^\infty q$.
\\\pf Exactly as the corresponding part of \pr{image finite failure trace}.
\end{proposition}
\ctr{infinitary RS} will show that in \pr{image finite
ready trace} {\em both} $p$ and $q$ need to be image finite.
\textBlack
\section{{Readiness semantics and possible-futures semantics}}\hlabel{readiness}
\paragraph{\Purple{Testing scenario}}
The \phrase{readiness machine} has the same layout as the ready trace
machine, but, like the failures machine, cannot recover from an idle
period. By means of the lights the menu of initial actions of the
remaining behaviour of an idle process can be recorded, but this happens
at most once during an observation of a process, namely at the end.
An observation either results in a trace of the process, or in a pair of
a trace and a menu of actions by which the observation could have been
extended had the observer not blocked them. Such a pair is
called a \phrase{ready pair} of the process, and the set of all ready pairs
of a process is its \phrase{ready set}.
\paragraph{\Purple{Definition \ref{readiness}}}
$\rec{ \sigma , X} \in Act^\ast \times \pow
(Act)$ is a \phrase{ready pair} of a process $p$ if there is a
process $q$ such that $p \goto{\sigma} q$ and $I(q) = X$. Let $R(p)$
denote the set of ready pairs of $p$. Two processes $p$ and $q$ are
\phrase{ready equivalent}, notation $p =_R q$, if $R(p)=R(q)$. In
\phrase{readiness semantics} ($R$) two processes are identified iff they are
ready equivalent.
\paragraph{\Purple{Modal characterization}}
\begin{definition}{modal readiness}
The set $\fL_{R}$ of \phrase{readiness formulas}
over $Act$ is defined recursively by:
\begin{itemise}
\item
$\top \in \fL_{R}$.
\item
$X \in \fL_{R}$ for $X \subseteq Act$.
\item
If $\phi \in \fL_{R}$ and $a \in Act$ then $a \phi \in \fL_{R}$.
\end{itemise}
The \phrase{satisfaction relation} $\models \; \subseteq \IP \times
\fL_{R}$ is defined recursively by:
\begin{itemise}
\item
$p \models \top$ for all $p \in \IP$.
\item
$p \models X$ if $I(p) = X$.
\item
$p \models a \phi$ if for some $q \in \IP$: $p \goto{a} q$ and
$q \models \phi$.
\end{itemise}
\end{definition}
$X$ represents the observation of a menu.
A readiness formula satisfied by a process $p$ represents either a
trace (if it has the form $a_1 a_2 \cdots a_n \top$) or a ready pair
(if it has the form $a_1 a_2 \cdots a_n X$). Hence one has
\begin{proposition}{modal readiness}
$p =_{R} q ~\Leftrightarrow~ \forall \phi \in \fL_{R} ( p
\models \phi \Leftrightarrow q \models \phi )$.
\end{proposition}
\paragraph{\Purple{Process graph characterization}}
Let $g \in \IG^{\it mr}$ and $\pi \in \pd(g)$. The \phrase{ready pair} of $\pi$
is given by $R(\pi) := \rec{T(\pi),I(end(\pi))}$. $R(g)$ can now be
characterized by:
\begin{proposition}{readiness paths}
$R(g) = \{R(\pi) \mid \pi \in \pd(g)\}$.
\end{proposition}
\paragraph{\Purple{Classification}}
%\begin{proposition}{FvsRT}
$F \prec R \prec RT$, but $R$ and $FT$ are independent.
\\
\pf For ``$F \preceq R$'' it suffices to show that $F(p)$ can be
expressed in terms of $R(p)$:
$$
\rec{ \sigma ,X} \in F(p) ~~ \Leftrightarrow ~~ \exists Y \subseteq
Act: \rec{ \sigma , Y} \in R(p) ~\wedge~ X \cap Y = \emptyset .
$$
For ``$R \preceq RT$'' it suffices to show that $R(p)$ can be
expressed in terms of $RT(p)$:
$$
\rec{ \sigma ,X} \in R(p)~~ \Leftrightarrow ~~ \sigma X \in RT(p).
$$
``$R \not\succeq FT$'' (and hence ``$R \not\succeq RT$'' and ``$F
\not\succeq FT$'') follows from \ctr{RvsFT}, in which
$R({\it left})=R({\it right})$ but $FT({\it left}) \neq FT({\it right})$. The first
statement follows with \pr{readiness paths}. Both graphs have 9 paths
starting from the root, and hence 9 ready pairs. These are easily seen
to be the same at both sides; in the second graph only 4 ready pairs swapped
places. The second statement follows since $a\{b\}ce \in FT({\it left})-FT({\it right})$.
``$R \not\preceq FT$'' (and hence ``$R \not\preceq F$'' and ``$RT
\not\preceq FT$'') follows from \ctr{FTvsR}, in which
$FT({\it left})=FT({\it right})$ but $R({\it left}) \neq R({\it right})$. The first
statement follows from \cor{augmentation}, since the new maximal paths at
the right-hand side are both failure trace augmented by the two
maximal paths both sides have in common. The second one follows since
$\rec{a,\{b,c\}} \in R({\it right})-R({\it left})$. \hfill $\Box$
%\end{proposition}
\paragraph{\Purple{Explicit model}}
In readiness semantics a process can be represented by a ready
equivalence class of process graphs, or equivalently by its ready set.
The next proposition gives an explicit characterization of the
domain $\dl{R}$ of ready sets of process graphs with multiple roots.
\begin{definition}{explicit readiness}
The \phrase{readiness domain} $\dl{R}$ is the set of subsets ${\rm R}$ of
$Act^\ast \times \pow(Act)$ satisfying
\begin{center}
\begin{tabular}{ll}
R1 & $\exists X (\rec{\epsilon, X} \in \rm R)$,\\
R2 & $\exists X (\rec{\sigma, X \cup \{a\}} \in \rm R) \Leftrightarrow
\exists Y(\rec{\sigma a,Y}\in \rm R)$.
\end{tabular}
\end{center}
\end{definition}
\begin{proposition}{explicit readiness}
${\rm R} \in \dl{R} \Leftrightarrow \exists g \in \IG^{\it mr}:
R(g)={\rm R}$.\\
\pf ``$\Leftarrow$'' is evident.
For ``$\Rightarrow$'' let ${\rm R} \in \dl{R}$.
Define the \phrase{canonical graph} $G(\mbox{R})$ of R by
\begin{itemise}
\item $\nd(G(\mbox{R}))=\rm R$,
\item $\mbox{\sc
roots}(G(\mbox{R}))=\{\rec{\epsilon,X}\mid\rec{\epsilon,X}\in \rm R\}$,
\item $\ed(G(\mbox{R}))=\{(\rec{\sigma,X},a,\rec{\sigma a,Y}) \mid
\rec{\sigma,X},\rec{\sigma a,Y} \in \nd(G(\mbox{R})) \wedge a \in X\}$.
\end{itemise}
By R1, $\mbox{\sc roots}(G(\mbox{R})) \neq \emptyset$. Using R2,
$G(\mbox{R})$ is connected. Hence $G(\mbox{R}) \in \IG^{\it mr}$.
Moreover, for every path $\pi \in \pd(G(\mbox{R}))$ one has
$R(\pi)=end(\pi)$. From this it follows that $R(G(\mbox{R}))=\rm R$.
\end{proposition}
If $\mbox{\sc roots}(g)$ were allowed to be empty, a characterization
is obtained by dropping requirement R1.
A characterization of the domain of ready sets of process graphs
with single roots is given by strengthening R1 to $\exists! X
(\rec{\epsilon, X} \in \rm R)$, where $\exists! X$ means ``there is
exactly one $X$ such that''.
\paragraph{\Purple{Possible-futures and acceptance-refusal
semantics}\hname{possible-futures}}
Readiness semantics was proposed by {\sc Olderog \& Hoare} \hcite{OH86}.
Two preliminary versions stem from {\sc Rounds \& Brookes}
\hcite{RB81}: in \phrase{possible-futures semantics} ($\it PF$) the
menu consists of the entire trace set of the remaining behaviour of an
idle process, instead of only the set of its initial actions; in
\phrase{acceptance-refusal semantics} a menu may be any finite subset
of initial actions, while also the finite refusal sets of
\sect{failures} are observable.
\begin{definition}{possible futures} $\rec{ \sigma , X} \in Act^\ast
\times \pow (Act^\ast )$ is a \phrase{possible future} of a process
$p$ if there is a process $q$ such that $p \goto{\sigma} q$ and
$T(q) = X$. Let ${ \it PF}(p)$ denote the set of possible futures of $p$.
Two processes $p$ and $q$ are \phrase{possible-futures equivalent},
notation $p =_{\it PF} q$, if ${\it PF}(p)={\it PF}(q)$.
\end{definition}
The modal and process graph characterizations of possible-futures
semantics are straightforward, but a plausible testing scenario has
not been proposed. Trivially $R \preceq {\it PF}$. That the reverse
does not hold, and even that ${\it PF} \not\preceq RT$, will follow
from \ctr{RSvs2S}. \ctr{PFvsFT} shows
that $FT \not\preceq {\it PF}$. There ${\it PF}({\it left}) = {\it
PF}({\it right})$ but $FT({\it left}) \neq FT({\it right})$. As for
the first statement, both graphs have 18 paths starting from the root,
and hence 18 possible futures. These are easily seen to be the same at
both sides; in the second graph only 2 possible futures swapped
places. The second statement follows since $a\{b\}a\{b\}cd \in
FT({\it left})-FT({\it right})$. Thus possible-futures semantics is
incomparable with failure trace and ready trace semantics.
\begin{counterexample}[htb]
\Black{
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
L1: circle at (-4.5,4)
LC1: circle at L1 +(-1,-1)
L2: circle at LC1 +(-1.5,-.5)
LC2: circle at LC1 +(-.5,-1)
LC3: circle at last circle +(0,-1)
LC6: circle at last circle +(0,-1)
LC7: circle at LC2 +(-1,-1)
LC4: circle at LC1 +(.5,-1)
LC5: circle at last circle +(0,-1)
LC8: circle at last circle +(0,-1)
"$a$" at L1+(-.65,-.35)
"$b$" at LC1+(-.75,-.1)
"$a$" at LC1+(-.4,-.4)
"$a$" at LC1+(.4,-.4)
"$c$" at LC2+(.2,-.5)
"$c$" at LC4+(-.2,-.5)
"$b$" at LC2+(-.65,-.35)
"$d$" at LC2+(.2,-1.5)
"$e$" at LC4+(-.2,-1.5)
arrow from L1 to LC1 chop
arrow from LC1 to L2 chop
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC2 to LC3 chop circlerad chop circlerad
arrow from LC3 to LC6 chop circlerad chop circlerad
arrow from LC2 to LC7 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
arrow from LC4 to LC5 chop circlerad chop circlerad
arrow from LC5 to LC8 chop circlerad chop circlerad
LC1: circle at L1 +(1,-1)
LC2: circle at LC1 +(-.5,-1)
LC3: circle at last circle +(0,-1)
LC6: circle at last circle +(0,-1)
LC4: circle at LC1 +(.5,-1)
LC5: circle at last circle +(0,-1)
LC8: circle at last circle +(0,-1)
LC9: circle at LC4 +(1,-1)
"$a$" at L1+(.65,-.35)
"$a$" at LC1+(-.4,-.4)
"$a$" at LC1+(.4,-.4)
"$c$" at LC2+(.2,-.5)
"$c$" at LC4+(-.2,-.5)
"$b$" at LC4+(.65,-.35)
"$d$" at LC2+(.2,-1.5)
"$e$" at LC4+(-.2,-1.5)
"$a(b+a(b+cd)+ace) + a(acd+a(ce+b))$" at L1+(0,-5)
arrow from L1 to LC1 chop
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC2 to LC3 chop circlerad chop circlerad
arrow from LC3 to LC6 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
arrow from LC4 to LC5 chop circlerad chop circlerad
arrow from LC5 to LC8 chop circlerad chop circlerad
arrow from LC4 to LC9 chop circlerad chop circlerad
B3: box invis "$=_{\it PF}$" at (0,3)
B3: box invis "$\neq_{\it FT}$" at (0,2)
B3: box invis "$\neq_{S}~$" at (0,1)
L1: circle at (4.5,4)
LC1: circle at L1 +(-1,-1)
LC2: circle at LC1 +(-.5,-1)
LC3: circle at last circle +(0,-1)
LC6: circle at last circle +(0,-1)
LC7: circle at LC2 +(-1,-1)
LC4: circle at LC1 +(.5,-1)
LC5: circle at last circle +(0,-1)
LC8: circle at last circle +(0,-1)
"$a$" at L1+(-.65,-.35)
"$a$" at LC1+(-.4,-.4)
"$a$" at LC1+(.4,-.4)
"$c$" at LC2+(.2,-.5)
"$c$" at LC4+(-.2,-.5)
"$b$" at LC2+(-.65,-.35)
"$d$" at LC2+(.2,-1.5)
"$e$" at LC4+(-.2,-1.5)
"$a(a(b+cd)+ace) + a(acd+a(ce+b)+b)$" at L1+(0,-5)
arrow from L1 to LC1 chop
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC2 to LC3 chop circlerad chop circlerad
arrow from LC3 to LC6 chop circlerad chop circlerad
arrow from LC2 to LC7 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
arrow from LC4 to LC5 chop circlerad chop circlerad
arrow from LC5 to LC8 chop circlerad chop circlerad
LC1: circle at L1 +(1,-1)
L2: circle at LC1 +(1.5,-.5)
LC2: circle at LC1 +(-.5,-1)
LC3: circle at last circle +(0,-1)
LC6: circle at last circle +(0,-1)
LC4: circle at LC1 +(.5,-1)
LC5: circle at last circle +(0,-1)
LC8: circle at last circle +(0,-1)
LC9: circle at LC4 +(1,-1)
"$a$" at L1+(.65,-.35)
"$b$" at LC1+(.75,-.1)
"$a$" at LC1+(-.4,-.4)
"$a$" at LC1+(.4,-.4)
"$c$" at LC2+(.2,-.5)
"$c$" at LC4+(-.2,-.5)
"$b$" at LC4+(.65,-.35)
"$d$" at LC2+(.2,-1.5)
"$e$" at LC4+(-.2,-1.5)
arrow from L1 to LC1 chop
arrow from LC1 to L2 chop
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC2 to LC3 chop circlerad chop circlerad
arrow from LC3 to LC6 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
arrow from LC4 to LC5 chop circlerad chop circlerad
arrow from LC5 to LC8 chop circlerad chop circlerad
arrow from LC4 to LC9 chop circlerad chop circlerad
.PE
\centerline{\raise 1em\box\graph}
\caption{Possible-futures equivalent, but not failure trace or
simulation equivalent\hlabel{PFvsFT}}}
\end{counterexample}
\begin{definition}{acceptance-refusal} $\rec{ \sigma , X, Y} \in Act^\ast
\times \pow (Act) \times \pow (Act)$ is an \phrase{acceptance-refusal
triple} of a process $p$ if $X$ and $Y$ are finite and there is a
process $q$ such that $p \goto{\sigma} q$, $X \subseteq I(q)$ and $Y
\cap I(q) = \emptyset$. Let $AR(p)$ denote the set of
acceptance-refusal triples of $p$. Two processes $p$ and $q$ are
\phrase{acceptance-refusal equivalent}, notation $p =_{\it AR} q$, if
$AR(p)=AR(q)$.
\end{definition}
The modal and process graph characterizations are again straightforward.
A motivating testing scenario would be the same as for readiness
semantics, except that at any time only finitely many switches can be
set on free, and only finitely many lamps can be investigated in a
finite amount of time. Clearly $p =_R q \Rightarrow p =_{\it AR} q$, for
$$AR(p) = \{\rec{\sigma,X,Y} \mid \exists \rec{\sigma,Z} \in R(p): X,Y
\mbox{ finite } \wedge X \subseteq Z \wedge Y \cap Z = \emptyset\}.$$
That this implication is strict follows from \ctr{finite}.
It is not difficult to see that for finitely branching processes
acceptance-refusal equivalence coincides with ready equivalence:
$\rec{ \sigma , X}$ is a ready pair of a process $p$ iff $p$ has an
acceptance-refusal triple $\rec{ \sigma , X, Y}$ with $X \cup Y =
Cont_p (\sigma )$ (cf.\ \df{cont}).
\paragraph{\Purple{Infinite processes}}\textBrown
Note that if in \df{acceptance-refusal} the sets $X$ and $Y$ are
allowed to be infinite the resulting equivalence would be ready
equivalence again.
Namely $\rec{ \sigma , X}$ is a ready pair of a process $p$ iff $p$
has such an acceptance-refusal triple $\rec{ \sigma , X, Act-Y}$.
Thus acceptance-refusal semantics can be regarded as the finite
variant of readiness semantics, and will therefore be denoted $R^-$.
The infinitary variant of readiness semantics ($R^\infty$), motivated
by observations that may last forever, is defined analogously to $F^\infty$:
\begin{definition}{infinitary ready}
$p$ and $q$ are \phrase{infinitary ready equivalent} if
$R(p)=R(q)$ and $T^\infty (p)=T^\infty (q)$.
\end{definition}
Clearly, $R \prec R^\infty$; by \ctr{infinitary}
the inclusion is strict. Moreover, $F^\infty \prec R^\infty \prec RT^\infty$.
\begin{proposition}{image finite ready}
Let $p$ and $q$ be image finite processes. Then $p =_R q
\Leftrightarrow p =_R^\infty q$.
\\\pf ``$\Leftarrow$'' has been established for all processes, and
the second ``$\Rightarrow$'' follows immediately from \pr{koenig}
(as $p =_R q \Rightarrow p =_T q \Rightarrow p =_T^\infty q$).
\end{proposition}
\begin{proposition}{image finite readiness}
Let $p,q \in \IP$ with $p$ image finite. Then $p =_{\it AR} q
\Leftrightarrow p =_R q$.
\\\pf ``$\Leftarrow$'' holds for all processes.
I will prove ``$\Rightarrow$'' assuming that $p$ has the property that
for any $\sigma \in Act^*$ there are only finitely many ready pairs
$\rec{\sigma,X} \in R(p)$. This property (call it {\em RIF\/}) is clearly
implied by image finiteness. So suppose $p$ has the RIF property
and $AR(p)=AR(q)$. I will show that $R(p)=R(q)$.
Suppose $\rec{\sigma , Y} \not\in R(p)$. By RIF there are only
finitely many ready pairs $\rec{\sigma,X_i} \in R(p)$.
For each of them choose an action $a_i \in Y-X_i$ or $b_i \in X_i-Y$.
Let $U$ be the set of all those $a_i$'s, and $V$ the set of the
$b_i$'s. Then $\rec{\sigma ,U,V} \not\in AR(p)=AR(q)$ and hence
$\rec{\sigma,Y} \not\in R(q)$.
It follows that $R(q) \subseteq R(p)$, and thus $q$ has the property
RIF as well. Now the same argument applies in the other direction,
yielding $R(p) \subseteq R(q)$.
\end{proposition}
Inspired by the definition of $R^-$, a finite version of ready trace
semantics ($RT^-$) can be defined likewise. Here I will just give its
modal characterization.
\begin{definition}{finite modal ready trace}
The set $\fL_{RT}^-$ of \phrase{finite ready trace formulas}
over $Act$ is given by:
\begin{itemise}
\item
$\top \in \fL_{RT}^-$.
\item
If $\phi \in \fL_{RT}^-$ and $X \subseteq_{\it fin} Act$ then
$X\phi \in \fL_{RT}^-$ and $\widetilde{X}\phi \in \fL_{RT}^-$.
\item
If $\phi \in \fL_{RT}^-$ and $a \in Act$ then $a \phi \in \fL_{RT}^-$.
\end{itemise}
The \phrase{satisfaction relation}
$\models \; \subseteq \IP \times \fL_{RT}^-$ is given by
the usual clauses for $\top$ and $a\phi$, and:
\begin{itemise}
\item
$p \models X\phi$ if $X \subseteq I(p)$ and $p \models \phi$.
\item
$p \models \widetilde{X}\phi$ if $I(p) \cap X=\emptyset$ and $p \models \phi$.
\end{itemise}
Processes $p$ and $q$ are \phrase{finite-ready trace equivalent},
notation $p =_{RT}^- q$, if $\forall \phi \in \fL_{RT}^-
(p \models \phi \Leftrightarrow q \models \phi )$.
\end{definition}
As these formulas are expressible in terms of the ones of \df{modal
ready trace}, one has $RT^- \prec RT$; \ctr{finite} shows
that the inclusion is strict. Also $FT^- \prec RT^-$ and
$F^- \prec R^- \prec RT^-$.
\begin{proposition}{image finite RT}
Let $p,q \in \IP$ with $p$ image finite. Then $p =_{RT}^- q
\Leftrightarrow p =_{RT} q$.
\\\pf ``$\Leftarrow$'' holds for all processes.
``$\Rightarrow$'' follows just as in \pr{image finite readiness},
using the property that
for any $a_1a_2\cdots a_n \in Act^\ast$ there are only finitely many
normal ready traces $X_0 a_1 X_1 a_2 \cdots a_n X_n \in RT_N(p)$.
\end{proposition}
Unlike the semantics $T$ to $RT$, possible-futures semantics
distinguishes between the two processes of \ctr{infinitary}:
$\rec{a,a^*} \in \it PF({right})-PF({left})$.
Still, $T^\infty \not\prec \it PF$, as can be seen from the variant of
\ctr{infinitary} in which the left-hand process is
appended to the endnodes of both processes. The so obtained systems
have the same possible futures, including $\{\rec{a^n,a^*} \mid n \in
\IN\}$, but only the right-hand side has an infinite trace.
For the sake of completeness I include a definition of infinitary
possible-futures semantics ($\it PF^\infty$), such that $\it PF \prec
PF^\infty$ and $R^\infty \prec \it PF^\infty$. A finite variant of $\it PF$
has not been explored.
\begin{definition}{infinitary possible futures} $\rec{ \sigma , X} \in
Act^\ast \times \pow (Act^\ast \cup Act^\omega)$ is an \phrase{infinitary possible
future} of a process $p$ if there is a process $q$ such that $p
\goto{\sigma} q$ and $T(q) \cup T^\infty(q) = X$. Let ${ \it
PF^\infty}(p)$ denote the set of infinitary possible futures of $p$.
Two processes $p$ and $q$ are \phrase{infinitary possible-futures
equivalent}, notation $p =_{\it PF}^\infty q$, if ${\it
PF^\infty}(p)={\it PF^\infty}(q)$.
\textBlack
\end{definition}
\section{{Simulation semantics}}\hlabel{simulation}
The following concept of \phrase{simulation} occurs frequently in the
literature (see e.g.\ {\sc Park} \hcite{Pa81}).
\paragraph{\Purple{Definition \ref{simulation}}}
A \phrase{simulation} is a binary relation $R$ on processes,
satisfying, for $a \in Act$:
\begin{itemise}
\item
if $pRq$ and $p \goto{a} p '$, then $\exists q ' : ~q \goto{a} q '$
and $p ' Rq '$.
\end{itemise}
Process $p$ {\em can be simulated by} $q$, notation $ p \sii q$, if
there is a simulation $R$ with $pRq$.
\\
$ p $ and $q$ are \phrase{similar}, notation $p \si q$,
if $ p \sii q $ and $ q \sii p $.
\begin{propositioni}{simulation equivalence}
Similarity is an equivalence relation on the domain of processes.
\\
\pf Symmetry is immediate, so it has to be checked that $p \sii p$,
and $p \sii q ~\wedge~ q \sii r~ \Rightarrow ~ p \sii r$.
\begin{itemise}
\item
The identity relation is a simulation with $pRp$.
\item
If $R$ is a simulation with $pRq$ and $S$ is a simulation with $qSr$,
then the relation $R \concatenate S$, defined by $x(R \concatenate
S)z$ iff $\exists y: xRy \wedge ySz$, is a simulation with $p(R
\concatenate S)r$. \hfill $\Box$
\end{itemise}
\end{propositioni}
Hence the relation will be called \phrase{simulation equivalence}.
In \phrase{simulation semantics} ($S$) two processes are
identified iff they are simulation equivalent.
\paragraph{\Purple{Testing scenario and modal characterization}}
The testing scenario for simulation semantics resembles that for trace
semantics, but in addition the observer is, at any time during a run
of the investigated process, capable of making arbitrarily many copies
of the process in its present state and observing them independently.
Thus an observation yields a tree rather than a sequence of actions.
Such a tree can be coded as an expression in a simple modal language.
\begin{definition}{modal simulation}
The class $\fL_S$ of \phrase{simulation formulas} over
$Act$ is defined recursively by:
\begin{itemise}
\item
If $I$ is a set and $\phi_i \in \fL_S$ for $i \in I$ then $\bigwedge_{i\in
I}\phi_i \in \fL_S$.
\item
If $\phi \in \fL_S$ and $a \in Act$ then $a \phi \in \fL_S$.
\end{itemise}
The \phrase{satisfaction relation} $\models \; \subseteq \IP \times
\fL_S$ is defined recursively by:
\begin{itemise}
\item
$p \models \bigwedge_{i\in I}\phi_i$ if $p \models \phi_i$ for all $i \in I$.
\item
$p \models a \phi$ if for some $q \in \IP$: $p \goto{a} q$ and
$q \models \phi$.
\end{itemise}
Let $S(p)$ denote the class of simulation formulas satisfied by the process $p$:
$S(p)=\{\phi \in \fL_S \mid p \models \phi \}$.
Write $p \sqsubseteq_S q$ if $S(p) \subseteq S(q)$ and $p =_S q$ if $S(p) = S(q)$.
\end{definition}
Write $\top$ for $\bigwedge_{i \in \emptyset} \phi_i$, and $\phi_1 \wedge
\phi_2$ for $\bigwedge_{i \in \{1,2\}}\phi_i$. It turns out that
${\fL}_T$ is a sublanguage of $\fL_S$.
\begin{proposition}{modal simulation}
$p \sii q \Leftrightarrow p \sqsubseteq_S q$. Hence $p \si q
\Leftrightarrow p =_S q$.
\\\pf
For ``$\Rightarrow$'' I have to prove that for any simulation $R$ and
for all $\varphi \in \fL_S$ one has
$$pRq \Rightarrow (p \models \varphi \Rightarrow q \models \varphi).$$ I will do so with structural induction on $\varphi$.
Suppose $pRq$.
\begin{list}{{\bf --}}{\labelwidth\leftmargini\advance\labelwidth-\labelsep
\topsep 2pt \itemsep 1pt \parsep 1pt}
\item Let $p \models a\varphi$. Then there is a
$p' \in \IP$ with $p\goto{a} p'$ and $p' \models \varphi$. As
$R$ is a simulation, there must be a $q'\in \IP$ with
$q\goto{a}q'$ and $p'Rq'$. So by induction $q' \models \varphi$,
and hence $q \models a\varphi$.
\item $p \models \bigwedge_{i\in I}\varphi_i \Leftrightarrow
\forall i\!\in\!I(p \models \varphi_i)\stackrel{\rm ind.}{\Longrightarrow}
\forall i\!\in\!I(q \models \varphi_i) \Leftrightarrow q \models
\bigwedge_{i\in I}\varphi_i$.
\end{list}
\pagebreak[3]
For ``$\Leftarrow$'' it suffices to establish that $\sqsubseteq_S$ is
a simulation.\\
%\begin{list}{{\bf --}}{\labelwidth\leftmargini\advance\labelwidth-\labelsep
%\topsep 2pt \itemsep 1pt \parsep 1pt}\item
Suppose $p \sqsubseteq_S q$ and $p \goto{a} p'$.
I have to show that $\exists q'\in \IP$ with $q\goto{a}q'$ and
$p' \sqsubseteq_S q'$. Let $Q'$ be $$\{q' \in \IP \mid
q\goto{a} q' \wedge p' \not\sqsubseteq_S q'\}.$$ By \df{LTS} $Q'$ is a set.
For every $q' \in Q'$ there is a formula $\varphi_{q'} \in S(p')- S(q')$.
Now $$a\bigwedge_{q'\in Q'} \varphi_{q'} \in S(p) \subseteq S(q),$$ so
there must be a $q'\in \IP$ with $q\goto{a}q'$ and $q'\not\in Q'$,
which had to be shown.
%\hfill $\Box$
%\end{list}
\end{proposition}
\paragraph{\Purple{Process graph characterization}}
Simulation equivalence can also be characterized by means of relations
between the nodes of two process graphs, rather than between process
graphs themselves.
\begin{definition}{simulation graph}
Let $g,h \in \IG$. A \phrase{simulation} of $g$ by $h$ is a
binary relation $R \subseteq \nd (g) \times \nd (h)$, satisfying:
\begin{itemise}
\item
$\rt (g) R \rt (h)$.
\item
If $sRt$ and $(s,a,s') \in \ed (g)$, then there is an edge
$(t,a,t') \in \ed (h)$ such that $s'Rt'$.
\end{itemise}
\end{definition}
This definition is illustrated in \fig{a simulation}. Solid
lines indicate what is assumed, dashed lines what is required. It
follows easily that $g \sii h$ iff there exists a simulation of $g$ by $h$.
\begin{figure}[htb]
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
LC1: circle at (-3,5)
LC2: circle at (-3,4)
LC3: circle at (-3,3)
box invis "$a$" at (-3.2,3.5)
arrow from LC2 to LC3 chop circlerad chop circlerad
arc from (-1.5,2.5) to LC1.se radius 3
arc from LC1.sw to (-4.5,2.5) radius 3
RC1: circle at (3,5)
RC2: circle at (3,4)
RC3: circle at (3,3)
box invis "$a$" at (2.8,3.5)
arrow from RC2 to RC3 chop circlerad chop circlerad dashed
arc cw from (1.5,2.5) to RC1.sw radius 3
arc cw from RC1.se to (4.5,2.5) radius 3
line from LC1 to RC1 chop circlerad chop circlerad dashed
line from LC2 to RC2 chop circlerad chop circlerad
line from LC3 to RC3 chop circlerad chop circlerad dashed
.PE
\centerline{\raise 1em\box\graph}
\caption{A simulation\hlabel{a simulation}}
\end{figure}\\
For process graphs with multiple roots, the first requirement of
\df{simulation graph} generalizes to
\begin{itemise}
\item
$\forall s \in \mbox{\sc roots} (g) \, \exists t \in \mbox{\sc roots}(h):
s R t$.
\end{itemise}
\paragraph{\Purple{Classification}}
%\begin{proposition}{TvsS}
Simulation semantics ($S$) is finer than trace semantics ($T \prec S$),
but independent of $CT$, $F$, $R$, $FT$, $RT$ and $\it PF$.
\\
\pf ``$T \preceq S$'' follows since $\fL_T$ is a sublanguage
of $\fL_S$.\\
``$S \not\succeq CT$'' (and hence ``$S \not\succeq RT$'', ``$S
\not\succeq \it PF$'' etc.) follows from \ctr{TvsCT}.
There ${\it left} \neq_{CT} {\it right}$, although ${\it left} \si
{\it right}$; the construction of the two simulations is left to the reader.\\
``$S \not\preceq RT$'' (and hence ``$S \not\preceq T$'' etc.) follows
from \ctr{SvsRT}. There $RT({\it left}) =
RT({\it right})$, but $S({\it left}) \neq S({\it right})$. The first
statement follows from \pr{ready trace paths} and the insight that it
suffices to check the two ready traces contributed by the maximal
paths; these are the same for both graphs. The second statement
follows since $a(bc\top \wedge bd\top) \in S({\it right})-S({\it left})$.
\begin{counterexample}[ht]\Black{
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
LC1: circle at (-4,3)
LC2: circle at (-5,2)
LC3: circle at (-5,1)
LC6: circle at (-5,0)
LC4: circle at (-3,2)
LC5: circle at (-3,1)
LC7: circle at (-3,0)
LB1: box invis "$a$" at (-4.65,2.65)
LB2: box invis "$a$" at (-3.35,2.65)
LB3: box invis "$b$" at (-5.2,1.5)
LB4: box invis "$b$" at (-2.8,1.5)
LB3: box invis "$c$" at (-5.2,.5)
LB4: box invis "$d$" at (-2.8,.5)
circle invis "$abc+abd$" at (-4,-1)
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC2 to LC3 chop circlerad chop circlerad
arrow from LC3 to LC6 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
arrow from LC4 to LC5 chop circlerad chop circlerad
arrow from LC5 to LC7 chop circlerad chop circlerad
B3: box invis "$=_{\it PW}$" at (0,2.4)
B3: box invis "$=_{RT}$" at (0,1.5)
B3: box invis "$\neq_S~$" at (0,.6)
RC1: circle at (4,3)
RC2: circle at (4,2)
RC3: circle at (3,1)
RC5: circle at (3,0)
RC4: circle at (5,1)
RC6: circle at (5,0)
RB1: box invis "$a$" at (3.8,2.5)
RB2: box invis "$b$" at (3.35,1.65)
RB3: box invis "$b$" at (4.65,1.65)
RB4: box invis "$c$" at (2.8,.5)
RB5: box invis "$d$" at (5.2,.5)
circle invis "$a(bc+bd)$" at (4,-1)
arrow from RC1 to RC2 chop circlerad chop circlerad
arrow from RC2 to RC3 chop circlerad chop circlerad
arrow from RC2 to RC4 chop circlerad chop circlerad
arrow from RC3 to RC5 chop circlerad chop circlerad
arrow from RC4 to RC6 chop circlerad chop circlerad
.PE
\centerline{\raise 1em\box\graph}
\caption{Possible worlds and ready trace equivalent, but not
simulation equivalent\hlabel{SvsRT}}}
\end{counterexample}
\\
``$S \not\preceq \it PF$'' follows from \ctr{PFvsFT},
where ${\it PF}({\it left}) = {\it PF}({\it right})$ but
$S({\it left}) \neq S({\it right})$. The latter statement
follows since $a(b\top \wedge a(b\top \wedge cd\top)) \in S({\it
left})-S({\it right})$.
\hfill $\Box$
%\end{proposition}
\paragraph{\Purple{Infinite processes}}\textBrown
In order to make the testing scenario match its formalization in terms
of the modal language $\fL_S$ even for infinite processes, one
has to assume that the number of copies one can make at any time is
infinite. Moreover, although no single copy can be tested forever, due
to its infinite branching there may be no upperbound upon the
duration of an observation.
One might consider an even more infinitary testing scenario by
allowing observations to go on forever on some or all of the copies.
However, this would not give rise to a more discriminating equivalence;
ordinary simulation equivalence already preserves infinite traces.
\begin{proposition}{infinitary simulation} If $p \sii q$ then
$T^\infty(p) \subseteq T^\infty(q)$. Hence $T^\infty \prec S$.
\\\pf Suppose $R$ is a simulation with $p_0Rq_0$ and $a_1 a_2 \cdots
\in T^\infty(p_0)$. Then there are $p_1 , p_2 , ...$ such that $p_0
\goto{a_1} p_1 \goto{a_2} \cdots$. With induction on $i \in \IN$ it
follows that there are processes $q_{i+1}$ such that \mbox{$q_i
\;-\hspace{-11pt}\stackrel{\raisebox{-2pt}[0pt][0pt]{$\scriptstyle
a_{i+1}$}}{-}\hspace{-7pt} \rightarrow q_{i+1}$} and $p_{i+1}Rq_{i+1}$.
Hence $a_1 a_2 \cdots \in T^\infty(q_0)$.
\end{proposition}
The most radical way to make the testing scenario finitary is to
allow only finitely many copies to be made in any state of the
process. This also puts an upperbound on the duration of any
observation. Observations can now be modelled with simulation
formulas in which the index sets $I$ of the first clause of \df{modal
simulation} are always finite. The modal language containing such
simulation formulas can equivalently be defined by splitting the
construction $\bigwedge_{i \in I}$ into $\top$ and $\wedge$.
\begin{definition}{modal finitary simulation}
The set $\fL_S^\ast$ of \phrase{finitary simulation formulas} over
$Act$ is defined recursively by:
\begin{itemise}
\item
$\top \in \fL_S^\ast$.
\item
If $\phi , \psi \in \fL_S^\ast$ then $\phi \wedge \psi \in {\fL}_S^\ast$.
\item
If $\phi \in \fL_S^\ast$ and $a \in Act$ then $a \phi \in \fL_S^\ast$.
\end{itemise}
The \phrase{satisfaction relation} $\models \; \subseteq \IP \times
\fL_{S}^\ast$ is defined recursively by:
\begin{itemise}
\item
$p \models \top$ for all $p \in \IP$.
\item
$p \models \phi \wedge \psi$ if $p \models \phi$ and $p \models \psi$.
\item
$p \models a \phi$ if for some $q \in \IP$: $p \goto{a} q$ and
$q \models \phi$.
\end{itemise}
Let $S^*(p)$ denote the set of all finitary simulation formulas that are
satisfied by the process $p$:
$S^*(p)=\{\phi \in \fL_S^\ast \mid p \models \phi \}$.
Two processes $p$ and $q$ are \phrase{finitary simulation equivalent},
notation $p =_S^* q$, if $S^*(p)=S^*(q)$.
\end{definition}
In contrast, the equivalence $\si$ of \href{simulation}{Definition
\ref{simulation}} is then \phrase{infinitary simulation
equivalence}. Note however, that contrary to the previous equivalences
surveyed, the default version (the one meant when leaving out the
adjective ``finitary'' or ``infinitary'') is the infinitary one. In
general, I use the superscript $*$ for finitary versions and $\infty$
for infinitary versions. However, for the trace oriented equivalences
(Sections \ref{trace}--\ref{readiness}) I leave out the $*$, and for
the simulation oriented equivalences (Sections
\ref{simulation}--\ref{bisimulation}) I leave out the $\infty$.
The next proposition, and hence also the essence of \pr{modal simulation},
stems from {\sc Hennessy \& Milner} \hcite{HM85}. It
states that for image finite processes finitary and infinitary
simulation equivalence coincide.
\begin{proposition}{modal finitary simulation}
Let $p,q \in \IP$ be image finite processes.
Then $p \si q \Leftrightarrow p =_S^* q$.
\\
\pf Exactly as the proof of \pr{modal simulation}, but for
``$\Leftarrow$'' one shows that the relation
$\sqsubseteq_S^{i.f.}$ given by $p\sqsubseteq_S^{i.f.} q$ iff $p
\sqsubseteq_S q$ and $q$ is image finite is a simulation,
using that, as $q$ is image finite, $Q'$ must be finite.
\end{proposition}
In fact, this proposition is a special case of the following one,
which is proved likewise.
\begin{proposition}{modal kappa simulation}
Let $S_\kappa(p)$ denote the set of all simulation formulas satisfied
by $p$ in which all index sets have cardinality less than $\kappa$.
Let $p,q \in \IP$ and assume $|\{q' \mid q\goto{\sigma} q'\}|<\kappa$
for each $\sigma \in Act^\ast$.
Then $p \sii q \Leftrightarrow S_\kappa(p) \subseteq S_\kappa(q)$.
\end{proposition}
Although only $q$ needs to be image finite in order to obtain
$p \sii q \Leftrightarrow p \sqsubseteq_S^* q$,
\ctr{infinitary RS} will show that {\em both} $p$ and
$q$ need to be image finite in the statement of \pr{modal finitary
simulation}.
A less radical way to finitize the testing scenario for simulation
semantics is to allow infinitely many copies to be made in any state of the
process, but put a finite upperbound on the duration of any
observation. Observations can then be modelled with simulation
formulas in which the index sets can be arbitrary, but there is a
finite upperbound on the nesting of the construction $a\phi$ of the
second clause of \df{modal simulation}.
\begin{definition}{modal omega simulation}
Let $\fL_S^\omega = \bigcup_{n=0}^\infty \fL_S^n$, where
$\fL_S^n$ is given by:
\begin{itemise}
\item
If $I$ is a set and $\phi_i \in \fL_S^n$ for $i \in I$ then
$\bigwedge_{i\in I}\phi_i \in \fL_S^n$.
\item
If $\phi \in \fL_S^n$ and $a \in Act$ then $a \phi \in \fL_S^{n+1}$.
\end{itemise}
Let $S^\omega(p)=\{\phi \in \fL_S^\omega \mid p \models \phi \}$
and write $p =_S^\omega q$ if $S^\omega(p) = S^\omega(q)$.
\end{definition}
Now $p =_S q \Rightarrow p =_S^\omega q \Rightarrow p =_S^* q$, and
for image finite processes all three equivalences coincide.\linebreak[3]
For image infinite processes both implications are strict, as
illustrated by Counterexamples~\ref{omega} and~\ref{infinitary}.%
\begin{counterexample}
\Black{\footnotesize
.PS 6.5in
scale = 3.79
boxwid = 0.05; boxht = 0.05
circlerad = 0.1
arrowhead = 7
box invis "compare the processes with" at (-10,2.2)
box invis "and without the left branch" at (-10,1.7)
LC: circle at (0,2.4)
circle at (-11,1)
box invis "$a$" at LC +(-6,-.42)
arrow from LC to last circle chop circlerad
arrow from 2nd last circle to last circle chop circlerad
circle at last circle +(-2,-1)
arrow from 2nd last circle to last circle chop circlerad
box invis "$b$" at 2nd last circle +(-1.04,-.36)
circle at last circle +(0,-1)
arrow from 2nd last circle to last circle chop circlerad
box invis "$1$" at last circle +(.15,.65)
circle at 3rd last circle +(-1,-1)
arrow from 4th last circle to last circle chop circlerad
box invis "$b$" at 4th last circle +(-.58,-.42)
circle at last circle +(0,-1)
arrow from 2nd last circle to last circle chop circlerad
box invis "$2$" at last circle +(.15,.65)
circle at 5th last circle +(0,-1)
arrow from 6th last circle to last circle chop circlerad
box invis "$b$" at 6th last circle +(-.15,-.46)
circle at last circle +(0,-1)
arrow from 2nd last circle to last circle chop circlerad
box invis "$3$" at last circle +(.15,.65)
circle at 7th last circle +(1,-1)
arrow from 8th last circle to last circle chop circlerad
box invis "$b$" at 8th last circle +(.25,-.48)
circle at last circle +(0,-1)
arrow from 2nd last circle to last circle chop circlerad
box invis "$4$" at last circle +(.15,.65)
circle at 9th last circle +(2,-1)
arrow dashed from 10th last circle to last circle chop circlerad
box invis "$\cdots$" at 10th last circle +(2,-.5)
circle at last circle +(0,-1)
arrow dashed from 2nd last circle to last circle chop circlerad
box invis "$=_{\it RB}^*$" at (-7.2,.5)
box invis "$\neq_S^\omega$" at (-7.2,-.5)
circle at (-5,1)
arrow from LC to last circle chop circlerad
box invis "$a$" at LC +(-2.5,-.98)
circle invis at last circle +(-2,-1)
circle invis at last circle +(0,-1)
circle at 3rd last circle +(-1,-1)
arrow from 4th last circle to last circle chop circlerad
box invis "$b$" at 4th last circle +(-.58,-.42)
circle at last circle +(0,-1)
arrow from 2nd last circle to last circle chop circlerad
box invis "$2$" at last circle +(.15,.65)
circle at 5th last circle +(0,-1)
arrow from 6th last circle to last circle chop circlerad
box invis "$b$" at 6th last circle +(-.15,-.46)
circle at last circle +(0,-1)
arrow from 2nd last circle to last circle chop circlerad
box invis "$3$" at last circle +(.15,.65)
circle at 7th last circle +(1,-1)
arrow from 8th last circle to last circle chop circlerad
box invis "$b$" at 8th last circle +(.25,-.48)
circle at last circle +(0,-1)
arrow from 2nd last circle to last circle chop circlerad
box invis "$4$" at last circle +(.15,.65)
circle at 9th last circle +(2,-1)
arrow dashed from 10th last circle to last circle chop circlerad
box invis "$\cdots$" at 10th last circle +(2,-.5)
circle at last circle +(0,-1)
arrow dashed from 2nd last circle to last circle chop circlerad
circle at (0,1)
box invis "$a$" at LC +(-.15,-.49)
arrow from LC to last circle chop circlerad
circle at last circle +(-2,-1)
arrow from 2nd last circle to last circle chop circlerad
box invis "$b$" at 2nd last circle +(-1.04,-.36)
circle at last circle +(0,-1)
arrow from 2nd last circle to last circle chop circlerad
box invis "$1$" at last circle +(.15,.65)
circle invis at 3rd last circle +(-1,-1)
circle invis at last circle +(0,-1)
circle at 5th last circle +(0,-1)
arrow from 6th last circle to last circle chop circlerad
box invis "$b$" at 6th last circle +(-.15,-.46)
circle at last circle +(0,-1)
arrow from 2nd last circle to last circle chop circlerad
box invis "$3$" at last circle +(.15,.65)
circle at 7th last circle +(1,-1)
arrow from 8th last circle to last circle chop circlerad
box invis "$b$" at 8th last circle +(.25,-.48)
circle at last circle +(0,-1)
arrow from 2nd last circle to last circle chop circlerad
box invis "$4$" at last circle +(.15,.65)
circle at 9th last circle +(2,-1)
arrow dashed from 10th last circle to last circle chop circlerad
box invis "$\cdots$" at 10th last circle +(2,-.5)
circle at last circle +(0,-1)
arrow dashed from 2nd last circle to last circle chop circlerad
circle at (5,1)
arrow from LC to last circle chop circlerad
box invis "$a$" at LC +(2.5,-.98)
circle at last circle +(-2,-1)
arrow from 2nd last circle to last circle chop circlerad
box invis "$b$" at 2nd last circle +(-1.04,-.36)
circle at last circle +(0,-1)
arrow from 2nd last circle to last circle chop circlerad
box invis "$1$" at last circle +(.15,.65)
circle at 3rd last circle +(-1,-1)
arrow from 4th last circle to last circle chop circlerad
box invis "$b$" at 4th last circle +(-.58,-.42)
circle at last circle +(0,-1)
arrow from 2nd last circle to last circle chop circlerad
box invis "$2$" at last circle +(.15,.65)
circle invis at 5th last circle +(0,-1)
circle invis at last circle +(0,-1)
circle at 7th last circle +(1,-1)
arrow from 8th last circle to last circle chop circlerad
box invis "$b$" at 8th last circle +(.25,-.48)
circle at last circle +(0,-1)
arrow from 2nd last circle to last circle chop circlerad
box invis "$4$" at last circle +(.15,.65)
circle at 9th last circle +(2,-1)
arrow dashed from 10th last circle to last circle chop circlerad
box invis "$\cdots$" at 10th last circle +(2,-.5)
circle at last circle +(0,-1)
arrow dashed from 2nd last circle to last circle chop circlerad
circle at (10,1)
arrow dashed from LC to last circle chop circlerad
circle at last circle +(-2,-1)
arrow dashed from 2nd last circle to last circle chop circlerad
circle at last circle +(0,-1)
arrow dashed from 2nd last circle to last circle chop circlerad
circle at 3rd last circle +(-1,-1)
arrow dashed from 4th last circle to last circle chop circlerad
circle at last circle +(0,-1)
arrow dashed from 2nd last circle to last circle chop circlerad
circle at 5th last circle +(0,-1)
arrow dashed from 6th last circle to last circle chop circlerad
circle at last circle +(0,-1)
arrow dashed from 2nd last circle to last circle chop circlerad
circle invis at 7th last circle +(1,-1)
circle invis at last circle +(0,-1)
circle at 9th last circle +(2,-1)
arrow dotted from 10th last circle to last circle chop circlerad
box invis "$\cdots$" at 10th last circle +(2,-.5)
box invis ". . . \ \ \ " at 10th last circle +(2.5,.5)
circle at last circle +(0,-1)
arrow dotted from 2nd last circle to last circle chop circlerad
.PE
\centerline{\raise 1em\box\graph}}
\caption{Finitary equivalent, but not $S^\omega$-equivalent\hlabel{omega}}
\end{counterexample}
In \ctr{omega} $S^*({\it with}) = S^*({\it without})$, yet
$a\bigwedge_{i=1}^\infty bi\top \in S^\omega({\it with}) - S^\omega({\it
without})$.\\
In \ctr{infinitary} $S^\omega({\it left}) = S^\omega({\it
right})$, yet ${\it right} \not\sii {\it left}$.
For the first statement, let $\phi \in \fL_S^\omega$. Then there
is an $n$ such that $\phi \in \fL_S^n$. Now parts of trees that
are further than $n$ edges away from the root play no r\^ole in the
satisfaction relation for $\phi$. Thus, the validity of $\phi$ remains
unchanged if in both trees all paths are cut off after $n$
steps. However, the cut versions of both trees are isomorphic, and
hence satisfy the same formulas (cf.\ \cor{isomorphism}). The second
statement follows immediately from \pr{infinitary simulation}.
It follows that $T \prec S^* \prec S^\omega \prec S$ and $T \prec
T^\infty \prec S$, whereas $T^\infty$ is incomparable with $S^*$ and
$S^\omega$. Moreover, $S^*$, $S^\omega$ and $S$ are incomparable with
the semantics ranging from $CT$ or $F^-$ to $RT^\infty$.
\textBlack
\section{Ready simulation semantics\hlabel{ready simulation}}
\hindex{ready simulation semantics}
\paragraph{\Purple{Testing scenario}}
Of course one can also combine the copying facility with any of the
other testing scenarios. The observer can then plan experiments on
one of the machines from the Sections \ref{completed trace} to
\ref{readiness} together with a \phrase{replicator}, an ingenious
device by which one can replicate the machine whenever and as often as
one wants. In order to represent observations, the modal languages
from Sections \ref{completed trace} to \ref{readiness} need to be
combined with the one from \sect{simulation}.
\paragraph{\Purple{Definition \ref{ready simulation}}}
The language $\fL_{\it CS}$ and the
corresponding satisfaction relation is defined recursively by
combining the clauses of \df{modal completed trace} (for ${\fL}_{\it CT}$) with those of \df{modal simulation} (for $\fL_{\it S}$).
Likewise, $\fL_{\it FS}$ is obtained by combining $\fL_{\it F}$ and
$\fL_{\it S}$; $\fL_{\it FTS}$ by combining $\fL_{\it FT}$ and
$\fL_{\it S}$; $\fL_{\it RS}$ by combining $\fL_{\it R}$ and
$\fL_{\it S}$; and $\fL_{\it RTS}$ by combining $\fL_{\it RT}$
and $\fL_{\it S}$. For $p \in \IP$ and $\fO \in \it \{CS,FS,FTS,RS,RTS\}$
let $\fO(p) = \{ \phi \in \fL_\fO \mid p \models \phi\}$.
Two processes $p,q \in \IP$ are
\begin{itemise}
\vspace{-2pt}
\item \phrase{completed simulation equivalent}, notation $p =_{\it CS} q$, if ${\it CS}(p) = {\it CS}(q)$;
\vspace{-2pt}
\item \phrase{failure simulation equivalent}, notation $p =_{\it FS}
q$, if ${\it FS}(p) = {\it FS}(q)$;
\vspace{-2pt}
\item \phrase{failure trace simulation equivalent}, notation $p =_{\it
FTS} q$, if ${\it FTS}(p)={\it FTS}(q)$;
\vspace{-2pt}
\item \phrase{ready simulation equivalent}, notation $p =_{\it RS} q$,
if ${\it RS}(p) = {\it RS}(q)$;
\vspace{-2pt}
\item \phrase{ready trace simulation equivalent}, notation $p =_{\it
RTS} q$, if ${\it RTS}(p)={\it RTS}(q)$.
\end{itemise}\vspace{2ex}
It is obvious that failure trace simulation equivalence coincides with
failure simulation equivalence and ready trace simulation equivalence
with ready simulation equivalence ($p \models X \phi \Leftrightarrow
p \models X \wedge \phi$). Also it is not difficult to see that
failure simulation equivalence and ready simulation equivalence
coincide ($p \models X \Leftrightarrow p \models \widetilde Y
\wedge \bigwedge_{a \in X} a\top$, where $Y={Act-X}$). So one has
\begin{proposition}{failure simulation}
$p =_{\it FS} q \Leftrightarrow p =_{\it FTS} q \Leftrightarrow
p =_{\it RTS} q \Leftrightarrow p =_{\it RS} q$.
\end{proposition}
\paragraph{\Purple{Relational characterizations}}
The two remaining equivalences can be characterized as follows:
\begin{definition}{complete simulation} A \phrase{complete simulation} is a
binary relation $R$ on processes, satisfying, for $a \in Act$:
\begin{itemise}
\item
if $pRq$ and $p \goto{a} p '$, then $\exists q ' : ~q \goto{a} q '$
and $p ' Rq '$;
\item
if $pRq$ then $I(p)=\emptyset \Leftrightarrow I(q)=\emptyset$.
\end{itemise}
\end{definition}
\begin{proposition}{completed simulation}
Two processes $p$ and $q$ are completed simulation equivalent
iff there exists a complete simulation $R$ with $pRq$ and a complete
simulation $S$ with $qSp$.
\\\pf A trivial modification of the proof of \pr{modal simulation}.
\end{proposition}
\begin{definition}{ready simulation} A \phrase{ready simulation} is a
binary relation $R$ on processes, satisfying, for $a \in Act$:
\begin{itemise}
\item
if $pRq$ and $p \goto{a} p '$, then $\exists q ' : ~q \goto{a} q '$
and $p ' Rq '$;
\item
if $pRq$ then $I(p)=I(q)$.
\pagebreak[3]
\end{itemise}
\end{definition}
\begin{proposition}{ready simulation}
Two processes $p$ and $q$ are ready simulation equivalent
iff there exists a ready simulation $R$ with $pRq$ and a ready
simulation $S$ with $qSp$.
\\\pf A trivial modification of the proof of \pr{modal simulation}.
\end{proposition}
A variant of ready simulation equivalence was originally proposed by
{\sc Bloom, Istrail \& Meyer} \hcite{BIM95} under the name \phrase{GSOS
trace congruence}; they provided a modal characterization, to be discussed in
\sect{reactive}. A relational characterization
was first given by {\sc Larsen \& Skou} \hcite{LS91} under the
name \phrase{$\frac{2}{3}$-bisimulation equivalence}. A
$\frac{2}{3}$-bisimulation is defined just like a ready simulation,
except that the second clause reads ``if $pRq$ and $\exists q': q
\goto{a} q'$ then $\exists p': p \goto{a} p'$''. This is clearly
equivalent.
\paragraph{\Purple{Classification}}
%\begin{proposition}{RTvsRS}
$RT \prec RS$, $CT \prec CS$ and $S \prec CS \prec RS$.
$CS$ is independent of $F$ to $RT$.
\\
\pf ``$RT \preceq RS$'' follows since $\fL_{RT}$ is a sublanguage
of $\fL_{\it RTS}$, using \pr{failure simulation}.\\
``$CT \preceq CS$'' and ``$S \preceq CS \preceq RS$'' follow since ${\fL}_{CT}$ and $\fL_{S}$ are sublanguages
of $\fL_{\it CS}$, which is a sublanguage of $\fL_{\it FS}$.
\\
``$RT \not\succeq RS$'' follows from \ctr{SvsRT}, using
``$RS \succeq S$''; similarly $RT \not\succeq CS$ and $CT \not\succeq CS$.
\\
``$S \not\succeq CS$'' follows from \ctr{TvsCT}, using
``$CS \succeq CT$''.\\
``$CS \not\succeq F$'' (and hence ``$CS \not\succeq RS$'') follows
from \ctr{CTvsF}, in which $F({\it left}) \neq F({\it
right})$ but ${\it left} =_{\it CS} {\it right}$; the construction of
the two complete simulations is left to the reader. \hfill $\Box$
\begin{proposition}{RSvsPF}
$\it PF$ is incomparable with $CS$ and $RS$.
\\\pf ``$CS \not\preceq \it PF$'' (and hence ``$RS \not\preceq \it PF$'')
follows from \ctr{PFvsFT}, using ``$CS \succeq S$''.
\begin{counterexample}
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
define token
X
"\Red{$\bullet$}" at $1,$2
X
LC1: circle at (-4,3)
LC2: circle at (-5,2)
token(-5,2)
LC3: circle at (-5,1)
LC6: circle at (-5,0)
LC4: circle at (-3,2)
LC5: circle at (-4,1)
LC7: circle at (-4,0)
LC8: circle at (-2,1)
LC9: circle at (-2,0)
LB1: box invis "$a$" at (-4.65,2.65)
LB2: box invis "$a$" at (-3.35,2.65)
LB3: box invis "$b$" at (-5.2,1.5)
LB4: box invis "$b$" at (-3.65,1.65)
LB4: box invis "$b$" at (-2.35,1.65)
LB3: box invis "$c$" at (-5.2,.5)
LB3: box invis "$c$" at (-4.2,.5)
LB4: box invis "$d$" at (-1.8,.5)
LB: box invis "$abc+a(bc+bd)$" at (-3.5,-1)
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC2 to LC3 chop circlerad chop circlerad
arrow from LC3 to LC6 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
arrow from LC4 to LC5 chop circlerad chop circlerad
arrow from LC5 to LC7 chop circlerad chop circlerad
arrow from LC4 to LC8 chop circlerad chop circlerad
arrow from LC8 to LC9 chop circlerad chop circlerad
B3: box invis "$=_{RS}$" at (0,2)
B3: box invis "$\neq_{\it PF}$" at (0,1)
RC1: circle at (4,3)
RC2: circle at (4,2)
token(4,2)
RC3: circle at (3,1)
RC5: circle at (3,0)
RC4: circle at (5,1)
RC6: circle at (5,0)
RB1: box invis "$a$" at (3.8,2.5)
RB2: box invis "$b$" at (3.35,1.65)
RB3: box invis "$b$" at (4.65,1.65)
RB4: box invis "$c$" at (2.8,.5)
RB5: box invis "$d$" at (5.2,.5)
RB: box invis "$a(bc+bd)$" at (4,-1)
arrow from RC1 to RC2 chop circlerad chop circlerad
arrow from RC2 to RC3 chop circlerad chop circlerad
arrow from RC2 to RC4 chop circlerad chop circlerad
arrow from RC3 to RC5 chop circlerad chop circlerad
arrow from RC4 to RC6 chop circlerad chop circlerad
.PE
\centerline{\raise 1em\box\graph}
\caption{Ready simulation equivalent, but not possible-futures
equivalent\hlabel{RSvs2S}}
\end{counterexample}
\\``$RS \not\succeq \it PF$'' (and hence ``$CS \not\succeq \it PF$'')
follows from \ctr{RSvs2S}, which shows two graphs that
are ready simulation equivalent but not possible-futures equivalent.
Concerning the first claim, note that there exists exactly one
simulation of {\it right} by {\it left}, namely the one mapping {\it
right} on the right-hand side of {\it left}. There also exists
exactly one simulation of {\it left} by {\it right}, which relates the
red (or shaded) node on the {\it left} to the red (or shaded)
node on the {\it right}. Both simulations are ready simulations, as
related nodes have the same menu of initial actions. The second claim
follows since $\rec{a,\{ \epsilon ,~b,~bc\}} \in \it PF({\it
left})-PF({\it right})$.
\end{proposition}
\paragraph{\Purple{Infinite processes}}\textBrown
For each of the semantics {\it CS, FS, FTS, RS} and {\it RTS} a
finitary variant (superscripted with a *), motivated by allowing
finite replication only, is defined by combining the
modal languages $\fL_{\it CT}$, $\fL_{\it F}$, ${\fL}_{\it FT}$, $\fL_{\it R}$ and $\fL_{\it RT}$,
respectively, with $\fL_{\it S}^*$. Likewise, an intermediate
variant (superscripted with an $\omega$), motivated by requiring any
observation to be over within a finite amount of time, is defined by
combining these languages with $\fL_{\it S}^\omega$. Finally, a
finite variant (superscripted with a $-$), motivated by observers that
can only engage in finite replication, can only set finitely many
switches on free, and can only inspect finitely many lamps in a finite
time, is obtained by combining the (obvious) modal languages
$\fL_{\it F}^-$, $\fL_{\it FT}^-$, $\fL_{\it R}^-$ and
$\fL_{\it RT}^-$ with $\fL_{\it S}^*$ (there is no $\it CS^-$).
Exactly as in the case of \pr{failure simulation} one finds:
\begin{proposition}{intermediate failure simulation}
${\it FS}^\omega = {\it FTS}^\omega = {\it RTS}^\omega = {\it
RS}^\omega$ and ${\it FS}^- = {\it FTS}^- = {\it RTS}^- = {\it RS}^-$.
Moreover, ${\it FS}^* = {\it FTS}^*$ and
${\it RTS}^* = {\it RS}^*$.
\end{proposition}
However, as pointed out in {\sc Schnoebelen} \hcite{Sch91}, $\it FS^*$
and $\it RS^*$ are different:
\begin{counterexample}
\Black{\footnotesize
.PS 6.5in
scale = 3.79
boxwid = 0.05; boxht = 0.05
circlerad = 0.1
arrowhead = 7
box invis "compare the processes with" at (-10,2.2)
box invis "and without the left branch" at (-10,1.7)
LC: circle at (0,2.4)
circle at (-11,1)
box invis "$a$" at LC +(-6,-.42)
arrow from LC to last circle chop circlerad
circle at last circle +(-2,-1.4)
arrow from 2nd last circle to last circle chop circlerad
box invis "$1$" at 2nd last circle +(-1.04,-.5)
circle at 2nd last circle +(-1,-1.4)
arrow from 3rd last circle to last circle chop circlerad
box invis "$2$" at 3rd last circle +(-.58,-.6)
circle at 3rd last circle +(0,-1.4)
arrow from 4th last circle to last circle chop circlerad
box invis "$3$" at 4th last circle +(-.15,-.64)
circle at 4th last circle +(1,-1.4)
arrow from 5th last circle to last circle chop circlerad
box invis "$4$" at 5th last circle +(.25,-.67)
circle at 5th last circle +(2,-1.4)
arrow dashed from 6th last circle to last circle chop circlerad
box invis "$...$" at 6th last circle +(2,-.7)
box invis "$=_{\it FS}^*$" at (-7.2,.7)
box invis "$\neq_R ~$" at (-7.2,0)
# box invis "$\neq_{RS}^*$" at (-7.2,-.3)
circle at (-5,1)
arrow from LC to last circle chop circlerad
box invis "$a$" at LC +(-2.5,-.98)
circle invis at last circle +(-2,-1.4)
circle at 2nd last circle +(-1,-1.4)
arrow from 3rd last circle to last circle chop circlerad
box invis "$2$" at 3rd last circle +(-.58,-.60)
circle at 3rd last circle +(0,-1.4)
arrow from 4th last circle to last circle chop circlerad
box invis "$3$" at 4th last circle +(-.15,-.64)
circle at 4th last circle +(1,-1.4)
arrow from 5th last circle to last circle chop circlerad
box invis "$4$" at 5th last circle +(.25,-.67)
circle at 5th last circle +(2,-1.4)
arrow dashed from 6th last circle to last circle chop circlerad
box invis "$...$" at 6th last circle +(2,-.7)
circle at (0,1)
box invis "$a$" at LC +(-.15,-.49)
arrow from LC to last circle chop circlerad
circle at last circle +(-2,-1.4)
arrow from 2nd last circle to last circle chop circlerad
box invis "$1$" at 2nd last circle +(-1.04,-.5)
circle invis at 2nd last circle +(-1,-1.4)
circle at 3rd last circle +(0,-1.4)
arrow from 4th last circle to last circle chop circlerad
box invis "$3$" at 4th last circle +(-.15,-.64)
circle at 4th last circle +(1,-1.4)
arrow from 5th last circle to last circle chop circlerad
box invis "$4$" at 5th last circle +(.25,-.67)
circle at 5th last circle +(2,-1.4)
arrow dashed from 6th last circle to last circle chop circlerad
box invis "$...$" at 6th last circle +(2,-.7)
circle at (5,1)
arrow from LC to last circle chop circlerad
box invis "$a$" at LC +(2.5,-.98)
circle at last circle +(-2,-1.4)
arrow from 2nd last circle to last circle chop circlerad
box invis "$1$" at 2nd last circle +(-1.04,-.50)
circle at 2nd last circle +(-1,-1.4)
arrow from 3rd last circle to last circle chop circlerad
box invis "$2$" at 3rd last circle +(-.58,-.59)
circle invis at 3rd last circle +(0,-1.4)
circle at 4th last circle +(1,-1.4)
arrow from 5th last circle to last circle chop circlerad
box invis "$4$" at 5th last circle +(.25,-.67)
circle at 5th last circle +(2,-1.4)
arrow dashed from 6th last circle to last circle chop circlerad
box invis "$...$" at 6th last circle +(2,-.7)
circle at (10,1)
arrow dashed from LC to last circle chop circlerad
circle at last circle +(-2,-1.4)
arrow dashed from 2nd last circle to last circle chop circlerad
circle at 2nd last circle +(-1,-1.4)
arrow dashed from 3rd last circle to last circle chop circlerad
circle at 3rd last circle +(0,-1.4)
arrow dashed from 4th last circle to last circle chop circlerad
circle invis at 4th last circle +(1,-1.4)
circle at 5th last circle +(2,-1.4)
arrow dotted from 6th last circle to last circle chop circlerad
box invis "$...$" at 6th last circle +(1.9,-.5)
box invis ". . ." at 6th last circle +(1.8,.5)
.PE
\centerline{\raise 1em\box\graph}}
\caption{Finitary failure simulation equivalent, but not ready
equivalent\hlabel{FSvsRS}}
\end{counterexample}
in \ctr{FSvsRS} one has
$\it FS^* ({\it with})= \it FS^*({\it without})$, but
$\rec{a,\{1, 2, \ldots \}} \in R({\it with})-R({\it without})$.
Clearly one has $\it CS^* \prec CS^\omega \prec CS$ and $\it RS^-
\prec FS^* \prec RS^* \prec RS^\omega \prec RS$. The strictness of
these inclusions is given by Counterexamples \href{finite}{\ref{finite}},
\href{FSvsRS}{\ref{FSvsRS}}, \href{omega}{\ref{omega}} and
\href{infinitary}{\ref{infinitary}}. In addition one has
$\it RT^- \prec RS^-$, $\it S^* \prec RS^-$, $\it RT \prec RS^*$,
$\it FT \prec FS^*$, $CT \prec CS^*$ and $S^* \prec CS^* \prec \it FS^*$;
as well as $\it RT^\infty \prec RS$, $CT^\infty \prec CS$, $S^\omega
\prec CS^\omega \prec RS^\omega$ and $S \prec CS \prec RS$.
Counterexamples against further inclusions have already been provided.
\begin{proposition}{modal finitary ready simulation}
Let $p,q \in \IP$ be image finite.
Then $p =_{CS} q \Leftrightarrow p =_{CS}^* q$ and
$p =_{RS} q \Leftrightarrow p =_{RS}^- q$.
\\\pf Two trivial modifications of the proof of \pr{modal finitary simulation}.
In the second one, one uses that if $\forall \varphi \in {\fL}_{RS}^-
(p \models \varphi \Rightarrow q \models \varphi)$ then surely $I(p)=I(q)$.
\end{proposition}
In fact, if it is merely known that only $q$ is image finite it
follows already that $p \sqsubseteq_{CS} q \Leftrightarrow p
\sqsubseteq_{CS}^* q$ and $p \sqsubseteq_{RS} q \Leftrightarrow p
\sqsubseteq_{RS}^- q$. However, the following variant of
\ctr{infinitary} shows that in the statement of
\pr{modal finitary ready simulation} it is essential that {\em both}
$p$ and $q$ are image finite. In \ctr{infinitary RS}
{\it right} is image finite---in fact, it is even finitely
branching---but {\it left} is not. It turns out that ${\it left}
=_{RS}^\omega {\it right}$ (and hence ${\it left} =_{RS}^- {\it
right}$, ${\it left} =_{CS}^* {\it right}$, ${\it left} =_{RT}
{\it right}$, ${\it left} =_{F} {\it right}$, etc.) but ${\it left}
\neq_{T}^\infty {\it right}$ (and hence ${\it left} \neq_{F}^\infty
{\it right}$, ${\it left} \neq_{RT}^\infty {\it right}$, ${\it left}
\neq_{CS} {\it right}$, ${\it left} \neq_{RS} {\it right}$, etc.).
\begin{counterexample}[htb]
\Black{
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
LC1: circle at (-4,2)
LC2: circle at (-6,1)
LC3: circle at (-5,1)
LC4: circle at (-4,1)
LC5: circle at (-3,1)
LC6: circle at (-2,1)
LC3b: circle at (-5,0)
LC4b: circle at (-4,0)
LC5b: circle at (-3,0)
LC6b: circle at (-2,0)
LB1: box invis "$a$" at (-4.61,1.54)
LB2: box invis "$a$" at (-3.32,1.50)
LB1: box invis "$a$" at (-5.04,1.68)
LB2: box invis "$a$" at (-4.15,1.5)
LB3: box invis "$a$" at (-5.3,1.15)
LB3: box invis "$a$" at (-4.4,1.15)
LB3: box invis "$a$" at (-3.5,1.15)
LB3: box invis "$a$" at (-5.2,.6)
LB3: box invis "$a$" at (-4.2,.6)
LB3: box invis "$a$" at (-3.2,.6)
LB4: box invis "$...$" at (-2,1.5)
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC1 to LC3 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
arrow from LC1 to LC5 chop circlerad chop circlerad
arrow from LC1 to LC6 dashed chop circlerad chop circlerad
arrow from LC3 to LC2 chop circlerad chop circlerad
arrow from LC4 to LC3 chop circlerad chop circlerad
arrow from LC5 to LC4 chop circlerad chop circlerad
arrow from LC6 to LC5 dashed chop circlerad chop circlerad
arrow from LC3 to LC3b chop circlerad chop circlerad
arrow from LC4 to LC4b chop circlerad chop circlerad
arrow from LC5 to LC5b chop circlerad chop circlerad
arrow from LC6 to LC6b dashed chop circlerad chop circlerad
RC1: circle at (3,2)
RC3: circle at (2,1)
RC4: circle at (3,1)
LB1: box invis "$a$" at (3.18,1.6)
LB2: box invis "$a$" at (2.38,1.62)
LB3: box invis "$a$" at (2.6,.85)
arrow from last circle+(.2,.15) to last circle.ne
box invis "$a$" at last circle +(.73,-.3)
circle at last circle+(.4,-.3) rad .5
circle at 2nd last circle fill 0
arrow from RC1 to RC3 chop circlerad chop circlerad
arrow from RC1 to RC4 chop circlerad chop circlerad
arrow from RC4 to RC3 chop circlerad chop circlerad
B3: box invis "$=_{RS}^\omega$" at (0,1.5)
B3: box invis "$\neq_{T}^\infty~$" at (0,.5)
.PE
\centerline{\raise 1em\box\graph}}
\caption{Finitary ready simulation equivalent but not infinitary
equivalent\hlabel{infinitary RS}}
\end{counterexample}
\\
For general (non-image-finite) processes, no relational
characterizations of the finite, finitary and intermediate
equivalences are known.\textBlack
\paragraph{\Purple{Testing scenario}}
An alternative and maybe more natural testing scenario for finitary
ready simulation semantics (or simulation semantics) can be obtained
by exchanging the replicator for an $undo$-button on the (ready) trace
machine (\fig{ready simulation machine}).
\begin{figure}[htb]
.PS
scale = 2.54
boxwid = 1.5; boxht = 1.5
circlerad = 0.25
define switch
X
circle rad 0.1 at $1,$2
\textRed
circle invis "\raisebox{-3.5pt}[0pt][0pt]{\Huge $\cdot$}" at (last circle.w.x -.15,last circle.c.y +.15)
line from 2nd last circle.w to last circle.c
\textBlack
X
define lamp
X
\textYellow
circle rad 0.05 thickness 3 at $1,$2
circle invis rad 0.2 at $1,$2
circle invis rad 0.3 at $1,$2
line from 2nd last circle.n to last circle.n
line from 2nd last circle.ne to last circle.ne
line from 2nd last circle.e to last circle.e
line from 2nd last circle.se to last circle.se
line from 2nd last circle.s to last circle.s
line from 2nd last circle.sw to last circle.sw
line from 2nd last circle.w to last circle.w
line from 2nd last circle.nw to last circle.nw
\textBlack
X
box invis at (-1,0)
box wid 11 height 4 at (5,2) fill .3
circle invis "$a$" at (1,1)
switch(1,2)
lamp(.6,3)
circle invis "$b$" at (2,1)
switch(2,2)
lamp(1.6,3)
circle invis "$\cdots$" at (2.6,2)
circle invis "$z$" at (4,1)
switch(4,2)
lamp(3.6,3)
box "\Blue{\LARGE $c$}" at (6,2) fill 0
circle rad .13 at (8,2.02)
token(8.005,2)
circle invis "$undo$" at (8,1)
.PE
\centerline{\box\graph}\vspace{-1em}
\caption{The ready simulation machine\hlabel{ready simulation
machine}\hindex{ready simulation machine}}
\end{figure}
It is assumed that all intermediate states that are passed through during
a run of a process are stored in a memory inside the black box. Now
pressing the $undo$-button causes the machine to shift one state
backwards.
% In case the button is pressed during the execution of an
% action, this execution will be interrupted and the process assumes the
% state just before this action began.
In the initial state pressing the button has no effect. An
observation now consists of a (ready) trace, enriched with
$undo$-actions. Such observations can easily be translated into
finitary (ready) simulation formulas.
\section{Reactive versus generative testing scenarios}\hlabel{reactive}
In the testing scenarios presented so far, a process is considered to
perform actions and make choices autonomously. The investigated
behaviours can therefore be classified as \phrase{generative
processes}. The observer merely restricts the spontaneous behaviour
of the generative machine by cutting off some possible courses of
action. An alternative view of the investigated processes can be obtained by
considering them to react on stimuli from the environment and be
passive otherwise. {\em Reactive\hindex{reactive machines} machines}
can be obtained out of the generative machines presented so far by
replacing the switches by buttons and the display by a green light.
\begin{figure}[htb]
.PS
scale = 2.54
boxwid = 1.5; boxht = 1.5
circlerad = 0.25
define lamp
X
\textYellow
circle rad 0.05 thickness 3 at $1,$2
circle invis rad 0.2 at $1,$2
circle invis rad 0.3 at $1,$2
line from 2nd last circle.n to last circle.n
line from 2nd last circle.ne to last circle.ne
line from 2nd last circle.e to last circle.e
line from 2nd last circle.se to last circle.se
line from 2nd last circle.s to last circle.s
line from 2nd last circle.sw to last circle.sw
line from 2nd last circle.w to last circle.w
line from 2nd last circle.nw to last circle.nw
\textBlack
X
define greenlamp
X
\textGreen
circle rad 0.08 thickness 5 at $1,$2
circle invis rad 0.3 at $1,$2
circle invis rad 0.45 at $1,$2
line from 2nd last circle.n to last circle.n
line from 2nd last circle.ne to last circle.ne
line from 2nd last circle.e to last circle.e
line from 2nd last circle.se to last circle.se
line from 2nd last circle.s to last circle.s
line from 2nd last circle.sw to last circle.sw
line from 2nd last circle.w to last circle.w
line from 2nd last circle.nw to last circle.nw
\textBlack
X
define token
X
"\Red{$\bullet$}" at $1,$2
X
box invis at (-.4,0)
box wid 11 height 4 at (5.5,2) fill .3
circle invis "$a$" at (1,1)
circle rad .13 at (1,2.02)
token(1.005,2)
lamp(.9,3)
circle invis "$b$" at (2,1)
circle rad .13 at (2,2.02)
token(2.005,2)
lamp(1.9,3)
circle invis "$\cdots$" at (2.9,2)
circle invis "$z$" at (4,1)
circle rad .13 at (4,2.02)
token(4.005,2)
lamp(3.9,3)
greenlamp(6,2)
circle rad .13 at (8,2.02)
token(8.005,2)
circle invis "$undo$" at (8,1)
.PE
\centerline{\box\graph}\vspace{-1em}
\caption{The reactive ready simulation machine\hlabel{reactive ready
simulation machine}}
\end{figure}
Initially the process waits
patiently until the observer tries to press one of the buttons. If the
observer tries to press an $a$-button, the machine can react in two
different ways: if the process cannot start with an $a$-action the
button will not go down and the observer may try another one; if the
process can start with an $a$-action it will do so and the button
goes down. Furthermore the green light switches on. During the
execution of $a$ no buttons can be pressed. As soon as the execution of
$a$ is completed the light switches off, so that the observer knows that
the process is ready for a new trial. Reactive machines as described
above originate from {\sc Milner} \href{Mi80}{\cite{Mi80,Mi81}}.
One family of testing scenarios with reactive machines can
be obtained by allowing the observer to try to depress more than one
button at a time. In order to influence a particular choice, the
observer could already start exercising pressure on buttons during the
execution of the preceding action (when no button can go down). When
this preceding action is finished, at most one of the buttons will go
down. These testing scenarios are equipotent with the generative
ones: putting pressure on a button is equivalent to setting the
corresponding switch on `free'; moreover an action $a$ appearing in
the display is mimicked by the $a$-button going down, and the
disappearance of $a$ from the display by the green light going off.
Another family of testing scenarios is obtained by allowing the user
to try only one button at a time. They are equipotent with those
generative testing scenarios in which at any time only one switch can
be set on `free'. Next I will discuss the equivalences that originate
from these scenarios.
First consider the reactive machine that resembles the failure
trace machine, thus without menu-lights and $undo$-button. An
observation on such a machine consists of a sequence of accepted and
refused actions, indicating which buttons went down in a sequence of
trials of the user. Such a sequence can be seen as a failure trace
where all refusal sets are singletons. Call the resulting semantics
$FT^1$. Clearly, the failure trace set of any process $p$ satisfies
$$\sigma (X \cup Y) \rho \in FT(p) ~\Leftrightarrow~
\sigma X Y \rho \in FT(p).$$
Thus, any failure trace $\sigma \{a_1,\ldots,a_n\} \rho$ can be
rewritten as (contains the same information as)
$\sigma \{a_1\} \{a_2\} \cdots \{a_n\} \rho$. It follows that the
singleton-failure trace set $FT^1(p)$ of a process $p$ contains as
much information as its finite-failure trace set $FT^-(p)$, so the
semantics $FT^1$ coincides with $FT^-$.
In order to arrive at a reactive counterpart to failures semantics,
one could suppose that an observer continues an experiment only as
long as all buttons he tries to depress actually go down; when a
button refuses to go down, he will not try another one. This testing
scenario gives rise to the variant $F^1$ of failures semantics in
which all refusal sets are singletons.\vspace{-6pt}
\paragraph{\Purple{Definition \ref{reactive}}}
$\rec{ \sigma , a} \in Act^\ast \times Act$ is a
\phrase{singleton-failure pair} of a process $p$ if there is a process
$q$ such that $p \goto{\sigma} q$ and $a \not\in I(q)$. Let $F^1(p)$
denote the set of singleton-failure pairs of $p$. Two processes $p$
and $q$ are \phrase{singleton-failures equivalent}, $p =_F^1 q$, if
$T(p)=T(q)$ and $F^1(p)=F^1(q)$.
\\[10pt]
Unlike for $F$ and $F^-$, $F^1(p)=F^1(q)$ does not always imply
that $T(p)=T(q)$, so one has to keep track of traces explicitly. These
model observations ended by the observer before stagnation occurs.
\hindex{singleton-failures semantics}{\em Singleton-failures semantics}
($F^1$) is situated strictly between trace ($T$) and finite-failures
semantics ($F^-$). For \ctr{TvsCT} shows two processes
with $T({\it left}) = T({\it right})$ but $\rec{a,b} \in F^1({\it
left}) - F^1({\it right})$, and \ctr{F1vsF} shows two
processes with $F^1({\it left}) = F^1({\it right})$ (both contain
$\rec{a,b}$ and $\rec{a,c}$), but $\rec{a,\{b,c\}} \in F({\it
left}) - F({\it right})$.
\begin{counterexample}[htb]
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
LC1: circle at (-4,2)
LC2: circle at (-5,1)
LC3: circle at (-5,0)
LC4: circle at (-4,1)
LC5: circle at (-3,1)
LC6: circle at (-3,0)
LB1: box invis "$a$" at (-4.65,1.65)
LB2: box invis "$a$" at (-3.35,1.65)
LB: box invis "$a$" at (-3.85,1.5)
LB3: box invis "$b$" at (-5.2,.5)
LB4: box invis "$c$" at (-2.8,.5)
LB: box invis "$ab+a+ac$" at (-4,-.6)
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC2 to LC3 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
arrow from LC1 to LC5 chop circlerad chop circlerad
arrow from LC5 to LC6 chop circlerad chop circlerad
B3: box invis "$=_F^1~$" at (0,1.7)
B3: box invis "$\neq_{CT}$" at (0,1)
B3: box invis "$\neq_F^-~$" at (0,.3)
RC1: circle at (4,2)
RC2: circle at (3,1)
RC3: circle at (3,0)
RC4: circle at (5,1)
RC5: circle at (5,0)
RB1: box invis "$a$" at (3.35,1.65)
RB2: box invis "$b$" at (2.8,.5)
RB1: box invis "$a$" at (4.65,1.65)
RB2: box invis "$c$" at (5.2,.5)
RB3: box invis at (5.2,0)
RB: box invis "$ab+ac$" at (4,-.6)
arrow from RC1 to RC2 chop circlerad chop circlerad
arrow from RC2 to RC3 chop circlerad chop circlerad
arrow from RC1 to RC4 chop circlerad chop circlerad
arrow from RC4 to RC5 chop circlerad chop circlerad
.PE
\centerline{\raise 1em\box\graph}
\caption{Singleton-failures equivalent, but not completed trace or failures
equivalent\hlabel{F1vsF}}
\end{counterexample}
Furthermore, $F^1$ is independent of $CT$, $S$ and $CS$, for in
\ctr{F1vsF} one has $CT({\it left}) \neq CT({\it
right})$, in \ctr{SvsRT} one has ${\it left} =_F^1
{\it right}$ but ${\it left} \neq_S {\it right}$, and in
\ctr{CTvsF} one has ${\it left} =_{CS} {\it right}$ but
$\rec{a,c} \in F^1({\it left}) - F^1({\it right})$.
Adding the $undo$-button to the reactive failure trace machine gives a
semantics ${\it FS}^1$ characterized by the modal language ${\fL}^*_S$ to which has been added a modality ``Can't$(a)$'', with $p
\models \mbox{Can't}(a)$ iff $a \not\in I(p)$. This modality denotes a
failed attempt to depress the $a$-button. In fact, {\sc Bloom, Istrail
\& Meyer} studied the coarsest equivalence finer than trace
equivalence that is a congruence for the class of so-called {\em
GSOS-operators}, and characterized this \phrase{GSOS trace congruence}
by the modal language above; its formulas were called \phrase{denial
formulas}. As in the modal language $\fL^-_{\it FS}$ one has $p
\models \widetilde{X \cup Y} \Leftrightarrow p \models \widetilde{X}
\wedge \widetilde{Y}$, and $\mbox{Can't}(a)$ is the same as
$\widetilde{\{a\}}$, it follows that the language of denial formulas is
equally expressive as $\fL_{\it FS}^-$, and hence ${\it FS}^1$
coincides with $\it FS^-$ and $\it RS^-$.
If the menu-lights are added to the reactive failure trace machine
considered above one can observe ready trace sets, and the green light
is redundant. Likewise, adding menu-lights to the reactive failure
scenario would give readiness semantics, and adding them to the
reactive failure simulation machine would yield ready simulation.
If the green light (as well as the menu-lights) are removed from the
reactive failure trace machine, one can only test trace equivalence,
since any refusal may be caused by the last action not being ready yet.
Likewise, removing the green light from the reactive failure
simulation machine (with $undo$-button) yields (finitary) simulation
semantics. Reactive machines on which only one button at a time is
depressed appear to be unsuited for testing completed trace, completed
simulation and failures equivalence.
\section{2-nested simulation semantics}\hlabel{2-nested simulation}
\hindex{2-nested simulation semantics}
\phrase{2-nested simulation equivalence} popped up naturally in
{\sc Groote \& Vaandrager} \hcite{GrV92}
as the coarsest congruence with respect to a large and general class of
operators that is finer than completed trace equivalence.\vspace{-6pt}
\paragraph{\Purple{Definition \ref{2-nested simulation}}}
A \phrase{2-nested simulation} is a simulation contained in simulation
equivalence ($\si$). Two processes $p$ and $q$ are {\em 2-nested
simulation equivalent}, notation $p =_{\it 2S} q$, if there exists a
2-nested simulation $R$ with $pRq$ and a 2-nested simulation $S$ with
$qSp$.\vspace{-6pt}
\paragraph{\Purple{Modal characterization}} A modal characterization of
this notion is obtained by the fragment of the infinitary
Hennessy-Milner logic (cf.\ \df{modal bisimulation}) without nested
negations.
\begin{definition}{modal 2-nested simulation}
The class $\fL_{\it 2S}$ of \phrase{2-nested simulation formulas} over
$Act$ is defined recursively by:
\begin{itemise}
\item
If $I$ is a set and $\phi_i \in \fL_{\it 2S}$ for $i \in I$ then
$\bigwedge_{i\in I}\phi_i \in \fL_{\it 2S}$.
\item
If $\phi \in \fL_{\it 2S}$ and $a \in Act$ then $a \phi \in \fL_{\it 2S}$.
\item
If $\phi \in \fL_{S}$ then $\neg \phi \in \fL_{\it 2S}$.
\end{itemise}
Note that $\fL_S \subseteq \fL_{\it 2S}$.
The \phrase{satisfaction relation} $\models \; \subseteq \IP \times
\fL_{\it 2S}$ is defined recursively by:
\begin{itemise}
\item
$p \models \bigwedge_{i\in I}\phi_i$ if $p \models \phi_i$ for all $i \in I$.
\item
$p \models a \phi$ if for some $q \in \IP$: $p \goto{a} q$ and
$q \models \phi$.
\item
$p \models \neg \phi$ if $p \not\models \phi$.
\end{itemise}
\end{definition}
\begin{proposition}{modal 2-nested simulation}
$p =_{\it 2S} q ~\Leftrightarrow~ \forall \phi \in \fL_{\it 2S}
(p \models \phi \Leftrightarrow q \models \phi)$.
\\\pf A trivial modification of the proof of \pr{modal simulation}.
\end{proposition}
\paragraph{\Purple{Testing scenario}}
In order to obtain a testing scenario for this equivalence one has
to introduce the rather unnatural notion of a \phrase{lookahead}
\hcite{GrV92}: The \phrase{2-nested simulation machine} is a variant of
the ready trace machine with replicator, where in an idle state the
machine not only tells which actions are on the menu, but even which
simulation formulas are (not) satisfied in the current state.
\paragraph{\Purple{Classification}}
$RS \prec \it 2S$ and ${\it PF} \prec \it 2S$.
\\
\pf For ``$RS \preceq \it 2S$'' it suffices to show that each 2-nested simulation
is a ready simulation. This follows since $p \si q \Rightarrow I(p)=I(q)$.
$\it PF \preceq 2S$ is easily established using that $T \prec S$.
That both inclusions are strict follows immediately from the fact that
$RS$ and $\it PF$ are incomparable (\pr{RSvsPF}).\hfill $\Box$
\paragraph{\Purple{Infinite processes}}\textBrown
Exactly as for ready simulation semantics, 5 versions of 2-nested
simulation semantics can be defined that differ for infinite
processes. $\it 2S^-$ is the semantics whose modal characterization has
the constructs $\top$, $\wedge$, $a\phi$ and $\neg \phi'$ with $\phi'
\in \fL^*_S$. The constructs $\widetilde X$ and $X$ for $X
\subseteq_{\it fin} Act$ are expressible in this logic. $\it F2S^*$
additionally has the construct $\widetilde X$, and $\it R2S^*$ the
construct $X$, for $X \subseteq Act$. Finally $\it 2S^\omega$ is
characterized by the class of 2-nested simulation formulas with a
finite upperbound on the nesting of the $a\phi$ construct. The
constructs $\widetilde X$ and $X$ for $X \subseteq Act$ are
expressible in $\fL_{\it 2S}^\omega$, and hence also in $\fL_{\it 2S}$.
We have $\it 2S^- \prec F2S^* \prec R2S^* \prec 2S^\omega \prec 2S$.
The strictness of these inclusions is given by Counterexamples
\href{finite}{\ref{finite}}, \href{FSvsRS}{\ref{FSvsRS}},
\href{omega}{\ref{omega}} and \href{infinitary}{\ref{infinitary}}. In
addition one has $\it RS^- \prec 2S^-$, $\it FS^* \prec F2S^*$, $\it
RS^* \prec R2S^*$, $\it RS^\omega \prec 2S^\omega$ and $\it RS \prec 2S$;
as well as $\it PF^\infty \prec 2S$. \ctr{infinitary}
shows that $\it PF \not\preceq 2S^\omega$: $\it 2S^\omega({\it left}) =
2S^\omega({\it right})$ (cf.\ \pr{infinitary}),
but $\rec{a,a^*} \in \it PF({\it right})-PF({\it left})$.
\begin{propositioni}{modal finitary 2-nested simulation}
Let $p,q \in \IP$ be image finite.
Then $p =_{\it 2S} q \Leftrightarrow p =_{\it 2S}^- q$.
\\\pf An easy modification of the proof of \pr{modal finitary
simulation}, also using its result.
\textBlack
\hfill \Brown{$\Box$}
\end{propositioni}
\section{Bisimulation semantics}\hlabel{bisimulation}
The concept of \phrase{bisimulation equivalence} stems from {\sc
Milner} \hcite{Mi80}. Its formulation below is due to {\sc Park}
\hcite{Pa81}.
\paragraph{\Purple{Definition \ref{bisimulation}}}
A \phrase{bisimulation} is a binary relation $R$ on processes,
satisfying, for $a \in Act$:
\begin{itemise}
\item
if $pRq$ and $p \goto{a} p '$, then $\exists q ' : ~q \goto{a} q '$
and $p ' Rq '$;
\item
if $pRq$ and $q \goto{a} q '$, then $\exists p ' : ~p \goto{a} p '$
and $p ' Rq '$.
\end{itemise}
Two processes $p$ and $q$ are \phrase{bisimilar}, notation $p \bis{} q$,
if there exists a bisimulation $R$ with $pRq$.
\\[2ex]
The relation $\bis{}$ is again a bisimulation. As for similarity,
one easily checks that bisimilarity is an equivalence relation on $\IP$.
Hence the relation will be called \phrase{bisimulation equivalence}.
In \phrase{bisimulation semantics} ($B$) two processes are
identified iff they are bisimulation equivalent.
Note that the concept of bisimulation does not change if
in the definition above the action relations $\goto{a}$ were replaced by
generalized action relations $\goto{\sigma}$.
\paragraph{\Purple{Modal characterization}}
\begin{definition}{modal bisimulation}
The class $\fL_B$ of \phrase{infinitary Hennessy-Milner formulas} over
$Act$ is defined by:
\begin{itemise}
\item
If $I$ is a set and $\phi_i \in \fL_B$ for $i \in I$ then $\bigwedge_{i\in
I}\phi_i \in \fL_B$.
\item
If $\phi \in \fL_B$ and $a \in Act$ then $a \phi \in \fL_B$.
\item
If $\phi \in \fL_B$ then $\neg \phi \in \fL_B$.
\end{itemise}
The \phrase{satisfaction relation} $\models \; \subseteq \IP \times
\fL_B$ is defined recursively by:
\begin{itemise}
\item
$p \models \bigwedge_{i\in I}\phi_i$ if $p \models \phi_i$ for all $i \in I$.
\item
$p \models a \phi$ if for some $q \in \IP$: $p \goto{a} q$ and
$q \models \phi$.
\item
$p \models \neg \phi$ if $p \not\models \phi$.
\end{itemise}
Let $B(p)$ denote the class of all infinitary Hennessy-Milner formulas
satisfied by the process $p$: $B(p)=\{\phi \in \fL_B \mid p
\models \phi \}$. Write $p \sqsubseteq_B q$ if $B(p) \subseteq B(q)$
and $p =_B q$ if $B(p) = B(q)$.
\end{definition}
\begin{proposition}{bisimulation preorder}
$p \sqsubseteq_B q \Leftrightarrow p =_B q$.
\\\pf If $\phi \in B(q)-B(p)$ then $\neg\phi \in B(p)-B(q)$.
\end{proposition}
\begin{proposition}{modal bisimulation}
$p \bis{} q \Leftrightarrow p =_B q$.
\\\pf
For ``$\Rightarrow$'' I have to prove that for any bisimulation $R$ and
for all $\varphi \in \fL_B$ one has
$$pRq \Rightarrow (p \models \varphi \Leftrightarrow q \models \varphi).$$
I will do so with structural induction on $\varphi$.
Suppose $pRq$.
\begin{list}{{\bf --}}{\labelwidth\leftmargini\advance\labelwidth-\labelsep
\topsep 2pt \itemsep 1pt \parsep 1pt}
\item Let $p \models a\varphi$. Then there is a
$p' \in \IP$ with $p\goto{a} p'$ and $p' \models \varphi$.
As $R$ is a bisimulation, there must be a $q'\in \IP$ with
$q\goto{a}q'$ and $p'Rq'$. So by induction $q' \models \varphi$,
and hence $q \models a\varphi$. \\ By symmetry one also obtains $q
\models a\varphi \Rightarrow p \models a\varphi$.
\item $p \models \bigwedge_{i\in I}\varphi_i \Leftrightarrow
\forall i\!\in\!I(p \models \varphi_i)\stackrel{\rm ind.}{\Longleftrightarrow}
\forall i\!\in\!I(q \models \varphi_i) \Leftrightarrow q \models
\bigwedge_{i\in I}\varphi_i$.
\item $p \models \neg \varphi \Leftrightarrow p \not\models \varphi
\stackrel{\rm ind.}{\Longleftrightarrow}
q \not\models \varphi \Leftrightarrow q \models \neg \varphi$.
\end{list}
For ``$\Leftarrow$'' it suffices to establish that $\sqsubseteq_B$ is
a simulation (\pr{bisimulation preorder} then implies that $=_B \;=\;
\sqsubseteq_B \;=\; \sqsubseteq_B^{-1}$ is a bisimulation).
% \begin{list}{{\bf --}}{\labelwidth\leftmargini\advance\labelwidth-\labelsep
% \topsep 2pt \itemsep 1pt \parsep 1pt}
% \item Suppose $p \sqsubseteq_B q$ and $p \goto{a} p'$.
% I have to show that $\exists q'\in \IP$ with $q\goto{a}q'$ and
% $p' \sqsubseteq_B q'$. Let $Q'$ be $$\{q' \in \IP \mid
% q\goto{a} q' \wedge p' \not\sqsubseteq_B q'\}.$$ By \df{LTS} $Q'$ is a set.
% For every $q' \in Q'$ there is a formula $\varphi_{q'} \in B(p')- B(q')$.
% Now $$a\bigwedge_{q'\in Q'} \varphi_{q'} \in% B(p) \subseteq B(q),$$ so
% there must be a $q'\in \IP$ with $q\goto{a}q'$ and $q'\not\in Q'$,
% which had to be shown.
This goes exactly as in the proof of \pr{modal simulation}.
\end{proposition}
\paragraph{\Purple{Testing scenario}}
The testing scenario for bisimulation semantics, as presented in {\sc
Milner} \hcite{Mi80}, is the oldest and most powerful testing scenario,
from which most others have been derived by omitting some of its
features. It was based on a reactive failure trace machine
with replicator, but additionally the observer is equipped with the
capacity of \phrase{global testing}. Global testing is described in
{\sc Abramsky} \hcite{Ab87} as: ``the ability to enumerate all (of
finitely many) possible `operating environments' at each stage of the
test, so as to guarantee that all nondeterministic branches will be
pursued by various copies of the subject process''. {\sc Milner}
\hcite{Mi80} implemented global testing by assuming that
\begin{list}{$\bullet$}{\leftmargin 25pt
\labelwidth\leftmargini\advance\labelwidth-\labelsep
\topsep 4pt \itemsep 2pt \parsep 2pt}
\item [``(i)]
It is the \phrase{weather} at any moment which determines the choice
of transition (in case of ambiguity [...]);
\item [(ii)]
The weather has only finitely many states---at least as far as
choice-resolution is concerned;
\item [(iii)]
We can control the weather.''
\end{list}
Now it can be ensured that all possible moves a process can perform in
reaction on a given $a$-experiment will be investigated by simply
performing the experiment in all possible weather conditions.
Unfortunately, as remarked in {\sc Milner} \hcite{Mi81}, the second
assumption implies that the number of different moves an
investigated process can perform in response to any given experiment
is bounded by the number of possible weather conditions
(i.e.\ $\exists n \in \IN\; \forall p \in \IP\; \forall a \in
Act:\linebreak |\{q \in \IP \mid p \goto{a} q\}| < n$). So for
general application this condition has to be dropped, thereby losing
the possibility of effective implementation of the testing scenario.
% It helps a little to
% allow the number of possible weather conditions to be finite but
% variable in time; still this doesn't give us all image finite
% processes, and certainly no image infinite processes.
An observation in the global testing scenario can be represented
as an infinitary Hennessy-Milner formula $\phi \in \fL_B$.
This is essentially a simulation formula in which it is
possible to indicate that certain branches are not present.
A formula $\neg \phi$ says that by making sufficiently many copies of
the investigated process, and exposing them to all possible weather
conditions, it can be observed that none of these copies permits the
observation $\phi$.\\[2ex]
{\bf Remark:} Let $[a]\phi$ denote $\neg a \neg \phi$.
Now the negation in $\fL_B$ can be eliminated in favour of the
modalities $[a]$ and infinitary disjunction $\bigvee_{i\in I}$.
A formula $[a]\phi$ says that in all possible weather conditions,
after an $a$-move it is always possible to make the observation $\phi$.
\\[2ex]
In order to justify the observations of $\fL_B$ in a generative
testing scenario no switches or
menu-lights are needed; the architecture of the completed trace
machine suffices. However, in order to warrant negative observations,
one has to assume that actions take only a finite amount of time, and
idling can be detected (either by observations that last forever, or
by means of the display becoming empty).
Adding switches and/or menu-lights does not increase the
discriminating power of the observers. It would give rise to
observations that can be modelled as formulas in languages ${\fL}_{\it FB}$, $\fL_{\it RTB}$, etc., obtained by combining ${\fL}_{F}$, $\fL_{RT}$, etc.\ with $\fL_{B}$. These
observations can already be expressed in $\fL_{B}$: $p \models
\widetilde{X} \Leftrightarrow p \models \bigwedge_{a \in X} \neg a
\top$ and $p \models X\phi \Leftrightarrow p \models (\bigwedge_{a
\not\in X} \neg a \top) \wedge (\bigwedge_{a \in X} a\top) \wedge
\phi$.
A different implementation of global testing is given in
{\sc Larsen \& Skou} \hcite{LS91}.
They assumed that every transition in a transition system has a certain
probability of being taken. Therefore an observer can with an arbitrary
high degree of confidence assume that all transitions have been
examined, simply by repeating an experiment many times.
As argued among others in {\sc Bloom, Istrail \& Meyer} \hcite{BIM95},
global testing in the above sense is a rather unrealistic testing
ability. Once you assume that the observer is really as powerful
as in the described scenarios, in fact more can be tested than only
bisimulation equivalence: in the testing scenario of Milner also the
correlation between weather conditions and transitions being taken by
the investigated process can be recovered, and in that of Larsen \& Skou
one can determine the relative probabilities of the various transitions.
\paragraph{\Purple{Process graph characterization}}
Also bisimulation equivalence can be characterized by means of relations
between the nodes of two process graphs.
\begin{definition}{bisimulation graph}
Let $g,h \in \IG$. A \phrase{bisimulation} between $g$ and $h$ is a
binary relation $R \subseteq \nd (g) \times \nd (h)$, satisfying:
\begin{itemise}
\item
$\rt (g) R \rt (h)$.
\item
If $sRt$ and $(s,a,s') \in \ed (g)$, then there is an edge
$(t,a,t') \in \ed (h)$ such that $s'Rt'$.
\item
If $sRt$ and $(t,a,t') \in \ed (h)$, then there is an edge
$(s,a,s') \in \ed (g)$ such that $s'Rt'$.
\end{itemise}
\end{definition}
This definition is illustrated in \fig{a bisimulation}. Solid
lines indicate what is assumed, dashed lines what is required. It
follows easily that $g \bis{} h$ iff there exists a bisimulation
between $g$ and $h$.
\begin{figure}[htb]
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
LC1: circle at (-3,5)
LC2: circle at (-3,4)
LC3: circle at (-3,3)
LC4: circle at (-3,2)
LC5: circle at (-3,1)
box invis "$a$" at (-3.2,3.5)
box invis "$a$" at (-3.2,1.5)
arrow from LC2 to LC3 chop circlerad chop circlerad
arrow from LC4 to LC5 chop circlerad chop circlerad dashed
arc from (-1,0.5) to LC1.se radius 8
arc from LC1.sw to (-5,0.5) radius 8
RC1: circle at (3,5)
RC2: circle at (3,4)
RC3: circle at (3,3)
RC4: circle at (3,2)
RC5: circle at (3,1)
box invis "$a$" at (2.8,3.5)
box invis "$a$" at (2.8,1.5)
arrow from RC2 to RC3 chop circlerad chop circlerad dashed
arrow from RC4 to RC5 chop circlerad chop circlerad
arc cw from (1,0.5) to RC1.sw radius 8
arc cw from RC1.se to (5,0.5) radius 8
line from LC1 to RC1 chop circlerad chop circlerad dashed
line from LC2 to RC2 chop circlerad chop circlerad
line from LC3 to RC3 chop circlerad chop circlerad dashed
line from LC4 to RC4 chop circlerad chop circlerad
line from LC5 to RC5 chop circlerad chop circlerad dashed
.PE
\centerline{\raise 1em\box\graph}
\caption{A bisimulation\hlabel{a bisimulation}}
\end{figure}\\
For process graphs with multiple roots, the first requirement of
\df{bisimulation graph} generalizes to
\begin{itemise}
\item
$\forall s \in \mbox{\sc roots} (g) \, \exists t \in \mbox{\sc roots}(h):
s R t$.
\item
$\forall t \in \mbox{\sc roots} (h) \, \exists s \in \mbox{\sc roots}(g):
s R t$.
\end{itemise}
\paragraph{\Purple{Classification}}
%\begin{proposition}{RSvsB}
$\it 2S \prec B$.
\\
\pf
``$\it 2S \preceq B$'' follows since $\fL_{\it 2S}$ is a sublanguage of
$\fL_B$.\\
``$\it 2S \not\succeq B$'' follows from \ctr{2SvsB},
which shows two graphs that are 2-nested simulation equivalent, but
not bisimulation equivalent. Concerning the first claim, as in
\ctr{RSvs2S} there exists exactly one simulation of
{\it left} by {\it right}, which relates the red (or shaded) node on
the {\it left} to the red (or shaded) node on the {\it right}. Unlike in
\ctr{RSvs2S}, this simulation is 2-nested, for the two
subgraphs originating from the two red (or shaded) nodes are simulation
equivalent, as are the graphs {\it left} and {\it right} themselves.
Likewise, the simulation mapping {\it right} on the right-hand side of
{\it left} is also 2-nested. The second claim follows since
$a \neg b \neg c\top \in \it B({left})-B({right})$.
\hfill $\Box$%
%\end{proposition}
\begin{counterexample}
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
LC1: circle at (-4,3)
LC2: circle at (-5,2)
token(-5,2)
LC3: circle at (-5,1)
LC6: circle at (-5,0)
LC4: circle at (-3,2)
LC5: circle at (-4,1)
LC7: circle at (-4,0)
LC8: circle at (-2,1)
LB1: box invis "$a$" at (-4.65,2.65)
LB2: box invis "$a$" at (-3.35,2.65)
LB3: box invis "$b$" at (-5.2,1.5)
LB4: box invis "$b$" at (-3.65,1.65)
LB4: box invis "$b$" at (-2.35,1.65)
LB3: box invis "$c$" at (-5.2,.5)
LB3: box invis "$c$" at (-4.2,.5)
LB: box invis "$abc+a(bc+b)$" at (-3.5,-1)
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC2 to LC3 chop circlerad chop circlerad
arrow from LC3 to LC6 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
arrow from LC4 to LC5 chop circlerad chop circlerad
arrow from LC5 to LC7 chop circlerad chop circlerad
arrow from LC4 to LC8 chop circlerad chop circlerad
B3: box invis "$=_{\it 2S}$" at (0,2)
B3: box invis "$\neq_B~$" at (0,1)
RC1: circle at (4,3)
RC2: circle at (4,2)
token(4,2)
RC3: circle at (3,1)
RC5: circle at (3,0)
RC4: circle at (5,1)
RB1: box invis "$a$" at (3.8,2.5)
RB2: box invis "$b$" at (3.35,1.65)
RB3: box invis "$b$" at (4.65,1.65)
RB4: box invis "$c$" at (2.8,.5)
RB: box invis "$a(bc+b)$" at (4,-1)
arrow from RC1 to RC2 chop circlerad chop circlerad
arrow from RC2 to RC3 chop circlerad chop circlerad
arrow from RC2 to RC4 chop circlerad chop circlerad
arrow from RC3 to RC5 chop circlerad chop circlerad
.PE
\centerline{\raise 1em\box\graph}
\caption{2-nested simulation equivalent, but not bisimulation
equivalent\hlabel{2SvsB}}
\end{counterexample}\\
Thus bisimulation equivalence is the finest semantic equivalence
treated so far. The following shows however that on $\IG$ graph
isomorphism is even finer, i.e.\ isomorphic graphs are always bisimilar.
In fact, a graph isomorphism can be seen as a bijective bisimulation.
That not all bisimilar graphs are isomorphic will follow from
\ctr{BvsU}.\pagebreak[3]
\begin{proposition}{isomorphism}
For $g,h \in \IG$, $g \cong h$ iff there exists
a bisimulation $R$ between $g$ and $h$, satisfying
\begin{itemise}
\item
If $sRt$ and $uRv$ then $s=u \Leftrightarrow t=v$. \hfill (*)
\end{itemise}
\pf Suppose $g \cong h$. Let $f: \nd (g) \rightarrow \nd (h)$ be a
graph isomorphism. Define $R \subseteq \nd (g) \times \nd (h)$ by
$sRt$ iff $f(s)=t$. Then it is routine to check that $R$ satisfies
all clauses of \df{bisimulation graph} and (*). Now suppose
$R$ is a bisimulation between $g$ and $h$ satisfying (*). Define $f:
\nd (g) \rightarrow \nd (h)$ by $f(s)=t$ iff $sRt$. Since $g$ is
connected it follows from the definition of a bisimulation that for
each $s$ such a $t$ can be found. Furthermore direction
``$\Rightarrow$'' of (*) implies that $f(s)$ is
uniquely determined. Hence $f$ is well-defined. Now direction
``$\Leftarrow$'' of (*) implies that $f$ is injective.
From the connectedness of $h$ it follows that $f$ is also surjective,
and hence a bijection. Finally, the clauses of \df{bisimulation
graph} imply that $f$ is a graph isomorphism.
\end{proposition}
\begin{corollary}{isomorphism}
If $g \cong h$ then $g$ and $h$ are equivalent according
to all semantic equivalences encountered so far.
\end{corollary}
\paragraph{\Purple{Non-well-founded sets}}
Another characterization of bisimulation semantics can be given by
means of {\sc Aczel}'s universe ${\cal V}$ of non-well-founded sets
\hcite{Ac88}. This universe is an extension of the Von Neumann
universe of well-founded sets, where the axiom of foundation (every
chain $x_0 \ni x_1 \ni \cdots$ terminates) is replaced by an
\phrase{anti-foundation axiom}.
\begin{definition}{non-well-founded} Let ${\cal B}$ denote the unique
function ${\cal M} : \IP \rightarrow {\cal V}$ satisfying
$${\cal M} (p)=\{\rec{a, {\cal M} (q)} \mid p \goto{a} q\}$$ for all $p
\in \IP$.
Two processes $p$ and $q$ are \phrase{branching equivalent} (my
terminology) if ${\cal B}(p)={\cal B}(q)$.
\end{definition}
It follows from Aczel's anti-foundation axiom that such a function
exists. In fact the axiom amounts to saying that systems of
equations like the one above have unique solutions. In \hcite{Ac88}
there is also a section on communicating systems. There two processes
are identified iff they are branching equivalent.
A similar idea underlies the semantics of
{\sc De Bakker \& Zucker} \hcite{BZ82},
but there the domain of processes is a complete metric space and the
definition of ${\cal B}$ above only works for finitely branching processes,
and only if $=$ is interpreted as \phrase{isometry}, rather than equality,
in order to stay in well-founded set theory. For finitely branching
processes the semantics of De Bakker and Zucker coincides with the one
of Aczel and also with bisimulation semantics. This is observed in
{\sc Van Glabbeek \& Rutten} \hcite{vGR89},
where also a proof can be found of the next proposition, saying that
bisimulation equivalence coincides with branching equivalence.
\begin{proposition}{AFA bisimulation}
Let $p,q \in \IP$. Then $p \bis{} q ~ \Leftrightarrow ~
{\cal B}(p)={\cal B}(q)$.
\\\pf
``$\Leftarrow$'': Let $B$ be the relation defined by $pBq$ iff
${\cal B}(p)={\cal B}(q)$; then it suffices to prove that $B$ is a
bisimulation. Suppose $pBq$ and $p \goto{a} p '$. Then
$\rec{a,{\cal B}(p')} \in {\cal B}(p)={\cal B}(q)$. So
by the definition of ${\cal B}(q)$ there must be a process $q '$ with
${\cal B}(p')={\cal B}(q')$ and $q \goto{a} q'$. Hence $p ' B q '$,
which had to be proved. The second requirement for $B$ being
a bisimulation follows by symmetry.
``$\Rightarrow$'': Let ${\cal B}^*$ denote the unique solution of
${\cal M}^*(p)= \{\rec{a, {\cal M}^* (r')} \mid \exists r:r \bis{} p
\wedge r \goto{a} r'\}$.\linebreak As for $\cal B$ it follows from the
anti-foundation axiom that such a unique solution exists. From the
symmetry and transitivity of $\bis{}$ it follows that
\begin{equation}
p \bis{} q ~~ \Rightarrow ~~ {\cal B}^* (p)={\cal B}^* (q).
\hlabel{*}
\end{equation}
Hence it remains to be proven that ${\cal B}^* =\cal B$. This can be
done by showing that ${\cal B}^*$ satisfies the equations ${\cal M}
(p)= \{\rec{a, {\cal M} (q)} \mid p \goto{a} q\}$, which have $\cal
B$ as unique solution. So it has to be established that ${\cal B}^*
(p)= \{\rec{a,{\cal B}^* (q)} \mid p \goto{a} q\}$. The direction
``$\supseteq$'' follows directly from the reflexivity of $\bis{}$.
For ``$\subseteq$'', suppose $\rec{a,X} \in {\cal B}^* (p)$. Then
$\exists r:r \bis{} p$, $r \goto{a} r '$ and $X={\cal B}^* (r ' )$.
Since $\bis{}$ is a bisimulation, $\exists p ' : p \goto{a} p '$ and
$r ' \bis{} p '$. From (\href{*}{\ref{*}}) it follows that $X={\cal B}^* (r
' )={\cal B}^* (p ' )$. Therefore $\rec{a,X} \in \{\rec{a,{\cal B}^*
(q)} \mid p \goto{a} q\}$, which had to be established.
\end{proposition}
\paragraph{\Purple{Infinite processes}}\textBrown
The following predecessor of bisimulation equivalence was proposed in
{\sc Hennessy \& Milner} \href{HM85}{\cite{HM80-85,HM85}}.
\begin{definition}{obs} Let $p,q \in \IP$. Then:
\begin{itemise}
\item
$p \sim_0 q$ is always true.
\item
$p \sim_{n+1} q$ if for all $a \in Act$:
\begin{itemise}
\item
$p \goto{a} p '$ implies $\exists q ' : ~q \goto{a} q '$
and $p ' \sim_n q '$;
\item
$q \goto{a} q '$ implies $\exists p ' : ~p \goto{a} p '$
and $p ' \sim_n q '$.
\end{itemise}
\item
$p$ and $q$ are \hindex{observational equivalence}{\em observationally
equivalent}, notation $p \sim q$, if $p \sim_n q$ for every $n
\in \IN$.
\end{itemise}
\end{definition}
Hennessy and Milner provided the following modal characterization of
observational equivalence on image finite processes.
\begin{definition}{HML}
The set $\fL_{\rm HM}$ of \phrase{Hennessy-Milner formulas} over
$Act$ is defined recursively by:
\begin{itemise}
\item
$\top \in \fL_{\rm HM}$.
\item
If $\phi , \psi \in \fL_{\rm HM}$ then $\phi \wedge \psi \in {\fL}_{\rm HM}$.
\item
If $\phi \in \fL_{\rm HM}$ and $a \in Act$ then $a \phi \in \fL_{\rm HM}$.
\item
If $\phi \in \fL_{\rm HM}$ then $\neg \phi \in \fL_{\rm HM}$.
\end{itemise}
The \phrase{satisfaction relation} $\models \; \subseteq \IP \times
\fL_{\rm HM}$ is defined recursively by:
\begin{itemise}
\item
$p \models \top$ for all $p \in \IP$.
\item
$p \models \phi \wedge \psi$ if $p \models \phi$ and $p \models \psi$.
\item
$p \models a \phi$ if for some $q \in \IP$: $p \goto{a} q$ and
$q \models \phi$.
\item
$p \models \neg \phi$ if $p \not\models \phi$.
\end{itemise}
The modal logic above is now known as the {\em Hennessy-Milner logic}
({\em HML})\hindex{Hennessy-Milner logic (HML)}.
Let ${\it HM}(p)$ denote the set of all Hennessy-Milner formulas that are
satisfied by the process $p$:
${\it HM}(p)=\{\phi \in \fL_{\rm HM} \mid p \models \phi \}$.
Two processes $p$ and $q$ are \phrase{HML-equivalent}, notation $p
=_B^- q$, if ${\it HM}(p) = {\it HM} (q)$.
\end{definition}
Theorem 2.2 in {\sc Hennessy \& Milner}
\href{HM85}{\cite{HM80-85,HM85}} says that
$\sim$ and $=_B^-$ coincide for image finite processes.
This result will be strengthened by \pr{HML}.
Below I provide a modal characterization of $\sim$ that is valid for
arbitrary processes.
\begin{definition}{modal omega bisimulation}
Let $\fL_B^\omega = \bigcup_{n=0}^\infty \fL_B^n$, where
$\fL_B^n$ is given by:
\begin{itemise}
\item
If $I$ is a set and $\phi_i \in \fL_B^n$ for $i \in I$ then
$\bigwedge_{i\in I}\phi_i \in \fL_B^n$.
\item
If $\phi \in \fL_B^n$ and $a \in Act$ then $a \phi \in \fL_B^{n+1}$.
\item
If $\phi \in \fL_B^n$ then $\neg \phi \in \fL_B^n$.
\end{itemise}
Let $B^\omega(p)=\{\phi \in \fL_B^\omega \mid p \models \phi \}$
and write $p =_B^\omega q$ if $B^\omega(p) = B^\omega(q)$.
\end{definition}
\begin{proposition}{modal omega bisimulation}
$p \sim_n q \Leftrightarrow \forall \phi \in \fL_B^n (p\models
\phi \Leftrightarrow q\models \phi)$ for all $n \in \IN$.
Hence $p \sim q \Leftrightarrow p =_B^\omega q$.
\\\pf
{\it Induction Base:} Formulas in $\fL_B^0$ do not contain the
construct $a\phi$. Hence for such formulas $\psi$ the statement $p
\models \psi$ is independent of $p$. Thus $\forall p,q \in \IP:
\forall \phi \in \fL_B^0 (p\models \phi \Leftrightarrow q\models \phi)$.
\noindent
{\it Induction Step:} Suppose $p \sim_{n+1} q$.
I now use structural induction on $\phi$.
\begin{list}{{\bf --}}{\labelwidth\leftmargini\advance\labelwidth-\labelsep
\topsep 2pt \itemsep 1pt \parsep 1pt}
\item Let $p \models a\varphi$ with $a\varphi \in \fL_B^{n+1}$. Then
there is a $p' \in \IP$ with $p\goto{a} p'$ and $p' \models \varphi
\in \fL_B^n$. As $p \sim_{n+1} q$, there must be a $q'\in \IP$ with
$q\goto{a}q'$ and $p' \sim_{n} q'$. So by induction $q' \models \varphi$,
and hence $q \models a\varphi$. \\ By symmetry one also obtains $q
\models a\varphi \Rightarrow p \models a\varphi$.
\item $p \models \bigwedge_{i\in I}\varphi_i \Leftrightarrow
\forall i\!\in\!I(p \models \varphi_i)\stackrel{\rm ind.}{\Longleftrightarrow}
\forall i\!\in\!I(q \models \varphi_i) \Leftrightarrow q \models
\bigwedge_{i\in I}\varphi_i$.
\item $p \models \neg \varphi \Leftrightarrow p \not\models \varphi
\stackrel{\rm ind.}{\Longleftrightarrow}
q \not\models \varphi \Leftrightarrow q \models \neg \varphi$.
\end{list}
Now suppose $\forall \phi \in \fL_B^{n+1} (p\models \phi
\Leftrightarrow q\models \phi)$ and $p \goto{a} p'$. Considering the
symmetry in the definitions involved, all I have to show is that
$\exists q'\in \IP$ with $q\goto{a}q'$ and $p' \sim_n q'$. Let $Q'$
be $$\{q' \in \IP \mid q\goto{a} q' \wedge p' \not\sim_n q'\}.$$ By
\df{LTS} $Q'$ is a set. For every $q' \in Q'$ there must, by
induction, be a formula $\varphi_{q'} \in \fL_B^n$ with $p'
\models \varphi_{q'}$ but $q' \not\models \varphi_{q'}$ (use negation if
necessary). Now $p \models a\bigwedge_{q'\in Q'} \varphi_{q'} \in
\fL_B^{n+1}$ and therefore $q \models a\bigwedge_{q'\in Q'}
\varphi_{q'}$. So there must be a $q'\in \IP$ with $q\goto{a}q'$ and
$q'\not\in Q'$, which had to be shown.
\end{proposition}
Comparing their modal characterizations ($=_B$ of $\bis{}$ and
$=_B^\omega$ of $\sim$) one finds
$$p \bis{} q ~~\Rightarrow~~ p \sim q ~~\Rightarrow~~ p =_B^- q.$$
Theorem 2.1 in {\sc Hennessy \& Milner} \href{HM85}{\cite{HM80-85,HM85}} says,
essentially, that for image finite processes the relation $\sim$
satisfies the defining properties of a bisimulation (cf.\
\href{bisimulation}{Definition \ref{bisimulation}}). Inspired by this
insight, {\sc Park} \hcite{Pa81} proposed the concise formulation of
bisimulation equivalence employed in \href{bisimulation}{Definition
\ref{bisimulation}}. It follows immediately that if $p,q \in \IP$ are
image finite, then $p \bis{} q \Leftrightarrow p \sim q$. The
following strengthening of this result is due to {\sc Hollenberg}
\hcite{Ho95}.
\begin{propositioni}{HML}
Let $p,q \in \IP$ and $p$ is image finite.
Then $p \bis{} q \Leftrightarrow p =_B^- q$.
\\
\newcommand{\HML}{{\it HM}}
\newcommand{\eqHML}{B}
\pf
Write $p \eqHML q$ iff $p =_B^- q$ and $p$ is image finite.
It suffices to establish that $\eqHML$ is a bisimulation.
\begin{list}{{\bf --}}{\labelwidth\leftmargini\advance\labelwidth-\labelsep
\topsep 2pt \itemsep 1pt \parsep 1pt}
\item
Suppose $p \eqHML q$ and $q \goto{a} q'$. I have to show that
$\exists r\in \IP$ with $p\goto{a}r$ and $\HML(r)=\HML(q')$. Let $R$
be $$\{r \in \IP \mid p\goto{a} r \wedge \HML(r)\neq \HML(q')\}.$$
As $p$ is image finite, $R$ is finite. For every $r \in R$ take a
formula $\varphi_{r} \in \HML(q')- \HML(r)$ (note that if $\psi \in
\HML(r)-\HML(q')$ then $\neg\psi \in \HML(q')-\HML(r)$). Now
$$a\bigwedge_{r\in R} \varphi_{r} \in \HML(q) = \HML(p),$$ so
there must be an $r\in \IP$ with $p\goto{a}r$ and $r \models
\bigwedge_{r\in R} \varphi_{r}$. The latter implies $r\not\in R$,
i.e.\ $\HML(r) = \HML(q')$, which had to be shown.
\item
Suppose $p \eqHML q$ and $p \goto{a} p'$. I have to show that
$\exists q'\in \IP$ with $q\goto{a}q'$ and $\HML(p')=\HML(q')$. Let
$S$ be $$\{s \in \IP \mid p\goto{a} s \wedge \HML(s)\neq\HML(p')\}.$$
As $p$ is image finite, $S$ is finite. For every $s \in S$ take a
formula $\varphi_{s} \in \HML(p')-\HML(s)$. Now $$a\bigwedge_{s\in S}
\varphi_{s} \in \HML(p) = \HML(q),$$ so there must be a $q'\in
\IP$ with $q\goto{a}q'$ and $q'\models \bigwedge_{s\in S}
\varphi_{s}$. By the previous item in this proof, $\exists r\in \IP$
with $p\goto{a}r$ and $\HML(r) = \HML(q')$, hence $r \models
\bigwedge_{s\in S} \varphi_{s}$. The latter implies $r \not\in S$, so
$\HML(r)=\HML(p')$. Thus $\HML(p')=\HML(q')$, which had to be shown.
\hfill $\Box$
\end{list}
\end{propositioni}
By \ctr{infinitary RS}, a result like the one above
does not hold for (ready) simulation semantics.
For the sake of completeness, two more variants of bisimulation
equivalence can be considered. Let $\it FB^*$ be characterized by the
Hennessy-Milner logic augmented with formulas $\widetilde X$, and $\it RB^*$
by the Hennessy-Milner logic augmented with formulas $X$, for $X
\subseteq Act$.
Then $\it B^- \prec FB^* \prec RB^* \prec B^\omega \prec B$, and
for image finite processes all five equivalences coincide.
The strictness of these inclusions is given by Counterexamples
\ref{finite}, \ref{FSvsRS}, \ref{omega} and \ref{infinitary}:
\begin{proposition}{finite}
$CT \not\preceq B^-$, and hence $\it FB^* \not\preceq B^-$.
\\\pf
\ctr{finite} shows two processes with $CT({\it left})
\neq CT({\it right})$. It remains to be shown that ${\it HM}({\it left}) =
{\it HM}({\it right})$, i.e.\ that for all $\phi \in \fL_{\rm HM}$:
${\it left} \models \phi \Leftrightarrow {\it right} \models \phi$.
Using \df{HML} it is sufficient to restrict attention to formulas
$\phi$ which are of the form $a(\bigwedge_{i \in I} b_i \top \wedge
\bigwedge_{j \in J} \neg b_j \top)$ with $I$ and $J$ finite sets of
indices. It is not difficult to see that each such formula that is
satisfied on one side is also satisfied on the other side.
\end{proposition}
\begin{proposition}{FSvsRS}
$R \not\preceq \it FB^*$, and hence $\it RB^* \not\preceq FB^*$.
\\\pf
\ctr{FSvsRS}, shows two processes with $R({\it with})
\neq R({\it without})$. It remains to be shown that ${\it FB}^*({\it
with}) = {\it FB}^*({\it without})$. The argument is the same as in
the previous proof, but this time focusing on formulas of the form
$a(\widetilde{X} \wedge \bigwedge_{i \in I} i\top \wedge \bigwedge_{j
\in J} \neg j\top)$ with $I$ and $J$ finite sets of numbers and $X$ a
possibly infinite set of numbers (= actions).
\end{proposition}
\begin{proposition}{omega}
$S^\omega \not\preceq RB^*$, and hence $RB^\omega \not\preceq RB^*$.
\\\pf
\ctr{omega} shows two processes with $S^\omega({\it
with}) \neq S^\omega({\it without})$. It remains to be shown that
$RB^*({\it with}) = RB^*({\it without})$. The argument is the same as
in the previous proofs---this time using formulas
$a(\{b\} \wedge \bigwedge_{i \in I} bi\top \wedge \bigwedge_{j
\in J} \neg bj\top)$ with $I$ and $J$ finite sets of numbers.
\end{proposition}
\begin{proposition}{infinitary}
$T^\infty \not\preceq B^\omega$, and hence $B \not\preceq B^\omega$.
In addition, ${\it PF} \not\preceq B^\omega$.
\\\pf
\ctr{infinitary} shows two processes with $T^\infty({\it
left}) \neq T^\infty({\it right})$. As remarked at the
\href{df-infinitary possible futures}{end of Section \ref{readiness}},
also $\it PF({\it left}) \neq PF({\it right})$. It
remains to be shown that ${\it left} =_B^\omega {\it right}$, i.e.\
that for all $n \in \IN$: ${\it left} \sim_n {\it right}$. In order
to establish $p \sim_n q$ for two trees $p$ and $q$, the parts of
$p$ and $q$ that are further than $n$ edges away from the root play no
r\^ole, and can just as well be omitted. As the cut versions of $\it
left$ and $\it right$ are isomorphic, by \cor{isomorphism} surely
${\it left} \sim_n {\it right}$.
\end{proposition}
In addition one has $\it 2S^- \prec B^-$, $\it F2S^* \prec FB^*$, $\it
R2S^* \prec RB^*$, $\it 2S^\omega \prec B^\omega$ and $\it 2S \prec B$.
\textBlack
\section{Tree semantics}\hlabel{tree}
\begin{trivlist}
\item[]\Purple{{\bf Definition \ref{tree}}}~
Let $g \in \IG$. The \phrase{unfolding} of $g$ is the graph $U(g) \in
\IG$ defined by
\begin{itemise}
\item $\nd(U(g))=\pd(g)$,
\item $\rt(U(g))=\rt(g)$, i.e.\ the empty path, starting and ending at
the root of $g$,
\item $(\pi,a,\pi') \in \ed(U(g))$ iff
% $\pi' = \pi(end(\pi),a,end(\pi'))end(\pi')$, i.e.\
$\pi'$ extends $\pi$ by one edge, which is labelled $a$.
\end{itemise}
Two processes $p$ and $q$ are \phrase{tree equivalent}, notation $p
=_U q$, if their unfoldings are isomorphic, i.e.\ if $U(G(p)) \cong U(G(q))$.
In \phrase{tree semantics} ($U$) two processes are identified iff they
are tree equivalent.
\end{trivlist}
It is easy to see that the unfolding of any process graph is a tree,
and the unfolding of a tree is isomorphic to itself. It follows that
up to isomorphism every tree equivalence class of process graphs
contains exactly one tree, which can be obtained from an arbitrary
member of the class by means of unfolding.
\begin{proposition}{unfolding}
Let $g \in \IG$. Then $U(g) \bis{} g$.
Hence $g =_U h \Rightarrow g \bis{} h$.
\\\pf
As is easily verified, $\{(\pi,end(\pi)) \mid \pi \in
\pd(g)\}$ is a bisimulation between $U(g)$ and $g$.
\end{proposition}
Tree semantics is employed in {\sc Winskel} \hcite{Wi84b}. No
plausible testing scenario or modal characterization is known for it.
\pr{unfolding} shows that $B\! \preceq U$. That $B \not\succeq U$
follows from Counterexample \ref{BvsU}.
\begin{counterexample}[htb]
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
LC1: circle at (-4,2)
LC2: circle at (-5,1)
LC4: circle at (-3,1)
LB1: box invis "$a$" at (-4.65,1.65)
LB2: box invis "$a$" at (-3.35,1.65)
LB: box invis "$a+a$" at (-4,0)
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
B3: box invis "$=_B$" at (0,1.7)
B3: box invis "$\neq_{U}$" at (0,1)
RC1: circle at (4,2)
RC2: circle at (4,1)
RB1: box invis "$a$" at (3.8,1.5)
RB3: box invis at (5,0)
RB: box invis "$a$" at (4,0)
arrow from RC1 to RC2 chop circlerad chop circlerad
.PE
\centerline{\raise 1em\box\graph}
\caption{Bisimulation equivalent, but not tree equivalent\hlabel{BvsU}}
\end{counterexample}
Although above tree equivalence is defined entirely in terms of action
relations, such a definition is in fact misleading, as action
relations abstract from an aspect of system behaviour that tree
semantics tries to capture. The problem can best be explained by
considering the process
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
circle at (0,0)
circle at (1,0)
arc "$a$" from 2nd last circle.se to last circle.sw
arc "$a$" from last circle.nw to 2nd last circle.ne
arrow from last circle.nw+(-.1,.07) to last circle.nw
arrow from last circle.sw+(-.1,-.07) to last circle.sw
.PE
{\raise 1.5em\box\graph}
that can proceed from its initial to its final state by performing one
of two different $a$-transitions. In tree semantics, such a process
should be considered equivalent to the leftmost process of
Counterexample \ref{BvsU}, and hence different from the rightmost one.
However, action relations only tell whether a process $p$ can evolve
into $q$ by performing an $a$-action; they do not tell in how many
ways this can happen. So in labelled transition systems as defined in
this paper the mentioned process is represented as
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
circle at (0,0)
circle at (1,0)
arrow "$a$" "" from 2nd last circle to last circle chop
.PE
{\raise .7em\box\graph}
and hence considered tree equivalent to the rightmost process of
Counterexample \ref{BvsU}. The mishap that ensues this way will be
illustrated in \sect{axiomatizations}.
Tree semantics on labelled transitions systems as in \sect{LTS}
is a sensible notion only if one knows that each transition in the
system can be taken in only one way. In general, more satisfactory
domains for defining tree equivalence are labelled transition systems
in which the transitions $(p,a,q)$ are equipped with a multiplicity,
telling in how many different ways this transition can be taken, or
process graphs $g=(\nd(g),\rt(g),\ed(g), \it begin, end, label)$ in
which $\nd(g)$ and $\ed(g)$ are sets, $\rt(g) \in \nd(g)$, ${\it
begin,~end}: \ed(g) \rightarrow \nd(g)$ and $\it label:\ed(g)
\rightarrow Act$. The functions {\it begin}, {\it end} and {\it label}
associate with every edge a triple $(s,a,t) \in \nd(g) \times Act
\times \nd(g)$, but contrary to the situation in \df{process graph}
the identity of an edge is not completely determined by such a triple.
On such process graphs, the notions $\pd$, unfolding and tree
equivalence are defined exactly as for the process graphs of
\df{process graph}.
\section{Possible worlds semantics}\hlabel{possible worlds}
In {\sc Veglioni \& De Nicola} \hcite{VD98}, a nondeterministic process
is viewed as a set of deterministic ones: its {\em possible
worlds}\hindex{possible world}.
Two processes are said to be \phrase{possible worlds equivalent} iff
they have the same possible worlds. Two different approaches by which a
nondeterministic process can be resolved into a set of deterministic
ones need to be distinguished; I call them the {\em state-based} and
the {\em path-based} approach. In the state-based approach a
deterministic process $h$ is obtained out of a nondeterministic process
$g \in \IG$ by choosing, for every state $s$ of $g$ and every action
$a \in I(s)$ a single edge $s \goto{a} s'$. Now $h$ is the reachable
part of the subgraph of $g$ consisting of the chosen edges.
In the path-based approach on the other hand, one chooses for every
path $\pi \in \pd(g)$ and every action $a \in I(end(\pi))$ a single
edge $end(\pi) \goto{a} s'$ to continue with. The chosen edges may now be
different for different paths ending in the same state. The difference
between the two approaches is illustrated in Counterexample
\ref{state-based}.
\begin{counterexample}[htb]
\Black{
.PS
# Counterexample `state-based': three processes separating the state-based
# and path-based formalizations of possible worlds equivalence (see caption).
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
# left process: an a-edge followed by chains of b-edges ending in c,
# continued indefinitely (dashed arrow suggests the infinite part)
circle at (-7,2)
circle at last circle +(0,-1)
"$a$" at last circle +(.2,.6)
arrow from 2nd last circle to last circle chop
circle at last circle +(0,-1)
"$b$" at last circle +(.2,.6)
arrow from 2nd last circle to last circle chop
circle at last circle +(0,-1)
"$c$" at last circle +(.2,.6)
arrow from 2nd last circle to last circle chop
circle at 3rd last circle +(-1,0)
"$b$" at last circle +(.6,.15)
arrow from 4th last circle to last circle chop
circle at last circle +(-1,0)
"$b$" at last circle +(.6,.15)
arrow from 2nd last circle to last circle chop
circle at last circle +(-1,0)
"$b$" at last circle +(.6,.15)
arrow dashed from 2nd last circle to last circle chop
# relation symbols between the left and middle processes
box invis "$=_{\it PW}^{\rm state}$" at (-5,1.5)
box invis "$\neq_{\it PW}^{\rm path}$" at (-5,.7)
box invis "$\neq_{T}~~$" at (-5,-.1)
# middle process: a, then a state with a b-loop, then b and c
circle at (-2.5,2)
circle at last circle +(0,-1)
"$a$" at last circle +(-.2,.6)
arrow from 2nd last circle to last circle chop
arrow from last circle+(.2,.15) to last circle.ne
box invis "$b$" at last circle +(.2,-.5)
circle at last circle+(.4,-.3) rad .5
circle at 2nd last circle fill 0
circle at 3rd last circle +(-.8,-.6)
# NOTE: fixed malformed ordinal `4rd' -> `4th' (cf. the other occurrences)
arrow "$b$" "" from 4th last circle to last circle chop
circle at last circle +(0,-1)
"$c$" at last circle +(-.2,.6)
arrow from 2nd last circle to last circle chop
# relation symbols between the middle and right processes
box invis "$\neq_{\it PW}^{\rm state}$" at (0,1.5)
box invis "$=_{\it PW}^{\rm path}$" at (0,.7)
box invis "$=_{U}~~$" at (0,-.1)
# right process: partial unfolding of the middle one (dashed arrows
# suggest the infinite continuation)
circle at (2,2)
circle at last circle +(0,-1)
"$a$" at last circle +(-.2,.6)
arrow from 2nd last circle to last circle chop
circle at last circle +(0,-1)
"$b$" at last circle +(-.2,.6)
arrow from 2nd last circle to last circle chop
circle at last circle +(0,-1)
"$c$" at last circle +(-.2,.6)
arrow from 2nd last circle to last circle chop
circle at 3rd last circle +(1,0)
"$b$" at last circle +(-.6,.15)
arrow from 4th last circle to last circle chop
circle at last circle +(0,-1)
"$b$" at last circle +(-.2,.6)
arrow from 2nd last circle to last circle chop
circle at last circle +(0,-1)
"$c$" at last circle +(-.2,.6)
arrow from 2nd last circle to last circle chop
circle at 3rd last circle +(1,0)
"$b$" at last circle +(-.6,.15)
arrow from 4th last circle to last circle chop
circle at last circle +(0,-1)
"$b$" at last circle +(-.2,.6)
arrow dashed from 2nd last circle to last circle chop
circle at last circle +(0,-1)
"$c$" at last circle +(-.2,.6)
arrow dashed from 2nd last circle to last circle chop
circle at 3rd last circle +(1,0)
"$b$" at last circle +(-.6,.15)
arrow dashed from 4th last circle to last circle chop
.PE
\centerline{\raise 1em\box\graph}}
\caption{State-based versus path-based possible worlds equivalence
\hlabel{state-based}}
\end{counterexample}
In the state-based approach, the process in the middle has two
possible worlds, depending on which of the two $b$-edges is chosen.
These worlds are essentially $abc$ and $ab^\infty$. In the path-based
approach, the process in the middle has countably many possible
worlds, namely $ab^n c$ for $n\geq 1$ and $ab^\infty$.
In \hcite{VD98}, Veglioni \& De Nicola take the state-based approach:
``once we have resolved the underspecification present in a state $s$
by saying, for example, $s \goto{a} s$, then, we cannot choose $s
\goto{a} 0$ in the same possible world.'' However, they provide a
denotational characterization of possible worlds semantics on finite
processes, namely by inductively allocating sets of deterministic
trees to BCCSP expressions (cf.\ \sect{axiomatizations}), which
can be regarded as path-based. In addition, they give an operational
characterization of possible world semantics, essentially following
the state-based approach outlined above. They claim that both
characterizations agree. This, however, is not the case, as
\ctr{state-based finite} reveals a difference between
the two approaches even on finite processes.
\begin{counterexample}[htb]
.PS
# Counterexample `state-based finite' (see caption): a finite process
# distinguishing the state-based from the path-based possible worlds
# approach; the path-based approach yields the extra world $acd+bce$.
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
# top of the diagram: root state(s) with two self-pointing arrows,
# labelled a and b below
circle rad .5 at (4,2.5)
RC0: circle at (4,3) fill 0
RC1: circle at (4,2) fill 0
arrow from last circle +(.15,.04) to last circle.e
arrow from last circle +(-.15,.04) to last circle.w
# the four lower states of the two c-d / c-e branches
RC2: circle at (3,1)
RC3: circle at (5,1)
RC4: circle at (3,0)
RC5: circle at (5,0)
# transition labels (placed by absolute coordinates)
"$a$" at (3.3,2.5)
"$b$" at (4.7,2.5)
"$c$" at (3.35,1.65)
"$c$" at (4.65,1.65)
"$d$" at (3.2,.6)
"$e$" at (4.8,.6)
# edges of the two branches
arrow from RC1 to RC2 chop circlerad chop circlerad
arrow from RC1 to RC3 chop circlerad chop circlerad
arrow from RC2 to RC4 chop circlerad chop circlerad
arrow from RC3 to RC5 chop circlerad chop circlerad
.PE
\centerline{\raise 1em\box\graph}
\caption{State-based versus path-based possible worlds equivalence for
finite processes
\hlabel{state-based finite}}
\end{counterexample}
In the path-based approach the process displayed has a possible world
$acd+bce$ (i.e.\ a process with branches $acd$ and $bce$), which it
has not in the state-based approach. As it turns out, the complete
axiomatization they provide w.r.t.\ BCCSP is correct for the
path-based, denotational characterization, but is unsound for the
state-based, operational characterization. To be precise: their
operational semantics fails to be compositional w.r.t.\ BCCSP.
\mbox{}\ctr{state-based} shows that a suitable formulation%
\footnote{Let two processes be \phrase{possible worlds equivalent} iff
each possible world of the one is $\sim$-equivalent to a possible
world of the other, where $\sim$ is any of the equivalences treated in
this paper. \thm{determinism} will imply that the choice of $\sim$ is
immaterial.}
of the state-based approach to possible worlds semantics is
incomparable with any of the semantics encountered so far.
The processes {\it left} and {\it middle} are state-based possible
worlds equivalent, yet $abbc \in T({\it middle}) - T({\it left})$.
Furthermore, the processes {\it right} and {\it middle} are
tree equivalent, yet in the state-based approach one has $abbc
\in \it PW(right) - PW(middle)$.
Below I propose a formalization of the path-based approach to possible
worlds semantics that, on finite processes, agrees with the
denotational characterization of \hcite{VD98}.
\vspace{-1ex}
\paragraph{\Purple{Definition \ref{possible worlds}}}
A process $p$ is a \phrase{possible world} of a process $q$ if $p$ is
deterministic and $p \sqsubseteq_{RS} q$. Let ${\it PW}(q)$ denote the
class of possible worlds of $q$. Two processes $q$ and $r$ are
\phrase{possible worlds equivalent}, notation $q =_{\it PW} r$, if
$\it PW(q)=PW(r)$. In \phrase{possible worlds semantics} ($\it PW$)
two processes are identified iff they are possible worlds equivalent.
Write $q \sqsubseteq_{\it PW} r$ iff $\it PW(q) \subseteq PW(r)$.
\\[10pt]
It can be argued that the philosophy underlying possible worlds
semantics is incompatible with the view on labelled transition systems
taken in this paper. The informal explanation of the action relations
in \sect{LTS} implies for instance that the right-hand process
graph of \ctr{SvsRT} has a state in which $a$ has
happened already and both $bc$ and $bd$ are possible continuations. In
the possible worlds philosophy on the other hand, this process graph
is just a compact representation of the set of deterministic processes
$\{abc,~abd\}$. Neither of the two processes in this set has such a state.
This could be a reason not to treat possible worlds semantics on the
same footing as the other semantics of this paper. However, one can
give up on thinking of non-deterministic processes as sets of
deterministic ones, and justify possible worlds semantics---at least
the path-based version of Definition \ref{possible worlds}---by an
appropriate testing scenario. This makes it fit in the present paper.
\vspace{-1ex}
\paragraph{\Purple{Testing scenario}}
A testing scenario for possible worlds semantics can be obtained by
making one change in the reactive testing scenario of failure
simulation semantics. Namely in each state only as many copies of the
process can be made as there are actions in $Act$, and, for $a \in
Act$, the first test on copy $p_a$ of $p$ is pressing the
$a$-button. If it goes down, one goes on testing that copy, but it has
already changed its state; if it does not go down, the test on $p_a$ ends.
\vspace{-1ex}
\paragraph{\Purple{Modal characterization}}
On well-founded processes, a modal characterization of possible worlds
semantics can be obtained out of the modal characterization of ready
simulation semantics by changing the modality $\bigwedge_{i \in I}
\phi_i$ into $\bigwedge_{a \in X} a\phi_a$ with $X \subseteq Act$.
Possible worlds of a well-founded process $p$ can be simply
encoded as modal formulas in the resulting language.
Probably, this modal characterization applies to image finite
processes as well. For processes that are neither well-founded nor
image finite this characterization is not exact, as it fails to
distinguish the two processes of \ctr{infinitary}.
\vspace{-1ex}
\paragraph{\Purple{Classification}}
$RT \prec \it PW \prec RS$. $\it PW$ is independent of $S$, $CS$ and $PF$.
\\
\pf
``$\it PW \preceq RS$''\footnote{The counterexample against ``$\it PW
\preceq RS$'' given in \hcite{VD98} is incorrect. The two processes
displayed there are not ready simulation equivalent.} follows by the
transitivity of $\sqsubseteq_{RS}$.\\
``$RT \preceq \it PW$'' holds as $\sigma$ is a ready trace of
$p \in \IP$ iff it is a ready trace of a possible world of $p$.\\
``$S \not\preceq \it PW$'' (and hence ``$RS \not\preceq \it PW$'')
follows from \ctr{SvsRT}. There $\it S(left)\neq
S(right)$, but $\it PW(left)=PW(right)=\{abc,abd\}$.\\
``$\it PF \not\preceq PW$'' follows since $\it PF \not\preceq RS$.\\
``$CS \not\succeq \it PW$'' follows since $CS \not\succeq RT$,
and ``$PF \not\succeq \it PW$'' since $\it PF \not\succeq RT$.\\
Finally, ``$RT \not\succeq \it PW$'' follows from Counterexample
\ref{RTvsPW}, taken from \hcite{VD98}. There the first process denotes
two possible worlds, whereas the second one denotes four.
\hfill $\Box$
\begin{counterexample}[ht]\Black{
.PS
# Counterexample `RTvsPW' (taken from VD98): two processes that are ready
# trace equivalent but not possible worlds equivalent.
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
# left process: $a(bd+ce)+a(cf+bg)$
LC1: circle at (-4,3)
LC2: circle at (-5,2)
circle at last circle +(-1,-1)
arrow from 2nd last circle to last circle chop
"$b$" at (-5.65,1.65)
circle at last circle +(0,-1)
arrow from 2nd last circle to last circle chop
"$d$" at (-6.2,.5)
LC3: circle at (-5,1)
LC6: circle at (-5,0)
LC4: circle at (-3,2)
circle at last circle +(1,-1)
arrow from 2nd last circle to last circle chop
"$b$" at (-2.35,1.65)
circle at last circle +(0,-1)
arrow from 2nd last circle to last circle chop
"$g$" at (-1.8,.5)
LC5: circle at (-3,1)
LC7: circle at (-3,0)
# transition labels of the left process
# NOTE: the last two labels were accidentally named LB3/LB4 again,
# shadowing the earlier definitions; renamed to LB5/LB6 (labels are
# never referenced, so the drawing is unchanged)
LB1: box invis "$a$" at (-4.65,2.65)
LB2: box invis "$a$" at (-3.35,2.65)
LB3: box invis "$c$" at (-4.8,1.5)
LB4: box invis "$c$" at (-3.2,1.5)
LB5: box invis "$e$" at (-4.8,.5)
LB6: box invis "$f$" at (-3.2,.5)
circle invis "$a(bd+ce)+a(cf+bg)$" at (-4,-1)
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC2 to LC3 chop circlerad chop circlerad
arrow from LC3 to LC6 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
arrow from LC4 to LC5 chop circlerad chop circlerad
arrow from LC5 to LC7 chop circlerad chop circlerad
# relation symbols between the two processes
# NOTE: second label was a duplicate B3; renamed to B4 (never referenced)
B3: box invis "$=_{RT}$" at (0,2)
B4: box invis "$\neq_{\it PW}$" at (0,1)
# right process: $a(bd+ce+cf+bg)$
RC1: circle at (4,3)
RC2: circle at (4,2)
circle at last circle +(-2,-1)
arrow "$b$" "" from 2nd last circle to last circle chop
circle at last circle +(0,-1)
arrow from 2nd last circle to last circle chop
"$d$" at (1.8,.5)
circle at 3rd last circle +(2,-1)
arrow "$b$" "" from 4th last circle to last circle chop
circle at last circle +(0,-1)
arrow from 2nd last circle to last circle chop
"$g$" at (6.2,.5)
RC3: circle at (3,1)
RC5: circle at (3,0)
RC4: circle at (5,1)
RC6: circle at (5,0)
# transition labels of the right process
RB1: box invis "$a$" at (3.8,2.5)
RB2: box invis "$c$" at (3.65,1.35)
RB3: box invis "$c$" at (4.35,1.35)
RB4: box invis "$e$" at (3.2,.5)
RB5: box invis "$f$" at (4.8,.5)
circle invis "$a(bd+ce+cf+bg)$" at (4,-1)
arrow from RC1 to RC2 chop circlerad chop circlerad
arrow from RC2 to RC3 chop circlerad chop circlerad
arrow from RC2 to RC4 chop circlerad chop circlerad
arrow from RC3 to RC5 chop circlerad chop circlerad
arrow from RC4 to RC6 chop circlerad chop circlerad
.PE
\centerline{\raise 1em\box\graph}
\caption{Ready trace equivalent, but not possible worlds
equivalent\hlabel{RTvsPW}}}
\vspace{-12pt}
\end{counterexample}
\paragraph{\Purple{Infinite processes}}\textBrown
The version of possible worlds semantics defined above is the
infinitary one. Note that $RT^\infty \prec \it PW$. Exactly as above
one even establishes $p \sqsubseteq_{\it RS} q \Rightarrow p
\sqsubseteq_{\it PW} q \Rightarrow p \sqsubseteq_{\it RT}^\infty q$,
i.e.\ $RT^\infty \preceq^* \it PW \preceq^* RS$. Finitary versions
could be defined by means of the modal characterization given above. I
will not pursue this here. \textBlack
\section{Summary}\hlabel{summary}
In Sections \ref{trace}--\ref{possible worlds} fifteen semantics were
defined that are different for finitely branching processes. These
are abbreviated by {\it T, CT, $F^1$, F, R, FT, RT, PF, S, CS, RS, PW, 2S,
B} and {\it U}.
For each of these semantics $\fO$, except $U$, a modal language $\fL_\fO$ (a
set of modal formulas $\varphi$) has been defined:
\begin{list}{}{\renewcommand{\makelabel}[1]{#1\hfill}
\leftmargin 30pt \labelwidth\leftmargin\advance\labelwidth-\labelsep
\topsep 2pt \itemsep 1pt \parsep 1pt}
\item [$\fL_{T}$] $\varphi ::= \top \mid a\varphi'~(\varphi' \in \fL_{T})$
\hfill the {\em (partial)} \phrase{trace formulas}
\item [$\fL_{CT}$] $\varphi ::= \top \mid a\varphi'~(\varphi' \in
\fL_{CT}) \mid 0$
\hfill the \phrase{completed trace formulas}
\item [$\fL_{F^1}$] $\varphi ::= \top \mid a\varphi'~(\varphi' \in \fL_{F^1})
\mid \widetilde{a}$ ($a \in Act$)
\hfill the \phrase{singleton-failure formulas}
\item [$\fL_F$] $\varphi ::= \top \mid a\varphi'~(\varphi' \in \fL_{F})
\mid \widetilde{X}$ ($X \subseteq Act$)
\hfill the \phrase{failure formulas}
\item [$\fL_R$] $\varphi ::= \top \mid a\varphi'~(\varphi' \in \fL_{R})
\mid X$ ($X \subseteq Act$)
\hfill the \phrase{readiness formulas}
\item [$\fL_{FT}$] $\varphi ::= \top \mid a\varphi'~(\varphi' \in \fL_{FT})
\mid \widetilde{X}\varphi'$ ($X \subseteq Act$,
$\varphi' \in \fL_{FT}$)
\hfill the \phrase{failure trace formulas}
\item [$\fL_{RT}$] $\varphi ::= \top \mid a\varphi'~(\varphi' \in \fL_{RT})
\mid X\varphi'$ ($X \subseteq Act$,
$\varphi' \in \fL_{RT}$)
\hfill the \phrase{ready trace formulas}
\item [$\fL_{\it PF}$] $\varphi::=a\varphi'~(\varphi'\in\fL_{\it PF})
\mid \bigwedge_{i\in I}\varphi_i \wedge \bigwedge_{j\in J}
\neg\varphi'_j~(\varphi_i,~\varphi'_j \in \fL_{T})$
\hfill the \phrase{possible-futures formulas}
\item [$\fL_{S}$] $\varphi ::= a\varphi'~(\varphi' \in \fL_{S})
\mid \bigwedge_{i\in I}\varphi_i~(\varphi_i \in \fL_{S})$
\hfill the \phrase{simulation formulas}
\item [$\fL_{CS}$] $\varphi ::= a\varphi'~(\varphi' \in \fL_{CS})
\mid \bigwedge_{i\in I}\varphi_i~(\varphi_i \in \fL_{CS})
\mid 0$
\hfill the \phrase{completed simulation formulas}
\item [$\fL_{RS}$] $\varphi ::= a\varphi'~(\varphi' \in \fL_{RS})
\mid \bigwedge_{i\in I}\varphi_i~(\varphi_i \in \fL_{RS})
\mid {X}$ ($X \subseteq Act$)
\hfill the \phrase{ready simulation formulas}
\item [$\fL_{\it PW}$] $\varphi ::= \bigwedge_{a\in X}a\varphi_a~(\varphi_a \in
\fL_{\it PW},~ X \subseteq Act) \mid {X}$ ($X \subseteq Act$)
\hfill the \phrase{possible worlds formulas}
\item [$\fL_{\it 2S}$] $\varphi ::= a\varphi'~(\varphi' \in \fL_{\it 2S})
\mid \bigwedge_{i\in I}\varphi_i~(\varphi_i \in \fL_{\it 2S})
\mid \neg \varphi' ~(\varphi' \in \fL_{S})$
\hfill the \phrase{2-nested simulation formulas}
\item [$\fL_B$] $\varphi ::= a\varphi'~(\varphi' \in \fL_{B})
\mid \bigwedge_{i\in I}\varphi_i~(\varphi_i \in \fL_{B})
\mid \neg \varphi'~(\varphi' \in \fL_{B})$
\hfill the \phrase{bisimulation formulas}.
\end{list}
All these languages can be regarded as sublanguages of $\fL_B$,
the {\em infinitary Hennessy-Milner logic}, namely by considering the
constructs not in $\fL_B$ as abbreviations:
$$\begin{array}{l@{~~~~}l@{~~~~}l@{~~~~}l}
\top := \bigwedge_{i \in \emptyset}\varphi_i &
\widetilde{X} := \bigwedge_{a \in X} \neg a\top &
\widetilde{X}\varphi' := \widetilde{X} \wedge \varphi' &
0 := \widetilde{Act} \\
\varphi_1 \wedge \varphi_2 := \bigwedge_{i \in \{1,2\}}\varphi_i &
X := \bigwedge_{a \in X} a\top \wedge \bigwedge_{a \not\in X} \neg a \top &
X\varphi' := X \wedge \varphi' &
\widetilde{a} := \neg a\top
\end{array}$$
On any labelled transition system $\IP$, the satisfaction relation
$\models \; \subseteq \IP \times \fL_B$ is given by:\\[5pt]
\mbox{}\hfill
$p \models a \phi$ if for some $q \in \IP$: $p \goto{a} q \wedge
q \models \phi$;
\hfill\hfill
$p \models \bigwedge_{i\in I}\phi_i$ if $\forall i \in I: p \models \phi_i$;
\hfill\hfill
$p \models \neg \phi$ if $p \not\models \phi$.
\hfill\mbox{}\\[5pt]
For each semantics ${\fO} \in\{{\it T,CT,}F^1\!{\it,F,R,FT,RT,PF,S,CS,RS,PW,2S,B}\}$
this definition specializes to the sublanguage $\fL_\fO$.
Now a \phrase{modal characterization} of $\fO$-equivalence\footnote{In
case $\fO=\it PW$ the modal characterization is known to be valid for
well-founded processes only.} is given by:
$$p =_\fO q ~\Leftrightarrow~ \forall \varphi \in \fL_\fO (p \models
\varphi \Leftrightarrow q \models \varphi).$$
In the cases ${\fO} \in \{{\it T,CT,}F^1\!{\it,F,R,FT,RT}\}$
$\fO$-equivalence was defined by $p =_\fO q \Leftrightarrow \fO(p)=\fO(q)$.
Writing $\fO_{\rm modal}(p)$ for $\{\varphi \in \fL_\fO \mid p \models
\varphi\}$, it can be observed that the formulas in $\fO_{\rm
modal}(p)$ are mild syntactic variations of the elements in $\fO(p)$.
Thus, the modal characterization is a rather trivial restatement of
the original definition of the equivalence.
The modal characterization of $\it PF$ is fairly easy to check. This
is left to the reader. In the cases ${\fO} \in \it
\{S,CS,RS,2S,B\}$ the modal characterization of $=_\fO$ has been
proven equivalent to a relational characterization in Propositions
\href{pr-modal simulation}{\ref{pr-modal simulation}},
\href{pr-completed simulation}{\ref{pr-completed simulation}},
\href{pr-ready simulation}{\ref{pr-ready simulation}},
\href{pr-modal 2-nested simulation}{\ref{pr-modal 2-nested simulation}}
and \href{pr-modal bisimulation}{\ref{pr-modal bisimulation}}. It is a
matter of taste which one is taken to be the official definition.
The same applies to the modal characterizations of the
$\fO$-preorders\addtocounter{footnote}{-1}\footnotemark, given by
$$p \sqsubseteq_\fO q ~\Leftrightarrow~ \forall \varphi \in \fL_\fO (p \models
\varphi \Rightarrow q \models \varphi).$$
For each of the semantics ${\it T,CT,}F^1\!{\it,F,R,FT,RT,S,CS,RS,PW,B}$
a testing scenario has been proposed in which the modal formulas
satisfied by a process $p$ are interpreted as the possible observations
that can be made on a suitable machine interacting with $p$. In
particular, the formula $a\varphi$ represents the observation of
``$a$'' appearing in the display of a generative machine (or the
$a$-button going down on a reactive machine) followed by the
observation $\varphi$. The formula $\widetilde{X}$ represents the
display of the generative machine becoming empty, while $X$ is the set
of actions that are allowed to happen by the environment/observer,
i.e.\ the ones whose switches are set ``free''. In particular, $0$
represents the display becoming empty while all actions are free (in
the absence of switches). On a reactive machine, $\widetilde{a}$
represents the $a$-button refusing to go down, and $\widetilde{X}$
means that none of the $a$-buttons for $a \!\in\! X$ go down when they
all receive pressure. $X$ represents the menu lights for the actions in $X$
being lit while the machine is idling. $\top$ represents the act of
the observer terminating his observations, and $\bigwedge_{i\in
I}\varphi_i$ represents the observations that can be made on $|I|$
copies of the investigated process in its current state, obtained by
means of a replication facility. Finally $\neg\varphi$ represents the
observation that $\varphi$ cannot be observed---an observation which
occurs when copies of the investigated process are exposed to all
possible weather conditions, and in none of them the observation
$\varphi$ is made. A testing scenario for a particular semantics is
obtained by allowing machines that are equipped with (only) those
features corresponding with its modal characterization.
I write ${\fO} \preceq {\fN}$ if semantics $\fO$ makes at
least as many identifications as semantics $\fN$, i.e.\ if $=_{\fO} \;\supseteq\; =_{\fN}$. Clearly, if $\fL_\fO$ is a sublanguage
of $\fL_\fN$ it must be that ${\fO} \preceq {\fN}$.
This immediately yields\footnote{The statements involving {\it PW} and
$U$ do not follow this way, but have been established in Sections
\href{tree}{\ref{tree}} and \href{possible worlds}{\ref{possible worlds}}.}
the following theorem, whose proof has also appeared in the various
subsections entitled ``classification''.
\begin{theorem}{spectrum fb}
$T \preceq CT \preceq F \preceq R \preceq RT$,
$~T \preceq F^1 \preceq F \preceq FT \preceq RT \preceq \it PW \preceq
RS \preceq \it 2S \preceq B \preceq U$, $\it R \preceq PF \preceq \it 2S$,
$~T \preceq S \preceq CS \preceq RS$ and $CT \preceq CS$.
\end{theorem}
\thm{spectrum fb} is illustrated in \fig{fig-spectrum}. There,
however, singleton-failures semantics and completed simulation
semantics are missing, since they did not occur in the literature, and
appear to be of minor interest.
The theorem applies to any labelled transition system $( \IP ,
\rightarrow )$. Whether the inclusions are strict depends on the
choice of $( \IP , \rightarrow)$. In the subsections
``classification'' a number of counterexamples have been presented,
showing that on $\IG$ all semantic notions mentioned in \thm{spectrum
fb} are different and ${\fO} \preceq {\fN}$ holds only if this follows
from that theorem. Moreover, all relevant examples use finite
processes only.
Let $\dl{H}$ be the set of finite connected process graphs. Here
\href{df-process graph}{\em finite} is used in the sense of Definition
\ref{df-action relations}; a process graph $g \in \IG$ is finite iff
$\pd (g)$ is finite, which is the case iff $g$ is acyclic and has only
finitely many nodes and edges. Now the next theorem follows.
\begin{theorem}{spectrum finite strict}
Let $\fO, \fN \in {\it \{\!T,CT,}F^1\!{\it,F,R,FT,RT,PF,S,CS,RS,PW,2S,B,U\}}$.
% on $\dl{H}$ from the
% series ${\it T, CT,}F^1\!{\it, F, R, FT, RT, PF, S, CS,}$ {\it RS, 2S, B}.
Then $\fO \not\preceq \fN$, and even \plat{\fO \not\preceq_{\dl{H}}
\fN}, unless $\fO \preceq \fN$ follows from \thm{spectrum fb} (and the
fact that $\preceq$ is a partial order).
\end{theorem}
The following theorem says that the inclusion hierarchy of the
preorders {\it T, CT, $F^1\!$, F, R, FT, RT, PF, S, CS, RS, PW, 2S}
and $B$ is the same as the inclusion hierarchy of the corresponding
equivalences (there is no preorder for $U$).
\begin{theorem}{hierarchy preorders}
Let ${\fO}, {\fN} \!\in\! {\it
\{\!T,CT,}F^1\!{\it,F,R,FT,RT,PF,S,CS,RS,PW,2S,B\}}$.
Then $\fO \!\preceq^*\! \fN$ iff $\fO \!\preceq \fN\!$.
\end{theorem}
\begin{proof}
Clearly, if $\fL_\fO$ is a sublanguage of $\fL_\fN$ it must be that
$p \sqsubseteq_{\fN} q \Rightarrow p \sqsubseteq_{\fO} q$, i.e.\ $\fO
\preceq^* \fN$. This yields ``if'' (except for $\it RT \preceq^* PW
\preceq^* RS$, which have been established in \sect{possible worlds}).
``Only if'' is immediate (cf.\ \sect{equivalences}).
\end{proof}
\begin{figure}[htb]\Black{
.PS
# The infinitary linear time - branching time spectrum (\fig{spectrum}).
# Node groups (by label prefix / superscript): B*,S*,PF = finitary (*)
# versions; IB*,IS*,IPF = infinitary/default versions; WB*,WS* = omega
# versions; FB* = versions with finite refusal sets (superscript -).
# Solid arrows run from the finer to the coarser semantics; dotted arrows
# connect variants of the same semantics (which coincide for image finite
# processes, cf. \thm{spectrum}).
scale = 4
arrowhead = 1
circlerad = .5
# finitary (*) semantics
B1: circle invis "$T$" at (0,0)
B2: circle invis "\Red{$CT$}" at (0,2)
B3: circle invis "$F$" at (0,4)
B4: circle invis "$R$" at (2,6)
B5: circle invis "$FT$" at (-2,6)
B6: circle invis "$RT$" at (0,8)
B7: circle invis "$RS^*$" at (0,10)
B7f: circle invis "${\it FS}^*$" at (-2,9.5)
B8: circle invis "\Red{${\it R2S}^*$}" at (0,12)
B8f: circle invis "\Red{${\it F2S}^*$}" at (-2,11.5)
B9: circle invis "\Red{$\it RB^*$}" at (0,14)
B9f: circle invis "\Red{$\it FB^*$}" at (-2,13.5)
S1: circle invis "$S^*$" at (-4,4)
S2: circle invis "\Red{$CS^*$}" at (-4,6)
PF: circle invis "$\it PF$" at (4,9.5)
arrow from B2 to B1 chop .4 chop circlerad
arrow from B3 to B2 chop .4 chop circlerad
arrow from B4 to B3 chop .4 chop circlerad
arrow from B5 to B3 chop .4 chop circlerad
arrow from B6 to B4 chop .4 chop circlerad
arrow from B6 to B5 chop .4 chop circlerad
arrow from B7 to B6 chop .4 chop circlerad
arrow from B7f to B5 chop .4 chop circlerad
arrow from B8 to B7 chop .4 chop circlerad
arrow from B8f to B7f chop .4 chop circlerad
arrow from B9 to B8 chop .4 chop circlerad
arrow from B9f to B8f chop .4 chop circlerad
arrow from B7f to S2 chop .4 chop circlerad
arrow from S2 to S1 chop .4 chop circlerad
arrow from S2 to B2 chop .4 chop circlerad
arrow from S1 to B1 chop .4 chop circlerad
arrow from PF to B4 chop .4 chop circlerad
# infinitary (default) semantics
IB1: circle invis "{$T^\infty$}" at (12,3)
IB2: circle invis "\Red{$CT^\infty$}" at (12,5)
IB3: circle invis "{$F^\infty$}" at (12,7)
IB4: circle invis "{$R^\infty$}" at (14,9)
IB5: circle invis "{$FT^\infty$}" at (10,9)
IB6: circle invis "{$RT^\infty$}" at (12,11)
IB7a: circle invis "\Red{\footnotesize\it PW}" at (12,12)
IB7: circle invis "$RS$" at (12,13)
IB8: circle invis "$\it 2S$" at (12,15)
IB9: circle invis "$B$" at (12,17)
IB10: circle invis "\Red{$U$}" at (12,19)
IS1: circle invis "$S$" at (8,7)
IS2: circle invis "\Red{$CS$}" at (8,9)
IPF: circle invis "${\it PF}^\infty$" at (14,12)
arrow from IB2 to IB1 chop .4 chop circlerad
arrow from IB3 to IB2 chop .4 chop circlerad
arrow from IB4 to IB3 chop .4 chop circlerad
arrow from IB5 to IB3 chop .4 chop circlerad
arrow from IB6 to IB4 chop .4 chop circlerad
arrow from IB6 to IB5 chop .4 chop circlerad
# PW sits between RS and RT^infty; shorter chops/arrowheads as the
# nodes are only one unit apart
arrow from IB7 to IB7a chop .25 chop .25 height .2
arrow from IB7a to IB6 chop .15 chop .35 height .2
arrow from IB8 to IB7 chop .4 chop circlerad
arrow from IB9 to IB8 chop .4 chop circlerad
arrow from IB10 to IB9 chop .4 chop circlerad
arrow from IB7 to IS2 chop .4 chop circlerad
arrow from IS2 to IS1 chop .4 chop circlerad
arrow from IS2 to IB2 chop .4 chop circlerad
arrow from IS1 to IB1 chop .4 chop circlerad
arrow from IB8 to IPF chop .4 chop circlerad
arrow from IPF to IB4 chop .4 chop circlerad
# omega-versions of the simulation-like semantics
WB7: circle invis "$RS^\omega$" at (6,11.5)
WB8: circle invis "$\it 2S^\omega$" at (6,13.5)
WB9: circle invis "$B^\omega$" at (6,15.5)
WS1: circle invis "$S^\omega$" at (6,6.5)
WS2: circle invis "\Red{$CS^\omega$}" at (6,8.5)
arrow from WB8 to WB7 chop .4 chop circlerad
arrow from WB9 to WB8 chop .4 chop circlerad
arrow from WB7 to WS2 chop .4 chop circlerad
arrow from WS2 to WS1 chop .4 chop circlerad
# versions with finite refusal sets / menus (superscript -), plus F^1
FB2: circle invis "\Red{$F^1$}" at (-4,1)
FB3: circle invis "\Red{$F^-$}" at (-8,2)
FB4: circle invis "\Red{$R^-$}" at (-6,4)
FB5: circle invis "\Red{$FT^-$}" at (-10,4)
FB6: circle invis "\Red{$RT^-$}" at (-8,6)
FB7: circle invis "\Red{$RS^-$}" at (-8,8)
FB8: circle invis "\Red{$\it 2S^-$}" at (-8,10)
FB9: circle invis "\Red{$B^-$}" at (-8,12)
arrow from FB7 to S1 chop .4 chop circlerad
arrow from FB2 to B1 chop .4 chop circlerad
arrow from FB3 to FB2 chop .4 chop circlerad
arrow from FB4 to FB3 chop .4 chop circlerad
arrow from FB5 to FB3 chop .4 chop circlerad
arrow from FB6 to FB4 chop .4 chop circlerad
arrow from FB6 to FB5 chop .4 chop circlerad
arrow from FB7 to FB6 chop .4 chop circlerad
arrow from FB8 to FB7 chop .4 chop circlerad
arrow from FB9 to FB8 chop .4 chop circlerad
# dotted arrows: links between the variants of one semantics
arrow dotted from IB1 to B1 chop .7 chop .7
arrow dotted from IB2 to B2 chop .7 chop .7
arrow dotted from IB3 to B3 chop .7 chop .7
arrow dotted from IB4 to B4 chop .7 chop .7
arrow dotted from IB5 to B5 chop .7 chop .7
arrow dotted from IB6 to B6 chop .7 chop .7
arrow dotted from IB7 to WB7 chop .7 chop .7
arrow dotted from IB8 to WB8 chop .7 chop .7
arrow dotted from IB9 to WB9 chop .7 chop .7
arrow dotted from IS1 to WS1 chop .5 chop .7
arrow dotted from IS2 to WS2 chop .5 chop .7
arrow dotted from WB7 to B7 chop .7 chop .8
arrow dotted from WB8 to B8 chop .7 chop .8
arrow dotted from WB9 to B9 chop .7 chop .8
arrow dotted from WS1 to S1 chop .7 chop .7
arrow dotted from WS2 to S2 chop .7 chop .7
arrow dotted from IPF to PF chop .7 chop .7
arrow dotted from B3 to FB3 chop .7 chop .7
arrow dotted from B4 to FB4 chop .7 chop .7
arrow dotted from B5 to FB5 chop .7 chop .7
arrow dotted from B6 to FB6 chop .7 chop .7
arrow dotted from B7 to B7f chop .7 chop .7 height .2
arrow dotted from B8 to B8f chop .8 chop .8 height .2
arrow dotted from B9 to B9f chop .7 chop .7 height .2
arrow dotted from B7f to FB7 chop .8 chop .7
arrow dotted from B8f to FB8 chop .8 chop .7
arrow dotted from B9f to FB9 chop .8 chop .7
.PE
\vspace*{-29pt}
\centerline{\raise 1em\box\graph}}
\caption{The infinitary linear time -- branching time spectrum\hlabel{spectrum}}
\end{figure}
\textBrown When the restriction to finitely branching processes is
dropped, there exists a finitary and an infinitary variant of each of
these semantics, depending on whether or not infinite observations are
taken into account (I do not consider the finitary version of {\it PW}
or the infinitary version of $F^1$
though). These versions are notationally distinguished by means of
superscripts ``*'' and ``$\infty$'', respectively; the unsubscripted
abbreviation will refer to the infinitary versions in case of
simulation-like semantics (treated in Sections
\ref{simulation}--\ref{bisimulation}) and to the finitary versions for
the \phrase{decorated trace semantics} (treated in Sections
\ref{trace}--\ref{readiness}). The modal characterizations summarized
above apply to the default (= unsubscripted) versions. Modal
characterizations of $T^\infty$, $CT^\infty$, $F^\infty$, $R^\infty$,
resp.\ $FT^\infty$ and $RT^\infty$, are obtained by allowing traces,
resp.\ failure traces or ready traces, of infinite length as modal
formulas; a modal characterization of $\it PF^\infty$ is obtained by
replacing the reference to $T$ by one to $T^\infty$. Modal
characterizations of $S^*$, $CS^*$, etc.\ are obtained by requiring
the index sets $I$ to be finite. For the simulation-like semantics
also an intermediate variant is considered---superscripted with
``$\omega$''---based on the assumption that observers can investigate
arbitrarily many copies of a process in parallel, but have only a finite
amount of time to do so. Modally, this corresponds to the restriction
to modal formulas with a finite upper bound on the number of nestings
of the $a \varphi$ construct. For the semantics that incorporate
refusal sets, the finitary versions come in two variants, depending on
whether the refusal sets are required to be finite (superscript
``$-$'') or not (the default assumption). A similar distinction is
made for semantics where menus of actions can be observed: in $R^-$,
$RT^-$ and $RS^-$ the modal formula $X$ is replaced by $\bigwedge_{a
\in Y} \neg a\top \wedge \bigwedge_{a \in Z} a\top$, where the sets of
actions $Y$ and $Z$ are required to be finite. Finally, whereas
failure simulation semantics, modally characterized by
\begin{list}{}{\renewcommand{\makelabel}[1]{#1\hfill}
\leftmargin 30pt \labelwidth\leftmargin\advance\labelwidth-\labelsep
\topsep 2pt \itemsep 1pt \parsep 1pt}
\item [$\fL_{\it FS}$] $\varphi ::= a\varphi'~(\varphi' \in
\fL_{\it FS}) \mid \bigwedge_{i\in I}
\varphi_i~(\varphi_i \in \fL_{\it FS})
\mid \widetilde{X}$ ($X \subseteq Act$)
\hfill the \phrase{failure simulation formulas},
\end{list}
coincides with ready simulation semantics, its finitary version
($\it FS^*$) can be distinguished from $RS^*$. The intermediate
notions $\it FS^\omega$ and $RS^\omega$ coincide again, as do $\it
FS^-$ and $RS^-$. By analogy, new semantics $\it FB^*$ and $RB^*$ can be
defined by adding the modality $\widetilde{X}$ resp.\ $X$ to $\fL_B^-$.
These modalities would be redundant on top of $\fL_B^\omega$ or
$\fL_B$. A similar situation occurs for 2-nested simulation.
All semantics encountered are displayed in \fig{spectrum}, in
which the $\preceq$-relation is represented by solid and dotted arrows.
\begin{theorem}{spectrum}
For all semantics ${\fO}$ and ${\fN}$ defined so far,
the formula ${\fO} \preceq {\fN}$ holds iff there is a path
${\fO} \rightarrow \cdots \rightarrow {\fN}$ (consisting of
solid and dotted arrows alike) in \fig{spectrum}.
Furthermore, semantics connected by dotted arrows coincide for
image finite processes.
\end{theorem}
\begin{proof}
That $T^\infty \preceq S$ has been established in \pr{infinitary
simulation}; that $CT^\infty \preceq CS$, $RT^\infty \preceq RS$ and
$\it PF^\infty \preceq 2S$ follows in the same way. $RT^\infty
\preceq \it PW \preceq RS$ has been established in \sect{possible worlds}.
All other implications ${\fO} \preceq {\fN}$ follow from the observation
that the modal language $\fL_\fO$ is included in $\fL_\fN$. The
latter statement has been established in Propositions \hhref{pr-koenig},
\hhref{pr-image finite failures}, \hhref{pr-image finite failure trace},
\hhref{pr-image finite ready trace}, \hhref{pr-image finite ready},
\hhref{pr-image finite readiness}, \hhref{pr-image finite RT},
\hhref{pr-modal finitary simulation}, \hhref{pr-modal finitary ready
simulation}, \hhref{pr-modal finitary 2-nested simulation} and
\hhref{pr-HML} (except that the case of possible-futures
semantics is left to the reader). In order to show that on $\IG$ there
are no inclusions that are not indicated in \fig{spectrum}, it
suffices, in view of \thm{spectrum finite strict}, the already
established parts of \thm{spectrum}, and the fact that $\preceq$ is a
partial order, to show that $CT \not\preceq B^-$, $R \not\preceq
\it FB^*$, $S^\omega \not\preceq RB^*$, $T^\infty \not\preceq B^\omega$,
$\it PF \not\preceq B^\omega$ and $T^\infty \not\preceq \it PF$. This has been
done in Propositions \hhref{pr-finite}, \hhref{pr-FSvsRS}, \hhref{pr-omega}
and \hhref{pr-infinitary}, and at the end of \sect{readiness}.
\end{proof}
Again, the inclusion hierarchy for the preorders is the same as for the
equivalences.
\begin{theorem}{spectrum preorders}
For all semantics ${\fO}$ and ${\fN}$ defined so far,
the formula ${\fO} \preceq^* {\fN}$ holds iff there is a path
${\fO} \rightarrow \cdots \rightarrow {\fN}$ (consisting of
solid and dotted arrows alike) in \fig{spectrum}.
\end{theorem}
\pf That $p \sqsubseteq_{S} q \Rightarrow p \sqsubseteq_{T}^\infty q$
has been established in \pr{infinitary simulation}; that $p
\sqsubseteq_{CS} q \Rightarrow p \sqsubseteq_{CT}^\infty q$, $p
\sqsubseteq_{RS} q \Rightarrow p \sqsubseteq_{RT}^\infty q$ and $p
\sqsubseteq_{\it 2S} q \Rightarrow p \sqsubseteq_{\it PF}^\infty q$
follows in the same way. In \sect{possible worlds} it has been
established that $p \sqsubseteq_{\it RS} q \Rightarrow p
\sqsubseteq_{\it PW} q \Rightarrow p \sqsubseteq_{\it RT}^\infty
q$. All other implications $p \sqsubseteq_{\fO} q \Rightarrow p
\sqsubseteq_{\fN} q$ follow from the observation that the modal
language $\fL_\fO$ is included in $\fL_\fN$. The ``only if'' part is
an immediate consequence of \thm{spectrum}. \textBlack \hfill
\Brown{$\Box$}
\section{Deterministic and saturated processes}\hlabel{determinism}
If the labelled transition system $\IP$ on which the semantic
equivalences of \sect{summary} are defined is large enough,
then they are all different and ${\fO} \preceq_{\IP} {\fN}$
holds only if this is indicated in \fig{spectrum}.\linebreak[3]
However, for certain labelled transition systems many more
identifications can be made. It has been remarked already that for
image finite processes all semantics that are connected by dotted
arrows coincide. In this section various other classes of processes
are examined on which parts of the linear time -- branching time
spectrum collapse. All results of this section, except for
Propositions \hhref{pr-determinism preorders} and \hhref{pr-determinacy},
will be used in the completeness proofs in \sect{axiomatizations}.
\begin{trivlist}\item[]
Recall that a process $p$ is \phrase{deterministic} iff $p
\goto{\sigma} q \wedge p \goto{\sigma} r \Rightarrow q=r$.
\end{trivlist}
{\bf Remark}: If $p$ is deterministic and $p \goto{\sigma} p'$ then also
$p'$ is deterministic. Hence any domain of processes on which
action relations are defined, has a subdomain of deterministic processes
with the inherited action relations. (A similar remark can be made for
image finite processes.)
\\
\pf Suppose $p' \goto{\rho} q$ and $p' \goto{\rho} r$. Then $p
\goto{\sigma\rho} q$ and $p \goto{\sigma\rho} r$, so $q=r$.
\hfill $\Box$
\begin{theorem}{determinism}({\sc Park} \hcite{Pa81})
On a domain of deterministic processes all semantics in
the infinitary linear time -- branching time spectrum coincide.
\end{theorem}
\begin{proof}
Because of \thm{spectrum} it suffices to show that $g =_T h
\Rightarrow g =_U h$ for any two deterministic process graphs $g,h \in
\IG$. Note that a process graph $g \in \IG$ is deterministic iff for
every trace $\sigma \in T(g)$ there is exactly one path $\pi \in
\pd(g)$ with $T(\pi)=\sigma$. Now let $g$ and $h$ be deterministic
process graphs with $g =_T h$. Then the relation $i \subseteq \pd(g)
\times \pd(h)$ that relates $\pi \in \pd(g)$ with $\pi'\in\pd(h)$ iff
$T(\pi)=T(\pi')$ clearly is an isomorphism between $U(g)$ and $U(h)$.
\end{proof}
Thus, if two processes $p$ and $q$ are both deterministic, then
$p =_T q \Leftrightarrow p =_F^1 q \Leftrightarrow p \bis{} q
\Leftrightarrow p =_U q$. In case only one of them is
deterministic, this cannot be concluded, for in Counterexamples
\hhref{TvsCT} and \hhref{BvsU} the right-hand processes are
deterministic. However, in such cases one still has $p =_F^1 q
\Leftrightarrow p \bis{} q$. In fact, a stronger statement holds: if
$q$ is deterministic, then $p \sqsubseteq_F^1 q \Leftrightarrow p
\bis{} q$.
\begin{lemma}{initials}
If $p \sqsubseteq_F^1 q$ then $I(p) = I(q)$. \\\pf Let $p
\sqsubseteq_F^1 q$, i.e.\ $T(p) \subseteq T(q)$ and $F^1(p)
\subseteq F^1(q)$. Then $a \in I(p) \Leftrightarrow a \in T(p)
\Rightarrow a \in T(q) \Leftrightarrow a \in I(q)$ and $a \not\in I(p)
\Leftrightarrow \rec{\epsilon,a} \in F^1(p) \Rightarrow
\rec{\epsilon,a} \in F^1(q) \Leftrightarrow a \not\in I(q)$.
\end{lemma}
\begin{proposition}{determinism preorders}
If $q$ is deterministic then $p \sqsubseteq_F^1 q \Leftrightarrow p
\bis{} q$. \\\pf Let $R$ be the binary relation on $\IP$ defined by
$pRq$ iff $q$ is deterministic and $p \sqsubseteq_F^1 q$, then it
suffices to prove that $R$ is a bisimulation. Suppose $pRq$ and
$p \goto{a} p'$. Then $a \in I(p)=I(q)$. So there is a process $q'
\in \IP$ with $q \goto{a} q'$. As $q$ is deterministic, so is $q'$.
Now let $\rec{\sigma,b} \in F^1(p')$. Then $\exists r: p' \goto{\sigma} r
\wedge b \not\in I(r)$. Hence $p \goto{a\sigma} r$ and
$\rec{a\sigma,b} \in F^1(p)\subseteq F^1(q)$. So there must be a
process $s$ with $q \goto{a\sigma} s \wedge b \not\in I(s)$. By
the definition of the generalized action relations $\exists t: q
\goto{a} t \goto{\sigma} s$, and since $q$ is deterministic, $t=q'$.
Thus $\rec{\sigma,b}\in F^1(q')$. From this it follows that $F^1(p')
\subseteq F^1(q')$. Similarly one finds $T(p')\subseteq T(q')$, hence
$p' \sqsubseteq_F^1 q'$.
Now suppose $pRq$ and $q \goto{a} q'$. Then $a \in I(q)=I(p)$. So
there is a process $p' \in \IP$ with $p \goto{a} p'$. Exactly as above
it follows that $q'$ is deterministic and $p' \sqsubseteq_F^1 q'$.
\end{proposition}
Call a process $p$ \phrase{deterministic up to $\equiv$}, for $\equiv$
an equivalence relation or preorder, if there exists a deterministic
process $p'$ with $p \equiv p'$. Now the above proposition implies that
determinism up to $\bis{}$ coincides with determinism up to $=_F^1$, and
even with determinism up to $\sqsubseteq_F^1$. In contrast, {\em any}
process is deterministic up to $=_T$, as the canonical graphs
constructed in the proof of \pr{explicit trace} are deterministic.
Furthermore, determinism up to $=_U$ is just determinism,
for $g \in \IG$ is deterministic iff $U(g)$ is, and determinism is
preserved under isomorphism.
The following notion of {\em determinacy} was proposed in {\sc
Engelfriet} \hcite{En85}.
\begin{definition}{determinacy}
Let $\equiv$ be an equivalence relation on $\IP$. A process $p \in \IP$
is $\equiv$-\phrase{determinate} if $p \goto{\sigma} q \wedge p
\goto{\sigma} r \Rightarrow q \equiv r$.
\end{definition}
Note that $=$-determinacy is determinism. Furthermore, if $\fO \preceq
\fN$ then $=_\fO$-determinacy is implied by $=_\fN$-determinacy.
Besides $=_T$-determinacy, $=_F$-determinacy and $\!\bis{}$-determinacy,
Engelfriet also considers $=_I$-determinacy, where $=_I$ is given by
$p =_I q$ iff $I(p) = I(q)$.
Clearly $=_I$ is coarser than any of the equivalences of
\sect{summary}: $p =_T q \Rightarrow p =_I q$. Moreover, $=_I$ is
even coarser than most of the preorders: $p \sqsubseteq_F^1 q
\Rightarrow p =_I q$, as established in \lem{initials}.
Engelfriet established the following three results:
\begin{itemise}
\item[(1)] $\bis{}$-determinacy and $=_I$-determinacy are the same. Hence
$\equiv$-determinacy is the same for all equivalences $\equiv$ of
\sect{summary}, except $U$. Therefore, he just calls this {\em
determinacy}.
\item[(2)] For determinate processes, bisimulation equivalence and trace
equivalence (and hence all equivalences in between) are the same.
\item[(3)] Determinacy is preserved under failures equivalence (and
hence under $\bis{}$). Even stronger, if $q$ is determinate and $p
\sqsubseteq_F q$, then $p$ is determinate and $p \bis{} q$.
(In \hcite{En85}, $\sqsupseteq_F$ is written $\subseteq_{\rm f}$.)
\end{itemise}
Using \pr{determinism preorders} I show that both $=_I$-determinacy
and $\bis{}$-determinacy coincide with determinism up to $\bis{}$,
from which (1), (2) and (3) follow.
\begin{proposition}{determinacy}
Let $p \in \IP$. The following are equivalent:
\begin{list}{$\bullet$}{\leftmargin 25pt
\labelwidth\leftmargin\advance\labelwidth-\labelsep
\topsep 1pt \itemsep 1pt \parsep 0pt}
\item [(a)] $p$ is $\bis{}$-determinate
\item [(b)] $p$ is $=_I$-determinate
\item [(c)] $p$ is deterministic up to $=_R$
\item [(d)] $p$ is deterministic up to $\bis{}$.
\end{list}
\pf ``$\mbox{(a)} \Rightarrow \mbox{(b)}$'' is immediate as $=_I$ is
coarser than $\bis{}$.
``$\mbox{(b)} \Rightarrow \mbox{(c)}$'': Suppose $p$ is
$=_I$-determinate. Let $G(T(p))$ be the canonical graph of the trace
set of $p$ as defined in the proof of \pr{explicit trace}. By
construction, $G(T(p))$ is deterministic and $T(p) = T(G(T(p)))$. It
remains to be shown that $p =_R G(T(p))$.
As $p$ is $=_I$-determinate, one has $\rec{\sigma,X}, \rec{\sigma,Y}
\in R(p) \Rightarrow X=Y$. Hence $\rec{\sigma,X} \in R(p)$ iff $\sigma
\in T(p) \wedge X=\{a \in Act \mid \sigma a \in T(p)\}$, i.e.\ $R(p)$
is completely determined by $T(p)$. As also $G(T(p))$ is
$=_I$-determinate (for it is even deterministic), also $R(G(T(p)))$ is
completely determined by $T(G(T(p)))$: $\rec{\sigma,X} \in R(G(T(p)))$
iff $\sigma \in T(G(T(p))) \wedge X=\{a \in Act \mid \sigma a \in
T(G(T(p)))\}$. It follows that $R(p)=R(G(T(p)))$.
``$\mbox{(c)} \Rightarrow \mbox{(d)}$'' has been established in
\pr{determinism preorders}.
``$\mbox{(d)} \Rightarrow \mbox{(a)}$'': Suppose $p \bis{} q$ and $q$
is deterministic. Let $p \goto{\sigma} p'$ and $p \goto{\sigma} p''$.
Then $\exists q': q \goto{\sigma} q' \wedge p'\bis{}q'$
and $\exists q'': q \goto{\sigma} q'' \wedge p''\bis{}q''$.
As $q$ is deterministic, $q'=q''$. Hence $p' \bis{} p''$.
It follows that $p$ is $\bis{}$-determinate.
\end{proposition}
Now (1) is part of \pr{determinacy}. (2) is a generalization of
\thm{determinism}, that is now implied by it: Suppose $p$ and $q$ are
determinate and $p =_T q$. By \pr{determinacy} there are
deterministic processes $p'$ with $p \bis{} p'$ and $q'$ with $q
\bis{} q'$. Hence $p' =_T q'$, so by \thm{determinism} $p' \bis{}
q'$. Thus $p \bis{} q$, yielding (2).\linebreak[3] (3) holds even for
$F^1$ instead of $F$. For let $q$ be determinate and $p
\sqsubseteq_F^1 q$. Then there is a deterministic process $q'$ with $q
\bis{} q'$. Hence $p \sqsubseteq_F^1 q'$. By \pr{determinism
preorders} $p \bis{} q'$, so $p$ is determinate and $p \bis{} q$.
Note that a process $p$ is deterministic iff for $\pi,\pi'
\in \pd(G(p))$ one has $T(\pi)=T(\pi') \Rightarrow \pi=\pi'$.
For this reason, determinism could have been called {\em trace
determinism}, and the notions of {\em ready trace determinism}
and {\em completed trace determinism} can be defined analogously.
\begin{definition}{RT-determinism}
A process $p$ is \phrase{ready trace deterministic} if for
$\pi,\pi' \in \pd(G(p))$ one has $RT(\pi)=RT(\pi') \Rightarrow \pi=\pi'$.
It is \phrase{completed trace deterministic} if for $\pi,\pi' \in
\pd(G(p))$ one has \plat{$T(\pi)=T(\pi') \wedge \left( I(end(\pi))=\emptyset
\Leftrightarrow\rule{0pt}{11pt} I(end(\pi'))=\emptyset \right)
\Rightarrow \pi=\pi'$}.
\end{definition}
A process $p \in \IP$ is ready trace deterministic iff there are no
$p',q,r \in \IP$ and $a \in Act$ such that $p'$ is reachable from $p$,
$p' \goto{a} q$, $p' \goto{a} r$, $I(q)=I(r)$ and $q \neq r$. For
trace determinism the condition $I(q)=I(r)$ is dropped, and for
completed trace determinism it is weakened to $I(q)=\emptyset
\Leftrightarrow I(r)=\emptyset$. Note that if $p$ is ready (or
completed) trace deterministic and $p \goto{\sigma} p'$ then so is $p'$.
Now the following variants of \thm{determinism} can be established.
\begin{proposition}{RT-determinism}
If $g$ and $h \in \IG$ are ready trace deterministic then $g =_{RT} h
\Leftrightarrow g =_U h$.\\
Likewise, if $g$ and $h \in \IG$ are completed trace deterministic
then $g =_{CT} h \Leftrightarrow g =_U h$.
\\\pf
% Note that a process graph $g \in \IG$ is ready trace deterministic iff for
% every ready trace $\sigma \in RT(g)$ there is exactly one path $\pi \in
% \pd(g)$ with $RT(\pi)=\sigma$. Now l
Let $g$ and $h$ be ready trace deterministic process graphs with $g
=_{RT} h$. Then the relation $i \subseteq \pd(g) \times \pd(h)$ that
relates $\pi \in \pd(g)$ with $\pi'\in\pd(h)$ iff $RT(\pi)=RT(\pi')$
clearly is an isomorphism between $U(g)$ and $U(h)$.
The proof of the second statement goes likewise.
\end{proposition}
For completed trace deterministic processes, the equivalences $=_T$
and $=_{CT}$ are different, as can be seen from \ctr{TvsCT}. For
ready trace deterministic processes, the equivalences $=_T$, $=_{CT}$,
$=_F^1$, $=_{F}$, $=_{FT}$, $=_{R}$, $=_{RT}$, $=_{S}$ and $=_{CS}$
are all different, as can be seen from Counterexamples \hhref{TvsCT},
\hhref{CTvsF}, \hhref{RvsFT}, \hhref{FTvsR} and \hhref{F1vsF}.
\thm{determinism} and \pr{RT-determinism} do not generalize to the
corresponding preorders, for in \ctr{TvsCTvsF} one finds two
deterministic processes {\it middle} and {\it right} with ${\it
middle} \sqsubseteq_{CT} {\it right}$ but ${\it middle}
\not\sqsubseteq_{B} {\it right}$, and in \ctr{CTvsF} one finds two
ready trace deterministic processes {\it right} and {\it left} with
${\it right} \sqsubseteq_{RT} {\it left}$ but ${\it right}
\not\sqsubseteq_{B} {\it left}$. However, the following variants of
these results can be obtained.
\begin{proposition}{RT-determinism preorders}
If $q$ is ready trace deterministic then $p \sqsubseteq_{RT} q
\Leftrightarrow p \sqsubseteq_{RS} q$.\\
Likewise, if $q$ is completed trace deterministic then
$p \sqsubseteq_{CT} q \Leftrightarrow p \sqsubseteq_{CS} q$,\\
and if $q$ is (trace) deterministic then
$p \sqsubseteq_{T} q \Leftrightarrow p \sqsubseteq_{S} q$.
\\\pf Let $R$ be the binary relation on $\IP$ given by $pRq$ if $q$ is
ready trace deterministic and $p\sqsubseteq_{RT}q$.
For the first statement it suffices to prove that $R$ is a ready simulation.
Clearly $pRq$ implies $I(p)=I(q)$.
Now suppose $pRq$ and $p \goto{a} p'$. Let $X$ be $I(p')$.
Then $aX \in RT(p) \subseteq RT(q)$. So there is a process $q' \in
\IP$ with $q \goto{a} q'$ and $I(q')=X$.
Now let $\sigma \in RT(p')$. Then $\exists r: p' \gort{\sigma} r$.
Hence $p \gort{aX\sigma} r$ and $aX \sigma \in RT(p) \subseteq RT(q)$.
So there must be a process $s$ with $q \gort{aX\sigma} s$. By the
definition of the ready trace relations $\exists t: q
\goto{a} t \gort{\sigma} s \wedge I(t)=X$, and since $q$ is ready trace
deterministic, $t=q'$. Thus $\sigma \in RT(q')$. From this it
follows that $RT(p') \subseteq RT(q')$, implying $p' R q'$.
This finishes the proof of the first statement. The proofs of the
other two statements go the same way (but involving a trivial case
distinction for the completed trace deterministic one).
\end{proposition}
Together, Propositions \hhref{pr-determinism preorders} and
\ref{pr-RT-determinism preorders} imply that on a domain of
deterministic processes only three of the preorders of
\sect{summary} are different, namely $\sqsubseteq_{T}$,
$\sqsubseteq_{CT}$ and $\sqsubseteq_F^1$, coinciding with
$\sqsubseteq_{S}$, $\sqsubseteq_{CS}$ and $\sqsubseteq_{B}$,
respectively. That these three are indeed different is shown in
\ctr{TvsCTvsF}.
\begin{counterexample}[htb]
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
C1: circle at (-6,2)
box invis "$0$" at C1+(0,-2)
box invis "$\sqsubseteq_T~$" at (-3,1.8)
box invis "$\not\sqsubseteq_{CT}$" at (-3,1.1)
C2: circle at (0,2)
circle at C2+(0,-1)
arrow from C2 to last circle chop circlerad chop circlerad
box invis "$a$" at C2+(-.2,-.5)
box invis "$a$" at C2+(0,-2)
box invis "$\sqsubseteq_{CT}$" at (3,1.8)
box invis "$\not\sqsubseteq_{F}~$" at (3,1.1)
C3: circle at (6,2)
circle at C3+(-1,-1)
circle at C3+(1,-1)
arrow from C3 to 2nd last circle chop circlerad chop circlerad
arrow from C3 to last circle chop circlerad chop circlerad
box invis "$a$" at C3+(-.65,-.35)
box invis "$b$" at C3+(.65,-.35)
box invis "$a+b$" at C3+(0,-2)
.PE
\centerline{\raise 1em\box\graph}
\caption{The trace, completed trace, and failures preorders are all
different on deterministic processes\hlabel{TvsCTvsF}}
\end{counterexample}
\begin{definition}{cross saturated}
A process $p$ is \phrase{cross saturated} if $p \goto{\sigma} q
\goto{a} r \wedge p \goto{\sigma} s \goto{a} t \Rightarrow q \goto{a} t$.
\end{definition}
\hname{cross saturated}%
Thus a process graph $g \in \IG$ is {cross saturated} iff for any
$\pi,\pi' \in \pd(g)$ and $a \in Act$ such that $a \in I(end(\pi))$
and $T(\pi')=T(\pi)a$ one has $(end(\pi),a,end(\pi')) \in \ed(g)$.
\begin{proposition}{cross saturated}
If $g$ and $h \in \IG$ are cross saturated then $g =_{R} h
\Leftrightarrow g \bis{} h$.
\\\pf
Let $g$ and $h \in \IG$ be cross saturated and suppose that $R(g)=R(h)$.
Define the relation $R \subseteq \nd(g) \times \nd(h)$ by $sRt$ iff
there are $\pi \in \pd(g)$ and $\rho \in \pd(h)$ with
$end(\pi)=s$, $end(\rho)=t$ and $R(\pi)=R(\rho)$. It suffices to show
that $R$ is a bisimulation between $g$ and $h$.\\
As $I(\rt(g))=I(\rt(h))$ one clearly has $\rt(g)R\rt(h)$.\\
Now suppose $sRt$ and $(s,a,s') \in \ed(g)$. Let $\pi$ and $\rho$ be
such that $end(\pi)=s$, $end(\rho)=t$ and $R(\pi)=R(\rho)$, and let
$\pi'$ be the extension of $\pi$ with $(s,a,s')$.
Now $a \in I(end(\pi))=I(end(\rho))$. Choose $\rho' \in \pd(h)$ with
$R(\rho')=R(\pi')$ (using that $R(g)=R(h)$). Then $T(\rho')=T(\pi')=T(\pi)a=T(\rho)a$, so
$(t,a,end(\rho')) \in \ed(h)$. Moreover, $s'R\, end(\rho')$.\\
The remaining requirement of \df{bisimulation graph} follows by symmetry.
\end{proposition}
\begin{proposition}{cross saturated preorders}
If $h \in \IG$ is cross saturated then $g \sqsubseteq_{R} h
\Leftrightarrow g \sqsubseteq_{RS} h$.
\\\pf Exactly as above.
\end{proposition}
\begin{definition}{saturated}
A process $p$ is \phrase{saturated} if $\rec{\sigma,X} \in R(p) \wedge
\rec{\sigma,Y\cup Z} \in R(p) \Rightarrow \rec{\sigma, X \cup Y} \in
R(p)$.
\end{definition}
\begin{proposition}{saturated}
If $p$ is finitely branching and $q$ is saturated then $p
\sqsubseteq_{F} q \Leftrightarrow p \sqsubseteq_{R} q$. Thus if both
$p$ and $q$ are finitely branching and saturated then $p =_{F} q
\Leftrightarrow p =_{R} q$.
\\\pf Suppose $p$ is finitely branching, $q$ is saturated and $p
\sqsubseteq_{F} q$. Let $\rec{\sigma,Y} \in R(p)$. Then $Y$ is finite.
In case $Y=\emptyset$ one has $\rec{\sigma,Act} \in F(p) \subseteq
F(q)$, implying $\rec{\sigma,\emptyset} \in R(q)$, as desired. So
assume $Y \neq \emptyset$. Then, for all $a \in Y$,
$\rec{\sigma a,\emptyset} \in F(p) \subseteq F(q)$ so $\exists Z_a
\subseteq Act$ with $\rec{\sigma, \{a\} \cup Z_a} \in R(q)$. Hence,
using \df{saturated} with $Z=\emptyset$, one obtains $\rec{\sigma,Y
\cup \bigcup_{a \in Y} Z_a} \in R(q)$. As $\rec{\sigma,Act-Y} \in F(p)
\subseteq F(q)$ it must be that $\rec{\sigma,X} \in R(q)$ for some $X
\subseteq Y$. Now \df{saturated} gives $\rec{\sigma,Y} \in R(q)$.
\end{proposition}
\begin{definition}{RT-saturated}
A process $p$ is \phrase{RT-saturated} if $$\sigma X \rho \in RT_N(p)
\wedge \sigma Y \in RT_N(p) \Rightarrow \sigma (X \cup Y) \rho
\in RT_N(p).$$
\end{definition}
\begin{proposition}{RT-saturated}
If $p$ is finitely branching and $q$ is RT-saturated then $p
\sqsubseteq_{FT} q \Leftrightarrow p \sqsubseteq_{RT} q$. Thus if both
$p$ and $q$ are finitely branching and RT-saturated then $p =_{FT} q
\Leftrightarrow p =_{RT} q$.
\\\pf
Suppose $p$ is finitely branching, $q$ is RT-saturated and $p
\sqsubseteq_{FT} q$. With induction on $k \in \IN$ I will show that
whenever $X_0 a_1 X_1 a_2 \cdots a_n X_n \in RT (p)$ then
there are $Y_i \subseteq X_i$ for $i=k+1,\ldots,n$ such that
$X_0 a_1 X_1 a_2 \cdots a_k X_k a_{k+1}
Y_{k+1} a_{k+2} \cdots a_n Y_n \in RT(q).$
The case $k=n$, together with \pr{barbed}, completes the proof of the
proposition.
\\ {\it Induction base} ($k=0$):
Let $X_0 a_1 X_1 a_2 \cdots a_n X_n \in RT (p)$.
Write $\overline{X}$ for $Act-X$.\\
Then $\overline{X_0} a_1 \overline{X_1} a_2 \cdots a_n \overline{X_n} \in
FT(p) \subseteq FT(q)$. Hence there are $Y_i \subseteq X_i$ for
$i=0,...,n$ such that $Y_0 a_1 Y_1 a_2 \cdots a_n Y_n \in RT (q)$. As
$p \sqsubseteq_{FT} q \Rightarrow p \sqsubseteq_T q \Rightarrow
I(p)\subseteq I(q)$, we have $Y_0 = X_0$.
\\ {\it Induction step}:
Take $k>0$ and suppose the statement has been established for $k-1$.\\
Let $X_0 a_1 X_1 a_2 \cdots a_n X_n \in RT (p)$.
Then, by induction, there are $Y_i \subseteq X_i$ for $i=k,...,n$ such
that $X_0 a_1 X_1 a_2 \cdots a_{k-1} X_{k-1} a_k Y_k a_{k+1}
Y_{k+1} a_{k+2} \cdots a_n Y_n \in RT(q)$.
Moreover, for every $b \in X_k$, $X_0 a_1 X_1 a_2 \cdots a_k X_k b \in
RT(p)$, so, again using the induction hypothesis, there must be a $Z_b
\subseteq X_k$ such that $X_0 a_1 X_1 \cdots X_{k-1} a_k Z_b b \in RT(q)$,
and hence $X_0 a_1 X_1 \cdots X_{k-1} a_k (Z_b \cup \{b\}) \in RT(q)$.
As $X_k$ is finite and $Y_k \cup \bigcup_{b \in X_k} (Z_b \cup \{b\})
= X_k$, the RT-saturation of $q$ gives\\ $X_0 a_1 \cdots a_k X_k a_{k+1}
Y_{k+1} a_{k+2} \cdots a_n Y_n \in RT(q)$, which had to be established.
\end{proposition}
\section{Complete axiomatizations}\hlabel{axiomatizations}
\subsection{A language for finite, concrete, sequential processes}
Consider the following basic CCS- and CSP-like language BCCSP\hindex{BCCSP} for
finite, concrete, sequential processes over a given alphabet $Act$:
\begin{list}{$\bullet$}{\leftmargin .75in
\labelwidth\leftmargin\advance\labelwidth-\labelsep
\topsep 4pt \itemsep 2pt \parsep 2pt}
\item [$inaction:$\hfill]
$0$ (called $\it nil$ or $\it stop$) is a constant, representing a
process that refuses to do any action.
\item [$action:$\hfill]
$a$ is a unary operator for any action $a \in Act$. The expression $ap$
represents a process, starting with an $a$-action and proceeding with $p$.
\item [$choice:$\hfill]
$+$ is a binary operator. $p+q$ represents a process, first being
involved in a choice between its summands $p$ and $q$, and then
proceeding as the chosen process.
\end{list}
The set ${\sf T\rm(BCCSP)}$ of (closed) \phrase{process expressions} or
\phrase{terms} over this language is defined as usual:
\begin{itemise}
\item
$0 \in {\sf T\rm(BCCSP)}$,
\item
$ap \in {\sf T\rm(BCCSP)}$ for any $a \in Act$ and $p \in {\sf T\rm(BCCSP)}$,
\item
$p+q \in {\sf T\rm(BCCSP)}$ for any $p,q \in {\sf T\rm(BCCSP)}$.
\end{itemise}
Subterms $a0$ may be abbreviated by $a$. Brackets are used for
disambiguation only, assuming associativity of $+$, and letting $a$
bind stronger than $+$. If $P=\{p_1,...,p_n\}$ is a finite nonempty
multiset of BCCSP expressions, then $\Sigma P$ abbreviates $p_1 +
\cdots + p_n$. This expression is determined only up to associativity
and commutativity of $+$. Let $\Sigma \emptyset := 0$. An expression
$ap'$ is called a \phrase{summand} of $p$ if, up to associativity and
commutativity of $+$, $p$ can be written as $\Sigma P$ with $ap' \in P$.
On ${\sf T\rm(BCCSP)}$ action relations $\goto{a}$ for $a \in Act$ are
defined as the predicates on ${\sf T\rm(BCCSP)}$ generated by the
\phrase{action rules} of \tab{BCCSP}. Here $a$ ranges over $Act$
and $p$ and $q$ over ${\sf T\rm(BCCSP)}$.%
\begin{table}[htb]
\begin{center}\framebox{$
ap \goto{a} p
\qquad
\begin{array}{c}
{p \goto{a} p'} \\\hline
{p+q \goto{a} p'}
\end{array}
\qquad
\begin{array}{c}
{q \goto{a} q'} \\\hline
{p+q \goto{a} q'}
\end{array}
$}
\caption{Action rules for BCCSP\hlabel{BCCSP}}
\end{center}
\end{table}
A trivial structural induction shows that
$p \goto{a} p'$ iff $ap'$ is a summand of $p$.
Now all semantic equivalences of Sections
\ref{trace}--\ref{possible worlds} are well-defined on
${\sf T\rm(BCCSP)}$, and for each of the semantics it is
determined when two process expressions denote the same process.
The following theorem says that, apart from $U$, all these semantics
are \phrase{compositional} w.r.t.\ BCCSP, i.e.\ all semantic
equivalences are \phrase{congruences} for BCCSP.
\begin{theorem}{congruence}
Let $p,q,r,s \in {\sf T\rm(BCCSP)}$ and let $\fO$ be any of the
semantics of \sect{summary} except $U$. Then $$p =_{\fO} q ~\wedge~
r =_\fO s ~~\Rightarrow~~ ap =_{\fO} aq ~\wedge~ p+r =_{\fO} q+s.$$
\end{theorem}
\begin{proof}
Each of the semantics $\fO$ has a modal characterization, given by
$p =_{\fO} q \Leftrightarrow {\fO}(p)={\fO}(q)$, where ${\fO}(p)$ is the set of modal formulas of the appropriate form satisfied
by $p$. Let ${\fO}^+(p) := \mbox{$\{ a\phi \mid a\phi \in {\fO}(p)\}$}$ be the set of such formulas which are of the form
$a\phi$. For each choice of ${\fO}$ one easily verifies that ${\fO}(p)$ is completely determined by ${\fO}^+(p)$, i.e.\ ${\fO}(p)={\fO}(q) \Leftrightarrow {\fO}^+(p) = {\fO}^+(q)$. One
also verifies easily that ${\fO}^+(0)=\emptyset$, ${\fO}^+(ap)=\{a\phi \mid \phi \in {\fO}(p)\}$ and ${\fO}^+(p+q) =
{\fO}^+(p) \cup {\fO}^+(q)$. From this the theorem follows
immediately.
\end{proof}
For each such choice of ${\fO}$ one easily verifies that moreover
${\fO}(p)\subseteq{\fO}(q) \Leftrightarrow {\fO}^+(p)\subseteq{\fO}^+(q)$.
From this it follows that all the preorders of \sect{summary}
are \phrase{precongruences} for BCCSP:
\begin{trivlist}
\item[]{\bf Theorem \ref{thm-congruence}b}\hname{7b}
Let $p,q,r,s \in {\sf T\rm(BCCSP)}$ and let $\fO$ be any of the
semantics of \sect{summary} but $U$. Then\\[10pt]
\mbox{}\hfill
$p \sqsubseteq_{\fO} q ~\wedge~ r =_\fO s ~~\Rightarrow~~
ap \sqsubseteq_{\fO} aq ~\wedge~ p+r \sqsubseteq_{\fO} q+s$.
\hfill $\Box$
\end{trivlist}
Tree semantics, when defined merely in terms of the action relations
on ${\sf T\rm(BCCSP)}$, fails to be compositional w.r.t.\ BCCSP\@. The
expression $a0+a0$ has only a single outgoing $a$-transition, namely
to the expression $0$. Thus, by \href{tree}{Definition \ref{tree}},
$a0+a0 =_U a0$. Likewise $b(a0+a0) =_U ba0$. However, $b(a0+a0)+b0
\neq_U ba0+ba0$, as the first process has two outgoing $b$-transitions
and the second process only one. It follows that tree equivalence as
defined above is not compositional w.r.t.\ $+$.
${\sf T\rm(BCCSP)}$ can be turned into a labelled transition system
with multiplicities by assuming a different transition $p \goto{a} q$
for every different proof of $p \goto{a} q$ from the action rules
of \tab{BCCSP}. On such a transition system tree equivalence is
compositional w.r.t.\ BCCSP.
A straightforward structural induction shows that any process $p \in
{\sf T\rm(BCCSP)}$ is finite in the sense of \df{action relations}.
Hence the process graph $G(p)$ is finite as well. The next
proposition establishes that moreover, up to bisimulation equivalence,
any finite process graph can be represented by a BCCSP expression.
In fact, all finite process graphs displayed in this paper have been
annotated by their representing BCCSP expressions.
\begin{definition}{finite BCCSP}
Let $\In{\cdot}: \dl{H} \rightarrow {\sf T\rm(BCCSP)}$ be a mapping
satisfying $\In{g} = \Sigma \{a\In{h} \mid g \goto{a} h\}$.
\end{definition}
A straightforward induction on the length of the longest path of
finite process graphs teaches that such a mapping exists and is
completely determined up to associativity and commutativity of $+$.
\begin{proposition}{finite BCCSP}
Let $g \in \dl{H}$. Then there is a $p \in {\sf T\rm(BCCSP)}$ with
$G(p) \bis{} g$. In fact, $G(\In{g}) \bis{} g$.\\
\pf It suffices to show that the relation $\{(h,G(\In{h})) \mid h \in
\dl{H}\}$ is a bisimulation.
Suppose $h \goto{a} h'$. Then $a\In{h'}$ is a summand of $\In{h}$, so
$\In{h} \goto{a} \In{h'}$, and by \pr{embedding}
$G(\In{h}) \goto{a} G(\In{h'})$.
Vice versa, let $G(\In{h}) \goto{a} h''$. Then, by \pr{embedding},
$h''=G(p')$ for some $p' \in {\sf T\rm(BCCSP)}$ with $\In{h} \goto{a} p'$.
Thus $ap'$ must be a summand of $\In{h}$. By \df{finite BCCSP}
$p' = \In{h'}$ for some $h' \in \dl{H}$ with $h \goto{a} h'$.
As $h'$ is related to $h''=G(\In{h'})$, also this requirement is satisfied.
\end{proposition}
\begin{corollary}{finite BCCSP}
Let $p \in {\sf T\rm(BCCSP)}$. Then $p \bis{} \In{G(p)}$.\\
\pf By the above $G(\In{G(p)}) \bis{} G(p)$. Now apply \cor{embedding}.
\end{corollary}
\begin{corollary}{finite BCCSP 2}
Let $g,h \in \dl{H}$ and let $\fO$ be any of the semantics of
\sect{summary}. Then $$g \sqsubseteq_{\fO} h \Leftrightarrow \In{g}
\sqsubseteq_{\fO} \In{h} ~~~~~\mbox{and}~~~~~ g =_{\fO} h
\Leftrightarrow \In{g} =_{\fO} \In{h}.$$ \pf Let $g \sqsubseteq_{\fO}
h$. By the above $G(\In{g}) \bis{} g \sqsubseteq_{\fO} h \bis{}
G(\In{h})$. Now apply \cor{embedding}.\\ For ``$\Leftarrow$'' let
$\In{g} \sqsubseteq_{\fO} \In{h}$. By \cor{embedding} and \pr{finite
BCCSP} $g \bis{} G(\In{g}) \sqsubseteq_{\fO} G(\In{h}) \bis{} h$.
\end{corollary}
\subsection{Axiomatizing the equivalences}\hlabel{axioms BCCSP}
In \tab{axioms}, complete axiomatizations can be found for
twelve of the fifteen semantic equivalences of this paper that differ
on BCCSP\@. Axioms for singleton-failures, 2-nested simulation and
possible-futures semantics are more cumbersome, and the corresponding
testing notions are less plausible. Therefore they have been omitted.
The axiomatization of tree semantics ($U$) requires action relations
with multiplicities. Although rather trivial, I will not formally
establish its soundness and completeness here.
In order to formulate the axioms, variables have to be added to the
language as usual. In the axioms they are supposed to be universally
quantified. Most of the axioms are axiom schemes, in the
sense that there is one axiom for each substitution of actions from
$Act$ for the parameters $a,b,c$. Some of the axioms are conditional
equations, using an auxiliary operator $I$. Thus provability is
defined according to the standards of either first-order logic with
equality or conditional equational logic. $I$ is a unary operator that
calculates the set of initial actions of a process expression, coded as
a process expression again.
\begin{theorem}{closed completeness}
For each of the semantics ${\fO} \in \{T, ~ S, ~ CT, ~ CS, ~ F, ~
R, ~ FT, ~ RT, ~ PW, ~ RS, ~ B\}$ two process expressions $p,q \in {\sf
T\rm(BCCSP)}$ are ${\fO}$-equivalent iff they can be proved equal
from the axioms marked with ``+'' in the column for ${\fO}$ in
\tab{axioms}. The axioms marked with ``v'' or ``$\omega$'' are
valid in ${\fO}$-semantics but not needed for the proof.
\begin{table}[htb]
\centerline{\begin{tabular}{|@{~}l | c | c | c | c | c | c | c | c | c | c
| c | c |}
\cline{2-13}
\multicolumn{1}{c|}{}
&\makebox[0pt][c]{\it U}&\makebox[0pt][c]{\it B}&\makebox[0pt][c]{\it RS}
&\makebox[0pt][c]{\it PW}&\makebox[0pt][c]{\it RT}&\makebox[0pt][c]{\it FT}
&\makebox[0pt][c]{\it R}&\makebox[0pt][c]{\it F}&\makebox[0pt][c]{\it CS}
&\makebox[0pt][c]{\it CT}&\makebox[0pt][c]{\it S}&\makebox[0pt][c]{\it T}\\
\hline
$(x+y)+z~=~x+(y+z)$&+&+&+&+&+&+&+&+&+&+&+&+\\
$x+y~=~y+x$&+&+&+&+&+&+&+&+&+&+&+&+\\
$x+0~=~x$&+&+&+&+&+&+&+&+&+&+&+&+\\
$x+x~=~x$&&+&+&+&+&+&+&+&+&+&+&+\\
&&&&&&&&&&&&\\
$I(x)=I(y)~~ \Rightarrow ~~a(x+y)~=~a(x+y)+ay$&&&+&v&v&v&v&v&v&v&v&v\\
$a(bx+by+z)~=~ a(bx+z) + a(by+z)$&&&&+&v&v&v&v&&v&&v\\
$I(x)=I(y)~~ \Rightarrow ~~ax+ay~=~a(x+y)$&&&&&+&+&v&v&&v&&v\\
$ax+ay~=~ax+ay+a(x+y)$&&&&&&+&&v&&v&&v\\
\multicolumn{2}{|@{~}l|}{$a(bx\!+\!u)+a(by\!+\!v) = a(bx\!+\!by\!+\!u)+a(by\!+\!v)$}&&&&&&+&+&&v&&v\\
$ax+a(y+z)~=~ax+a(x+y)+a(y+z)$&&&&&&&&+&&$\omega$&&v\\
$a(x+by+z)~=~a(x+by+z)+a(by+z)$&&&&&&&&&+&v&v&v\\
$a(bx+u)+a(cy+v)~=~a(bx+cy+u+v)$&&&&&&&&&&+&&v\\
$a(x+y)~=~a(x+y)+ay$&&&&&&&&&&&+&v\\
$ax+ay~=~a(x+y)$&&&&&&&&&&&&+\\
&&&&&&&&&&&&\\
$I(0)~=~0$&+&+&+&+&+&+&+&+&+&+&+&+\\
$I(ax)~=~a0$&+&+&+&+&+&+&+&+&+&+&+&+\\
$I(x+y)~=~I(x)+I(y)$&+&+&+&+&+&+&+&+&+&+&+&+\\
\hline
\end{tabular}}
\caption{Complete axiomatizations for the equivalences\hlabel{axioms}}
\end{table}
\end{theorem}
\begin{proof}
``If'' (\phrase{soundness}): In the light of \thm{congruence} it
suffices to show that the closed instances of the indicated axioms
are valid in the corresponding semantics. This is straightforward.
``Only if'' (\phrase{completeness}): Let $T_{\fO}$ be the set of
axioms marked with ``+'' in the column for ${\fO}$. Write $T_{\fO}
\vdash p=q$ if the equation $p=q$ is provable from $T_{\fO}$. I have
to show that
\begin{equation}\hlabel{completeness}
p =_{\fO} q ~\Rightarrow~ T_{\fO} \vdash p=q
\end{equation}
for any $p,q \in {\sf T\rm(BCCSP)}$. For the cases ${\fO} \in
\{B,~S,~RS,~CS\}$ I will show that
\begin{equation}\hlabel{inclusion}
p \sqsubseteq_{\fO} q ~\Rightarrow~ T_{\fO} \vdash q=q+p
\end{equation}
for any $p,q \in {\sf T\rm(BCCSP)}$, from which (\ref{completeness})
follows immediately. This will be done with structural induction on
$p$ and $q$. So assume $p \sqsubseteq_{\fO} q$ and (\ref{inclusion})
has been proven for all pairs of smaller expressions $p',q' \in {\sf
T\rm(BCCSP)}$.
Provided $T_{\fO}$ contains at least the first four axioms of
\tab{axioms}, one has \linebreak $T_{\fO} \vdash q=q+p$ iff
$T_{\fO} \vdash q=q+ap'$ for every summand $ap'$ of $p$.
Take ${\fO}=B$, so $p \sqsubseteq_B q$. Let $ap'$ be a summand of
$p$. Then $p \goto{a} p'$, so $\exists q': q \goto{a} q'$ and $p' =_B
q'$. By induction $T_B \vdash p'=p'+q'=q'$, using \pr{bisimulation
preorder}. Furthermore, $aq'$ must be a summand of $q$, so $T_B
\vdash q=q+aq' = q+ap'$ and therefore $T_B \vdash q =q+p$.
Take ${\fO}=S$, so $p \sqsubseteq_S q$. Let $ap'$ be a summand of
$p$. Then $p \goto{a} p'$, so $\exists q': q \goto{a} q'$ and $p'
\sqsubseteq_S q'$. By induction $T_S \vdash q'=q'+p'$, so $T_S \vdash
aq' = a(q'+p') = a(q'+p')+ap' = aq'+ap'$. Furthermore, $aq'$ must be a
summand of $q$, so $T_S \vdash q=q+aq' = q+ap'$ and thus $T_S \vdash q
=q+p$.
Take ${\fO}=\it RS$, so $p \sqsubseteq_{\it RS} q$. Let $ap'$ be a
summand of $p$. Then $p \goto{a} p'$, so $\exists q': q \goto{a} q'$
and $p' \sqsubseteq_{\it RS} q'$. Now $I(p')=I(q')$ and hence $T_{RS}
\vdash I(p')=I(q')$. By induction $T_{RS} \vdash q'=q'+p'$, so $T_{\it
RS} \vdash aq' = a(q'+p') = a(q'+p')+ap' = aq'+ap'$. Furthermore,
$aq'$ must be a summand of $q$, so $T_{\it RS} \vdash q=q+aq' = q+ap'$
and thus $T_{RS} \vdash q =q+p$.
Take ${\fO}=\it CS$, so $p \sqsubseteq_{\it CS} q$. Let $ap'$ be a
summand of $p$. Then $p \goto{a} p'$, so $\exists q': q \goto{a} q'$
and $p' \sqsubseteq_{\it CS} q'$. In case $I(p')=\emptyset$ it
must be that $I(q')=\emptyset$ as well, and hence $T_{CS} \vdash
p'=q'=0$. Otherwise, $T_{CS} \vdash p'=bp''+r$ and
by induction $T_{CS} \vdash q'=q'+p'$, so $T_{\it CS} \vdash aq' =
a(q'+p') = a(q'+bp''+r) = a(q'+bp''+r) + a(bp''+r) = a(q'+p')+ap' = aq'+ap'$.
Furthermore, $aq'$ must be a summand of $q$, so in both cases $T_{\it
CS} \vdash q=q+aq' = q+ap'$ and thus $T_{CS} \vdash q =q+p$.
Take $\fO=PW$. Suppose $p =_{PW} q$. The axiom $a(bx+by+z)=a(bx+z)+a(by+z)$
allows one to rewrite $p$ and $q$ to BCCSP expressions
$p'=\Sigma_{i\in I} a_i p_i$ and $q'=\Sigma_{j\in J} a_j q_j$ with
$p_i$ and $q_j$ deterministic. For expressions of this form it is easy
to establish that $p'=_{PW} q' \Leftrightarrow p' \bis{} q'$.
Using the soundness of the axiom employed, and the completeness of
$T_B \subseteq T_{PW}$ for $\bis{}$, it follows that $T_{PW} \vdash p
= p' = q' = q$.
For $F$ and $R$ (as well as $B$) a proof is given in
{\sc Bergstra, Klop \& Olderog} \hcite{BKO88}
by means of \phrase{graph transformations}. A similar proof for {\it RT}
can be found in {\sc Baeten, Bergstra \& Klop} \hcite{BBK87b}.
This method, applied to semantics ${\fO}$, requires the definition
of a class $\dl{H}^*$ of finite process graphs that contains at least
all finite process trees, and a binary relation $\stackrel{\fO}{\leadsto}\, \subseteq \dl{H}^* \times \dl{H}^*$ --- a system of
\phrase{graph transformations}---such that the following can be established:
\vspace{-1ex}
\begin{enumerate}
\item \plat{\stackrel{\fO}{\leadsto}}, used as a rewriting system, is
\phrase{terminating} on $\dl{H}^*$, i.e.\ any reduction sequence $g_0
\stackrel{\fO}{\leadsto} g_1 \stackrel{\fO}{\leadsto} \cdots$ leads
(in finitely many steps) to a \phrase{normal form}, a graph that
cannot be further transformed,
\vspace{-1ex}
\item if \plat{g \stackrel{\fO}{\leadsto} h} then (a) $g =_{\fO}
h$ and (b) $T_\fO \vdash \In{g} = \In{h}$
\vspace{-1ex}
\item and two normal forms are bisimilar iff they are ${\fO}$-equivalent.
\vspace{-1ex}
\end{enumerate}
Now the completeness proof goes as follows: Suppose $p =_{\fO} q$. As
$\pd (G(p))$ and $\pd(G(q))$ are finite, $U(G(p))$ and $U(G(q))$
belong to $\dl{H}^*$, and by requirement 1 they can be rewritten to
normal forms $g$ and $h$. Using \cor{embedding}, \pr{unfolding} and
requirement 2(a) above
$$g =_{\fO} U(G(p)) \bis{} G(p) =_{\fO} G(q) \bis{} U(G(q)) =_{\fO} h.$$
Thus, with requirement 3, $g \bis{} h$;
and \href{cor-finite BCCSP}{Corollaries~\ref{cor-finite BCCSP} and
\ref{cor-finite BCCSP 2}} yield
$$p \bis{} \In{G(p)} \bis{} \In{U(G(p))}, ~~~~~~~ \In{g}
\bis{} \In{h}, ~~~~~~~ \In{U(G(q))} \bis{} \In{G(q)} \bis{} q.$$
Requirement 2(b) and the completeness result for bisimulation
semantics proved above finally give
$$T_{\fO} \vdash p = \In{U(G(p))} = \In{g} =
\In{h} = \In{U(G(q))} = q.$$
I will now apply this method to $T$, $RT$, $CT$, $R$, $F$ and $FT$.
In the cases of $T$, $RT$ and $CT$, $\dl{H}^*$ is taken to be
$\dl{H}^{\it tree}$, the class of finite process trees.
Take ${\fO} = T$. Let $\stackrel{T}{\leadsto}$ be the graph
transformation that converts $g$ into $h$, notation \plat{g
\stackrel{\it T}{\leadsto} h}, iff $g$ is a finite tree with edges
$(s,a,t)$ and $(s,a,u)$ with $t \neq u$, and $h$ is obtained by
\phrase{identifying} $t$ and $u$. Formally speaking, the nodes of $h$
are those of $g$, except that $t$ and $u$ are omitted and a fresh node
$v$ has been added instead. Often $v$ is taken to be the
(equivalence) class $\{t,u\}$. Define the function $':\nd(g)
\rightarrow \nd(h)$ by $t'=v$, $u'=v$ and $w'=w$ for $w \neq t,u$. Now
$\ed(h) = \{(p',a,q') \mid (p,a,q) \in \ed(g)\}$ and $\rt(h) =
\rt(g)'$. This graph transformation is illustrated in Figure
\ref{join}.
\begin{figure}[htb]
.PS
scale = 2.54
circlerad = 0.05
arrowhead = 7
LC1: circle at (-3.75,2)
LC2: circle at (-4.5,1)
LC3: circle at (-5,0)
"$\cdots$" at (-4.5,0)
LC0: circle at (-4,0)
LC4: circle at (-3,1)
LC5: circle at (-3.5,0)
"$\cdots$" at (-3,0)
LC6: circle at (-2.5,0)
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC2 to LC3 chop circlerad chop circlerad
arrow from LC2 to LC0 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
arrow from LC4 to LC5 chop circlerad chop circlerad
arrow from LC4 to LC6 chop circlerad chop circlerad
"$s$" at LC1+(-.3,0)
"$t$" at LC2+(-.3,0)
"$u$" at LC4+(.3,0)
"$a$" at (-4.25,1.65)
"$a$" at (-3.25,1.65)
"$b_1$" at (-4.93,.65)
"$b_n$" at (-4.05,.65)
"$c_1$" at (-3.45,0.65)
"$c_m$" at (-2.5,0.65)
"$\stackrel{T}{\leadsto}$" at (-1.5,1)
RC1: circle at (0.2,2)
RC2: circle at last circle +(0,-1)
RC31: circle at RC2 +(-1.25,-1)
"$\cdots$" at RC2 +(-.75,-1)
RC3n: circle at RC2 +(-.25,-1)
RC41: circle at RC2 +(.25,-1)
"$\cdots$" at RC2 +(.75,-1)
RC4m: circle at RC2 +(1.25,-1)
arrow from RC1 to RC2 chop circlerad chop circlerad
arrow from RC2 to RC31 chop circlerad chop circlerad
arrow from RC2 to RC3n chop circlerad chop circlerad
arrow from RC2 to RC41 chop circlerad chop circlerad
arrow from RC2 to RC4m chop circlerad chop circlerad
"$s$" at RC1+(-.3,0)
"$v$" at RC2+(-.3,0)
"$a$" at RC2+(-.2,.5)
"$b_1$" at RC31+(.5,.7)
"$b_n$" at RC3n+(-.1,.52)
"$c_1$" at RC41+(.1,.5)
"$c_m$" at RC4m+(-.5,.7)
C1: circle at (4,1)
D1: circle at C1+(2,0)
C0: circle at C1+(.35,1)
"$\cdots$" at C1 +(1,1)
D0: circle at D1+(-.35,1)
C2: circle at C1+(0,-1)
D2: circle at D1+(0,-1)
arrow from C0 to C1 chop circlerad chop circlerad
arrow from C0 to D1 chop circlerad chop circlerad
arrow from D0 to C1 chop circlerad chop circlerad
arrow from D0 to D1 chop circlerad chop circlerad
arrow from C1 to C2 chop circlerad chop circlerad
arrow from D1 to D2 chop circlerad chop circlerad
"$t$" at C1+(-.3,0)
"$u$" at C2+(-.3,0)
"$v$" at D1+(.3,0)
"$w$" at D2+(.3,0)
"$a_1$" at C1+(-0.04,.65)
"$a_n$" at D1+(0.04,.65)
"$a_1$" at D1+(-.65,.2)
"$a_n$" at C1+(.7,.2)
"$b$" at C2+(-.15,.5)
"$b$" at D2+(.15,.5)
"$\stackrel{R}{\leadsto}$" at C1+(3,0)
C1: circle at (8,1)
D1: circle at C1+(2,0)
C0: circle at C1+(.35,1)
"$\cdots$" at C1 +(1,1)
D0: circle at D1+(-.35,1)
C2: circle at C1+(0,-1)
D2: circle at D1+(0,-1)
arrow from C0 to C1 chop circlerad chop circlerad
arrow from C0 to D1 chop circlerad chop circlerad
arrow from D0 to C1 chop circlerad chop circlerad
arrow from D0 to D1 chop circlerad chop circlerad
arrow from C1 to C2 chop circlerad chop circlerad
arrow from D1 to D2 chop circlerad chop circlerad
arrow from C1 to D2 chop circlerad chop circlerad
arrow from D1 to C2 dashed chop circlerad chop circlerad
"$t$" at C1+(-.3,0)
"$u$" at C2+(-.3,0)
"$v$" at D1+(.3,0)
"$w$" at D2+(.3,0)
"$a_1$" at C1+(-0.04,.65)
"$a_n$" at D1+(0.04,.65)
"$a_1$" at D1+(-.65,.2)
"$a_n$" at C1+(.7,.2)
"$b$" at C2+(-.15,.5)
"$b$" at D2+(.15,.5)
"$b$" at D2+(-.4,.54)
"$b$" at C2+(.4,.54)
.PE
\centerline{\raise 1em\box\graph}
\caption{Graph transformations\hlabel{join}}
\end{figure}
\\If $g$ is a finite tree and $g \stackrel{\it T}{\leadsto} h$ then so
is $h$. Moreover, $h$ has fewer nodes than $g$. Hence
\plat{\stackrel{\it T}{\leadsto}} is terminating on $\dl{H}^{\it tree}$.
The normal forms are exactly the finite deterministic trees. Now
requirement 3 has been established by \thm{determinism}. Requirement
2(a) is trivial, and for 2(b) observe that any application of
\plat{\stackrel{\it T}{\leadsto}} corresponds to an application of the
axiom $ax+ay~=~a(x+y)$.
Take ${\fO} = \it RT$. Let $\stackrel{\it RT}{\leadsto}$ be the
same graph transformation as \plat{\stackrel{\it T}{\leadsto}}, except
that it only applies if $I(t)=I(u)$. This time the normal forms are
the ready trace deterministic trees, and requirement 3 has been
established by \pr{RT-determinism}. Again requirement 2(a) is easy to
check, and for 2(b) it suffices to observe that any application of
\plat{\stackrel{\it RT}{\leadsto}} corresponds to an application of
the axiom $I(x)~=~I(y)~~ \Rightarrow ~~ax+ay~=~a(x+y)$.
Take ${\fO} = \it CT$. Let $\stackrel{\it CT}{\leadsto}$ be the
same graph transformation as \plat{\stackrel{\it T}{\leadsto}}, except
that it only applies if $I(t)=\emptyset \Leftrightarrow I(u)=\emptyset$.
This time the normal forms are the completed trace deterministic trees, and
again requirement 3 has been established by \pr{RT-determinism}. Once more
requirement 2(a) is easy to check, and for 2(b) observe
that any application of \plat{\stackrel{\it CT}{\leadsto}} corresponds
to an application of the law $(I(x)=0~\Leftrightarrow~I(y)=0)~~
\Rightarrow ~~ax+ay~=~a(x+y)$. This law falls outside conditional
equational logic, but it can be reformulated equationally by
considering the two cases $I(x)=0=I(y)$ and $I(x) \neq 0 \neq I(y)$.
In the first case it must be that $T_B\vdash x=0=y$ and hence the law follows
from the third and fourth axiom of \tab{axioms}. In the second,
observe that $I(p) \neq 0$ iff $p$ has the form $bq+r$ with $b \in Act$.
Hence the law can be reformulated as $a(bx+u)+a(cy+v)~=~a(bx+cy+u+v)$.
A process graph $g\in \IG$ is called \phrase{history unambiguous}
\hcite{BKO88} if any two paths from the root to the same node give rise
to the same trace, i.e.\ if for $\pi,\pi' \in \pd(g)$ one has
$end(\pi)=end(\pi') \Rightarrow T(\pi) = T(\pi')$. The
\phrase{history} or \phrase{trace} $T(s)$ of a node $s$ in such a
graph $g$ is defined as $T(\pi)$ for $\pi$ an arbitrary path from the
root of $g$ to $s$. Observe that trees are history unambiguous. In
the next two completeness proofs (the cases $R$ and $F$)
$\dl{H}^*$ is taken to be the class $\dl{H}^{hu}$ of finite, history
unambiguous, connected process graphs. For $g \in \dl{H}^{hu}$ and
$t,v \in \nd(g)$ let $t \sim v$ abbreviate $$\forall s \in \nd(g),~a
\in Act: (s,a,t) \in \ed(g) \Leftrightarrow (s,a,v) \in \ed(g).$$
Take ${\fO} = \it R$. Let $\stackrel{\it R}{\leadsto}$ be the graph
transformation with \plat{g \stackrel{\it R}{\leadsto} h} iff $g$ has
edges $(t,b,u)$ and $(v,b,w)$ with $t \sim v$, and $h$ is obtained by
adding a new edge $(t,b,w)$. This graph transformation is illustrated in
\fig{join}. Note that by applying \plat{\stackrel{\it
R}{\leadsto}} twice, one can also add the edge $(v,b,u)$ (indicated
with a dashed arrow in \fig{join}) if it isn't there already.
If $g$ is a finite, history unambiguous process graph and \plat{g
\stackrel{R}{\leadsto} h} then so is $h$.
Moreover, $h$ has more edges than $g$. As there is an upper bound to the
number of edges of graphs that can be obtained from a given graph
$g\in \dl{H}^{hu}$ by applying \plat{\stackrel{R}{\leadsto}} (namely
$n \times l \times n$, where $n$ is the number of nodes in $g$, and
$l$ the number of different edge-labels occurring in $g$),
\plat{\stackrel{R}{\leadsto}} is terminating on $\dl{H}^{hu}$
(requirement 1).
It is easy to see that \plat{\stackrel{\it R}{\leadsto}} does not
add new ready pairs. This gives requirement 2(a). For 2(b) observe
that an application of \plat{\stackrel{\it R}{\leadsto}} corresponds
to a number of applications of $a(bx+u)+a(by+v)~=~a(bx+by+u)+a(by+v)$.
Finally, requirement 3 follows from \pr{cross
saturated} and the following \\ {\sc Claim}: The normal forms w.r.t.\
$\stackrel{R}{\leadsto}$ are cross saturated.
\\ {\sc Proof of the claim}: Let $g \in \dl{H}^{hu}$ be a normal form
w.r.t.\ \plat{\stackrel{R}{\leadsto}}. With induction to the length of
$T(u)$ I will show that, for $u,w \in \nd(g)$,
\begin{equation}
\mbox{if~} T(u)=T(w) \mbox{~then~} u \sim w.
\hlabel{rnf}
\end{equation}
This implies that $g$ is cross saturated, for if $\pi,\pi'$ and $a$ are
as in \href{cross saturated}{the remark} below Definition
\ref{df-cross saturated}, there must be an edge
$(end(\pi),a,u)$ in $g$. Now $T(u)=T(\pi)a=T(end(\pi'))$, so also
$(end(\pi),a,end(\pi')) \in \ed(g)$.
\\
{\it Induction base}: If ${\it length}(T(u)) = 0$, one has
$u=w=\rt(g)$ and the statement is trivial.
\\
{\it Induction step}: Let $T(u)=T(w) \neq \epsilon$, and let $(t,b,u)
\in \ed(g)$. By symmetry, it suffices to show that $(t,b,w) \in
\ed(g)$. As $g$ is connected and history unambiguous, there must be
an edge $(v,b,w)$ with $T(t) = T(v)$. By induction $t \!\sim\! v$.
As $g$ is in normal form it must have an edge $(t,b,w)$.
\begin{figure}[htb]
.PS
scale = 1.7
circlerad = 0.05
arrowhead = 7
C1: circle at (2,1)
D1: circle at C1+(2,0)
C0: circle at C1+(1,1)
C2a: circle at C1+(-.5,-1)
"$\cdots$" at C1+(0,-1)
C2b: circle at C1+(.5,-1)
D2a: circle at D1+(-1,-1)
"$\cdots$" at D1+(-.6,-1)
D2b: circle at D1+(-.2,-1)
D2c: circle at D1+(.2,-1)
"$\cdots$" at D1+(.6,-1)
D2d: circle at D1+(1,-1)
arrow from C0 to C1 chop circlerad chop circlerad
arrow from C0 to D1 chop circlerad chop circlerad
arrow from C1 to C2a chop circlerad chop circlerad "$b_1~~~~$"
arrow from C1 to C2b chop circlerad chop circlerad "~~~~$b_n$"
arrow from D1 to D2a chop circlerad chop circlerad "$c_1$~~~~~"
arrow from D1 to D2b chop circlerad chop circlerad "$c_k$~~~~"
arrow from D1 to D2c chop circlerad chop circlerad "~~~~$d_1$"
arrow from D1 to D2d chop circlerad chop circlerad "~~~~~$d_m$"
"$s$" at C0+(-.2,0)
"$t$" at C1+(-.2,0)
"$a$" at C1+(0.5,.65)
"$u$" at D1+(.2,0)
"$a$" at D1+(-0.5,.65)
"$\stackrel{\it fork}{\leadsto}$" at C1+(4,0)
C1: circle at (8,1)
E1: circle at C1+(1,0)
D1: circle at C1+(2,0)
C0: circle at C1+(1,1)
C2a: circle at C1+(-.5,-1)
"$\cdots$" at C1+(0,-1)
C2b: circle at C1+(.5,-1)
D2a: circle at D1+(-1,-1)
"$\cdots$" at D1+(-.6,-1)
D2b: circle at D1+(-.2,-1)
D2c: circle at D1+(.2,-1)
"$\cdots$" at D1+(.6,-1)
D2d: circle at D1+(1,-1)
arrow from C0 to C1 chop circlerad chop circlerad
arrow from C0 to D1 chop circlerad chop circlerad
arrow "$a~~$" from C0 to E1 chop circlerad chop circlerad
arrow from E1 to C2a chop circlerad chop circlerad
"$b_1$" at E1+(-.55,-.2)
arrow from E1 to C2b chop circlerad chop circlerad "$b_n$~~~~"
arrow from E1 to D2a chop circlerad chop circlerad "$c_1$~~"
arrow from E1 to D2b chop circlerad chop circlerad "$c_k$~~~"
arrow from C1 to C2a chop circlerad chop circlerad "$b_1~~~~$"
arrow from C1 to C2b chop circlerad chop circlerad
"$b_n$" at C1+(.05,-.3)
arrow from D1 to D2a chop circlerad chop circlerad "$c_1$" ""
arrow from D1 to D2b chop circlerad chop circlerad "$c_k\;$~~"
arrow from D1 to D2c chop circlerad chop circlerad "~~~~$d_1$"
arrow from D1 to D2d chop circlerad chop circlerad "~~~~~$d_m$"
"$s$" at C0+(-.2,0)
"$t$" at C1+(-.2,0)
"$a$" at C1+(0.5,.65)
"$u$" at D1+(.2,0)
"$v$" at E1+(.2,0)
"$a$" at D1+(-0.5,.65)
.PE
\centerline{\raise 1em\box\graph}
\caption{Fork\hlabel{fork}}
\end{figure}
Take ${\fO} = \it F$. Let $\stackrel{\it fork}{\leadsto}$ be the graph
transformation with \plat{g \stackrel{\it fork}{\leadsto} h} iff
$g$ has edges $(s,a,t)$ and $(s,a,u)$, $\exists Y \subseteq I(u)$
such that $h$ is given by
\begin{itemise}
\item
$\nd(h) = \nd(g) \dcup \{v\}$
\item
$\rt(h)=\rt(g)$
\item
$\ed(h) = \begin{array}[t]{@{}l@{}}
\ed(g) \cup \{(s,a,v)\}\\
\cup \{(v,b,w)\mid (t,b,w) \in \ed(g)\}
\cup \{(v,b,w)\mid (u,b,w) \in \ed(g) \wedge b \in Y\}
\end{array}$
\end{itemise}
and $|R(h)| > |R(g)|$. This graph transformation is illustrated in
\fig{fork}. Note that for any path $\pi \in \pd(h)$ not ending
in $v$, a path $\pi' \in \ed(g)$ can be found with $T(\pi')=T(\pi)$
and $end(\pi')=end(\pi)$, namely by circumventing the possible portion
through $v$ along $t$ or $u$. Thus, such paths do not give rise to new
ready or failure pairs. For any path $\pi \in \pd(h)$ ending in $v$
there is a path $\pi' \in \ed(g)$ with $T(\pi')=T(\pi)$ and
$end(\pi')=t$. As $I(t) \subseteq I(v)$, also such paths do not give
rise to new failure pairs. Hence one has $R(h) = R(g) \cup
\{\rec{T(t),I(t) \cup Y}\}$ and $F(h) = F(g)$.
Note that if $g \in \dl{H}^{\it hu}$ and $g \stackrel{\it
fork}{\leadsto} h$, then also $h \in \dl{H}^{\it hu}$. Let
\plat{\stackrel{\it F}{\leadsto}} be \plat{\stackrel{\it R}{\leadsto}
\cup \stackrel{\it fork}{\leadsto}}. As \plat{g \stackrel{\it
fork}{\leadsto} h \Rightarrow g=_F h} and \plat{g \stackrel{\it
R}{\leadsto} h \Rightarrow g=_R h \Rightarrow g=_F h}, requirement
2(a) is satisfied. For 2(b) observe that an application of
$\stackrel{\it fork}{\leadsto}$ corresponds to an application of the
axiom $ax+a(y+z)~=~ax+a(x+y)+a(y+z)$.
The requirement $|R(h)| > |R(g)|$ says that the transformation may
only take place if it actually increases the ready set of the
transformed graph. Note that if \plat{g \stackrel{F}{\leadsto} h} then
$T(g)=T(h)$. As there is an upper bound to the number of ready pairs
of graphs $g$ with a given trace set (namely $|T(g)| \times 2^l$,
where $l$ is the number of different edge-labels occurring in $g$), a
reduction sequence $g_0\stackrel{F}{\leadsto} g_1
\stackrel{F}{\leadsto} \cdots$ on $\dl{H}^{\it hu}$ can contain only
finitely many occurrences of $\stackrel{\it fork}{\leadsto}$. After
the last such occurrence it leads in finitely many steps to a normal
form, because $\stackrel{R}{\leadsto}$ is terminating on $\dl{H}^{\it
hu}$. Hence also $\stackrel{F}{\leadsto}$ is terminating on
$\dl{H}^{\it hu}$ (requirement 1).
Suppose $g$ is a normal form w.r.t.\ \plat{\stackrel{F}{\leadsto}}
and $\rec{\sigma,X} \in R(g) \wedge \rec{\sigma,Y\cup Z} \in R(g)$.
Then $g$ has nodes $t$ and $u$ with $T(t)=T(u)=\sigma$, $I(t)=X$ and
$Y \subseteq I(u)$. As $g$ must be a normal form w.r.t.\
\plat{\stackrel{R}{\leadsto}}, it satisfies (\hhref{rnf}) and hence $t
\sim u$. As $g$ is connected, there are edges $(s,a,t)$ and $(s,a,u)$
in $g$. As $g$ must also be a normal form w.r.t.\ \plat{\stackrel{\it
fork}{\leadsto}}, $\rec{\sigma,X \cup Y} \in R(g)$. Thus normal
forms w.r.t.\ \plat{\stackrel{F}{\leadsto}} are saturated as well as
cross saturated, and hence requirement 3 follows by
\href{pr-saturated}{Propositions \ref{pr-saturated} and \ref{pr-cross
saturated}}.
Take ${\fO} = \it FT$. Let $\stackrel{\it sf}{\leadsto}$ ({\em
symmetric fork}) be the graph transformation consisting of those
instances of \plat{\stackrel{\it fork}{\leadsto}} where $Y=I(u)$, but
with the requirement $|R(h)| > |R(g)|$ relaxed to $|RT_N(h)| >
|RT_N(g)|$. Let $\dl{H}^*$ be $\dl{H}^{\it tree}$, and define
\plat{\stackrel{\it sfu}{\leadsto}} by \plat{g \stackrel{\it
sfu}{\leadsto} h} if \plat{g \stackrel{\it sf}{\leadsto} h'} and
$h=U(h')$. Thus \plat{\stackrel{\it sfu}{\leadsto}} is the variant of
\plat{\stackrel{\it sf}{\leadsto}} in which the target is unfolded
into a tree. Let \plat{\stackrel{\it FT}{\leadsto}} be
\plat{\stackrel{\it RT}{\leadsto} \cup \stackrel{\it
sfu}{\leadsto}}. As there is an upper bound to the number of normal
ready traces of graphs with a given finite trace set,
\plat{\stackrel{\it FT}{\leadsto}} is terminating on $\dl{H}^*$
(requirement 1). The normal forms are exactly the finite
$RT$-saturated ready trace deterministic process trees, so
requirement 3 follows from Propositions \hhref{pr-RT-determinism} and
\hhref{pr-RT-saturated}. It follows immediately from \cor{augmentation}
that \plat{g \stackrel{\it sf}{\leadsto} h \Rightarrow g=_{\it FT} h}.
Hence \pr{unfolding} gives \plat{g \stackrel{\it sfu}{\leadsto} h
\Rightarrow g=_{\it FT} h}.
Moreover, \plat{g \stackrel{\it RT}{\leadsto} h \Rightarrow g=_{\it
RT} h \Rightarrow g=_{\it FT} h}, which yields requirement 2(a). For
2(b) observe that an application of \plat{\stackrel{\it sf}{\leadsto}}
corresponds to an application of the axiom $ax+ay~=~ax+ay+a(x+y)$,
and as $h \bis{} U(h)$ \cor{finite BCCSP 2} gives $T_B \vdash \In{h} =
\In{U(h)}$ for $h \in \dl{H}$.
\end{proof}
In \thm{closed completeness} the fifth and seventh axioms of
\tab{axioms} may be replaced by $$a\sum_{i=1}^n (b_i x_i + b_i y_i) =
a\sum_{i=1}^n (b_i x_i + b_i y_i) +a \sum_{i=1}^n b_i y_i \mbox{~~~and~~~}
a\sum_{i=1}^n b_i x_i + a\sum_{i=1}^n b_i y_i = a\sum_{i=1}^n (b_i
x_i + b_i y_i).$$ These laws derive the same closed substitution
instances.
Thus none of the axiomatizations require the operator $I$, or
conditional equations. However, the laws above are axiom schemes
which have instances for any choice of $n \in \IN$. Even if $Act$
is finite, the axiomatizations involving these laws are infinite.
\begin{theorem}{open completeness}
Suppose $Act$ is infinite. For each of the semantics $\fO \in \{T, \:
S, \: CT, \: F, \: R, \: FT, \: RT,\linebreak \: RS, \: B, \: U\}$ two
BCCSP expressions with variables are ${\fO}$-equivalent iff they can
be proved equal from the axioms marked with `+' or `$\omega$' in the
column for $\fO$ in \tab{axioms}. It follows that the axioms
marked with `v' are derivable.
\end{theorem}
\begin{proof}
For ${\fO} \in \{T,~ CT,~ F,~ R,~ FT,~ RT,~ B\}$ this has been
established in {\sc Groote} \hcite{Gr90}.
His proof for $F$, $R$, $FT$ and $RT$ can be applied to $S$ and $RS$ as well.
The proof for $U$ is rather trivial, but omitted here.
\end{proof}
Groote also showed that if $Act$ is finite, \thm{open completeness}
does not hold for $F$, $R$, $FT$ and $RT$. But for $B$ and $CT$ it
suffices to assume that $Act$ is nonempty, and for $T$ it suffices to
assume that $Act$ has at least two elements. I do not know which
cardinality restriction on $Act$ is needed in the cases of $S$ and
$RS$. A complete axiomatization for open terms for completed
simulation or possible worlds semantics has so far not been provided.
\subsection{Axiomatizing the preorders}
In \tab{axioms preorders}, complete axiomatizations can be found
for the eleven preorders corresponding to the equivalences axiomatized
in \tab{axioms} (there is no preorder for tree semantics ($U$)).
This time provability is defined according to
the standards of either first-order logic with inequality or
conditional inequational logic, i.e.\ it may be used that
$\sqsubseteq$ is reflexive and transitive and satisfies the
precongruence properties of \href{7b}{Theorem~\ref{thm-congruence}b}.
For any semantics $\fO$ the $\fO$-preorder and $\fO$-equivalence are
related by $p =_\fO q \Leftrightarrow p \sqsubseteq_\fO q \wedge q
\sqsubseteq_\fO p$. Thus either $p = q$ is taken to be an abbreviation
of $p \sqsubseteq q \wedge q \sqsubseteq p$ or the conditional axioms
$p = q \Rightarrow p \sqsubseteq q$ and $p \sqsubseteq q \wedge q
\sqsubseteq p \Rightarrow p=q$ are considered part of the axiomatizations.
In the latter case, the axioms of \tab{axioms preorders} also
constitute complete axiomatizations of the equivalences.
The three axioms in \tab{axioms preorders} in which the
inequality is written ``$\sqsubseteq$'' represent strengthenings of the
corresponding axioms in \tab{axioms}. The axioms in which the
inequality is written ``$\sqsupseteq$'' are merely slick
reformulations of the corresponding axioms in \tab{axioms}, and
could be replaced by them.
Unlike in \tab{axioms}, the characteristic axiom for the
readiness preorder (the ninth) is now a substitution instance of the
characteristic axiom for the failures preorder (the tenth).
Note that the characteristic axiom for the ready simulation preorder
(the fifth) derives all closed instances of $I(x)=I(y) \Rightarrow ax
\sqsubseteq a(x+y)$, which gives the fifth axiom of \tab{axioms}.
Hence all closed instances of the characteristic axiom for the ready
trace preorder (the seventh) are derivable from the fifth and eighth
axioms. It follows that conditional (in)equations, involving the
operator $I$, or unbounded sums, are no longer needed in the
axiomatizations of ready simulation and failure trace semantics.
\begin{theorem}{closed completeness preorders}
For each of the semantics ${\fO} \in \{T, ~ S, ~ CT, ~ CS, ~ F, ~ R, ~
FT, ~ RT, ~ PW, ~ RS, ~ B\}$ one has $p \sqsubseteq_\fO q$ for $p,q \in {\sf
T\rm(BCCSP)}$ iff $p \sqsubseteq q$ can be proved from the axioms
marked with ``+'' in the column for ${\fO}$ in \tab{axioms
preorders}. The axioms marked with ``v'' are valid in
${\fO}$-semantics but not needed for the proof.
\begin{table}[htb]
\centerline{\begin{tabular}{| l | c | c | c | c | c | c | c | c | c | c | c |}
\cline{2-12}
\multicolumn{1}{c|}{}
&\makebox[0pt][c]{\it B}&\makebox[0pt][c]{\it RS}&\makebox[0pt][c]{\it PW}
&\makebox[0pt][c]{\it RT}
&\makebox[0pt][c]{\it FT}&\makebox[0pt][c]{\it R}&\makebox[0pt][c]{\it F}
&\makebox[0pt][c]{\it CS}&\makebox[0pt][c]{\it CT}&\makebox[0pt][c]{\it S}
&\makebox[0pt][c]{\it T}\\
\hline
$(x+y)+z~=~x+(y+z)$&+&+&+&+&+&+&+&+&+&+&+\\
$x+y~=~y+x$&+&+&+&+&+&+&+&+&+&+&+\\
$x+0~=~x$&+&+&+&+&+&+&+&+&+&+&+\\
$x+x~=~x$&+&+&+&+&+&+&+&+&+&+&+\\
&&&&&&&&&&&\\
$ax ~\sqsubseteq~ ax+ay$&&+&+&+&+&+&+&v&v&v&v\\
$a(bx+by+z)~=~ a(bx+z) + a(by+z)$&&&+&v&v&v&v&&v&&v\\
$I(x)~=~I(y)~~ \Rightarrow ~~ax+ay~=~a(x+y)$&&&&+&v&v&v&&v&&v\\
$ax+ay~\sqsupseteq~a(x+y)$&&&&&+&&v&&v&&v\\
{$a(bx+u)+a(by+v)~\sqsupseteq~a(bx+by+u)$}&&&&&&+&v&&v&&v\\
$ax+a(y+z)~\sqsupseteq~a(x+y)$&&&&&&&+&&v&&v\\
$ax~\sqsubseteq~ax+y$&&&&&&&&+&+&v&v\\
$a(bx+u)+a(cy+v)~=~a(bx+cy+u+v)$&&&&&&&&&+&&v\\
$x ~\sqsubseteq~ x+y$&&&&&&&&&&+&+\\
$ax+ay~=~a(x+y)$&&&&&&&&&&&+\\
&&&&&&&&&&&\\
$I(0)~=~0$&+&+&+&+&+&+&+&+&+&+&+\\
$I(ax)~=~a0$&+&+&+&+&+&+&+&+&+&+&+\\
$I(x+y)~=~I(x)+I(y)$&+&+&+&+&+&+&+&+&+&+&+\\
\hline
\end{tabular}}
\caption{Complete axiomatizations for the preorders\hlabel{axioms preorders}}
\end{table}
\end{theorem}
\begin{proof}
``If'' (\phrase{soundness}): In the light of
\href{7b}{Theorem~\ref{thm-congruence}b} it
suffices to show that the closed instances of the indicated axioms
are valid in the corresponding semantics. This is straightforward.
``Only if'' (\phrase{completeness}): Let $T^*_{\fO}$ be the set of
axioms marked with ``+'' in the column for ${\fO}$. Write $T^*_{\fO}
\vdash p\sqsubseteq q$ if the inequation $p\sqsubseteq q$ is provable
from $T^*_{\fO}$. I have to show that
\begin{equation}\hlabel{completeness preorders}
p \sqsubseteq_{\fO} q ~\Rightarrow~ T^*_{\fO} \vdash p\sqsubseteq q
\end{equation}
for any $p,q \in {\sf T\rm(BCCSP)}$. The case $\fO=B$ follows from
\pr{bisimulation preorder} and \thm{closed completeness}. For the
cases ${\fO} \in \{S,~CS,~RS\}$ (\hhref{completeness preorders}) will be
established with structural induction on $p$ and $q$. So assume $p
\sqsubseteq_{\fO} q$ and (\hhref{completeness preorders}) has been
proven for all pairs of smaller expressions $p',q' \in {\sf
T\rm(BCCSP)}$.
Take ${\fO}=S$, so $p \sqsubseteq_{S} q$. Using the axiom $x
\sqsubseteq x+y$ one finds that $T^*_{S} \vdash p\sqsubseteq q$ if for
every summand $ap'$ of $p$ there is a summand $aq'$ of $q$ such that
$T^*_{S} \vdash ap' \sqsubseteq aq'$. So let $ap'$ be a summand of
$p$. Then $p \goto{a} p'$, so $\exists q': q \goto{a} q'$ and $p'
\sqsubseteq_{S} q'$. Note that $aq'$ is a summand of $q$. By induction
$T^*_{S} \vdash p'\sqsubseteq q'$, so $T^*_S \vdash ap' \sqsubseteq
aq'$.
Take ${\fO}=CS$, so $p \sqsubseteq_{CS} q$. Using the axiom $ax
\sqsubseteq ax+y$ one finds that $T^*_{CS} \vdash p\sqsubseteq q$ if
$I(p)\neq\emptyset$ and for every summand $ap'$ of $p$ there is a
summand $aq'$ of $q$ such that $T^*_{CS} \vdash ap' \sqsubseteq
aq'$. In case $I(p)=\emptyset$ it must be that $I(q)=\emptyset$ as
well, and hence $T^*_{CS} \vdash p=q=0$. Otherwise, let $ap'$ be a
summand of $p$. Then $p \goto{a} p'$, so $\exists q': q \goto{a} q'$
and $p' \sqsubseteq_{CS} q'$. Note that $aq'$ is a summand of $q$. By
induction $T^*_{CS} \vdash p'\sqsubseteq q'$, so $T^*_{CS} \vdash ap'
\sqsubseteq aq'$.
Take ${\fO}=RS$, so $p \sqsubseteq_{RS} q$. Using the first five
axioms of \tab{axioms preorders} one finds that $T^*_{RS} \vdash
p\sqsubseteq q$ if $I(p)=I(q)$ and for every summand $ap'$ of $p$
there is a summand $aq'$ of $q$ such that $T^*_{RS} \vdash ap'
\sqsubseteq aq'$. As $p \sqsubseteq_{RS} q$ one has $I(p)=I(q)$. Let
$ap'$ be a summand of $p$. Then $p \goto{a} p'$, so $\exists q': q
\goto{a} q'$ and $p' \sqsubseteq_{RS} q'$. Note that $aq'$ is a
summand of $q$. By induction $T^*_{RS} \vdash p'\sqsubseteq q'$, so
$T^*_{RS} \vdash ap' \sqsubseteq aq'$.
Take $\fO=PW$. Suppose $p \sqsubseteq_{PW} q$. The axiom
$a(bx+by+z)=a(bx+z)+a(by+z)$ allows one to rewrite $p$ and $q$ to BCCSP
expressions $p'=\Sigma_{i\in I} a_i p_i$ and $q'=\Sigma_{j\in J} a_j
q_j$ with $p_i$ and $q_j$ deterministic. For expressions of this form
it is easy to establish that $p'\sqsubseteq_{PW} q' \Leftrightarrow p'
\sqsubseteq_{RS} q'$. Using the soundness of the axiom employed, and
the completeness of $T_{RS} \subseteq T_{PW}$ for $\sqsubseteq_{RS}$,
it follows that $T_{PW} \vdash p = p' \sqsubseteq q' = q$.
The remaining completeness proofs go by a variant of the method of
graph transformations, where requirement 3 is replaced by
\begin{center} if $g$ and $h$ are normal forms, then $g \sqsubseteq_\fO
h \Leftrightarrow g \sqsubseteq_\fN h$.
\end{center}
Here $\fN$ should be a semantics finer than $\fO$, for which the
completeness theorem has already been established, and for which
$T^*_\fN \subseteq T^*_\fO$. The reasoning now goes exactly as in the
proof of \thm{closed completeness}: Suppose $p \sqsubseteq_\fO q$.
Rewrite $U(G(p))$ and $U(G(q))$ to normal forms $g$ and $h$. Then
$$g =_{\fO} U(G(p)) \bis{} G(p) \sqsubseteq_{\fO} G(q) \bis{} U(G(q))
=_{\fO} h.$$
Thus, with requirement 3, $g \sqsubseteq_\fN h$. \cor{finite BCCSP 2}
yields $\In{g} \sqsubseteq_\fN \In{h}$, and one obtains
$$T^*_{\fO} \vdash p = \In{U(G(p))} = \In{g} \sqsubseteq \In{h} =
\In{U(G(q))} = q.$$ For each of the six remaining completeness proofs,
the class $\dl{H}^*$ and the graph transformations are the same as in
the proof of \thm{closed completeness}. Thus requirements 1 and 2(a)
are fulfilled. As (the closed instances of) the axioms for the
respective equivalences from \tab{axioms} are easily derivable
from the ones for the corresponding preorders from \tab{axioms
preorders}, requirement 2(b) is fulfilled as well. Requirement 3,
which used to follow from \thm{determinism} and Propositions
\hhref{pr-RT-determinism}, \hhref{pr-cross saturated}, \hhref{pr-saturated}
and \hhref{pr-RT-saturated}, now follows from Propositions
\hhref{pr-RT-determinism preorders}, \hhref{pr-cross saturated preorders},
\hhref{pr-saturated} and \hhref{pr-RT-saturated}.
\end{proof}
\subsection{A language for finite, concrete, sequential processes with
internal choice}
Let BCSP\hindex{BCSP} be the language that extends BCCSP with a binary operator
$\oplus$, modelling \phrase{internal choice}. Like $p+q$, the
expression $p\oplus q$ represents a process, first being involved in a
choice between its summands $p$ and $q$, and then proceeding as the
chosen process. However, whereas $+$ represents a choice that can be
influenced by the environment of the process (an \phrase{external choice}),
$\oplus$ represents one that is due to internal nondeterminism of the
specified system. BCSP can be regarded as a basic fragment of the
language CSP of {\sc Hoare} \hcite{Ho85}.
The set ${\sf T\rm(BCSP)}$ of (closed) terms over BCSP, or (closed)
{\em BCSP-expressions}, and its subset ${\sf T_1\rm(BCSP)}$ of
\phrase{initially deterministic BCSP-expressions}, are defined by:
\begin{itemise}
\item
$0 \in {\sf T_1\rm(BCSP)} \subseteq {\sf T\rm(BCSP)}$,
\item
$aP \in {\sf T_1\rm(BCSP)}$ for any $a \in Act$ and $P \in {\sf T\rm(BCSP)}$,
\item
$p+q \in {\sf T_1\rm(BCSP)}$ for any $p,q \in {\sf T_1\rm(BCSP)}$,
\item
$P+Q \in {\sf T\rm(BCSP)}$ for any $P,Q \in {\sf T\rm(BCSP)}$,
\item
$P\oplus Q \in {\sf T\rm(BCSP)}$ for any $P,Q \in {\sf T\rm(BCSP)}$.
\end{itemise}
Again, subterms $a0$ may be abbreviated by $a$. Brackets are used for
disambiguation only, assuming associativity of $+$ and $\oplus$, and
letting $a$ bind stronger than $+$ and $\oplus$.
Semantically, BCSP-expressions represent nonempty, finite sets of
initially deterministic BCSP expressions: for $P,Q \in {\sf
T\rm(BCSP)}$ let
$$
%\Id{p} := \{p\} \mbox{~~for~} p \in {\sf T_1\rm(BCSP)} \\
\Id{0} := \{0\} ~~~~~
\Id{aP} := \{aP\} ~~~~~
\Id{P+Q} := \{p+q \mid p \in \Id{P},~ q\in \Id{Q}\} ~~~~~
\Id{P\oplus Q} := \Id{P} \cup \Id{Q}.
$$
On ${\sf T_1\rm(BCSP)}$ action relations $\goto{a}$ for $a \in Act$ are
defined as the predicates on ${\sf T_1\rm(BCSP)}$ generated by the
action rules of \tab{BCSP}. Here $a$ ranges over $Act$,
$P$ over ${\sf T\rm(BCSP)}$ and $p$ and $q$ over ${\sf T_1\rm(BCSP)}$.%
\begin{table}[htb]
\begin{center}\framebox{$
\begin{array}{c}
{p \in \Id{P}} \\\hline
{aP \goto{a} p}
\end{array}
\qquad
\begin{array}{c}
{p \goto{a} p'} \\\hline
{p+q \goto{a} p'}
\end{array}
\qquad
\begin{array}{c}
{q \goto{a} q'} \\\hline
{p+q \goto{a} q'}
\end{array}
$}
\caption{Action rules for BCSP\hlabel{BCSP}}
\end{center}
\vspace{-15pt}
\end{table}
This makes ${\sf T_1\rm(BCSP)}$ into a labelled transition system.
Hence, in the light of \sect{initial nondeterminism} all
semantic equivalences of Sections \ref{trace}--\ref{bisimulation} and
\ref{possible worlds} are
well-defined on ${\sf T\rm(BCSP)}$, and for each of the semantics it
is determined when two BCSP-expressions denote the same process.
The following theorem says that all these semantic equivalences
are congruences for BCSP\@. Even stronger, all the preorders of this
paper are precongruences for BCSP.
\begin{theorem}{congruence BCSP}
Let $P,Q,R,S \in {\sf T\rm(BCSP)}$ and let $\fO$ be any of the semantics
of Sections \ref{trace}--\ref{bisimulation},
\ref{possible worlds}. Then $$\begin{array}{@{}l@{}}
P =_{\fO} Q ~\wedge~ R =_\fO S ~~\Rightarrow~~ aP =_{\fO} aQ ~\wedge~
P+R =_{\fO} Q+S ~\wedge~ P\oplus R =_{\fO} Q\oplus S,\\
P \sqsubseteq_{\fO} Q ~\wedge~ R \sqsubseteq_\fO S ~~\Rightarrow~~ aP
\sqsubseteq_{\fO} aQ ~\wedge~ P+R \sqsubseteq_{\fO} Q+S
~\wedge~ P\oplus R \sqsubseteq_{\fO} Q\oplus S.
\end{array}$$
\end{theorem}
\begin{proof}
Each of the preorders $\fO$ has a modal characterization, given by $P
\sqsubseteq_{\fO} Q \Leftrightarrow {\fO}(P)\subseteq{\fO}(Q)$, where
$\fO(P) = \bigcup_{p \in \Id{P}} \fO(p)$ for $P \in {\sf T\rm(BCSP)}$
and $\fO(p) = \{\phi \in \fL_\fO \mid p \models \phi\}$ for $p \in
{\sf T_1\rm(BCSP)}$. Now $\fO(P\oplus Q) = \fO(P) \cup \fO(Q)$. This
immediately yields the compositionality of $\fO$ w.r.t.\ $\oplus$: $P
\sqsubseteq_{\fO} Q \wedge R \sqsubseteq_\fO S \Rightarrow P\oplus R
\sqsubseteq_{\fO} Q\oplus S$, and hence $P =_{\fO} Q \wedge R =_\fO S
\Rightarrow P\oplus R =_{\fO} Q\oplus S$.
Note that every formula in the infinitary Hennessy-Milner logic is
logically equivalent to a disjunction of formulas of the form
$\bigwedge_{i \in I} a_i \phi_i \wedge \bigwedge_{j \in J} \neg a_j \phi_j$.
Let $\fO'(P)$ be the class of formulas in $\fO(P)$ of that form.
It follows that $P \sqsubseteq_{\fO} Q \Leftrightarrow
{\fO'}(P)\subseteq{\fO'}(Q)$ for $P,Q \in {\sf T\rm(BCSP)}$.
For $p,q \in {\sf T_1\rm(BCSP)}$ one has $p+q \models \bigwedge_{i \in
I} a_i \phi_i \wedge \bigwedge_{j \in J} \neg a_j \phi_j$ iff $I$ can
be written as $I_1 \cup I_2$ such that $p \models \bigwedge_{i \in
I_1} a_i \phi_i \wedge \bigwedge_{j \in J} \neg a_j \phi_j$ and $q
\models \bigwedge_{i \in I_2} a_i \phi_i \wedge \bigwedge_{j \in J}
\neg a_j \phi_j$. Moreover, for each semantics $\fO$ of this paper, if
$\bigwedge_{i \in I} a_i \phi_i \wedge \bigwedge_{j \in J} \neg a_j
\phi_j \in \fL_\fO$ and $I' \subset I$, then $\bigwedge_{i \in I'} a_i
\phi_i \wedge \bigwedge_{j \in J} \neg a_j \phi_j \in
\fL_\fO$\footnote{At least when replacing the modality $X$ of $R$,
$RT$, $PW$ and $RS$ by $\bigwedge_{a \in Y} \neg a\top \wedge \bigwedge_{a
\in Z} a\top$.}. Thus, for $P,Q \in {\sf T\rm(BCSP)}$ and
$\bigwedge_{i \in I} a_i \phi_i \wedge \bigwedge_{j \in J} \neg a_j
\phi_j \in \fL_\fO$, one has $\bigwedge_{i \in I} a_i \phi_i \wedge
\bigwedge_{j \in J} \neg a_j \phi_j \in \fO'(P+Q)$ iff $I = I_1 \cup
I_2$ such that $\bigwedge_{i \in I_1} a_i \phi_i \wedge \bigwedge_{j
\in J} \neg a_j \phi_j \in \fO'(P)$ and $\bigwedge_{i \in I_2} a_i
\phi_i \wedge \bigwedge_{j \in J} \neg a_j \phi_j \in \fO'(Q)$. This
immediately yields the compositionality of $\fO$ w.r.t.\ $+$.
The compositionality of $\fO$ w.r.t.\ $a$ is straightforward.
\end{proof}
If $P \in {\sf T\rm(BCSP)}$, then $G(\Id{P})$ is a finite process
graph with multiple roots. Vice versa, any finite process graph with
multiple roots $g \in \IG^{\it mr}$ can be represented by a
BCSP-expression $\In{g} \in {\sf T\rm(BCSP)}$, such that
$G(\In{g})\bis{}g$. Just extend \df{finite BCCSP} by $\In{g} =
\bigoplus_{r \in \mbox{\footnotesize \sc roots}(g)} \In{g_r}$.
\paragraph{Axioms}
\begin{table}[htb]
\centerline{\begin{tabular}{| l | c | c | c | c | c | c | c | c | c | c | c |}
\cline{2-12}
\multicolumn{1}{c|}{}
&\makebox[0pt][c]{\it B}&\makebox[0pt][c]{\it RS}&\makebox[0pt][c]{\it PW}
&\makebox[0pt][c]{\it RT}&\makebox[0pt][c]{\it FT}&\makebox[0pt][c]{\it R}
&\makebox[0pt][c]{\it F}&\makebox[0pt][c]{\it CS}&\makebox[0pt][c]{\it CT}
&\makebox[0pt][c]{\it S}&\makebox[0pt][c]{\it T}\\
\hline
&&&&&&&&&&&\\[-8pt]
$(x\oplus y)\oplus z~=~x\oplus (y\oplus z)$&+&+&+&+&+&+&+&+&+&+&+\\
$x\oplus y~=~y\oplus x$&+&+&+&+&+&+&+&+&+&+&+\\
$x\oplus x~=~x$&+&+&+&+&+&+&+&+&+&+&+\\
&&&&&&&&&&&\\[-8pt]
$(x+y)+z~=~x+(y+z)$&+&+&+&+&+&+&+&+&+&+&+\\
$x+y~=~y+x$&+&+&+&+&+&+&+&+&+&+&+\\
$x+0~=~x$&+&+&+&+&+&+&+&+&+&+&+\\
&&&&&&&&&&&\\[-8pt]
$(x\oplus y)+z ~=~ (x+z)\oplus(y+z)$&+&+&+&+&+&+&+&+&+&+&+\\
$a(x\oplus y) ~=~ ax+ay$&+&+&+&+&+&+&+&+&+&+&+\\
&&&&&&&&&&&\\[-8pt]
\hline
&&&&&&&&&&&\\[-8pt]
$\sum_{i=1}^n (b_i x_i \!+\! b_i y_i) ~=\,
\sum_{i=1}^n (b_i x_i \!+\! b_i y_i) \oplus
\sum_{i=1}^n \! b_i y_i$&&+&v&v&v&v&v&v&v&v&v\\
$bx+by+z~=~(bx+z) \oplus (by+z)$&&&+&v&v&v&v&&v&&v\\
$\sum_{i=1}^n b_i x_i \oplus \sum_{i=1}^n b_i y_i ~=~
\sum_{i=1}^n (b_i x_i + b_i y_i)$&&&&+&+&v&v&&v&&v\\
$x+x ~=~ x$&&&&&+&&v&&v&&v\\
$(bx+u)\oplus (by+v)~=~(bx+by+u)\oplus(by+v)$&&&&&&+&+&&v&&v\\
$x\oplus(y+z)~=~x\oplus(x+y)\oplus(y+z)$&&&&&&&+&&v&&v\\
$x+by+z~=~(x+by+z)\oplus (by+z)$&&&&&&&&+&v&v&v\\
$(bx+u)\oplus(cy+v)~=~bx+cy+u+v$&&&&&&&&&+&&v\\
$x+y~=~(x+y)\oplus y$&&&&&&&&&&+&v\\
$x\oplus y~=~x+y$&&&&&&&&&&&+\\
&&&&&&&&&&&\\[-8pt]
\hline
&&&&&&&&&&&\\[-8pt]
$x ~\sqsubseteq~ x\oplus y$&&+&+&+&+&+&+&+&+&+&+\\
$bx+by+z~=~(bx+z) \oplus (by+z)$&&&+&v&v&v&v&&v&&v\\
$\sum_{i=1}^n b_i x_i \oplus \sum_{i=1}^n b_i y_i ~=~
\sum_{i=1}^n (b_i x_i + b_i y_i)$&&&&+&v&v&v&&v&&v\\
$x+x ~=~ x$&&&&&+&&v&&v&&v\\
$(bx+u)\oplus(by+v)~\sqsupseteq~bx+by+u$&&&&&&+&v&&v&&v\\
$x\oplus(y+z)~\sqsupseteq~x+y$&&&&&&&+&&v&&v\\
$ax~\sqsubseteq~ax+y$&&&&&&&&+&v&v&v\\
$(bx+u)\oplus(cy+v)~=~bx+cy+u+v$&&&&&&&&&+&&v\\
$x ~\sqsubseteq~ x+y$&&&&&&&&&&+&v\\
$x\oplus y~=~x+y$&&&&&&&&&&&+\\[5pt]
\hline
\end{tabular}}
\caption{Complete axiomatizations in terms of BCSP\hlabel{axioms BCSP}}
\vspace{-27pt}
\end{table}
In \tab{axioms BCSP}, complete axiomatizations in terms of BCSP
can be found for the same eleven semantics axiomatized in terms of
BCCSP in Tables \hhref{axioms} and \hhref{axioms preorders}. The first two
sections of the table apply to the equivalences and the first and last
section to the preorders. These axioms are mild variations of the ones
in Tables \hhref{axioms} and \hhref{axioms preorders}, and have been found
by exploiting a close correspondence in semantic validity between BCSP
and BCCSP expressions. First of all, using the definitions just given,
the soundness of the axioms in the first section of \tab{axioms
BCSP} is easily established. Using these, any closed BCSP expression
can be rewritten in the form $\bigoplus_{i=1}^n p_i$ with $p_i$ closed
BCCSP expressions. Now the following lemma reduces the validity of
(in)equations over BCSP to that of (in)equations over BCCSP.
\begin{lemma}{reducing BCSP}
$\displaystyle ~ \bigoplus_{i=1}^n p_i \sqsubseteq_\fO \bigoplus_{j=1}^m q_j
~\Leftrightarrow~ \sum_{i=1}^n a p_i \sqsubseteq_\fO \sum_{j=1}^m a q_j ~$
for $p_i, q_j \in {\sf T\rm(BCCSP)}$.
\\\pf
$\phi \in \fO(\bigoplus_{i=1}^n p_i) \Leftrightarrow a\phi \in
\fO(\sum_{i=1}^n a p_i)$.
\end{lemma}
\vspace*{\fill}
\noindent
Most of the axioms in the last two sections of \tab{axioms BCSP} can
be recognized as restatements of the axioms of Tables \hhref{axioms}
and \hhref{axioms preorders}, using the insight of \lem{reducing
BCSP}. However, in BCSP it is not so clear how the set of initial
actions of a process should be defined, and the obvious adaptations of
the axioms involving the operator $I$ would not be sound. Therefore
the alternatives to those axioms discussed near the \href{thm-open
completeness}{end of Section \ref{axioms BCCSP}} are used. Moreover,
in BCSP the axiom $x+x=x$ is not sound for readiness
semantics. Substituting $a\oplus b$ for $x$, one derives $a \oplus
(a+b) \oplus b = a \oplus b$, of which only the left-hand side has a
ready pair $\rec{\epsilon,\{a,b\}}$. However, in the setting of BCCSP
all closed instances of $x+x=x$ are derivable from the law $ax+ax=ax$,
which corresponds with the BCSP axiom $x \oplus x = x$. Following
\lem{reducing BCSP}, the characteristic axiom for failure trace
equivalence should be $x \oplus y=x \oplus y \oplus (x+y)$. This axiom
is derivable from $x+x=x$, and all closed instances of $x+x=x$ are
derivable from $x \oplus y=x \oplus y \oplus (x+y)$ and the axioms in
the first section of \tab{axioms BCSP}.
Let $U_{\fO}$ be the set of axioms marked with ``+'' in the column for
${\fO}$ in the first two sections of \tab{axioms BCSP}, and
$U^*_{\fO}$ be the set of axioms marked with ``+'' in the column for
${\fO}$ in the first and last section of \tab{axioms BCSP}.
Write $S \vdash \Phi$ if the formula $\Phi$ is provable from the set of
axioms $S$.
\begin{theorem}{closed completeness BCSP}
For ${\fO} \in \{T, ~ S, ~ CT, ~ CS, ~ F, ~ R, ~ FT, ~ RT, ~ PW, ~ RS, ~
B\}$ and $P,Q \in {\sf T\rm(BCSP)}$ one has $P =_\fO Q \Leftrightarrow
U_\fO \vdash P=Q$ and $P \sqsubseteq_\fO Q \Leftrightarrow
U^*_\fO \vdash P \sqsubseteq Q$.
\end{theorem}
\begin{proof}
``$\Leftarrow$'' ({\em soundness}): In the light of \thm{congruence BCSP} it
suffices to show that the closed instances of the indicated axioms are
valid in the corresponding semantics. In fact, one may restrict
attention to the instances where expressions $\bigoplus_{i=1}^n p_i$
with $p_i$ closed BCCSP expressions are substituted for the variables.
It is not difficult to check, for each of these axioms, that such
instances of it are derivable from the instances of it where simple
closed BCCSP expressions are substituted for the variables (but taking
$x \oplus y=x \oplus y \oplus (x+y)$ instead of $x+x=x$ to be the
characteristic axiom for failure trace semantics). That the instances
of the latter kind are valid in the corresponding semantics follows
immediately from \lem{reducing BCSP} and the soundness of the axioms
for BCCSP.
``First $\Rightarrow$'' ({\em completeness of the axioms for the equivalences}):
Let $T'_{\fO}$ be the set of axioms marked with ``+'' in the column
for ${\fO}$ in \tab{axioms}, but using $a\sum_{i=1}^n b_i x_i +
a\sum_{i=1}^n b_i y_i = a\sum_{i=1}^n (b_i x_i + b_i y_i)$ and $a\sum_{i=1}^n (b_i x_i +
b_i y_i) = a\sum_{i=1}^n (b_i x_i + b_i y_i) +a \sum_{i=1}^n b_i y_i$ instead of the axioms involving the operator $I$.
As \thm{closed completeness} establishes completeness for closed terms
only, it holds for $T'_\fO$ as well.
{\sc Claim}: If $T'_\fO \vdash p = \sum_{j=1}^m aq_j$ for $p, q_j \in
{\sf T\rm(BCCSP)}$, then, modulo applications of the first three
axioms of \tab{axioms}, $p$ has the form \plat{p=\sum_{i=1}^n ap_i}.
{\sc Proof of the claim}: As all axioms in $T'_\fO$ are equations, I
may use induction on the proof of $p = \sum_{j=1}^m aq_j$ in
equational logic. The case that $p = \sum_{j=1}^m aq_j$ is a closed
instance of an axiom of $T'_\fO$ proceeds by inspection of those
axioms. The cases of placing an equation in a context, as well as
reflexivity, symmetry and transitivity, are trivial.
{\sc Claim}: $\displaystyle T'_\fO \vdash \sum_{i=1}^n ap_i =
\sum_{j=1}^m aq_j ~\Rightarrow~ U_\fO \vdash \bigoplus_{i=1}^n p_i =
\bigoplus_{j=1}^m q_j$ for any $p_i,q_j \in {\sf T\rm(BCCSP)}$.
{\sc Proof of the claim}: I use induction on the proof of
\plat{\sum_{i=1}^n ap_i = \sum_{j=1}^m aq_j} from $T'_\fO$ in equational
logic. The case that $\sum_{i=1}^n ap_i = \sum_{j=1}^m aq_j$ is a
closed instance of an axiom of $T'_\fO$ proceeds by inspection of
those axioms, taking into account the remark about $x \oplus y=x
\oplus y \oplus (x+y)$ right before this theorem. The case of a
closed instance of an axiom of $T'_\fO$ in a context is
straightforward, also using that all closed instances of axioms of
$T'_\fO$ are derivable from the ones of $U_\fO$, taking into account
the remark about $x+x=x$ right before this theorem. The cases of
reflexivity and symmetry are trivial. Transitivity follows from the
previous claim.
{\sc Completeness proof}: Suppose $P =_\fO Q$ for certain $P,Q \in
{\sf T\rm(BCSP)}$. Using the axioms in the first section of
\tab{axioms BCSP} one obtains $U_\fO \vdash P = \bigoplus_{i=1}^n p_i$
and $U_\fO \vdash Q = \bigoplus_{j=1}^m q_j$ with $p_i, q_j \in {\sf
T\rm(BCCSP)}$. By the soundness of these axioms one has
\plat{\bigoplus_{i=1}^n p_i =_\fO \bigoplus_{j=1}^m q_j}. Therefore
$\sum_{i=1}^n ap_i ~=_\fO~ a\bigoplus_{i=1}^n p_i ~=_\fO~
a\bigoplus_{j=1}^m q_j ~=_\fO~ \sum_{j=1}^m aq_j$ by the soundness of
$a(x\oplus y) = ax+ay$ and \thm{congruence BCSP}, and hence $T'_\fO
\vdash \sum_{i=1}^n ap_i = \sum_{j=1}^m aq_j$ by the completeness of
$T'_\fO$. Now $U_\fO \vdash P=Q$ follows by the claim above.
The second ``$\Rightarrow$'' ({\em completeness of the axioms for the
preorders}) goes likewise, except that in the proof of the
second claim, in order to handle the axioms $ax \sqsubseteq ax+y$ and
$x \sqsubseteq x+y$, one uses the axiom $x \sqsubseteq x\oplus y$ of
$U^*_\fO$. Furthermore, $ax \sqsubseteq ax+y$ is derivable
from $U^*_{CT}$, and $x \sqsubseteq x+y$ from $U^*_T$.
\end{proof}
\section{Criteria for selecting a semantics for particular applications}
\hlabel{criteria}
\paragraph{Must testing}
Assume the testing scenario of trace semantics: we are unable to
influence the behaviour of an investigated system in any way and can
observe the performed actions only. Not even deadlock is observable.
In this case there appears to be no reason to distinguish the two
processes of \ctr{CTvsF}, $ab+a(b+c)$ and $a(b+c)$.
They have the same traces, and consequently allow the same
observations. Likewise, one might see no reason to distinguish between
the two processes of \ctr{TvsCT}, $ab+a$ and $ab$; also
these have the same traces. However, when buying process $ab$, it may
come with the guarantee that, in every run of the system, sooner or
later it will perform the action $b$, at least if the action $a$ is
known to terminate. Such a guarantee cannot be given for $ab+a$. The
distinction between $ab$ and $ab+a$ alluded to here can be formalized
with the concept of \phrase{must testing}, originally due to {\sc De
Nicola \& Hennessy} \hcite{DH84}: $ab$ {\em must} do a $b$, whereas
$ab+a$ must not.
For finite processes, must testing could be formalized as follows.
For $t \subseteq Act^*$ we say that a finite process $p \in \IP$ {\em must}
pass the test $t$ if $CT(p) \subseteq t$. To test whether a process
will sooner or later perform a $b$-action take $t$ to be all sequences
of actions containing a $b$. To test whether a process will always
perform a $b$ immediately after it does an $a$, take $t$ to be all
traces in which any $a$ is immediately followed by a $b$. Now write $p
\sqsubseteq^{\rm must}_T q$ if for all tests $t \subseteq Act^*$ such
that $p$ must pass $t$, $q$ must pass $t$ as well. It is easy to see
that, for finite processes $p$ and $q$, $p \sqsubseteq^{\rm must}_T q$
iff $q \sqsubseteq_{CT} p$.
All testing scenarios $\fO$ sketched earlier in this paper can be
regarded as forms of \phrase{may testing\/}: it is recorded whether an
observation $\varphi \in \fL_\fO$ {\em may} be made for a process $p$,
and one writes $p \sqsubseteq_\fO q$ if any observation that may be
made for $p$, may also be made for $q$.
In the context of a testing scenario $\fO$ with $\fO \succeq CT$, a
plausible form of must testing can be defined as well, and for finite
processes plausible formalizations yield that $p \sqsubseteq^{\rm
must}_\fO q$ iff $q \sqsubseteq_\fO p$.
For infinite processes there are several ways to formalize must
testing, and analyzing the resulting preorders falls outside of
the scope of this paper.
\paragraph{Deadlock behaviour}
A process is said to reach a state of \phrase{deadlock} if it can do
no further actions.\footnote{In settings where successful termination
is modelled (cf.\ \sect{termination}) a state of deadlock is only
reached if moreover the process cannot terminate successfully.} The
process $ab+a$ for instance may deadlock right after performing an
$a$-action, whereas the process $ab$ may not. One could say that a
semantics $\fO$ {\em respects} \phrase{deadlock behaviour} iff ${\fO}
\succeq CT$.\linebreak \ctr{finite} then shows that none of the
semantics on the left in \fig{spectrum} respects deadlock behaviour;
only the left-hand process of \ctr{finite} can deadlock after an
$a$-move. Respecting deadlock behaviour may be a requirement on
semantics in applications where either deadlock is important in its
own right, or where (implicitly) a form of must-testing is considered.
\paragraph{Full abstraction}
Many testing scenarios mentioned in this paper employ the notion that
an action can happen only if it is not blocked by the environment,
that is, only if both the investigated process {\em and} the
environment are ready to participate in it. Modelling both the
investigated process and the responsible part of the environment as
process graphs gives rise to the following binary \phrase{intersection
operator} that allows an action to happen only if it can happen in
both of its arguments.
\begin{definition}{intersection}
Let $\cap$ be the binary operator on process graphs defined by
\begin{itemise}
\item $\nd(g \cap h) = \nd(g) \times \nd(h)$,
\item $\mbox{\sc roots}(g \cap h) = \mbox{\sc roots}(g) \times
\mbox{\sc roots} (h)$,
\item $((s,t),a,(s',t')) \in \ed(g \cap h)$ iff $(s,a,s')\in\ed(g) \wedge$
(t,a,t')\in \ed(h)$.
\end{itemise}
In order to obtain a connected process graph, unreachable parts need
to be removed.
\end{definition}
This operator is also called \phrase{synchronous parallel composition}
and is denoted $\|$ in {\sc Hoare} \hcite{Ho85}.
It can be added to BCCSP or BCSP by employing the action rule
$\begin{array}{c}
{p \goto{a} p',~ q \goto{a} q'} \\\hline
{p\cap q \goto{a} p'\cap q'}
\end{array}$.
Trace semantics turns out to be compositional for the intersection operator,
i.e.\ if $g =_T g'$ and $h =_T h'$ then $g \cap h =_T g' \cap h'$.
For $T(g \cap h) = T(g) \cap T(h)$. So are failures and readiness
semantics: $$\rec{\sigma,X} \in F(g \cap h) ~\Leftrightarrow~ \exists
\rec{\sigma,Y} \in F(g),~ \rec{\sigma,Z} \in F(h): X=Y\cup Z$$
$$\rec{\sigma,X} \in R(g \cap h) ~\Leftrightarrow~ \exists
\rec{\sigma,Y} \in R(g),~ \rec{\sigma,Z} \in R(h): X=Y\cap Z.$$ In
fact, it is not hard to see that all semantics of this paper are
compositional for $\cap$, except for $CT$ and $\it CS$, and their
(in)finitary versions. The two processes of \ctr{CTvsF}, $ab+a(b+c)$
and $a(b+c)$, are completed trace equivalent, even completed
simulation equivalent, yet after intersecting them with $ac$ only the
first one has a completed trace $a$.
In applications where the intersection operator is used, one may
require a suitable semantics to be compositional for it. This rules
out $CT$ and $\it CS$. If also deadlock behaviour is of importance,
$F$ appears to be the coarsest semantics to be considered, at least
among the ones reviewed in this paper. As a matter of fact, it is the
coarsest semantics even among the ones not reviewed here.
\begin{definition}{full abstraction}
An equivalence relation is called \phrase{fully abstract} w.r.t.\ a
property if it is the coarsest equivalence with that property, i.e.\
if it has the property, and any other equivalence having that property
is finer.
An equivalence is said to be \phrase{fully abstract} w.r.t.\ another
equivalence $\sim$ and some operators, if it is the coarsest
equivalence finer than $\sim$ that is compositional w.r.t.\ those
operators.
\end{definition}
An equivalence $\approx$ on $\IG$ is fully abstract w.r.t.\ an
equivalence $\sim$ and a set $L$ of operators on $\IG$ iff
\begin{list}{$\bullet$}{\leftmargin 25pt
\labelwidth\leftmargini\advance\labelwidth-\labelsep
\topsep 0pt \itemsep 0pt \parsep 0pt}
\item [(1)] it is compositional w.r.t.\ the operators in $L$, and
\item [(2)] for any two process graphs $g,h \in \IG$ with $g
\not\approx h$ there exists a context $C[\cdot]$ of operators from $L$
such that $C[g] \not\sim C[h]$.
\end{list}
In fact, for every equivalence relation $\sim$ on $\IG$ and every set
$L$ of operators on $\IG$ there exists a unique equivalence relation
$\approx$ that is fully abstract w.r.t.\ $\sim$ and the operators in
$L$, namely the one defined by $g \approx h$ iff $C[g] \sim C[h]$ for
every context $C[\cdot]$ of operators from $L$.
\begin{theorem}{full abstraction F}
Failures equivalence is fully abstract w.r.t.\ $=_{CT}$ and $\cap$,
i.e.\ w.r.t.\ deadlock behaviour and intersection.
\end{theorem}
\begin{proof}
(1) has already been established. For (2), let $g \neq_F h$. W.l.o.g.\
let $\rec{\sigma,X}\in F(g)-F(h)$. Let $k$ be the process graph that
is shaped like the failure pair $\rec{\sigma,X}$, i.e.\ the process
that performs the actions of $\sigma$ in succession, after which it
offers a choice between the actions of $X$, and nothing else.
Then $\sigma \in CT(g \cap k) -CT(h \cap k)$.
\end{proof}
Variants of \thm{full abstraction F} are abundant in the literature.
See e.g.\ \hcite{BKO88}.
\paragraph{Renaming}\textBrown
For every function $f:Act \rightarrow Act$ one can define a unary
renaming operator on $\IG$ that renames the labels of all transitions
in its argument according to $f$. In case $f$ is injective, all
semantics of this paper are compositional for the associated
renaming operator, as is trivial to check. Non-injective renaming
operators are useful to express a degree of abstraction. Imagine a
process that can do, among others, actions $a_1$ and $a_2$. At some
level of abstraction, the difference between $a_1$ and $a_2$ may be
considered irrelevant. This can be expressed by applying a renaming
that relabels both $a_1$ and $a_2$ into the same action
$a$. Naturally, if two processes are equivalent before applying such a
renaming operator, one would expect them to still be equivalent
afterwards, i.e.\ after abstracting from the difference between $a_1$
and $a_2$. It is for this reason that one might require semantics to
be compositional for (non-injective) renaming. As it happens, all
semantics between $F^1$ and $B^-$ fail this requirement. For the two
processes of \ctr{finite} are HML-equivalent ($=_B^-$),
but after renaming all actions $b_i$ into $b$ (for $i=1,2,\ldots$) the
resulting processes are not even singleton-failures equivalent
($=_F^1$). For only the first one has a singleton-failure pair
$\rec{a,\{b\}}$. This can be considered an argument against the semantics
on the left of \fig{spectrum}.
Counterexample~\ref{renaming} shows that also $\it F2S^*$, $\it
R2S^*$, $\it FB^*$ and $\it RB^*$ are not compositional for renaming.
In this counterexample $b$ is a shorthand for $\Sigma_{i=1}^\infty
b_i$, in the sense that whenever a transition \plat{p \goto{b} q} is
displayed, all the transitions \plat{p \goto{b_i} q} for $i \geq 1$ are
meant to be present.
\begin{counterexample}[htb]
\Black{
.PS
scale = 2
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
C1: circle at (-4,2)
C2: circle at (-6,1)
circle at last circle +(0,-1)
arrow "$b~~~$" from 2nd last circle to last circle chop
C3: circle at C1 +(-1,-1)
circle at last circle +(-.5,-.5)
arrow from 2nd last circle to last circle chop
"$b$" at last circle +(.2,.4)
circle at 2nd last circle +(0,-1)
arrow "$~~~~b_1$" from 3rd last circle to last circle chop
circle at last circle +(0,-1)
arrow "$~~~c$" from 2nd last circle to last circle chop
C4: circle at C1 +(0,-1)
circle at last circle +(-.5,-.5)
arrow from 2nd last circle to last circle chop
"$b$" at last circle +(.2,.4)
circle at 2nd last circle +(0,-1)
arrow "$~~~~b_2$" from 3rd last circle to last circle chop
circle at last circle +(0,-1)
arrow "$~~~c$" from 2nd last circle to last circle chop
C5: circle at C1 +(1,-1)
circle at last circle +(-.5,-.5)
arrow from 2nd last circle to last circle chop
"$b$" at last circle +(.2,.4)
circle at 2nd last circle +(0,-1)
arrow "$~~~~b_3$" from 3rd last circle to last circle chop
circle at last circle +(0,-1)
arrow "$~~~c$" from 2nd last circle to last circle chop
C6: circle at C1 +(2,-1)
circle at last circle +(-.5,-.5)
arrow dashed from 2nd last circle to last circle chop
circle at 2nd last circle +(0,-1)
arrow dashed from 3rd last circle to last circle chop
circle at last circle +(0,-1)
arrow dashed from 2nd last circle to last circle chop
LB1: box invis "$a$" at (-4.61,1.54)
LB2: box invis "$a$" at (-3.32,1.50)
LB1: box invis "$a$" at (-5.04,1.68)
LB2: box invis "$a$" at (-4.15,1.5)
LB4: box invis "{\huge . . .}" at (-1.8,1.5)
arrow from C1 to C2 chop circlerad chop circlerad
arrow from C1 to C3 chop circlerad chop circlerad
arrow from C1 to C4 chop circlerad chop circlerad
arrow from C1 to C5 chop circlerad chop circlerad
arrow from C1 to C6 dashed chop circlerad chop circlerad
C1: circle at (3,2)
C3: circle at C1 +(-1,-1)
circle at last circle +(-.5,-.5)
arrow from 2nd last circle to last circle chop
"$b$" at last circle +(.2,.4)
circle at 2nd last circle +(0,-1)
arrow "$~~~~b_1$" from 3rd last circle to last circle chop
circle at last circle +(0,-1)
arrow "$~~~c$" from 2nd last circle to last circle chop
C4: circle at C1 +(0,-1)
circle at last circle +(-.5,-.5)
arrow from 2nd last circle to last circle chop
"$b$" at last circle +(.2,.4)
circle at 2nd last circle +(0,-1)
arrow "$~~~~b_2$" from 3rd last circle to last circle chop
circle at last circle +(0,-1)
arrow "$~~~c$" from 2nd last circle to last circle chop
C5: circle at C1 +(1,-1)
circle at last circle +(-.5,-.5)
arrow from 2nd last circle to last circle chop
"$b$" at last circle +(.2,.4)
circle at 2nd last circle +(0,-1)
arrow "$~~~~b_3$" from 3rd last circle to last circle chop
circle at last circle +(0,-1)
arrow "$~~~c$" from 2nd last circle to last circle chop
C6: circle at C1 +(2,-1)
circle at last circle +(-.5,-.5)
arrow dashed from 2nd last circle to last circle chop
circle at 2nd last circle +(0,-1)
arrow dashed from 3rd last circle to last circle chop
circle at last circle +(0,-1)
arrow dashed from 2nd last circle to last circle chop
LB1: box invis "$a$" at (3.68,1.50)
LB2: box invis "$a$" at (2.39,1.55)
LB2: box invis "$a$" at (2.85,1.5)
LB4: box invis "{\huge . . .}" at (5.2,1.5)
arrow from C1 to C3 chop circlerad chop circlerad
arrow from C1 to C4 chop circlerad chop circlerad
arrow from C1 to C5 chop circlerad chop circlerad
arrow from C1 to C6 dashed chop circlerad chop circlerad
B3: box invis "$=_{\it RB}^*$" at (0,1.2)
B3: box invis "$\neq_{\it 2S}^\omega$" at (0,.5)
B3: box invis "$=_{RS}$" at (0,-.2)
.PE
\centerline{\raise 1em\box\graph}}
\caption{$\it F2S^*$, $\it R2S^*$, $\it FB^*$ and $\it RB^*$ are not
compositional for renaming\hlabel{renaming}}
\end{counterexample}
With some effort one checks that both processes satisfy the same
formulas in $\fL_{\it RB}^*$. However, after renaming all actions
$b_i$ into $b$ they are no longer $\it 2S^-$-equivalent: only the
first process satisfies $a \neg(bc\top)$. For all other semantics of
\fig{spectrum} it is rather easy to establish that they are
compositional for renaming.
\paragraph{Other compositionality requirements}\textBlack
Many formal languages for the description of concurrent systems,
including CCS \hcite{Mi80}, SCCS \hcite{Mi83}, CSP \hcite{Ho85} and ACP
\hcite{BW90}, are \phrase{De Simone languages} (cf.\ \hcite{AFV00}).
This means that their operators (the {\em De Simone operators}) can be
defined with action rules of a particular form (the {\em De Simone
format}). Because De Simone languages are used heavily in algebraic
system verification, semantic equivalences that are compositional for
such languages are often desirable.
\begin{theorem}{De Simone}
The semantics $\it T,\:T^\infty,\:F,\:
F^\infty,\:R,\:R^\infty,\:FT,\:FT^\infty,\: RT,\:RT^\infty,\:
PF,\:PF^\infty,\: S^*,\: S^\omega,\: S,\linebreak\: FS^*,\:
RS^*,\:RS^\omega,\:RS,\: 2S^\omega,\: 2S,\: B^\omega$ and $B$ are
compositional w.r.t.\ all De Simone languages.
\\\pf
Omitted.
\hfill $\Box$
\end{theorem}
For all the other semantics of \fig{spectrum}, which are
displayed there in red (or shaded), there are counterexamples against
such a result. Tree semantics fails to be compositional w.r.t.\ the
$+$ of BCCSP, unless the action relations are upgraded with
multiplicities, but that takes us outside of De Simone format.
The semantics $\it F^1,\:F^-,\:R^-,\:FT^-,\:RT^-,\:RS^-,\:2S^-,\:B^-,
\:F2S^*,\:R2S^*,\:FB^*$ and $RB^*$ fail to be compositional w.r.t.\
renaming, and $\it CT,\:CT^\infty,\:CS^*,\:CS^\omega,\:CS$ fail to be
compositional w.r.t.\ intersection. These are all De Simone operators.
Finally, \ctr{PW} shows that $PW$ is not compositional
for the synchronization operator $\times$ of SCCS \hcite{Mi83}---also a
De Simone operator. This operator can be used to create a context, in
which the two possible worlds equivalent processes of \ctr{SvsRT} are
converted into the two processes below.
\begin{counterexample}[ht]\Black{
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
LC1: circle at (-4,3)
LC2: circle at (-5,2)
LC3: circle at (-5,1)
LC6: circle at (-5,0)
LC4: circle at (-3,2)
LC5: circle at (-3,1)
LC7: circle at (-3,0)
LB1: box invis "$a_3$" at (-4.7,2.7)
LB2: box invis "$a_3$" at (-3.35,2.7)
LB3: box invis "$b_1$" at (-5.45,1.5)
LB3: box invis "$b_2$" at (-4.5,1.5)
LB3: box invis "$b_1$" at (-3.45,1.5)
LB3: box invis "$b_2$" at (-2.5,1.5)
LB3: box invis "$c_4$" at (-5.22,.6)
LB4: box invis "$d_4$" at (-2.75,.6)
circle invis "$(abc+abd)\times 3(1.4+2.4)$" at (-4,-1)
arrow from LC1 to LC2 chop circlerad chop circlerad
arc from LC2.sw to LC3.nw
arc from LC3.ne to LC2.se
arrow from LC3.nw +(-.1,.15) to LC3.nw
arrow from LC3.ne +(.1,.15) to LC3.ne
arrow from LC3 to LC6 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
arc from LC4.sw to LC5.nw
arc from LC5.ne to LC4.se
arrow from LC5.nw +(-.1,.15) to LC5.nw
arrow from LC5.ne +(.1,.15) to LC5.ne
arrow from LC5 to LC7 chop circlerad chop circlerad
B3: box invis "$\neq_{\it PW}$" at (0,1.5)
RC1: circle at (4,3)
RC2: circle at (4,2)
RC3: circle at (3,1)
RC5: circle at (3,0)
RC4: circle at (5,1)
RC6: circle at (5,0)
RB1: box invis "$a_3$" at (3.75,2.6)
RB2: box invis "$b_1$" at (3.15,1.85)
RB2: box invis "$b_2$" at (3.85,1.15)
RB2: box invis "$b_1$" at (4.85,1.85)
RB2: box invis "$b_2$" at (4.25,1.15)
RB4: box invis "$c_4$" at (2.78,.6)
RB5: box invis "$d_4$" at (5.25,.6)
circle invis "$a(bc+bd)\times 3(1.4+2.4)$" at (4,-1)
arrow from RC1 to RC2 chop circlerad chop circlerad
arc from RC2.w to RC3.n
arc from RC3.e to RC2.s
arrow from RC3.n +(.05,.15) to RC3.n
arrow from RC3.e +(.15,.05) to RC3.e
arc from RC2.s to RC4.w
arc from RC4.n to RC2.e
arrow from RC4.n +(-.05,.15) to RC4.n
arrow from RC4.w +(-.15,.05) to RC4.w
arrow from RC3 to RC5 chop circlerad chop circlerad
arrow from RC4 to RC6 chop circlerad chop circlerad
.PE
\centerline{\raise 1em\box\graph}
\caption{Possible worlds semantics is not compositional for
synchronization\hlabel{PW}}}
\end{counterexample}
These are no longer possible worlds equivalent, for only the one on
the right has a possible world $a_3(b_1c_4+b_2d_4)$. The same
counterexample can also be created with the inverse image operator of
CSP \hcite{Ho85}.
In {\sc Baeten, Bergstra \& Klop} \hcite{BBK87b} a unary priority
operator was defined on process graphs. This operator, which is not a
De Simone operator, assumes a partial ordering $<$ on $Act$, i.e.\
there is one priority operator for each such ordering. The operator
acts on graphs by removing all transitions $(s,a,t)$ for which there
is a transition $(s,b,u)$ with $b>a$ (and unreachable parts are
removed as well). Thus, in a choice between several actions, only the
actions with maximal priority may be executed. It is known that $RT$,
$RS$, $B$ and $U$ are compositional for the priority operators. I think
that $RT^\infty$, $PW$, $RS^*$, $RS^\omega$, $RB^*$ and $B^\omega$ are
too. However, none of the other semantics of \fig{spectrum} is.
Thus, in applications where priority operators are used and algebraic
reasoning makes compositionality essential, only semantics like $RT$,
$RS$ and $B$ are recommendable.
Depending on the application, compositionality for other operators may
be required as well, leading to various restrictions on the array of
suitable semantics. More on which semantics are compositional for
which operators can be found in {\sc Aceto, Fokkink \& Verhoef}
\hcite{AFV00} and the references therein.
\paragraph{The Recursive Specification Principle}\textBrown
A \phrase{recursive specification} is an equation of the form $X = t$
with $X$ a variable and $t$ a term (in a language such as BCCSP)
containing no other variables than $X$. (In the literature often
recursive specifications are allowed to involve more variables and
more such equations, but I do not need those here.) A recursive
specification $X=t$ over BCCSP is \phrase{guarded} if every occurrence
of $X$ in $t$ occurs in a subterm $at'$ of $t$ with $a \in Act$.
Recursive specifications are meant to specify processes. A process $p$
is said to be a \phrase{solution} of the recursive specification
$X=t$, using the semantics $\fO$, if the equation evaluates to a true
statement when substituting $p$ for $X$ and interpreting $=$ as
$=_\fO$. The \phrase{recursive specification principle (RSP)} says
that guarded recursive specifications have unique solutions. It has
been established for bisimulation semantics by {\sc Milner}
\hcite{Mi83} (using the language SCCS), and holds in fact for most
semantics encountered in this paper. In process algebra, two processes
are often proven semantically equivalent by showing that they are
solutions of the same recursive specification (cf.\ \hcite{Ba90}). For
this purpose it is important to work with a semantics in which RSP
holds. In the infinitary semantics between $T^\infty$ and $PW$
this is in fact not the case. For in those semantics the two different
processes of \ctr{infinitary} are both solutions of the
guarded recursive specification $X=aX+a$. For the finitary semantics
this counterexample does not apply, because the two processes are
identified, whereas in simulation semantics (or finer) these two
processes fail to be solutions of the same recursive specification.
\paragraph{Other considerations}\textBlack
In general it depends on the kind of interactions that are permitted
between a process and its environment (i.e.\ the testing scenario)
which semantics is sufficiently discriminating for a particular
application. When a range of appropriate semantics is found, also
considering the criteria discussed earlier in the section, the
question arises which of these semantics to actually use (e.g.\ in
making a formal verification). A natural choice is the {\em coarsest}
of the appropriate semantics, i.e.\ the one which is fully abstract
w.r.t.\ the requirements it has to meet in order to be adequate in the
context in which the investigated processes will be operating. In this
semantics more equations are valid than in any other. If the goal is
to prove that two processes are equivalent, this may succeed when
using the fully abstract semantics, whereas it may not even be true in
a finer one. Sometimes it is argued that the complexity of deciding
equivalence between processes is too high for certain semantics; using
them would give rise to too hard verifications. However, this can not
be an argument for rejecting a semantics in favour of a finer one. For
doing the verification in the finer semantics is actually a {\em
method} of establishing equivalence in the coarser semantics. In other
words, when $\fO \prec \fN$, establishing $p =_\fO q$ cannot be harder
than establishing $p =_\fN q$, as establishing $p =_\fN q$ is one of
the ways of establishing $p =_\fO q$. If deciding $\fO$-equivalence
has a higher complexity than deciding $\fN$-equivalence, the hard
cases to decide must be the equations $p =_\fO q$ for which $p =_\fN
q$ is not even true. It is especially for those applications that
$\fO$-semantics has a distinct advantage over $\fN$-semantics. This
argument has been made forcefully in {\sc Valmari}~\hcite{Val95}.
In practice, it may not always be certain in what ways the environment
can interact with investigated processes, and hence what constitutes
their observable behaviour. Moreover, the processes under
investigation may be transferred to more powerful environments long
after their initial use. One of the ways this could happen is through
the introduction of more operators for which the underlying semantics
has to be compositional. A big disadvantage of semantics that are
fully abstract with respect to non-stable notions of observability (or
non-stable sets of operators) is that whenever a verification is
carried out in such a semantics, and one decides that the context in
which the verified system will be working is such that actually a
little bit more can be observed than what was originally accounted
for, the verification has to be completely redone. Moreover, the
correctness of the investigated systems keeps depending on the
completeness of the underlying testing scenario. In such cases it is
preferable to carry out verifications in the finest semantics for
which this is convenient. This gives stronger equivalence results,
which have a greater chance of surviving in conditions where the
environment gets more powerful than originally anticipated. Especially
using bisimulation is a safe bet, as it respects the internal structure
of processes to such a degree that it is hard to imagine ever running
into an environment that distinguishes bisimilar processes. In {\sc
Bloom, Istrail \& Meyer} \hcite{BIM95} it is argued that ready
simulation semantics already respects the limits of observable
behaviour, so this may be a good alternative. It should be pointed
out, however, that most applications involve abstraction from internal
actions (not treated in this paper), and hence require variants of the
semantics treated here that accommodate such abstractions. In this
setting, the question of which semantics represents the limit of
observable behaviour is much harder.
\section{Distinguishing deadlock and successful termination}\hlabel{termination}
\hindex{termination}\hindex{successful termination}
Often researchers feel the need to distinguish two ways in which a
process can end: successfully (by completing its mission) or
unsuccessfully (for instance because it waits for an input from the
environment that will never arrive). This distinction can be formally
modelled in the context of labelled transition systems by considering
triples $(\IP,\rightarrow,\surd)$ in which $(\IP,\rightarrow)$ is a
labelled transition system as in \df{LTS} and $\surd \subseteq \IP$ is
a predicate on processes expressing which ones can terminate successfully
in their current state. It may or may not be required that the
processes $p \in \IP$ with $\surd(p)$ have no outgoing transitions.
Likewise, in the setting of process graphs, one studies tuples
$(\nd(g), \rt(g), \ed(g), \surd(g))$ with $\surd(g) \subseteq \nd(g)$.
Now any labelled transition system over an alphabet $Act$ equipped
with such a successful termination predicate, can be encoded as
an ordinary labelled transition system over an alphabet $Act \cup
\{\surd\}$ with $\surd \not\in Act$. Namely, instead of labelling the
processes/states where successful termination occurs with $\surd$, one
can view successful termination as a kind of action, and add
$\surd$-labelled transitions from those processes/states to fresh
endstates.
Now any semantic equivalence defined on ordinary labelled transition
systems extends to labelled transition systems with a successful
termination predicate by declaring two processes equivalent iff they
are equivalent in the encoded transition system.
In fact, in the same way all equivalences and preorders of this paper
extend to labelled transition systems equipped with arbitrary
predicates $P \subseteq \IP$.
Below, three of the thus defined equivalences are characterized
explicitly in terms of $\surd$.
\begin{definition}{terminating equivalences}
Let $(\IP,\rightarrow,\surd)$ be a labelled transition system with
successful termination.\\
$\sigma \in Act^\ast$ is a \phrase{terminating trace} of a process $p$
if there is a process $q$ such that $p \goto{\sigma} q$ and $\surd(q)$.
Let $L(p)$ denote the set of terminating traces of $p$
(and let $T(p)$ and $CT(p)$ be defined as before).
\end{definition}
Now two processes $p$ and $q$ are trace equivalent iff $T(p)=T(q)$ and
$L(p) = L(q)$.
They are completed trace equivalent iff $T(p)=T(q)$, $CT(p)=CT(q)$
and $L(p) = L(q)$.
They are bisimulation equivalent iff there exists a binary relation
$R$ on $\IP$ with $pRq$, satisfying, for $a \in Act$:
\begin{itemise}
\item
if $pRq$ and $p \goto{a} p '$, then $\exists q ' : ~q \goto{a} q '$
and $p ' Rq '$;
\item
if $pRq$ and $q \goto{a} q '$, then $\exists p ' : ~p \goto{a} p '$
and $p ' Rq '$;
\item
if $pRq$, then $\surd(p) \Leftrightarrow \surd(q)$.
\end{itemise}
\paragraph{Language semantics}\hindex{language semantics}
The nondeterministic \phrase{automata} studied in \phrase{automata
theory} (cf.\ {\sc Hopcroft \& Ullman} \hcite{HU79}) can be regarded as
process graphs with a termination predicate (except that in automata
theory the focus is on {\em finite} automata). The states $s \in
\nd(g)$ with $\surd(s)$ are called {\em accepting} or \phrase{final
states}, and a string $\sigma \in Act^*$ is said to be {\em
accepted}\hindex{accepted (by an automaton)} by the automaton $g$ iff
$\sigma \in L(g)$. The set $L(g)$ of all strings accepted by $g$ is
called the \phrase{language accepted by $g$}. In automata theory two
automata are considered equivalent iff they accept the same
language. Therefore {\em language equivalence} can be defined as
follows.
\begin{definition}{language}
Two processes $p$ and $q$ in a labelled transition system with
successful termination are \phrase{language equivalent}, notation $p
=_L q$, if $L(p) = L(q)$. Write $p \subseteq_L q$ iff
$L(p) \subseteq L(q)$.
\end{definition}
Clearly, language semantics makes more identifications than trace
semantics (i.e.\ $L \prec T$). It could be appended to the bottoms of
Figures \hhref{fig-spectrum} and \hhref{spectrum}. The reason for not
treating it earlier in this paper is that it cannot be defined
uniformly in terms of action relations. For either the definition
depends on the predicate $\surd$, which is not a part of ordinary
labelled transition systems, or, when encoding the $\surd$-predicate
by a transition label $\surd$, the definition treats \plat{\goto{\surd}}
different from the other action relations.
\paragraph{Complete axiomatizations}
A variant of the language BCCSP of \sect{axiomatizations} that
distinguishes between deadlock and successful termination is the
language BCCSP$_{\delta\epsilon}$, obtained from BCCSP by replacing
the constant 0 by two constants $\delta$ and $\epsilon$, representing
deadlock and successful termination, respectively. On
${\sf T\rm(BCCSP_{\delta\epsilon})}$ action relations $\goto{a}$ for
$a \in Act$ are again defined as the predicates on
${\sf T\rm(BCCSP_{\delta\epsilon})}$ generated by the action rules of
\tab{BCCSP}. Furthermore, the predicate $\surd \subseteq {\sf
T\rm(BCCSP_{\delta\epsilon})}$ is generated by the rules of Table
\ref{tick}.%
\begin{table}[htb]
\begin{center}\framebox{$
\surd(\epsilon)
\qquad
\begin{array}{c}
\surd(p) \\\hline
\surd(p+q)
\end{array}
\qquad
\begin{array}{c}
\surd(q) \\\hline
\surd(p+q)
\end{array}
$}
\caption{Rules for the termination predicate\hlabel{tick}}
\end{center}
\vspace{-15pt}
\end{table}
Now the complete axiomatizations of \tab{axioms} apply to
BCCSP$_{\delta\epsilon}$ as well, provided that the occurrences of $0$
are changed into $\delta$, an axiom $I(\epsilon)=\epsilon$ is added,
and the characteristic axioms for $\it CS$ and $CT$ also get variants
in which $by+z$ resp.\ $cy+v$ is replaced by $\epsilon$. Language
equivalence can be axiomatized by adding the axiom $a\delta=\delta$ to
the axioms for trace equivalence. This axiom corresponds with a
transformation on finite process trees that removes states from which
it is impossible to reach a state of successful termination. On the
normal forms w.r.t.\ this transformation, language equivalence and
trace equivalence coincide.
\paragraph{Successful termination as default}
Naturally, ordinary transition systems can be regarded as transition
systems with successful termination by taking the termination
predicate to be empty. On such transition systems, language
equivalence turns out to be the universal relation, axiomatized by the
equation $x=y$.
Alternatively, ordinary transition systems can be regarded as transition
systems with successful termination by letting $\surd$ be the set of
processes without outgoing transitions, i.e.\ by regarding all
termination to be successful. In this context, on a transition system
$(\IP,\rightarrow)$ one can define any of the semantics $\fO$ of this
paper as in \sect{summary}, or by taking successful termination
into account as in the present section. Denote the latter version of
$\fO$ by $\fO^\surd$. Then two processes are $\fO^\surd$-equivalent
iff they are $\fO$-equivalent after appending a $\surd$-transition to
every endstate. Comparing semantics that take termination into
account as well as semantics that abstract from it yields in first
approximation a ``double'' version of \fig{spectrum}, of which
a tiny fragment is displayed in \fig{termination spectrum}(a).%
\begin{figure}[htb]\Black{
.PS
scale = 4
arrowhead = 1
circlerad = .5
B0: circle invis "$L^\surd$" at (-1,0)
B1: circle invis "$T^\surd$" at last circle +(0,2)
B2: circle invis "$CT^\surd$" at last circle +(0,2)
B3: circle invis "$F^\surd$" at last circle +(0,2)
B4: circle invis "$R^\surd$" at last circle +(0,2)
arrow from B1 to B0 chop .4 chop circlerad
arrow from B2 to B1 chop .4 chop circlerad
arrow from B3 to B2 chop .4 chop circlerad
arrow from B4 to B3 chop .4 chop circlerad
C1: circle invis "$T$" at (1,2)
C2: circle invis "$CT$" at last circle +(0,2)
C3: circle invis "$F$" at last circle +(0,2)
C4: circle invis "$R$" at last circle +(0,2)
arrow from B1 to C1 chop circlerad
arrow from B2 to C2 chop .6
arrow from B3 to C3 chop circlerad
arrow from B4 to C4 chop circlerad
arrow from C2 to C1 chop .4 chop circlerad
arrow from C3 to C2 chop .4 chop circlerad
arrow from C4 to C3 chop .4 chop circlerad
"(a) first approximation" at (0,-1)
"(b) general case" at (7,-1)
"(c) normed processes" at (14,-1)
B0: circle invis "$L^\surd$" at (6,0)
B1: circle invis "$T$" at (8,0)
B2: circle invis "$CT$" at (7,2)
B3: circle invis "$F$" at last circle +(0,2)
B3t: circle invis "$F^\surd$" at last circle +(0,2)
B4: circle invis "$R$" at last circle +(0,2)
arrow from B2 to B0 chop .4 chop circlerad
arrow from B2 to B1 chop .4 chop circlerad
arrow from B3 to B2 chop .4 chop circlerad
arrow from B3t to B3 chop .4 chop circlerad
arrow from B4 to B3t chop .4 chop circlerad
B1: circle invis "$T$" at (14,0)
B2: circle invis "$L^\surd$" at last circle +(0,2)
B3: circle invis "$F$" at last circle +(0,2)
B3t: circle invis "$F^\surd$" at last circle +(0,2)
B4: circle invis "$R$" at last circle +(0,2)
arrow from B2 to B1 chop .4 chop circlerad
arrow from B3 to B2 chop .4 chop circlerad
arrow from B3t to B3 chop .4 chop circlerad
arrow from B4 to B3t chop .4 chop circlerad
.PE
\centerline{\raise 1em\box\graph}}
\caption{The linear time -- branching time spectrum for successfully
terminating processes\hlabel{termination spectrum}}
\end{figure}
However, for processes $p$ for which all termination is successful one
has $CT(p)=L(p)$. Hence the semantics $T^\surd$, $CT^\surd$ and $CT$
coincide. One also verifies easily that $R$ coincides with $R^\surd$,
$FT$ with $FT^\surd$, $RT$ with $RT^\surd$, $RS$ with $RS^\surd$, $B$
with $B^\surd$, etc. However, $F$ and $F^\surd$ differ, as
demonstrated by \linebreak[3] \ctr{F tick}. There $\it
F(left)=F(right)$ but $\rec{a,\{c,\surd\}} \in F^\surd({\it
left})-F^\surd({\it right})$. Also $\it PF$ differs from ${\it PF}^\surd$
and $\it 2S$ from $\it 2S^\surd$, for in \ctr{2SvsB} one has
${\it left} =_{\it 2S} {\it right}$ but, after appending a
$\surd$-transition to every endnode, $a \neg b\surd 0 \in
\fL_{\it 2S}^\surd({\it left})-\fL_{\it 2S}^\surd({\it right})$.
Thus \fig{termination spectrum}(a)
collapses to \fig{termination spectrum}(b).
In {\sc Groote \& H\"uttel} \hcite{GrHu94} \phrase{normed processes} are
studied: processes that never lose the possibility to terminate
eventually. A process $p$ is normed iff for each process $q$ reachable
from $p$, there is a process $r$ reachable from $q$ that terminates
(i.e.\ has no outgoing transitions) (and all termination is considered
successful). For normed processes $p$, $T(p)$ is completely determined
by $L(p)$. Hence \fig{termination spectrum}(b) collapses
further to \fig{termination spectrum}(c). This explains why in
\hcite{GrHu94} $L^\surd$ coincides with $CT$ and is finer than $T$.
\paragraph{Sequencing and sequential composition}
The \phrase{sequential composition} of processes $p$ and $q$ (cf.\
\hcite{Ho85,BW90}), denoted $p\cdot q$, is the process that first
executes $p$, and upon successful termination of $p$ executes
$q$. This operator is defined only on domains of processes on which
successful termination is somehow represented. {\em
Sequencing}\hindex{sequencing} on the other hand is defined on domains
of processes that do not distinguish between deadlock and successful
termination: let $p\concatenate q$ denote the process that first
executes $p$ until it can do no further actions, and then $q$
\hcite{BIM95}. On process graphs, $g \concatenate h$ can be constructed
by appending (at its root) a disjoint copy of $h$ to every endnode of
$g$. On process graphs with successful termination, $g\cdot h$ on the
other hand can be constructed by appending a disjoint copy of $h$ to
every node $s$ of $g$ with $\surd(s)$. In case $\surd(s)$ is possible
even if $s$ is not an endnode, the graph $h$ needs to be transformed
first in such a way that its root has no incoming edges \hcite{BW90}.
\begin{counterexample}[htb]
.PS
scale = 2.54
boxwid = 0.5; boxht = 0.5
circlerad = 0.05
arrowhead = 7
LC1: circle at (-4,2)
MC2: circle at (-4,1)
MC3: circle at (-4,0)
LC2: circle at (-5,1)
LC4: circle at (-3,1)
LC5: circle at (-3.5,0)
LC6: circle at (-2.5,0)
LB1: box invis "$a$" at (-4.65,1.65)
MB: box invis "$a$" at (-4.2,1.5)
LB2: box invis "$a$" at (-3.35,1.65)
LB3: box invis "$b$" at (-4.2,.5)
LB4: box invis "$b$" at (-3.4,0.65)
LB5: box invis "$c$" at (-2.6,0.65)
circle invis "$a+ab+a(b+c)$" at (-3.75,-1)
arrow from LC1 to LC2 chop circlerad chop circlerad
arrow from LC1 to MC2 chop circlerad chop circlerad
arrow from MC2 to MC3 chop circlerad chop circlerad
arrow from LC1 to LC4 chop circlerad chop circlerad
arrow from LC4 to LC5 chop circlerad chop circlerad
arrow from LC4 to LC6 chop circlerad chop circlerad
box invis "$=_{F}~$" at (.25,1.7)
box invis "$\neq_F^\surd~$" at (.25,1)
box invis "$=_F^{1\surd}$" at (.25,.3)
RC1: circle at (4,2)
RC2: circle at (5,1)
RCL: circle at (3,1)
RC3: circle at (4.5,0)
RC4: circle at (5.5,0)
RB1: box invis "$a$" at (4.65,1.65)
RBL: box invis "$a$" at (3.35,1.65)
RB2: box invis "$b$" at (4.6,.65)
RB3: box invis "$c$" at (5.4,.65)
RB4: box invis at (5.2,0)
circle invis "$a+a(b+c)$" at (4.25,-1)
arrow from RC1 to RC2 chop circlerad chop circlerad
arrow from RC1 to RCL chop circlerad chop circlerad
arrow from RC2 to RC3 chop circlerad chop circlerad
arrow from RC2 to RC4 chop circlerad chop circlerad
.PE
\centerline{\raise 1em\box\graph}
\caption{Failures semantics is not compositional for sequencing\hlabel{F tick}}
\end{counterexample}
\mbox{}\ctr{F tick} shows that failures semantics is not
compositional for sequencing. There $\it left =_F right$, but
${\it left}\concatenate c \neq_F {\it right}\concatenate c$.
The same counterexample, with all endnodes successfully terminating,
shows that singleton-failures semantics is not compositional for
either sequencing or sequential composition.
Likewise, \ctr{2SvsB} shows that $\it PF$ and $\it 2S$
are not compositional for sequencing, and \ctr{finite}
shows that none of the semantics between $T$ and $B^-$ are.
All of the semantics studied in this paper, except for $F^1$,
are compositional for sequential composition.
As sequencing is the same as sequential composition on processes where
all endstates, and only those, are considered to be successfully
terminating, this implies that all the semantics $\fO^\surd$,
except for $F^{1\surd}$, are compositional for sequencing.
If $c$ is an action that does not occur in $p$ or $q$, then
\plat{p\concatenate c =_\fO q\concatenate c \Leftrightarrow p =_{\fO^\surd} q}.
(Think of $c$ as $\surd$.) From this it follows that for all
semantics $\fO$, except $F^1$, $\fO^\surd$ is fully abstract w.r.t.\
$\fO$ and sequencing, at least for processes that can not execute
every action in $Act$.
\section*{Concluding remarks}\hname{Concluding remarks}
\addcontentsline{toc}{section}{Concluding remarks}
In this paper various semantic equivalences for concrete sequential
processes are defined, motivated, compared and axiomatized.
Of course many more equivalences can be given than the ones presented
here. The reason for selecting just these, is that they can
be motivated rather nicely and/or play a r\^ole in the literature on
semantic equivalences. In {\sc Abramsky \& Vickers} \hcite{AV93}
the observations which underlie many of the semantics in this paper
are placed in a uniform algebraic framework, and some general
completeness criteria are stated and proved. They also introduce
\phrase{acceptance semantics}, which can be obtained from
acceptance-refusal semantics (\sect{readiness}) by dropping the
refusals, and analogously \phrase{acceptance trace semantics}. I am
not aware of any reasonable testing scenario for these notions.
In \sect{ready simulation} I remarked that a testing scenario
for simulation and ready simulation semantics can be obtained by
adding an $undo$-button to the scenarios for trace and ready trace
semantics. Likewise, {\sc Schnoebelen} \hcite{Sch91}
investigates the addition of an $undo$-button to the testing scenarios
for completed trace, readiness, failures and failure trace semantics,
thereby obtaining 3 new equivalences $CT_\#$, $R_\#$ and $F_\#$.
{\it Undo}-failure trace equivalence coincides with finitary failure simulation
equivalence, just like $undo$-trace and $undo$-ready trace equivalence
coincide with finitary simulation and finitary ready simulation
equivalence. For image finite processes $R_\#$ coincides with
$F_\#$. Furthermore $R \preceq R_\# \preceq RS^*$, $F \preceq F_\#
\preceq \it FS^*$, $CT \preceq CT_\# \preceq CS^*$ and $S^*
\preceq CT_\# \preceq F_\# \preceq R_\#$.
An interesting topic is the generalization of this work to a setting
with silent moves and/or with parallelism. In both cases there turn
out to be many interesting variations. The generalization to a
setting with invisible actions will be tackled in \hcite{vG93b}.
Some work towards generalizing the spectrum to a setting with
parallelism can be found for instance in \hcite{Pm86} and \hcite{vG90}.
%\bibliography{/kilby/u1/rvg/lib/abbreviations,/kilby/u1/rvg/lib/new,/kilby/u1/rvg/lib/dbase}
\begin{thebibliography}{10}
\bibitem{Ab87}\hname{Ab87}
{\sc S.~Abramsky} (1987):
\newblock {\em Observation equivalence as a testing equivalence.}
\newblock {\sl Theoretical Computer Science} 53, pp. 225--241.
\bibitem{AV93}\hname{AV93}
{\sc S.~Abramsky \& S.~Vickers} (1993):
\newblock {\em Quantales, observational logic and process semantics.}
\newblock {\sl Mathematical Structures in Computer Science} 3, pp. 161--227.
\bibitem{AFV00}\hname{AFV00}
{\sc L.~Aceto, W.J. Fokkink \& C.~Verhoef} (2000):
\newblock {\em Structural operational semantics.}
\newblock In J.A. Bergstra, A.~Ponse \& S.A. Smolka, editors: {\sl Handbook of Process Algebra}, chapter~3. Elsevier.
\bibitem{Ac88}\hname{Ac88}
{\sc P.~Aczel} (1988):
\newblock {\em Non-well-founded Sets}, {\sl CSLI Lecture Notes} 14.
\newblock Stanford University.
\bibitem{Ba90}\hname{Ba90}
{\sc J.C.M. Baeten}, editor (1990):
\newblock {\em Applications of Process Algebra}.
\newblock Cambridge Tracts in Theoretical Computer Science 17. Cambridge University Press.
\bibitem{BBK87b}\hname{BBK87b}
{\sc J.C.M. Baeten, J.A. Bergstra \& J.W. Klop} (1987):
\newblock {\em Ready-trace semantics for concrete process algebra with the priority operator.}
\newblock {\sl Computer Journal} 30(6), pp. 498--506.
\bibitem{BW90}\hname{BW90}
{\sc J.C.M. Baeten \& W.P. Weijland} (1990):
\newblock {\em Process Algebra}.
\newblock Cambridge Tracts in Theoretical Computer Science 18. Cambridge University Press.
\bibitem{BKMOZ86}\hname{BKMOZ86}
{\sc J.W.~de Bakker, J.N. Kok, J.-J.Ch. Meyer, E.-R. Olderog \& J.I. Zucker} (1986):
\newblock {\em Contrasting themes in the semantics of imperative concurrency.}
\newblock In J.W. de~Bakker, W.P.~de Roever \& G.~Rozenberg, editors: {\sl Current Trends in Concurrency}, {\sl \rm LNCS} 224, Springer, pp. 51--121.
\bibitem{BZ82}\hname{BZ82}
{\sc J.W.~de Bakker \& J.I. Zucker} (1982):
\newblock {\em Processes and the denotational semantics of concurrency.}
\newblock {\sl Information and Control} 54(1/2), pp. 70--120.
\bibitem{BKO86}\hname{BKO86}
{\sc J.A. Bergstra, J.W. Klop \& E.-R. Olderog} (1986):
\newblock {\em Failure semantics with fair abstraction.}
\newblock Report CS-R8609, CWI, Amsterdam.
\bibitem{BKO88}\hname{BKO88}
{\sc J.A. Bergstra, J.W. Klop \& E.-R. Olderog} (1988):
\newblock {\em Readies and failures in the algebra of communicating processes.}
\newblock {\sl SIAM Journal on Computing} 17(6), pp. 1134--1177.
\bibitem{BIM95}\hname{BIM95}
{\sc B.~Bloom, S.~Istrail \& A.R. Meyer} (1995):
\newblock {\em Bisimulation can't be traced.}
\newblock {\sl Journal of the ACM} 42(1), pp. 232--268.
\bibitem{BHR84}\hname{BHR84}
{\sc S.D. Brookes, C.A.R. Hoare \& A.W. Roscoe} (1984):
\newblock {\em A theory of communicating sequential processes.}
\newblock {\sl Journal of the ACM} 31(3), pp. 560--599.
\bibitem{BR85}\hname{BR85}
{\sc S.D. Brookes \& A.W. Roscoe} (1985):
\newblock {\em An improved failures model for communicating processes.}
\newblock In S.D. Brookes, A.W. Roscoe \& G.~Winskel, editors: {\sl Seminar on Concurrency}, {\sl \rm LNCS} 197, Springer, pp. 281--305.
\bibitem{Da82}\hname{Da82}
{\sc P.~Darondeau} (1982):
\newblock {\em An enlarged definition and complete axiomatisation of observational congruence of finite processes.}
\newblock In M.~Dezani-Ciancaglini \& U.~Montanari, editors: {\sl Proceedings international symposium on programming: $5^{th}$ colloquium, Aarhus}, {\sl \rm LNCS} 137, Springer, pp. 47--62.
\bibitem{DN87}\hname{DN87}
{\sc R.~De~Nicola} (1987):
\newblock {\em Extensional equivalences for transition systems.}
\newblock {\sl Acta Informatica} 24, pp. 211--237.
\bibitem{DH84}\hname{DH84}
{\sc R.~De~Nicola \& M.~Hennessy} (1984):
\newblock {\em Testing equivalences for processes.}
\newblock {\sl Theoretical Computer Science} 34, pp. 83--133.
\bibitem{En85}\hname{En85}
{\sc J.~Engelfriet} (1985):
\newblock {\em Determinacy $\rightarrow$ (observation equivalence $=$ trace equivalence).}
\newblock {\sl Theoretical Computer Science} 36(1), pp. 21--25.
\bibitem{vG90}\hname{vG90}
{\sc R.J.~van Glabbeek} (1990):
\newblock {\em The refinement theorem for {ST}-bisimulation semantics.}
\newblock In M.~Broy \& C.B. Jones, editors: {\sl {\rm Proceedings IFIP TC2 Working Conference on} Programming Concepts and Methods, {\rm Sea of Gallilee, Israel}}, North-Holland, pp. 27--52.
\bibitem{vG90a-full}\hname{vG90a-full}
{\sc R.J.~van Glabbeek} (1990):
\newblock {\em \weg{Z}{T}he linear time -- branching time spectrum.}
\newblock Report CS-R9029, CWI, Amsterdam.
\newblock Extended abstract in J.C.M. Baeten \& J.W. Klop, editors: Proceedings {\sl CONCUR '90, Theories of Concurrency: Unification and Extension}, Amsterdam, August 1990, LNCS 458, Springer-Verlag, 1990, pp. 278--297.
\bibitem{vG93b}\hname{vG93b}
{\sc R.J.~van Glabbeek} (1993):
\newblock {\em The linear time -- branching time spectrum {II}; the semantics of sequential systems with silent moves.}
\newblock Manuscript. Preliminary version available by ftp at {\tt ftp://boole.stanford.edu/\-pub/\-spectrum.ps.gz}.
\newblock Extended abstract in E. Best, editor: Proceedings {\sl CONCUR'93}, 4$^{\it th}$ International Conference on Concurrency Theory, Hildesheim, Germany, August 1993, LNCS 715, Springer, pp. 66--81.
\bibitem{vGR89}\hname{vGR89}
{\sc R.J.~van Glabbeek \& J.J.M.M. Rutten} (1989):
\newblock {\em The processes of {De Bakker and Zucker} represent bisimulation equivalence classes.}
\newblock In {\sl J.W. de Bakker, 25 jaar semantiek, liber amicorum}, CWI, Amsterdam, pp. 243--246.
\bibitem{Gr90}\hname{Gr90}
{\sc J.F. Groote} (1990):
\newblock {\em A new strategy for proving $\omega$--completeness with applications in process algebra.}
\newblock In J.C.M. Baeten \& J.W. Klop, editors: {\sl Proceedings CONCUR 90, {\rm Amsterdam}}, {\sl \rm LNCS} 458, Springer, pp. 314--331.
\bibitem{GrHu94}\hname{GrHu94}
{\sc J.F. Groote \& H.~H\"{u}ttel} (1994):
\newblock {\em Undecidable equivalences for basic process algebra.}
\newblock {\sl Information and Computation} 115(2), pp. 354--371.
\bibitem{GrV92}\hname{GrV92}
{\sc J.F. Groote \& F.W. Vaandrager} (1992):
\newblock {\em Structured operational semantics and bisimulation as a congruence.}
\newblock {\sl Information and Computation} 100(2), pp. 202--260.
\bibitem{He85}\hname{He85}
{\sc M.~Hennessy} (1985):
\newblock {\em Acceptance trees.}
\newblock {\sl Journal of the ACM} 32(4), pp. 896--928.
\bibitem{HM80-85}\hname{HM80-85}
{\sc M.~Hennessy \& R.~Milner} (1980):
\newblock {\em On observing nondeterminism and concurrency.}
\newblock In J.W. de~Bakker \& J.~van Leeuwen, editors: {\sl Proceedings $7^{th}$ ICALP, {\rm Noordwijkerhout}}, {\sl \rm LNCS} 85, Springer, pp. 299--309.
\newblock This is a preliminary version of:\weg.
\bibitem{HM85}\hname{HM85}
{\sc M.~Hennessy \& R.~Milner} (1985):
\newblock {\em Algebraic laws for nondeterminism and concurrency.}
\newblock {\sl Journal of the ACM} 32(1), pp. 137--161.
\bibitem{Ho78}\hname{Ho78}
{\sc C.A.R. Hoare} (1978):
\newblock {\em Communicating sequential processes.}
\newblock {\sl Communications of the ACM} 21(8), pp. 666--677.
\bibitem{Ho80}\hname{Ho80}
{\sc C.A.R. Hoare} (1980):
\newblock {\em Communicating sequential processes.}
\newblock In R.M. McKeag \& A.M. Macnaghten, editors: {\sl On the construction of programs -- an advanced course}, Cambridge University Press, pp. 229--254.
\bibitem{Ho85}\hname{Ho85}
{\sc C.A.R. Hoare} (1985):
\newblock {\em Communicating {S}equential {P}rocesses}.
\newblock Prentice Hall, Englewood Cliffs.
\bibitem{Ho95}\hname{Ho95}
{\sc M.J. Hollenberg} (1995):
\newblock {\em {Hennessy-Milner} classes and process algebra.}
\newblock In A.~Ponse, M.~de~Rijke \& Y.~Venema, editors: {\sl Modal Logic and Process Algebra: a Bisimulation Perspective}, {\sl CSLI Lecture Notes} 53, CSLI Publications, Stanford, California, pp. 187--216.
\bibitem{HU79}\hname{HU79}
{\sc J.E. Hopcroft \& J.D. Ullman} (1979):
\newblock {\em Introduction to Automata Theory, Languages and Computation}.
\newblock Addison-Wesley.
\bibitem{Ke81}\hname{Ke81}
{\sc J.K. Kennaway} (1981):
\newblock {\em Formal semantics of nondeterminism and parallelism}.
\newblock PhD thesis, University of Oxford.
\bibitem{LS91}\hname{LS91}
{\sc K.G. Larsen \& A.~Skou} (1991):
\newblock {\em Bisimulation through probabilistic testing.}
\newblock {\sl Information and Computation} 94(1), pp. 1--28.
\bibitem{Mey85}\hname{Mey85}
{\sc A.R. Meyer} (1985):
\newblock {\em Report on the $5^{th}$ international workshop on the semantics of programming languages in {Bad Honnef}.}
\newblock {\sl Bulletin of the European Association for Theoretical Computer Science} 27, pp. 83--84.
\bibitem{Mi80}\hname{Mi80}
{\sc R.~Milner} (1980):
\newblock {\em A Calculus of Communicating Systems}, {\sl \rm LNCS} 92.
\newblock Springer.
\bibitem{Mi81}\hname{Mi81}
{\sc R.~Milner} (1981):
\newblock {\em Modal characterisation of observable machine behaviour.}
\newblock In G.~Astesiano \& C.~B\"{o}hm, editors: {\sl Proceedings CAAP 81}, {\sl \rm LNCS} 112, Springer, pp. 25--34.
\bibitem{Mi83}\hname{Mi83}
{\sc R.~Milner} (1983):
\newblock {\em Calculi for synchrony and asynchrony.}
\newblock {\sl Theoretical Computer Science} 25, pp. 267--310.
\bibitem{OH86}\hname{OH86}
{\sc E.-R. Olderog \& C.A.R. Hoare} (1986):
\newblock {\em Specification-oriented semantics for communicating processes.}
\newblock {\sl Acta Informatica} 23, pp. 9--66.
\bibitem{Pa81}\hname{Pa81}
{\sc D.M.R. Park} (1981):
\newblock {\em Concurrency and automata on infinite sequences.}
\newblock In P.~Deussen, editor: {\sl $5^{th}$ GI Conference}, {\sl \rm LNCS} 104, Springer, pp. 167--183.
\bibitem{Ph87}\hname{Ph87}
{\sc I.C.C. Phillips} (1987):
\newblock {\em Refusal testing.}
\newblock {\sl Theoretical Computer Science} 50, pp. 241--284.
\bibitem{Pn85}\hname{Pn85}
{\sc A.~Pnueli} (1985):
\newblock {\em Linear and branching structures in the semantics and logics of reactive systems.}
\newblock In W.~Brauer, editor: {\sl Proceedings $12^{th}$ ICALP, {\rm Nafplion}}, {\sl \rm LNCS} 194, Springer, pp. 15--32.
\bibitem{Pm86}\hname{Pm86}
{\sc L.~Pomello} (1986):
\newblock {\em {Some equivalence notions for concurrent systems -- An overview}.}
\newblock In G.~Rozenberg, editor: {\sl Advances in Petri Nets 1985}, {\sl \rm LNCS} 222, Springer, pp. 381--400.
\bibitem{Ro93}\hname{Ro93}
{\sc A.W. Roscoe} (1993):
\newblock {\em Unbounded non-determinism in {CSP}.}
\newblock {\sl Journal of Logic and Computation} 3(2), pp. 131--172.
\bibitem{RB81}\hname{RB81}
{\sc W.C. Rounds \& S.D. Brookes} (1981):
\newblock {\em Possible futures, acceptances, refusals and communicating processes.}
\newblock In {\sl $22^{nd}$ Annual Symposium on Foundations of Computer Science, {\rm Nashville, Tennessee}}, IEEE, New York, pp. 140--149.
\bibitem{Sch91}\hname{Sch91}
{\sc Ph. Schnoebelen} (1991):
\newblock {\em Experiments on processes with backtracking.}
\newblock In J.C.M. Baeten \& J.F. Groote, editors: {\sl Proceedings CONCUR 91, {\rm Amsterdam}}, {\sl \rm LNCS} 527, Springer, pp. 80--94.
\bibitem{Val95}\hname{Val95}
{\sc A.~Valmari} (1995):
\newblock {\em Failure-based equivalences are faster than many believe.}
\newblock In J.~Desel, editor: {\sl {\rm Proceedings of the International Workshop on} Structures in Concurrency Theory, {\rm Berlin, May 1995}}, Workshops in Computing, Springer, pp. 326--340.
\bibitem{VD98}\hname{VD98}
{\sc S.~Veglioni \& R.~De Nicola} (1998):
\newblock {\em Possible worlds for process algebras.}
\newblock In D.~Sangiorgi \& R.~de~Simone, editors: {\sl Proceedings CONCUR 98, {\rm Nice, France}}, {\sl \rm LNCS} 1466, Springer, pp. 179--193.
\bibitem{Wi84b}\hname{Wi84b}
{\sc G.~Winskel} (1984):
\newblock {\em Synchronization trees.}
\newblock {\sl Theoretical Computer Science} 34(1/2), pp. 33--82.
\end{thebibliography}
%\printindex
\begin{theindex}
\item \href{$\frac{2}{3}$-bisimulation equivalence}{$\frac{2}{3}$-bisimulation equivalence}, 5,
34
\item \href{2-nested simulation}{2-nested simulation}, 38
\item \href{2-nested simulation equivalence}{2-nested simulation equivalence}, 38
\item \href{2-nested simulation formulas}{2-nested simulation formulas}, 38, 51
\item \href{2-nested simulation machine}{2-nested simulation machine}, 39
\item \href{2-nested simulation semantics}{2-nested simulation semantics}, 5, 38
\indexspace
\item \href{acceptance semantics}{acceptance semantics}, 80
\item \href{acceptance trace semantics}{acceptance trace semantics}, 80
\item \href{acceptance tree}{acceptance tree}, 17
\item \href{acceptance-refusal equivalent}{acceptance-refusal equivalent}, 27
\item \href{acceptance-refusal semantics}{acceptance-refusal semantics}, 26
\item \href{acceptance-refusal triple}{acceptance-refusal triple}, 27
\item \href{accepted (by an automaton)}{accepted (by an automaton)}, 77
\item \href{action}{action}, 6
\item \href{action relations}{action relations}, 3, 6
\item \href{action rules}{action rules}, 59
\item \href{algebra}{algebra}, 3
\item \href{anti-foundation axiom}{anti-foundation axiom}, 43
\item \href{automata}{automata}, 77
\item \href{automata theory}{automata theory}, 77
\indexspace
\item \href{barbed semantics}{barbed semantics}, 5
\item \href{BCCSP}{BCCSP}, 59
\item \href{BCSP}{BCSP}, 68
\item \href{bisimilar}{bisimilar}, 39
\item \href{bisimulation}{bisimulation}, 39, 41
\item \href{bisimulation equivalence}{bisimulation equivalence}, 39
\item \href{bisimulation formulas}{bisimulation formulas}, 51
\item \href{bisimulation semantics}{bisimulation semantics}, 4, 39
\item \href{blocked}{blocked}, 14
\item \href{branching}{branching}, 6
\item \href{branching equivalent}{branching equivalent}, 43
\item \href{button pushing experiments}{button pushing experiments}, 5
\indexspace
\item \href{canonical graph}{canonical graph}, 8, 10, 13, 16, 24, 26
\item \href{comparative concurrency semantics}{comparative concurrency semantics}, 2
\item \href{complete simulation}{complete simulation}, 33
\item \href{complete trace}{complete trace}, 11
\item \href{completed simulation equivalent}{completed simulation equivalent}, 33
\item \href{completed simulation formulas}{completed simulation formulas}, 51
\item \href{completed trace deterministic}{completed trace deterministic}, 56
\item \href{completed trace domain}{completed trace domain}, 13
\item \href{completed trace equivalent}{completed trace equivalent}, 11
\item \mbox{}\href{completed trace formulas}{completed trace formulas}, 12, 50
\item \href{completed trace machine}{completed trace machine}, 12
\item \href{completed trace semantics}{completed trace semantics}, 4, 11
\item \href{completeness}{completeness}, 62, 66
\item \href{compositional}{compositional}, 59
\item \href{concrete}{concrete}, 3
\item \href{concurrency}{concurrency}, 2
\item \href{congruences}{congruences}, 59
\item \href{connected}{connected}, 7
\item \href{continuations}{continuations}, 15
\item \href{cross saturated}{cross saturated}, 57
\indexspace
\item \href{De Simone languages}{De Simone languages}, 74
\item \href{deadlock}{deadlock}, 72
\item \href{deadlock behaviour}{deadlock behaviour}, 72
\item \href{decorated trace semantics}{decorated trace semantics}, 52
\item \href{denial formulas}{denial formulas}, 38
\item \href{determinate}{determinate}, 55
\item \href{deterministic}{deterministic}, 7, 54
\item \href{deterministic up to $\equiv $}{deterministic up to $\equiv $}, 55
\item \href{domain}{domain}, 3
\item \href{domain theory}{domain theory}, 3
\indexspace
\item \href{edges}{edges}, 7
\item \href{embedding}{embedding}, 8
\item \href{event structure}{event structure}, 3
\item \href{exhibited behaviour semantics}{exhibited behaviour semantics}, 5
\item \href{explicit domains}{explicit domains}, 3
\item \href{explicit models}{explicit models}, 10
\item \href{external choice}{external choice}, 68
\indexspace
\item \href{failure formulas}{failure formulas}, 15, 50
\item \href{failure pair}{failure pair}, 14
\item \href{failure set}{failure set}, 14, 15
\item \href{failure simulation equivalent}{failure simulation equivalent}, 33
\item \href{failure simulation formulas}{failure simulation formulas}, 53
\item \href{failure trace}{failure trace}, 19
\item \href{failure trace augmentation}{failure trace augmentation}, 20
\item \href{failure trace equivalent}{failure trace equivalent}, 19
\item \href{failure trace formulas}{failure trace formulas}, 19, 50
\item \href{failure trace machine}{failure trace machine}, 18
\item \href{failure trace relations}{failure trace relations}, 19
\item \href{failure trace semantics}{failure trace semantics}, 5, 19
\item \href{failure trace set}{failure trace set}, 19, 20
\item \mbox{}\href{failure trace simulation equivalent}{failure trace simulation equivalent}, 33
\item \href{failures domain}{failures domain}, 16
\item \href{failures equivalent}{failures equivalent}, 14
\item \href{failures machine}{failures machine}, 14
\item \href{failures semantics}{failures semantics}, 4, 14
\item \href{final states}{final states}, 77
\item \href{finitary simulation equivalent}{finitary simulation equivalent}, 31
\item \href{finitary simulation formulas}{finitary simulation formulas}, 31
\item \href{finite}{finite}, 7
\item \href{finite ready trace formulas}{finite ready trace formulas}, 28
\item \href{finite-failure trace equivalent}{finite-failure trace equivalent}, 21
\item \href{finite-failures equivalent}{finite-failures equivalent}, 17
\item \href{finite-failures semantics}{finite-failures semantics}, 17
\item \href{finite-ready trace equivalent}{finite-ready trace equivalent}, 28
\item \href{finitely branching}{finitely branching}, 3, 7
\item \href{free}{free}, 14
\item \href{fully abstract}{fully abstract}, 73
\indexspace
\item \href{generalized action relations}{generalized action relations}, 6
\item \href{generative processes}{generative processes}, 36
\item \href{global testing}{global testing}, 40
\item \href{graph domains}{graph domains}, 3
\item \href{graph isomorphism}{graph isomorphism}, 7
\item \href{graph transformations}{graph transformations}, 62
\item \href{GSOS trace congruence}{GSOS trace congruence}, 5, 34, 38
\item \href{guarded}{guarded}, 75
\indexspace
\item \href{Hennessy-Milner formulas}{Hennessy-Milner formulas}, 44
\item \href{Hennessy-Milner logic (HML)}{Hennessy-Milner logic (HML)}, 44
\item \href{history}{history}, 63
\item \href{history unambiguous}{history unambiguous}, 63
\item \href{HML-equivalent}{HML-equivalent}, 44
\indexspace
\item \href{identifying}{identifying}, 63
\item \href{image finite}{image finite}, 7
\item \href{infinitary completed trace equivalent}{infinitary completed trace equivalent}, 14
\item \href{infinitary failure trace equivalent}{infinitary failure trace equivalent}, 20
\item \href{infinitary failures equivalent}{infinitary failures equivalent}, 17
\item \href{infinitary Hennessy-Milner formulas}{infinitary Hennessy-Milner formulas}, 40
\item \href{infinitary possible future}{infinitary possible future}, 28
\item \href{infinitary possible-futures equivalent}{infinitary possible-futures equivalent}, 28
\item \href{infinitary ready equivalent}{infinitary ready equivalent}, 27
\item \href{infinitary ready trace equivalent}{infinitary ready trace equivalent}, 24
\item \href{infinitary simulation equivalence}{infinitary simulation equivalence}, 31
\item \href{infinitary trace equivalent}{infinitary trace equivalent}, 11
\item \href{infinitary trace semantics}{infinitary trace semantics}, 11
\item \mbox{}\href{infinite failure trace}{infinite failure trace}, 20
\item \href{infinite ready trace}{infinite ready trace}, 24
\item \href{infinite trace}{infinite trace}, 11
\item \href{initial actions}{initial actions}, 7
\item \href{initial nondeterminism}{initial nondeterminism}, 9
\item \href{initial state}{initial state}, 7
\item \href{initially deterministic BCSP-expressions}{initially deterministic BCSP-expressions}, 68
\item \href{internal choice}{internal choice}, 68
\item \href{intersection operator}{intersection operator}, 72
\item \href{isometry}{isometry}, 43
\item \href{isomorphic}{isomorphic}, 7
\indexspace
\item \href{labelled transition system}{labelled transition system}, 3, 6
\item \href{language accepted by $g$}{language accepted by $g$}, 77
\item \href{language equivalent}{language equivalent}, 77
\item \href{language semantics}{language semantics}, 77
\item \href{linear time -- branching time spectrum}{linear time -- branching time spectrum}, 3
\item \href{lookahead}{lookahead}, 39
\indexspace
\item \href{may testing\/}{may testing\/}, 72
\item \href{menu}{menu}, 13, 22
\item \href{modal characterization}{modal characterization}, 51
\item \href{modelling}{modelling}, 2
\item \href{must testing}{must testing}, 72
\indexspace
\item \href{nodes}{nodes}, 7
\item \href{nondeterminism}{nondeterminism}, 9
\item \href{normal form}{normal form}, 62
\item \href{normal ready trace}{normal ready trace}, 23
\item \href{normed processes}{normed processes}, 78
\indexspace
\item \href{observational equivalence}{observational equivalence}, 4, 44
\indexspace
\item \href{parallel}{parallel}, 2
\item \href{partial traces}{partial traces}, 4
\item \href{path}{path}, 7
\item \href{Petri net}{Petri net}, 3
\item \href{possible future}{possible future}, 26
\item \href{possible world}{possible world}, 48, 49
\item \href{possible worlds equivalent}{possible worlds equivalent}, 48, 49
\item \href{possible worlds formulas}{possible worlds formulas}, 51
\item \href{possible worlds semantics}{possible worlds semantics}, 5, 49
\item \href{possible-futures equivalent}{possible-futures equivalent}, 26
\item \href{possible-futures formulas}{possible-futures formulas}, 51
\item \href{possible-futures semantics}{possible-futures semantics}, 5, 26
\item \href{precongruences}{precongruences}, 60
\item \href{process}{process}, 2
\item \mbox{}\href{process domain}{process domain}, 3
\item \href{process expressions}{process expressions}, 59
\item \href{process graph}{process graph}, 7
\item \href{process graphs with multiple roots}{process graphs with multiple roots}, 9
\item \href{process theory}{process theory}, 2
\indexspace
\item \href{reachable}{reachable}, 6
\item \href{reactive machines}{reactive machines}, 36
\item \href{readiness domain}{readiness domain}, 26
\item \href{readiness formulas}{readiness formulas}, 25, 50
\item \href{readiness machine}{readiness machine}, 25
\item \href{readiness semantics}{readiness semantics}, 5, 25
\item \href{ready equivalent}{ready equivalent}, 25
\item \href{ready pair}{ready pair}, 25
\item \href{ready set}{ready set}, 25
\item \href{ready simulation}{ready simulation}, 33
\item \href{ready simulation equivalent}{ready simulation equivalent}, 33
\item \href{ready simulation formulas}{ready simulation formulas}, 51
\item \href{ready simulation machine}{ready simulation machine}, 36
\item \href{ready simulation semantics}{ready simulation semantics}, 5, 33
\item \href{ready trace}{ready trace}, 22, 23
\item \href{ready trace deterministic}{ready trace deterministic}, 56
\item \href{ready trace domain}{ready trace domain}, 24
\item \href{ready trace equivalent}{ready trace equivalent}, 22
\item \href{ready trace formulas}{ready trace formulas}, 23, 51
\item \href{ready trace machine}{ready trace machine}, 22
\item \href{ready trace relations}{ready trace relations}, 22
\item \href{ready trace semantics}{ready trace semantics}, 5, 22
\item \href{ready trace set}{ready trace set}, 22
\item \href{ready trace simulation equivalent}{ready trace simulation equivalent}, 33
\item \href{recursive specification}{recursive specification}, 75
\item \href{recursive specification principle (RSP)}{recursive specification principle (RSP)}, 75
\item \href{refusal equivalence}{refusal equivalence}, 19
\item \href{refusal relations}{refusal relations}, 19
\item \href{refusal semantics}{refusal semantics}, 5
\item \href{refusal set}{refusal set}, 14, 19
\item \href{refusal testing}{refusal testing}, 19
\item \href{replicator}{replicator}, 33
\item \href{root}{root}, 7
\item \href{RT-saturated}{RT-saturated}, 58
\indexspace
\item \href{satisfaction relation}{satisfaction relation}, 10, 12, 15, 19, 23, 25, 28, 29, 31, 38,
40, 44
\item \href{saturated}{saturated}, 58
\item \href{semantic equivalence}{semantic equivalence}, 3
\item \href{semantics}{semantics}, 2, 8
\item \mbox{}\href{sequencing}{sequencing}, 79
\item \href{sequential}{sequential}, 2, 3, 6
\item \href{sequential composition}{sequential composition}, 79
\item \href{similar}{similar}, 29
\item \href{simulation}{simulation}, 5, 29, 30
\item \href{simulation equivalence}{simulation equivalence}, 29
\item \href{simulation formulas}{simulation formulas}, 29, 51
\item \href{simulation semantics}{simulation semantics}, 5, 29
\item \href{singleton-failure formulas}{singleton-failure formulas}, 50
\item \href{singleton-failure pair}{singleton-failure pair}, 37
\item \href{singleton-failures equivalent}{singleton-failures equivalent}, 37
\item \href{singleton-failures semantics}{singleton-failures semantics}, 37
\item \href{solution}{solution}, 75
\item \href{soundness}{soundness}, 61, 66
\item \href{state transition diagram}{state transition diagram}, 3
\item \href{states}{states}, 7
\item \href{successful termination}{successful termination}, 77
\item \href{summand}{summand}, 59
\item \href{synchronous parallel composition}{synchronous parallel composition}, 73
\indexspace
\item \href{term domains}{term domains}, 3
\item \href{terminating}{terminating}, 62
\item \href{terminating trace}{terminating trace}, 77
\item \href{termination}{termination}, 77
\item \href{terms}{terms}, 59
\item \href{testing equivalences}{testing equivalences}, 5
\item \href{trace}{trace}, 9, 10, 63
\item \href{trace domain}{trace domain}, 10
\item \href{trace equivalent}{trace equivalent}, 9
\item \href{trace formulas}{trace formulas}, 10, 50
\item \href{trace machine}{trace machine}, 12
\item \href{trace semantics}{trace semantics}, 4, 9
\item \href{trace set}{trace set}, 10
\item \href{transitions}{transitions}, 7
\item \href{phrase-tree}{tree}, 7
\item \href{tree equivalent}{tree equivalent}, 47
\item \href{tree semantics}{tree semantics}, 5, 47
\indexspace
\item \href{unfolding}{unfolding}, 47
\item \href{uniform concurrency}{uniform concurrency}, 3
\item \href{uniformly}{uniformly}, 3
\item \href{universal algebra}{universal algebra}, 3
\indexspace
\item \href{verification}{verification}, 2
\indexspace
\item \href{weather}{weather}, 40
\item \href{well-founded}{well-founded}, 7
\end{theindex}
\end{document}