%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Version 2 (started on 19/09/2003):
% Changing black-box concurrent ZK protocol to be that of
% Richardson-Kilian, instead of PRS.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Version 1 (started on 08/09/2003):
% This is the full journal version containing the upper
% bound only from the STOC'03 paper on bounded-concurrent
% secure 2-party computation (conc2party.tex)
% In the middle of changing to STRICT-POLY simulation
% with the PRS protocol.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\documentclass[11pt]{article}
\usepackage{fullpage}
\title{{\bf Protocols for Bounded-Concurrent Secure Two-Party
Computation in the Plain Model}}
\author{Yehuda Lindell\thanks{Much of this work was carried out while
the author was at IBM T.J.Watson, New York.}}
\date{Department of Computer Science\\
Bar-Ilan University\\
Ramat Gan, 52900, {\sc Israel}\\
{\tt lindell@cs.biu.ac.il}\\
~\\
\today}
\newcommand{\smaller}{}
\newcommand{\ynote}[1]{{\sf (Yehuda's Note:} {\sl #1} {\sf EON)}}
\def\ni{\noindent}
\def\etal{{et~al.}}
\newcommand{\eqref}[1]{Eq.~(\ref{#1})}
\renewcommand{\ni}{\noindent}
\newcommand{\ms}{\medskip}
\renewcommand{\(}{{\em (}}
\renewcommand{\)}{{\em )}}
\newcommand{\eqdef}{\stackrel{\rm def}{=}}
\newcommand{\indist}{\stackrel{\rm c}{\equiv}}
\newcommand{\bitset}{\{0,1\}}
\newcommand{\rnd}{\in_R}
\renewcommand{\th}{{\rm th}}
\newcommand{\ov}{\overline}
\newcommand{\e}{\epsilon}
% \newcommand{\da}{d}
% \newcommand{\SD}{\bigtriangledown}
%---
\newtheorem{thm}{Theorem}%[section] % A counter for Theorems etc
\newcommand{\BT}{\begin{thm}} \newcommand{\ET}{\end{thm}}
\newtheorem{dfn}[thm]{Definition} %
\newcommand{\BD}{\begin{dfn}} \newcommand{\ED}{\end{dfn}}
%\newtheorem{corr}[thm]{Corollary} %
%\newcommand{\BCR}{\begin{corr}} \newcommand{\ECR}{\end{corr}}
\newtheorem{prop}[thm]{Proposition}
\newcommand{\BP}{\begin{prop}} \newcommand{\EP}{\end{prop}}
%---
\newtheorem{Ithm}{Theorem} % A counter for Theorems in Intro
\newcommand{\BIT}{\begin{Ithm}} \newcommand{\EIT}{\end{Ithm}}
\newtheorem{Icorr}[Ithm]{Corollary} % A counter for Theorems in Intro
\newcommand{\BICR}{\begin{Icorr}} \newcommand{\EICR}{\end{Icorr}}
%---
\newtheorem{lem}{Lemma}[section] % A counter for Lemmas etc
\newcommand{\BL}{\begin{lem}} \newcommand{\EL}{\end{lem}}
\newtheorem{clm}[lem]{Claim} %
\newcommand{\BCM}{\begin{clm}} \newcommand{\ECM}{\end{clm}}
\newtheorem{corr}[thm]{Corollary} %
\newcommand{\BCR}{\begin{corr}} \newcommand{\ECR}{\end{corr}}
\newtheorem{fact}[lem]{Fact} %
\newcommand{\BF}{\begin{fact}} \newcommand{\EF}{\end{fact}}
%---
\newtheorem{rem}{Remark}[section] % A counter for Theorems etc
\newcommand{\BR}{\begin{rem}} \newcommand{\ER}{\end{rem}}
%---
\newlength{\saveparindent}
\setlength{\saveparindent}{\parindent}
\newlength{\saveparskip}
\setlength{\saveparskip}{\parskip}
%---
\newcounter{ctr}% counter used by the `enum' environment below (\usecounter requires it to exist)
\newenvironment{enum}{%
\begin{list}{{\rm
(\arabic{ctr})}\hfill}{\usecounter{ctr}\labelwidth=17pt%
\labelsep=6pt \leftmargin=23pt \topsep=2pt%
\setlength{\listparindent}{\saveparindent}%
\setlength{\parsep}{\saveparskip}%
\setlength{\itemsep}{2pt}}}{\end{list}}
%---
\newenvironment{newitemize}{%
\begin{list}{$\bullet$}{\labelwidth=8pt%
\labelsep=5pt \leftmargin=15pt \topsep=3pt%
\setlength{\listparindent}{\saveparindent}%
\setlength{\parsep}{\saveparskip}%
\setlength{\itemsep}{6pt} }}{\end{list}}
%---
\newenvironment{newdescription}{%
\begin{list}{$\bullet$}{\labelwidth=8pt%
\labelsep=7pt \leftmargin=15pt \topsep=3pt%
\setlength{\listparindent}{0pt}% \saveparindent}%
\setlength{\parsep}{3pt} %\saveparskip}%
\setlength{\itemsep}{3pt} }}{\end{list}}
\newcommand{\BDE}{\begin{newdescription}}
\newcommand{\EDE}{\end{newdescription}}
%---
\newcommand{\BE}{\begin{enumerate}}
\newcommand{\EE}{\end{enumerate}}
\newcommand{\BI}{\begin{newitemize}}
\newcommand{\EI}{\end{newitemize}}
\def\blackslug
{\hbox{\hskip 1pt\vrule width 8pt height 8pt depth 1.5pt\hskip 1pt}}
\def\qed{\quad\blackslug\lower 8.5pt\null\par}
\def\qqed{$\Box$}
\newcommand{\dist}{{\rm dist}}
\newcommand{\A}{{\cal A}}
\newcommand{\F}{{\cal F}}
\newcommand{\U}{{\cal U}}
\newcommand{\B}{{\cal B}}
\renewcommand{\S}{{\cal S}}
\newcommand{\D}{{\cal D}}
\newcommand{\R}{{\sf R}}
\newcommand{\N}{{\sf N}}
\newcommand{\Z}{{\sf Z}}
\newcommand{\vol}{{\rm vol}}
\newcommand{\prob}{{\rm Pr}}
\newcommand{\poly}{{\rm poly}}
\newcommand{\union}{{\cup}}
\newcommand{\GapCVP}{{\tt GapCVP}}
\newcommand{\GapSVP}{{\tt GapSVP}}
\newcommand{\NP}{{\cal NP}}
\renewcommand{\P}{{\cal P}}
\newcommand{\qP}{{\widetilde\P}}
\newcommand{\coNP}{{\rm co}{\cal NP}}
\newcommand{\AM}{{\cal AM}}
\newcommand{\BPP}{{\cal BPP}}
\newcommand{\coAM}{{\rm co}{\cal AM}}
\newcommand{\yes}{{\sc yes}}
\newcommand{\no}{{\sc no}}
\newtheorem{prot}{Protocol} % A counter for Protocols
\newcommand{\BPR}{\begin{prot}} \newcommand{\EPR}{\end{prot}}
\newtheorem{construct}{Construction} % A counter for Protocols
\newcommand{\BCT}{\begin{construct}} \newcommand{\ECT}{\end{construct}}
\newenvironment{proof}{\noindent{\bf Proof:~~}}{\qed}
\newcommand{\BPF}{\begin{proof}} \newcommand {\EPF}{\end{proof}}
\newenvironment{proofsketch}{\noindent{\bf Proof Sketch:~~}}{\qed}
\newcommand{\BPFS}{\begin{proofsketch}} \newcommand {\EPFS}{\end{proofsketch}}
\newcommand{\remove}[1]{}
\newcommand{\RK}{{\sf RKZK}}
\newcommand{\POK}{{\sf POK}}
\newcommand{\BZK}{{\sf BZK}}
\newcommand{\ideal}{\mbox{\sc ideal}}
\newcommand{\real}{\mbox{\sc real}}
\newcommand{\zkhybrid}{\mbox{\sc zk-hybrid}}
%\newcommand{\memberzk}{\mbox{{\footnotesize\sc member}{\sc zk-hybrid}}}
\newcommand{\memberzk}{\mbox{\sc memberzk-hybrid}}
\newcommand{\tpi}{\tilde \Pi}
\newcommand{\ta}{\tilde \A}
\newcommand{\hpi}{\hat \Pi}
\newcommand{\ha}{\hat \A}
\newcommand{\adv}{{\sf adv}}
\begin{document}
\begin{titlepage}
\maketitle
\begin{abstract}
Until recently, most research on the topic of secure computation
focused on the stand-alone model, where a single protocol execution
takes place. In this paper, we construct protocols for the setting
of {\em bounded-concurrent self-composition}, where a (single)
secure protocol is run many times concurrently, and there is a
predetermined bound on the number of concurrent executions. In
short, we show that {\em any}\/ two-party functionality can be
securely computed under bounded-concurrent self-composition, in the
{\sf plain model} (where the only setup assumption made is that the
parties communicate via authenticated channels). Our protocol
provides the first feasibility result for general two-party
computation in the plain model, {\em for any model of concurrency}.
All previous protocols assumed a trusted setup phase in order to
obtain a common reference string. On the downside, the number of
rounds of communication in our protocol is super-linear in the bound
on the number of concurrent executions. Subsequent to this work,
constant-round protocols and protocols for the multiparty case were
presented by Pass and Rosen (FOCS 2003) and by Pass (STOC 2004). We
remark that this paper contains the full version of the upper-bound
portion of the extended abstract presented by the author at STOC
2003 \cite{L03b} (the lower bound from \cite{L03b} appears in
\cite{L06} together with other lower bounds from \cite{L04a}).
\end{abstract}
\vfill
\paragraph{Keywords:} secure two-party computation, concurrent
self-composition, setup assumptions.
\thispagestyle{empty}
\end{titlepage}
\section{Introduction}
\ni In the setting of two-party computation, two parties with
respective private inputs $x$ and $y$, wish to jointly compute a
functionality $f(x,y) = (f_1(x,y),f_2(x,y))$, such that the first
party receives $f_1(x,y)$ and the second party receives
$f_2(x,y)$. This functionality may be probabilistic, in which case
$f(x,y)$ is a random variable. Loosely speaking, the security
requirements are that nothing is learned from the protocol other
than the output (privacy), and that the output is distributed
according to the prescribed functionality (correctness). These
security requirements must hold in the face of a malicious
adversary who controls one of the parties and can arbitrarily
deviate from the protocol instructions (i.e., in this work we
consider static adversaries). Powerful feasibility results have
been shown for this problem, demonstrating that {\em any}\/
two-party probabilistic polynomial-time functionality can be
securely computed, assuming the existence of trapdoor
permutations~\cite{Y,GMW2}.
\paragraph{Security under concurrent composition.}
The above-described feasibility results relate only to the
stand-alone setting, where a single pair of parties run a single
execution. A more general (and realistic) setting relates to the
case that many protocol executions are run concurrently within a
network. Unfortunately, the security of a protocol in the
stand-alone setting does not necessarily imply its security under
concurrent composition. Therefore, an important research goal is
to re-establish the feasibility results of the stand-alone setting
for the setting of concurrent composition.
We stress that the need for security under concurrent composition
is not merely an issue of efficiency. Specifically, one cannot
claim that in order to ensure security, all executions should just
be carried out sequentially (recall that security under sequential
composition is guaranteed by the stand-alone
definitions~\cite{C00}). This is because the honest parties must
actively coordinate their executions in order to ensure
sequentiality. However, the honest parties may not even know of
each other's existence. Therefore, they cannot coordinate their
executions, and a dishonest adversary that interacts with a number
of parties can schedule the executions concurrently. The protocol
being used must therefore be secure under concurrent composition.
The study of concurrent composition of protocols was initiated
by~\cite{FS90} in the context of witness indistinguishability, and
was next considered by~\cite{DDN} in the context of
non-malleability. The majority of the work that followed on
concurrent composition was for the problem of concurrent
zero-knowledge~\cite{DNS}. In this scenario, a prover runs many
copies of a protocol with many verifiers, and zero-knowledge must be
preserved. The feasibility of concurrent zero-knowledge in the {\em
plain model}\/ (where no trusted setup or preprocessing phase is
assumed) was first demonstrated by~\cite{RK}. In general, the issue
of concurrent zero-knowledge has received much attention, resulting
in a rather exact understanding of the round-complexity of black-box
concurrent zero-knowledge~\cite{KPR,R00,CKPR,KP,PRS}. Other specific
problems that have been studied in the context of concurrent
composition are oblivious transfer~\cite{GM} and authenticated
Byzantine agreement~\cite{LLR}. In all of the above-mentioned work,
the setting that is considered is that of {\sf concurrent
self-composition}, where a {\em single}\/ protocol is run
concurrently any polynomial number of times in a network (we stress
that the secure protocol in question is the only protocol being
executed). Furthermore, all the executions are run by the same set
of parties, and each party plays the same role throughout all of
these executions (e.g., for zero knowledge, one party always plays
the prover and the other party always plays the verifier).%
\footnote{Actually, such a setting should be called {\sf concurrent
self-composition with a single set of parties}. See~\cite{L03d} for
a full taxonomy of types of protocol
composition. \label{foot1}} %
A different notion, called {\sf general composition}, considers a
very broad setting where many sets of (possibly different) parties
run many protocols, and secure protocols may run concurrently with
arbitrary other protocols.%
\footnote{According to the taxonomy of~\cite{L03d}, this should
actually be called {\sf concurrent general composition with
arbitrary sets of parties}.} %
Such a setting realistically models the security needs of modern
networks like the Internet, where many different arbitrary
protocols are executed by many different sets of parties (with
possibly related inputs). The first definition and composition
theorem for concurrent general composition was presented
by~\cite{PW00} who considered the case that a secure protocol is
executed {\em once}\/ concurrently with another arbitrary protocol.%
\footnote{An earlier reference to this problem with general ideas
about how to define security appeared in~\cite[Appendix A]{C97}.} %
The unbounded case, where a secure protocol can be run any
polynomial number of times in an arbitrary network, was then
considered by the framework of universal composability~\cite{C01}.
Loosely speaking, universal composability is a specific security
definition with the important property that any protocol that is
proven secure under this definition is guaranteed to remain secure
under (unbounded) concurrent general composition. Due to the
robust security guarantees that are provided, universally
composable protocols are highly desirable. Fortunately, such
protocols exist. In fact, it has been shown that assuming an
honest majority, {\em any}\/ multi-party functionality can be
securely computed in a universally composable way~\cite{C01}.
Furthermore, in the {\em common reference string model}, any
two-party or multi-party functionality can be computed under this
definition, for {\em any}\/ number of corrupted parties. (In the
common reference string model, all parties are given access to a
string that is ideally chosen from some predetermined
distribution.)
On the negative side, in the plain model and without an honest
majority (as in the two-party case), there exist large classes of
functionalities that cannot be securely computed according to the
definitions of universal composability~\cite{CF,C01,CKL}. Note
that these impossibility results relate specifically to the
definition of universal composability and not necessarily to the
possibility of obtaining protocols that are secure under
concurrent general composition by some other definition.
Nevertheless, impossibility results have also been demonstrated
for any definition achieving concurrent {\em general}\/
composition~\cite{L03c}, and even for concurrent {\em self}\/
composition~\cite{L04a}, where impossibility holds in the plain
model and in the case of no honest majority. (We note that the
classes of functionalities that cannot be securely computed are
not exactly the same for all these impossibility results. However,
large classes of functionalities are ruled out in each case.)
\paragraph{Bounded versus unbounded concurrency.}
As we have described above, broad impossibility results exist for
both concurrent general and self-composition. However, the
impossibility results in~\cite{L04a} for concurrent self-composition
rely inherently on the fact that the secure protocol in question can
be run concurrently {\em any}\/ polynomial number of times; this
property is called {\sf unbounded concurrency}. In contrast, a more
limited model of concurrency, first considered by~\cite{B},
restricts the number of concurrent executions to be some fixed,
predetermined polynomial in the security parameter. Furthermore, the
protocol design can depend on this number (i.e., if the bound is
$n^2$, then the protocol can be designed so that it is secure for at
most $n^2$ concurrent executions, and no more). This model is called
{\sf bounded concurrency}, and when $m$ is the maximum number of
concurrent executions, we talk about {\sf $m$-bounded concurrent
composition} (note that $m=m(n)$ is a fixed polynomial in the
security parameter). Importantly, the possibility of obtaining
protocols that are secure under $m$-bounded concurrent self
composition was not ruled out in~\cite{L04a}.
\paragraph{The model of concurrency.}
Before proceeding to describe our results, we describe the model of
concurrency considered here in more detail. In this work, we
consider self-composition of two-party protocols, where a single
pair of parties run the same protocol many times concurrently. This
is actually equivalent to the case where many different pairs of
parties run a protocol concurrently, with the following adversarial
limitation. Each party is designated to be either a first party
$P_1$ or a second party $P_2$ (this defines the role that it plays
in the protocol execution). The adversary is then only allowed to
corrupt a subset of the parties playing $P_1$ or a subset of the
parties playing $P_2$, but cannot simultaneously corrupt parties
playing $P_1$ and parties playing $P_2$. Such a scenario can model
some real-world scenarios, like a number of servers concurrently
interacting with many clients (and where we assume that there is no
simultaneous corruption of a server and a client). As we have
mentioned above, this is the exact model considered in the entire
body of work on concurrent zero-knowledge. Note that this is a
rather limited model, both because self-composition (rather than
general composition) is considered, and because only a single set of
parties is considered (equivalently, the adversary is severely
limited in its corruption capability). However, we stress that no
general feasibility results have been shown for {\em any}\/ model of
concurrency, without assuming some trusted setup phase (beyond that
required for authenticated channels) or an honest majority. This
therefore serves as a good first step. In addition, studying more
limited models deepens our understanding of exactly where the line
between feasibility and infeasibility lies, thus providing an
``explanation'' of sorts as to the source of the impossibility
results for concurrent composition. (We note that the fact that we
consider self, rather than general, composition is also very
limiting. However, this is essential due to the impossibility
results of~\cite{L03c} that hold even when the secure protocol is
executed only once in the setting of concurrent general
composition.)
\paragraph{Our results.}
In this work, we show that any two-party functionality can be
securely computed under bounded-concurrent self-composition and in
the plain model. More specifically, we prove the following theorem:
\BIT
Assume that enhanced trapdoor permutations%
\footnote{Enhanced trapdoor permutations have the property that a
random element generated by the domain sampler is hard to invert,
even given the random coins used by the sampler;
see~\cite[Appendix C]{GoldreichBook2}. We note that the
construction of~\cite{GMW2} for secure two-party computation in
the stand-alone model also assumes the existence of enhanced
trapdoor permutations.} %
and collision resistant hash functions exist. Then, for any
probabilistic polynomial-time two-party functionality $f$ and for
any $m$, there exists a protocol\, $\Pi$ that securely computes
$f$ under $m$-bounded concurrent composition in the plain model.
\label{thm:informal-upper}
\EIT
\ni We now provide a very brief and informal outline of the proof
of the theorem. We begin by observing that secure two-party
computation that composes concurrently can be obtained in a hybrid
model where the parties have (concurrent) access to a trusted
party computing the ideal zero-knowledge functionality. This was
formally demonstrated in~\cite{CLOS}. Given this observation, the
question is whether or not one can transform protocols that use
this ideal zero-knowledge functionality into protocols that run in
the real model (without any trusted help). Of course, this
transformation must preserve the concurrent composition, or
bounded-concurrent composition, of the protocol.
A naive implementation of the above idea is to replace the ideal
zero-knowledge calls with any concurrent zero-knowledge protocol.
However, two problems arise with this idea. First, as we have
mentioned, concurrent zero-knowledge considers a scenario where
the protocol is run concurrently with itself only. Thus, it is not
clear that the zero-knowledge simulation can be accomplished if
the protocol is run concurrently to other protocols as well.
Second, a problem relating to the malleability of protocols
arises. That is, during the concurrent executions, the adversary
may simultaneously verify and prove zero-knowledge proofs. Thus,
it can execute a man-in-the-middle attack on the zero-knowledge
proofs. This is a problem because when some of the zero-knowledge
protocols are simulated, we are no longer guaranteed that the
protocol proven by the adversary remains sound. In particular, the
adversary may be able to prove false statements in this setting,
and this would cause the simulation of the secure protocol to
fail. This second problem is solved by having the parties use
fundamentally different zero-knowledge proofs.%
\footnote{We note that the recent result of~\cite{B02} for
``non-malleable coin-tossing'' does not solve this problem. This
is because \cite{B02} relies on a very specific scheduling which
can be enforced in their scenario, but cannot be
enforced here.} %
Specifically, one party proves statements with the black-box
zero-knowledge protocol of~\cite{RK}, while the other party proves
statements with the non black-box zero-knowledge protocol
of~\cite{B,BG}. It turns out that a careful choice of parameters
for these protocols solves the problem of malleability. However,
the first problem (of concurrency with other protocols) still
remains. This is solved as follows. The protocol of~\cite{B} can
easily be modified so that it remains zero-knowledge when run
concurrently with other protocols (intuitively, this is the case
because there is no rewinding). However, the protocol of~\cite{RK}
is more problematic. We therefore devise a {\em new}\/ black-box
simulation strategy for~\cite{RK} in the setting of $m$-bounded
concurrency, that enables it to be used directly as a sub-protocol
of another protocol. Having solved these problems, we are able to
replace the ideal zero-knowledge calls in any protocol with the
specific $m$-bounded concurrent zero-knowledge protocols described
above. Putting this transformation together with a protocol that
is secure when given access to the ideal zero-knowledge
functionality, we obtain a protocol that is secure under
$m$-bounded concurrent composition in the real model. The number
of rounds of communication in our protocol is
$O(m\cdot\kappa(n))$, where $n$ is the security parameter and
$\kappa(\cdot)$ is any super-constant function (e.g.,
$\kappa(n)=\log\log n$). We note that a lower bound of $m$ rounds
for protocols that are proven using only black-box simulation has
been proven in~\cite{L03b}. Nevertheless, our protocol is proven
using both black-box and non black-box techniques (and is thus not
a ``tight'' upper-bound).
We conclude with a remark about {\em efficiency}. Our protocol
requires $O(m\kappa(n))$ rounds to obtain security for $m$
concurrent executions. It is therefore far from being ``reasonably
efficient''. Nevertheless, the focus of this paper is {\em not}\/
efficiency. Rather, we believe that establishing the feasibility
of obtaining secure computation in any reasonable model of
concurrency is of great importance, irrespective of efficiency.
Furthermore, our constructions may lead to more efficient
protocols (see below in subsequent work).
\paragraph{Discussion: setup assumptions and authenticated channels.}
The {\sf plain model} classically refers to a scenario where the
honest parties communicate via authenticated channels, but no
additional setup is assumed. In most works, the assumption on the
existence of authenticated channels is not articulated as a
``setup assumption'', but rather as part of the network model.
However, in reality, some sort of setup assumption is also
required for achieving authenticated channels. We argue that this
assumption should be explicitly treated as such.
We note that despite the fact that authenticated channels {\em is}\/
a setup assumption, it is a strictly weaker one than that required
for obtaining a common reference string, for example. Specifically,
in order to achieve authenticated channels, it suffices to have a
certificate authority who reliably posts keys on a bulletin
board~\cite{C04}. (We stress that there is no need for the
certificate authority to run any protocol with the parties posting
keys and, in particular, proofs of knowledge are not necessary.)
Furthermore, upon inspection of the lower bounds for concurrent
self-composition of~\cite{L04a}, it is easy to see that they extend
to a model where such a ``secure bulletin board'' exists. Thus, a
common reference string cannot be generated from the same assumption
used for authenticated channels (or else the lower bounds
of~\cite{L04a} would not hold).
\paragraph{Subsequent and related work.}
Subsequent to this work, a {\em constant-round}\/ protocol for
bounded-concurrent secure two-party computation was presented
in~\cite{PR}. This result was later improved and extended
in~\cite{P04} to provide a protocol that {\bf (a)} works for
multiparty functionalities, and {\bf (b)} can be extended to the
general setting where arbitrary sets of parties run the protocol
(and any subset of parties may be corrupted). The high-level
framework developed in this paper has been adopted in this
subsequent work of~\cite{PR,P04}, which makes use of the following
observations made here in our work:
\BE
\item
The problem of general secure computation in the setting of
concurrent self-composition can be reduced to a zero-knowledge proof
of membership functionality (rather than a seemingly more stringent
zero-knowledge proof of knowledge functionality).
\item
The bounded-concurrent zero-knowledge protocol of~\cite{B,BG}
remains secure irrespective of the other protocols being run in
the network (as long as the communication complexity is
appropriately bounded).
\item
The problem of malleability (and man-in-the-middle attacks) in
general secure computation under concurrent self-composition can be
solved more easily by having the different parties run different
zero-knowledge protocols.
\EE
Specifically, the works of~\cite{PR,P04} focus on constructing
improved versions of the zero-knowledge proof of membership
functionality used here. Instead of using two completely different
zero-knowledge protocols as we do (something which turns out to be
a great limitation), these latter works show how variants of the
zero-knowledge protocol of~\cite{B,BG} can be used by all parties
involved, while setting the parameters differently for every
participating party so that man-in-the-middle attacks are
prevented. This solves both the problem of large round complexity
(because the protocol of~\cite{B,BG} has only a constant number of
rounds) and the limitation to two parties (because it is possible
to have many parties, each with a different setting of the
parameters). The latter work of~\cite{P04} (that builds also
on~\cite{PR}) achieves the best one could hope for in the setting
of bounded concurrency -- constant round complexity and multiparty
computation.
We conclude by noting that both our protocol and the protocols
of~\cite{PR} and~\cite{P04} suffer from very high communication
complexity in bandwidth. Specifically, for $m$ concurrent
executions, the bandwidth of the protocols is at least
$\Omega(mn^2)$, where $n$ is the security parameter. We note that a
linear dependence of the bandwidth on $m$ is actually inherent. That
is, it has been shown that there exist many functionalities such
that any protocol that securely computes these functionalities under
$m$-bounded concurrent self-composition must have communication
complexity of at least~$m$~\cite{L04a}.
%\paragraph{Open questions.} Our protocol (and the protocol
%of~\cite{PR}) only solve the problem of {\em two}-party
%computation. It is not known how to achieve {\em multi}-party
%secure computation for the setting of bounded concurrent self
%composition. In addition, our protocol only works for the case
%that each party plays the same role in each execution. Thus it
%cannot be used in a network where different pairs of parties run
%the protocol, and any individual party may sometimes play the role
%of party~1 and sometimes the role of party~2. (In the taxonomy
%of~\cite{L03d}, this would be called bounded concurrent self
%composition with {\em arbitrary sets of parties}.) In this
%context, it would also be interesting to consider the case of
%adaptive corruptions (we only consider static corruptions here).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Definitions: \boldmath{$m$}-Bounded Concurrent Secure Computation\label{sec:secure-comp}}
\ni In this section we present the definition for $m$-bounded
concurrent secure two-party computation. The basic description and
definition of secure computation follows~\cite{GoLe,MR,Be1,C00}.
We denote computational indistinguishability by $\indist$, and the
security parameter (and, for simplicity, the lengths of the
parties' inputs) by $n$. All parties and the adversary run in time
that is polynomial in $n$.
\paragraph{Two-party computation.}
A two-party protocol problem is cast by specifying a random
process that maps pairs of inputs to pairs of outputs (one for
each party). We refer to such a process as a {\sf functionality}
and denote it
$f:\bitset^*\times\bitset^*\to\bitset^*\times\bitset^*$, where $f
= (f_1,f_2)$. That is, for every pair of inputs $(x,y)$, the
output-pair is a random variable $(f_1(x,y),f_2(x,y))$ ranging
over pairs of strings. The first party (with input $x$) wishes to
obtain $f_1(x,y)$ and the second party (with input $y$) wishes to
obtain $f_2(x,y)$. We often denote such a functionality by $(x,y)
\mapsto (f_1(x,y),f_2(x,y))$. Thus, for example, the
zero-knowledge proof of knowledge functionality for a relation
$R$ is denoted by $((x,w),x) \mapsto (\lambda,R(x,w))$. In the
context of concurrent composition, each party actually receives a
vector of inputs of polynomial length, and the aim of the parties
is to jointly compute $f(x_i,y_i)$ for every $i$. (According to
this description, all the honest party's inputs are fixed at the
onset. However, our protocols are also secure when the honest
party's inputs may be chosen adaptively throughout the execution.
This will be discussed later.) The fact that $m$-bounded
concurrency is considered relates to the allowed scheduling of
messages by the adversary in the protocol executions; see the
description of the real model below.
We note that our results here also apply to {\em reactive
functionalities}\/ where inputs and outputs are supplied over a
number of stages. Such a functionality can be modeled by a
probabilistic polynomial-time interactive machine that receives
inputs and supplies outputs. This machine can keep state and thus
the inputs from previous computations can influence the outputs of
later ones.
\paragraph{Adversarial behaviour.} In this work we consider a
malicious, static adversary. That is, the adversary controls one
of the parties (who is called corrupted) and may then interact
with the honest party while arbitrarily deviating from the
specified protocol.%
\footnote{We follow the approach of~\cite{GoldreichBook2} by
defining the two-party case as one where the adversary controls
exactly one of the parties. This is in contrast to a more general
approach in which the adversary may control zero, one or both
parties. This approach somewhat simplifies the presentation,
without weakening the results. Specifically, assuming {\em
authenticated channels}, any protocol that is secure when exactly
one party is corrupted can be transformed into a protocol that is
secure when any number (i.e., 0, 1 or 2) of the parties are
corrupted, in the following way. First, using the given
authenticated channels, the two parties establish secure channels
for private and reliable communication. Next, the original
protocol is executed over this private channel. It follows that if
no parties are corrupted, then the adversary learns nothing
(except for the length of the messages and so the length of the
inputs). Furthermore, if one party is corrupted, then the effect
is exactly the same as for the original protocol, where security
is guaranteed. Finally, the case of both parties corrupted is
always straightforward. We conclude that the resulting protocol is
secure in the more general case, as required.} %
The focus of this work is not on fairness. We therefore present a
definition where the adversary always receives its own output and
can then decide when (if at all) the honest party will receive its
output. The scheduling of message delivery is decided by the
adversary.
\paragraph{Security of protocols (informal).}
The security of a protocol is analyzed by comparing what an
adversary can do in the protocol to what it can do in an ideal
scenario that is trivially secure. This is formalized by
considering an {\em ideal}\/ computation involving an
incorruptible {\em trusted third party}\/ to whom the parties send
their inputs. The trusted party computes the functionality on the
inputs and returns to each party its respective output. Unlike in
the case of stand-alone computation, here the trusted party
computes the functionality many times, each time upon different
inputs. Loosely speaking, a protocol is secure if any adversary
interacting in the real protocol (where no trusted third party
exists) can do no more harm than if it was involved in the
above-described ideal computation.
%\paragraph{\boldmath{$m$}-bounded execution in the ideal model.}
\paragraph{Concurrent execution in the ideal model.}
Let $p(n)$ be a polynomial in the security parameter. Then, an
ideal execution with an adversary who controls either $P_1$ or
$P_2$ proceeds as follows:
\begin{description}
\item[\em Inputs:]
The honest party and adversary each obtain a vector of $p(n)$
inputs, each of length $n$; denote this vector by $\ov w$ (i.e.,
$\ov w=\ov x$ or $\ov w=\ov y$).
\item[\em Honest party sends inputs to trusted party:]
The honest party sends its entire input vector $\ov w$ to the
trusted party.
\item[\em Adversary interacts with trusted party:]
For every $i=1,\ldots,p(n)$, the adversary can send $(i,w_i')$ to
the trusted party, for any $w_i'\in\bitset^n$ of its choice. Upon
sending this pair, it receives back its output based on $w_i'$ and
the input sent by the honest party. (That is, if $P_1$ is
corrupted, then the adversary receives $f_1(w_i',y_i)$ and if
$P_2$ is corrupted then it receives $f_2(x_i,w_i')$.) The
adversary can send the $(i,w_i')$ pairs in any order it wishes and
can also send them {\em adaptively}\/ (i.e., choosing inputs based
on previous outputs). The only limitation is that for any $i$,
{\em at most one pair}\/ indexed by $i$ can be sent to the trusted
party.%
\footnote{The fact that the adversary can query the trusted party
once for each execution is essential for obtaining meaningful
security. Otherwise, the adversary (controlling $P_2$) could
obtain a series of values $f_2(x,y),f_2(x,y'),f_2(x,y''),\ldots$
for a single input value $x$ belonging to the honest party.
Furthermore, it may choose $y,y',y''$ etc.\ adaptively based on
previous outputs. This may reveal much more information than
intended. For example, if $f$ is the ``less than'' function, then
the adversary could conduct a binary search on the input range and
find the exact value of $x$.} %
\item[\em Trusted party answers honest party:]
Having received all of its own outputs, the adversary specifies
which outputs the honest party receives. That is, the adversary
sends the trusted party a set $I\subseteq\{1,\ldots,p(n)\}$. Then,
the trusted party supplies the honest party with a vector $\ov v$
of length $p(n)$ such that for every $i\not\in I$, $v_i=\bot$ and
for every $i\in I$, $v_i$ is the party's output from the $i^\th$
execution. (That is, if $P_1$ is honest, then for every $i\in I$,
$v_i=f_1(x_i,w_i')$ and if $P_2$ is honest, then
$v_i=f_2(w_i',y_i)$.)
\item[\em Outputs:]
The honest party always outputs the vector $\ov v$ that it
obtained from the trusted party. The adversary may output an
arbitrary (probabilistic polynomial-time computable) function of
its initial input and the messages obtained from the trusted
party.
\end{description}
Let $f:\bitset^*\times\bitset^*\mapsto\bitset^*\times\bitset^*$ be
a functionality, where $f = (f_1,f_2)$, and let $\S$ be a
non-uniform probabilistic polynomial-time machine (representing
the ideal-model adversary). Then, the {\sf ideal execution of $f$}
(on input vectors $(\ov x,\ov y)$ of length $p(n)$ and auxiliary
input $z$ to $\S$), denoted $\ideal_{f,\S}(\ov x,\ov y,z)$, is
defined as the output pair of the honest party and $\S$ from the
above ideal execution.
We note that the definition of the ideal model does not include
any reference to the bound $m$ on the concurrency. This is because
this bound is relevant only to the scheduling allowed to the
adversary in the real model; see below. However, the fact that a
concurrent setting is considered can be seen from the
above-described interaction of the adversary with the trusted
party. Specifically, the adversary is allowed to obtain outputs in
any order that it wishes, and can choose its inputs adaptively
based on previous outputs. This is inevitable in a concurrent
setting where the adversary can schedule the order in which all
protocol executions take place.
\paragraph{Execution in the real model.}
We next consider the real model in which a real two-party protocol
is executed (and there exists no trusted third party). Let $p(n)$
and $m=m(n)$ be polynomials, let $f$ be as above and let $\Pi$ be
a two-party protocol for computing $f$. Furthermore, let $\A$ be a
non-uniform probabilistic polynomial-time machine that controls
either $P_1$ or $P_2$. Then, the {\sf real $m$-bounded concurrent
execution of $\Pi$} (on input vectors $(\ov x,\ov y)$ of length
$p(n)$ and auxiliary input $z$ to $\A$), denoted
$\real^m_{\Pi,\A}(\ov x,\ov y,z)$, is defined as the output pair
of the honest party and $\A$, resulting from $p(n)$ executions of
the protocol interaction, where the honest party always inputs its
$i^\th$ input into the $i^\th$ execution. The scheduling of all
messages throughout the executions is controlled by the adversary.
That is, the execution proceeds as follows. The adversary sends a
message of the form $(i,\alpha)$ to the honest party. The honest
party then adds $\alpha$ to the view of its $i^\th$ execution of
$\Pi$ and replies according to the
instructions of $\Pi$ and this view.%
\footnote{Notice that the honest party runs each execution of
$\Pi$ obliviously to the other executions. Thus, this is stateless
composition.} %
The adversary continues by sending another message $(j,\beta)$,
and so on. When {\sf unbounded concurrency} is considered, {\em
any}\/ scheduling of the messages by the adversary is allowed. In
contrast, in the setting of {\sf $m$-bounded concurrency}, the
scheduling by the adversary must fulfill the following condition:
for every execution $i$, from the time that the $i^\th$ execution
begins until the time that it ends, messages from at most $m$
different executions can be sent. (Formally, view the schedule as
the ordered series of messages of the form $({\sf index,message})$
that are sent by the adversary. Then, in the interval between the
beginning and termination of any given execution, the number of
different indices viewed can be at most $m$.) We note that this
definition of concurrency covers the case that $m$ executions are
run simultaneously. However, it also includes a more general case
where many more than $m$ executions take place, but each execution
overlaps with at most $m$ other executions.
\paragraph{Security as emulation of a real execution in the ideal
model.} Having defined the ideal and real models, we can now
define security of protocols. Loosely speaking, the definition
asserts that a secure two-party protocol (in the real model)
emulates the ideal model (in which a trusted party exists). This
is formulated by saying that for every real-model adversary there
exists an ideal model adversary that can simulate an execution of
the secure real-model protocol.
\BD{\em($m$-bounded concurrent secure computation):}
\label{def:conc-security}
Let $m=m(n)$ be a polynomial and let $f$ and $\Pi$ be as above.
Protocol\, $\Pi$ is said to {\sf securely compute $f$ under
$m$-bounded concurrent composition} if for every real-model
non-uniform probabilistic polynomial-time adversary $\A$
controlling party $P_i$ for $i\in\{1,2\}$, there exists an
ideal-model non-uniform probabilistic polynomial-time adversary
$\S$ controlling $P_i$, such that for every polynomial $p(n)$,
$$\left\{\vphantom{2^{2^2}}\ideal_{f,\S}(\ov x,\ov y,z)\right\}
_{n\in\N;\ov x,\ov y\in(\bitset^n)^{p(n)};z\in\bitset^*}
\stackrel{\rm c}{\equiv}
\left\{\vphantom{2^{2^2}}\real^m_{\Pi,\A}(\ov x,\ov y,z)\right\}
_{n\in\N;\ov x,\ov y\in(\bitset^n)^{p(n)};z\in\bitset^*}
$$
\ED
%Definition~\ref{def:conc-security} follows a liberal
%interpretation of ``efficient simulation'' in that it allows the
%ideal-model simulator/adversary to run in {\em expected}\/
%polynomial-time. Formally, an interactive machine $M_1$ is {\sf
%expected polynomial-time} if there exists a (single) polynomial
%$q(\cdot)$ such that for {\em every}\/ (possibly unbounded)
%machine $M_2$, the expected running time of $M_1$ (upon input of
%length $n$) when interacting with $M_2$ is bounded by $q(n)$.
\paragraph{Remark -- adaptively chosen inputs.}
Definition~\ref{def:conc-security} is overly simplistic and
limited in that it assumes that the honest party's inputs are
fixed before any execution begins. However, our protocol also
works for a more general definition where inputs for later
executions can depend on outputs of earlier executions;
see~\cite{L04a} for such a definition. We present this simpler
definition for the sake of clarity and because the issue of
adaptively chosen inputs versus a-priori fixed inputs actually has
no effect on our construction.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Tools for our Protocol -- Zero-Knowledge}
In this section, we develop the basic tools needed for proving
Theorem~\ref{thm:informal-upper}. As we have described in the
Introduction, this involves the construction of zero-knowledge
proofs that can be used as subprotocols within any other protocol,
in the setting of bounded concurrency. We use the standard
definitions for zero-knowledge and proofs of knowledge,
see~\cite{oded-book}, and assume familiarity with these notions.
\subsection{The Zero-Knowledge Arguments of Knowledge of
\protect\cite{BL}}
\label{sec:zkpok}
The zero-knowledge arguments of knowledge of \cite{BL} have the
interesting property that {\em both}\/ simulation and extraction
take place by simulating zero-knowledge proofs (that are used as
subprotocols). That is, the argument system is such that both the
prover and the verifier give zero-knowledge proofs of
membership during the protocol.%
\footnote{In a {\em proof of membership}, the verifier is
convinced that a certain statement is correct. However, in a {\em
proof of knowledge}, the verifier is also convinced that the
prover actually holds a witness for this statement.} %
Then, the simulator strategy essentially just involves the
simulation of the subproof given by the prover to the verifier.
Likewise, the extraction strategy essentially just involves the
simulation of the subproof given by the verifier to the prover.
This is helpful because we can focus on the one issue of
simulating zero-knowledge within our setting, and we obtain both
zero-knowledge simulation {\em and}\/ knowledge extraction. The
remainder of this section describes how this is achieved.
\paragraph{Commit-with-extract commitment schemes.}
A central tool in the construction of the zero-knowledge arguments
of knowledge of~\cite{BL} is a perfectly-binding commitment scheme
with the following {\em extraction}\/ property: for every
committer, there exists a commitment {\em extractor}\/ who is able
to extract the value being committed to by the committer. As we
will see below, the extractor for the commit-with-extract scheme
of~\cite{BL} works by simulating a zero-knowledge proof of
membership. We begin by describing how such a commitment scheme
helps in obtaining zero-knowledge arguments of knowledge:
\paragraph{Zero-knowledge arguments of knowledge using commit-with-extract.}
Given a commit-with-extract commitment scheme, a zero-knowledge
argument of knowledge can be constructed as follows. The prover
commits to a witness using the commit-with-extract scheme, and
then proves that it committed to a valid witness using a
zero-knowledge proof of membership. Intuitively, a knowledge
extractor for the argument of knowledge simply uses the extractor
of the commit-with-extract scheme. By the soundness of the proof
of membership provided by the prover in the second stage of the
argument of knowledge, we are guaranteed that the value obtained
will be a correct witness with probability only negligibly less
than the probability that the verifier accepts the proof. A
simulator for this protocol is also easily obtained: just commit
to garbage in the commit-with-extract scheme and then simulate the
proof of membership in the second stage. See~\cite{BL} for more
details. The protocol description is as follows:
\BPR \label{prot:zkpok} {\em (zero-knowledge argument of knowledge for
$R \in \NP$):}
\BI
\item {\bf Common Input:} $x$
\item {\bf Auxiliary input to prover:} $w$ such that $(x,w) \in R$.
\item {\bf Part~1: } $P$ and $V$ run a commit-with-extract
protocol in which $P$ commits to the witness $w$.
\item {\bf Part~2: } $P$ proves to $V$ that it committed to a valid
witness $w$ in the previous step, using a zero-knowledge
proof \(or argument\) of membership.%
\footnote{Formally, let {\sf trans} be the transcript of the
commit-with-extract execution of part~$1$, and let $d$\/ be the
decommitment message that $P$ would use to decommit. Then, $P$
proves the NP-statement that there exists a value $d$\/ such that
$({\sf trans},d)$ defines the value $w$, and $(x,w) \in R$.} %
\EI
\EPR
\ni We note that \cite{BL} also demonstrate the existence of a
{\sf witness-extended emulator} for Protocol~\ref{prot:zkpok}, as
defined by~\cite{Lin}. Loosely speaking, a witness-extended
emulator is a machine that not only outputs a witness with the
required probability (as is required from a knowledge extractor),
but also outputs a simulated transcript of the interaction between
the prover and the verifier. Furthermore, whenever the transcript
of the interaction is such that the verifier accepts, then the
witness that is obtained is valid. To explain this further,
consider a case that the prover convinces the verifier in a real
interaction with probability $p$. Then, with probability
negligibly close to $p$, the above-described machine should output
an accepting transcript and a valid witness. Furthermore, with
probability negligibly close to $1-p$, the machine should output a
rejecting transcript (and we don't care about the witness). We
note that the transcript that is obtained is also {\em publicly
verifiable}, meaning that the verifier's accept or reject decision
can be efficiently obtained from the transcript. Witness-extended
emulation is often needed when a proof of knowledge is used as a
{\em subprotocol}\/ within a larger protocol; as is the case for
our constructions. See~\cite{BL} for more discussion on
witness-extended emulation.
\paragraph{The commit-with-extract scheme of \protect\cite{BL}.}
The scheme is based on the following non-interactive commitment
scheme that uses one-way permutations: Let $f$ be a one-way
permutation and let $b$ be a hard-core predicate of $f$. Then, in
order to commit to a bit $\sigma$, the committer chooses
$r\rnd\bitset^n$, lets $y=f(r)$ and sends $\langle y,v\rangle$ to
the receiver, where $v=b(r) \oplus \sigma$. Loosely speaking, the
commit-with-extract scheme is defined in the same way except that
the value $y=f(r)$ is chosen jointly by the committer and the
receiver using a coin-tossing protocol. By the security of the
coin-tossing protocol, $y$ is pseudorandom. Therefore, the hiding
property remains essentially the same as in the original scheme.
Likewise, because $f$ is a permutation, any $y$ defines a unique
value $b(f^{-1}(y))=b(r)$ and thus the scheme remains perfectly
binding (i.e., given any pair $(y,v)$, the committed value is
uniquely defined by $v\oplus b(f^{-1}(y))$). The novelty of the
scheme is that the extractor can {\em bias}\/ the outcome of the
coin-tossing protocol so that it knows the preimage $f^{-1}(y)$ of
$y$. In this case, the extractor can easily obtain the commitment
value $\sigma$, as desired. We now describe the protocol:
\BPR {\em (commit-with-extract bit commitment scheme):}
\BI
\item {\bf Input:} The committer has a bit $\sigma$ to be
committed to.
\item {\bf Commit phase:}
\BE
\item {\sf The committer chooses an enhanced trapdoor permutation:}%
\footnote{See~\cite[Appendix C]{GoldreichBook2} for the definition
of {\em enhanced}\/ trapdoor permutations. For the sake of
understanding the protocol here, it suffices to think of the case
that the domain of the permutation is $\bitset^n$.}
The committer chooses an enhanced trapdoor permutation $f$
along with its trapdoor $t$ and sends $f$ to the receiver.%
\footnote{For the sake of simplicity, we assume that the protocol
uses {\em certified}\/ \cite{FLS} enhanced trapdoor permutations
(for which the receiver can efficiently verify that $f$ is indeed
a permutation). However, actually any enhanced trapdoor
permutation can be used; see~\cite{BL}.} %
\item {\sf The parties run a coin-tossing protocol:}
\BE
\item \label{step1} The receiver chooses a random string $r_1 \rnd
\bitset^n$ and sends $c = {\sf Commit}(r_1;s)$ to the committer
{\em (}using any perfectly-binding commitment scheme $\sf Commit$
and a random string $s${\em )}.
\item The committer chooses a random string $r_2 \rnd \bitset^n$ and
sends $r_2$ to the receiver.
\item The receiver sends $r_1$ to the committer {\em (}without decommitting{\em )}.
\item \label{step2} The receiver proves that $r_1$ is the value that it
indeed committed to, using a zero-knowledge argument system.
Formally, the receiver proves that there exists a string $s$ such
that $c = {\sf Commit}(r_1;s)$.
\item The output of the coin-tossing phase is $r_1 \oplus r_2$.
\EE
\item {\sf The actual commitment:}
The committer computes $r= f^{-1}(r_1 \oplus r_2)$ and sends the
value $v = b(r) \oplus \sigma$ to the receiver.
\EE
\medskip
\item {\bf Reveal phase:}
\BE
\item The committer sends the receiver the string $r$.
\item The receiver checks that $f(r) = r_1 \oplus r_2$. If this is the case,
then it computes $b(r) \oplus v$ obtaining $\sigma$. Otherwise, it
outputs $\bot$.
\EE
\EI
\label{prot:commitextract}
\EPR
First note that if the coin-tossing protocol ensures that $r_1
\oplus r_2$ is pseudorandom, then the hiding property of the
commitment scheme is preserved. This is because the receiver
cannot predict $b(r)$ from a (pseudo-)randomly chosen $f(r)$ with
probability non-negligibly greater than $1/2$. We remark that as long as the
proof given by the receiver is {\em sound}, the security of the
coin-tossing protocol holds.
We conclude by briefly describing how a commitment extractor $CK$
can obtain the value $\sigma$ committed to by the committer. $CK$
works by biasing the outcome $r_1 \oplus r_2$ of the coin-tossing
protocol, so that it knows the preimage under $f$. More
specifically, $CK$ chooses a random string $r$, computes $f(r)$
and then makes the output $r_1 \oplus r_2$ equal $f(r)$. This is
clearly not possible for a real receiver to do (as the
coin-tossing protocol ensures that $f(r)$ is pseudorandom).
However, $CK$ can run the {\em simulator}\/ for the proof that the
receiver provides in step~\ref{step2} of the protocol. Thus, $CK$
sends a commitment to garbage in step~\ref{step1} of the protocol
and waits to receive the string $r_2$ from the committer. Upon
receiving $r_2$, extractor $CK$ computes $r_1 = f(r) \oplus r_2$
and sends $r_1$ to the committer (where $f(r)$ is obtained by
choosing a random $r$ and applying $f$ to $r$). Now, with
overwhelming probability, this is not the string that $CK$
committed to earlier. Thus $CK$ cannot honestly carry out the proof of
step~\ref{step2}. Instead, it runs the simulator for it (and by
the hiding property of commitments, this looks indistinguishable
from a real execution). The key point is that at the conclusion of
the protocol, the committer defines $f(r) = r_1 \oplus r_2$ and
sends $v=b(r) \oplus \sigma$. However, $CK$ knows the string $r$
and can therefore obtain $\sigma$ by computing $v \oplus b(r)$.
Notice that the extraction strategy relies only on the ability to
simulate the zero-knowledge proof of membership in
step~\ref{step2}. See~\cite{BL} for a formal definition of
commit-with-extract schemes and for a full proof of
Protocol~\ref{prot:commitextract}.
\paragraph{Summary.}
Consider the system of zero-knowledge arguments of knowledge of
Protocol~\ref{prot:zkpok} where the commit-with-extract scheme that
is used is that of Protocol~\ref{prot:commitextract}. Then, it holds
that the {\em simulation}\/ of the combined protocol works by
simulating the zero-knowledge proof of membership of Part~2 that is
provided by $P$. Furthermore, the {\em extraction}\/ strategy of the
combined protocol works by simulating the zero-knowledge proof of
membership of Part~1 that is provided by $V$. Actually, both the
simulation and extraction strategies also involve other
instructions, as described above. However, these instructions do not
require any ``rewinding'' or involved simulation strategies; rather
they can be generated using one-pass black-box simulation
(specifically, these instructions can be generated independently of
the internal simulation of the subproofs and based solely on the
transcript of the other messages). Due to this fact, we have that if
the zero-knowledge subproofs can be simulated under {\em concurrent
composition}\/ (even with arbitrary other protocols), then both
simulation and extraction of Protocol~\ref{prot:zkpok} can be
carried out under {\em concurrent composition}\/ (even with
arbitrary other protocols). This important property is summarized in
the following informal lemma: \pagebreak
\BL {\em (informal lemma):}
Let $(P,V)$ be the protocol obtained by plugging the
commit-with-extract scheme of Protocol~\ref{prot:commitextract}
into Protocol~\ref{prot:zkpok}. Furthermore, assume that the
subproofs of membership \(in Protocol~\ref{prot:commitextract} and
in part~2 of Protocol~\ref{prot:zkpok}\) constitute zero-knowledge
proof or argument systems with negligible soundness error, even
when run concurrently with each other and arbitrary other
protocols. Then,
\BE
\item
$(P,V)$ is a zero-knowledge argument system, even when run
concurrently with itself and arbitrary other protocols.
\item
$(P,V)$ is an argument of knowledge with negligible knowledge
error, even when run concurrently with itself and arbitrary other
protocols. Furthermore, there exists a witness-extended emulator
for $(P,V)$ in this setting.
\EE
\label{lem:subproofs}
\EL
The proof of this lemma follows directly from the proofs that
Protocol~\ref{prot:commitextract} is a commit-with-extract
commitment scheme and that Protocol~\ref{prot:zkpok} is a system of
zero-knowledge proofs of knowledge, as shown in~\cite{BL}. A formal
version of Lemma~\ref{lem:subproofs} is stated and proved in the
Appendix.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Black-Box Bounded Concurrent ZK}
\label{sec:rk}
In this section we describe the concurrent zero-knowledge protocol
of Richardson and Kilian~\cite{RK}, with a certain setting of
parameters relating to the number of rounds. It has been shown
that the protocol of~\cite{RK} with a poly-logarithmic number of
rounds is (unbounded) concurrent zero-knowledge \cite{KP}. In
contrast, we extend the protocol to have $O(m\!\cdot\!\kappa(n))$
rounds, where $m$ is the maximum number of concurrent executions
and $\kappa(\cdot)$ is any super-constant function; i.e.,
$\kappa(n)=\omega(1)$.%
\footnote{Actually, we will assume that $\kappa(n)$ is at most
polynomial in $n$; therefore, it cannot be {\em any}\/
super-constant function.} %
We then show that this extended protocol is secure under
$m$-bounded concurrent composition. Although this is a far weaker
claim (i.e., requiring more rounds and achieving only bounded
concurrency), the simulation strategies of~\cite{RK,KP} are
problematic when the protocol is used as a module within another
larger protocol (as will be the case here). We therefore increase
the number of rounds and provide a {\em new}\/ simulation strategy
that also succeeds when the protocol is plugged into another
(larger) protocol.
Before presenting the actual protocol of~\cite{RK}, we describe it
on an informal level. The idea behind the protocol is to provide
the simulator with many opportunities to rewind the verifier. This
is achieved by having a preamble of many iterations, where
rewinding in just one of the iterations suffices for simulating
the entire proof. To demonstrate this idea, we consider the case
that the preamble consists of just one iteration. Then, the
protocol begins with the verifier sending a perfectly-hiding
commitment to a random string $r\rnd\bitset^n$. Next, the prover
sends a perfectly-binding commitment $c={\sf Commit}(0^n)$, after
which the verifier decommits to $r$. (Note that ${\sf
Commit}(0^n)$ is actually a randomized computation.) Finally, the
prover proves a zero-knowledge (or
witness-indistinguishable~\cite{FS90}) proof to the verifier that
either the statement in question is true, or that its commitment
$c$ equals ${\sf Commit}(r)$. Now, the honest prover will always
prove that the statement in question is true. Furthermore, due to
the fact that the verifier's commitment is perfectly-hiding, a
cheating prover has no information on $r$ when it sends $c$ to the
verifier. Therefore, except with negligible probability, it will
not be the case that $c={\sf Commit}(r)$. Thus, if the verifier is
convinced, the statement in question must be true (by reduction to
the soundness of the zero-knowledge or witness-indistinguishable
proof provided at the end). The fact that the proof is
zero-knowledge can be seen as follows: The simulator is able to
rewind the verifier after obtaining the decommitment value $r$.
Then, it can continue the proof by providing a commitment $c$ such
that indeed $c={\sf Commit}(r)$. In this case, the simulator can
carry out the proof at the end using the fact that its commitment is
to $r$ and without having any witness to the fact that the
statement in question is true. In the actual proof system, this
preamble is repeated many times, presenting many opportunities for
rewinding the verifier. We now present the proof system:
\medskip
\BPR {\em (Protocol \RK\ -- concurrent ZK proof for a language $L$):}
\BI
\item {\bf Common input:} $x\in L$ and a security parameter $n$.
\item {\bf Auxiliary input to the prover:} a witness $w$ for $x\in L$.
\item {\bf Preamble length:} $\ell$
\item {\bf Part~1 -- the preamble:}
\BE
\item $P$ sends $V$ the first message of a two-round perfectly hiding
commitment scheme.
\item
$V$ chooses $\ell$ random strings
$r_1,\ldots,r_\ell\rnd\bitset^n$. Then, $V$ sends $P$
perfectly-hiding commitments to $r_1,\ldots,r_\ell$.
\item Repeat $\ell$ times \(for $j = 1,\ldots,\ell${\em ):}
\BE
\item $P$ computes a perfectly-binding commitment $c_j={\sf Commit}(0^n)$
and sends it to $V$.
\item $V$ decommits, revealing $r_j$.
\EE
\EE
\item {\bf Part~2 -- the proof:}
\BE
\item
$P$ and $V$ run any \(efficient-prover\) zero-knowledge or
witness-indistinguishable proof for $\NP$ for the following
statement:
\begin{quote}
Either $x\in L$ or there exists a $j$ $(1\leq j\leq \ell)$ such
that $c_j = {\sf Commit}(r_j)$.
\end{quote}
\EE
\EI
\label{prot:RK}
\EPR
\ni Let $\RK(\ell)$ denote the instantiation of Protocol \RK\ for
the case that the number of iterations in the preamble is $\ell$.
Now, as we have mentioned above, it has been shown that
$\RK(\tilde O(\log^2n))$ is unbounded concurrent
zero-knowledge~\cite{KP}. Below, we will prove that
$\RK(m\kappa(n))$ is $m$-bounded concurrent zero-knowledge. This
is a far weaker claim than that of~\cite{KP}. However, as we have
mentioned, we prove this in order to demonstrate our new
simulation technique, that enables us to later use \RK\ as a
subprotocol in another, different protocol. We remark that our
proof that $\RK(m\kappa(n))$ is $m$-bounded concurrent
zero-knowledge is far simpler than all known proofs for the
unbounded concurrency case. (Of course this is facilitated by the
fact that in the model of bounded concurrency we can make the
protocol depend on the number of concurrent executions.) Loosely
speaking, our simulation strategy works as follows. In general, it
is difficult to construct concurrent zero-knowledge protocols
because rewinding one execution may force the simulator to rewind
many other executions (that may even have already been
successfully simulated). The idea behind our simulation strategy
is to always rewind only the first execution that provides a
rewinding opportunity, and to let all other executions run their
course at this point. This strategy yields the result that the
rewinding in a given execution results in a potential loss of at
most one rewinding possibility for every other execution. Since
each protocol has more than $m$ rewinding possibilities and there
are only $m$ executions, we have that each execution can be
simulated (i.e., there is always guaranteed to be a ``spare''
rewinding point in each execution). The actual simulation is
slightly more complicated because we wish to obtain strict
polynomial-time simulation. Furthermore, later we will need to
simulate the zero-knowledge protocol when run together with other
protocols (in order to obtain our result for general secure
computation). This also adds complications, but is dealt with in a
similar way to the strategy shown here.
\BP
Let $\kappa(\cdot)$ be any super-constant function. Then, assuming
the security of the prescribed commitment schemes, Protocol\,
$\RK(m\kappa(n))$ is an $m$-bounded \(black-box\) concurrent
zero-knowledge proof system for the language $L$. Furthermore, the
zero-knowledge simulator runs in strict polynomial-time.
\label{prop:cZK}
\EP
\BPF
In order to prove this proposition, we need to show completeness,
soundness and $m$-bounded concurrent zero-knowledge. Completeness
follows immediately from the completeness of the zero-knowledge
proof of part~2. We proceed to prove soundness:
\medskip\ni{\bf Soundness:} By the soundness of the proof in part~2,
it suffices to show that the probability that there exists a $j$
such that $c_j={\sf Commit}(r_j)$ is negligible. However, this
follows immediately from the fact that $V$ commits using a
perfectly-hiding commitment scheme. Therefore, $P$ has no
information about $r_j$ when it generates its commitment $c_j$.
See more details in~\cite{RK}.
\medskip\ni{\bf \boldmath{$m$}-bounded concurrent zero-knowledge:}
We begin by presenting some notation and terminology. Denote by
$\Pi_k$ the $k^\th$ execution of the protocol and by $\Pi_k^j$ the
$j^\th$ iteration of the preamble in the $k^\th$ execution. We say
that iteration $\Pi_k^j$ {\sf opens} when the prover $P$ sends the
perfectly-binding commitment $c_j$ in $\Pi_k$, and that it {\sf
closes} when the verifier $V^*$ decommits to $r_j$ in $\Pi_k$. An
execution is said to have been {\sf satisfied} by the simulator
$S$ if $V^*$ aborts the execution (by, say, refusing to decommit in
some iteration), or if there exists a $j$ such that $c_j={\sf
Commit}(r_j)$. Notice that once an execution has been satisfied,
the simulator can complete the simulation without any rewinding.
(In the case of an aborted execution, this is immediate. In the
case that $c_j={\sf Commit}(r_j)$, the simulator can use this fact
instead of a witness for $x\in L$ in part~2 of the proof.)
Therefore, the simulation strategy is basically for $S$ to satisfy
every execution before it reaches part~2 of the proof.
Loosely speaking, the simulator $S$ works by playing the honest
prover until the first iteration of an unsatisfied execution
closes. Assume that this iteration is $\Pi_k^j$, and let $r_j$ be
the string decommitted to by the verifier $V^*$. Then, $S$ rewinds
$V^*$ to the point in execution~$k$ that iteration~$j$ opened and
sends a new perfectly-binding commitment $c_j={\sf Commit}(r_j)$.
Next, $S$ continues forward (playing the honest prover) hoping
that once again $\Pi_k^j$ is the first iteration to close. If this
occurs and $V^*$ decommits, then with overwhelming probability
execution~$k$ is satisfied. (Execution~$k$ may not be satisfied in
the case that $V^*$ now decommits to a different string $r'_j\neq
r_j$. However, due to the computational binding of the commitment
scheme used by the verifier, this happens with only negligible
probability.) If $\Pi_k^j$ is not the first iteration to close,
then $S$ repeats the process again for a fixed polynomial number
of times. If success is not obtained in any of these rewinding
attempts, then $S$ abandons this attempt to satisfy the execution
and continues in the same manner as above. This strategy is used
for all executions, and we show that eventually, all executions
are satisfied except with negligible probability. This high-level
strategy is what also lies behind the simulators of~\cite{RK,KP}.
The novelty of our strategy is that the simulator will iteratively
fix the transcript throughout the simulation, thereby limiting the
rewinding (to the part not yet fixed). Thus, we obtain a strategy
which uses rewinding, but in a significantly more restricted way
than for previous simulators. This will be crucial for using the
zero-knowledge proof as a subprotocol. We now proceed to formally
define the simulator:
\medskip\ni{\sc The simulator $S$:}
We describe the simulation strategy of $S$ for the remaining set
of unsatisfied executions (initially this set contains all
executions). $S$'s high-level strategy is to satisfy an execution,
permanently fix the transcript until the point that the execution
was satisfied (without any further rewinding behind this point),
and then continue to satisfy another execution from that point on.
Before continuing, we introduce some terminology. We call the
point at which $S$ fixes the transcript a {\sf fixed
point}.%
\footnote{Formally, ``fixing the transcript'' means fixing the
prefix of all future oracle calls to $V^*$.} %
Furthermore, the {\sf current fixed point} is the last fixed point
to be set by $S$. Now, $S$ holds a list of unsatisfied executions
and attempts to satisfy an execution from this list without
rewinding behind its current fixed point. According to this
strategy, only iterations that were opened after the current fixed
point may be rewound. Thus, we call an iteration {\sf potential}
if it {\em opened}\/ after the current fixed point. Now, $S$
interacts with verifier $V^*(x,z,r)$ in a black-box manner,
playing the honest prover strategy for part~1 of the proof and
waiting for one of the following two events:
\BE
\item {\em $V^*$ aborts an unsatisfied execution:}
When this occurs, the execution is satisfied. Therefore, $S$ sets
a fixed point at the ``abort'' message, removes this execution
from the set of unsatisfied executions and continues as described
above. We note that an abort message is just any detectably
illegal message sent by $V^*$.
\item {\em A potential iteration closes:}
Let this iteration be $\Pi_k^j$ and let $r_j$ be the string
decommitted to by the verifier when $\Pi_k^j$ closed. Furthermore,
let $r_S$ be the random coins used by $S$ in the simulation since
the last fixed point was set. Now, $S$ rewinds $V^*$ back to the
point at which iteration $\Pi_k^j$ opened and sends $c_j={\sf
Commit}(r_j)$, instead of a commitment to $0^n$. Then, $S$
continues in the simulation with the hope that $\Pi_k^j$ will be
the first iteration to close again. We stress that $S$ uses fresh
random coins in this continuation (i.e., coins that are
independent from the coins used previously). Now, as we have
mentioned, $S$'s aim in this rewinding is to once again have
$\Pi_k^j$ be the first potential iteration to close. Therefore, if
$V^*$ aborts an execution or closes a different potential
iteration {\em before}\/ $\Pi_k^j$ closes, then $S$ rewinds back
to the point at which $\Pi_k^j$ opened and tries again (with new
random coins). $S$ attempts this rewinding $2mn^2$ times. If in
all of these attempts $\Pi_k^j$ does not close before another
event causing a fixed point occurs, then $S$ abandons this attempt
at satisfying $\Pi_k$. Instead, $S$ rewinds $V^*$ to the current
fixed point and replays the honest prover strategy using the
random coins $r_S$ that it used the first time that $\Pi_k^j$
closed. Then, $S$ fixes the transcript at the point that $\Pi_k^j$
closes and continues as above. In such a case, we say that the
fixed point set by $S$ is a {\sf failed fixed point}. If at any
point during the simulation it turns out that $\kappa(n)$ {\em
consecutive}\/ fixed points are failed, then $S$ outputs {\sf
failure} and halts.
Now, if in one of the $2mn^2$ rewinding attempts, $\Pi_k^j$ {\em
is}\/ the first iteration to close (without the other events
occurring), then $S$ does the following. Let $r_j'$ be the
decommitment of $V^*$ in the second time that $\Pi_k^j$ is the
first iteration to close. If $r_j'\neq r_j$, then $S$ outputs {\sf
failure} and halts. Otherwise, the execution is satisfied (because
$c_j={\sf Commit}(r_j)$). Having satisfied $\Pi_k$, simulator $S$
fixes the transcript at the point that $\Pi_k^j$ closes, removes
$\Pi_k$ from the set of unsatisfied executions and continues as
above.
\EE
In addition to the above, whenever part~2 of a satisfied (and
non-aborted) execution is reached, $S$ proves the proof using the
witness to the fact that $c_j={\sf Commit}(r_j)$ for some $j$. In
contrast, if part~2 of an {\em unsatisfied} execution is reached,
then $S$ outputs a special {\sf unsatisfied} symbol and halts. (In
such a case $c_j \neq {\sf Commit}(r_j)$ for every $j$, and so $S$
cannot prove the proof of part~2.) $S$ continues in this way until
$V^*$ concludes the interaction. This completes the description of
$S$.
\medskip\ni{\sc Proof of the simulation strategy:}
We first comment that the simulator runs in time that is
polynomial in $n$. This follows from the fact that the overall
number of rewinding attempts in all of the $p(n)\cdot m\kappa(n)$
iterations (over all executions) is at most $2mn^2 \cdot p(n)
\cdot m\kappa(n)=\poly(n)$. (Recall that $m=m(n)$ equals the
number of executions that are run simultaneously and $p(n)$ equals
the total number of executions.)
Next, we demonstrate that $S$ outputs a distribution that is
computationally indistinguishable from a real execution between
$P$ and $V^*$. We first bound the probability that $S$ outputs a
special {\sf failure} or {\sf unsatisfied} symbol.
\BCM The probability that $S$ outputs {\sf failure} is negligible.
\label{clm:failure}
\ECM
\BPF
There are two events that can cause $S$ to output {\sf failure}.
We separately show that each of these events can occur with at
most negligible probability:
\BE
\item
{\em Event 1 -- $V^*$ decommits to some $r_j' \neq r_j$ in one of
the rewinding attempts:} The fact that $V^*$ decommits to the same
$r_j$ before and after rewinding is due to the computational
binding of the commitment scheme. The proof of this is standard
and is therefore omitted. %(A similar argument has been shown in
%\cite[Claim 3]{GoKa}.)
%(A similar argument is shown in \cite[Claim 3]{GoKa}.)
%Next, we show that the probability that $V^*$ decommits to $q'
%\neq q$ is negligible by reducing this to the computational
%binding of the commitment scheme. We call such an event an
%``ambiguous opening''. Assume by contradiction that there exists a
%polynomial $p(\cdot)$ such that for an infinite series of input
%graphs $G$, simulator $S$ sees an ambiguous opening during the
%simulation. Then, we construct a machine $M$ that generates
%commitments and ambiguous openings as follows. $M$ receives the
%first message of a perfectly hiding commitment scheme and runs the
%same strategy as $S$, feeding $V^*$ this first message. $M$ then
%waits to see if an ambiguous opening occurs. If yes, $M$ outputs
%the appropriate commitment and its two decommitments. The only
%problem with this argument is that this $M$ runs in expected
%polynomial-time, whereas the security of the commitment scheme is
%with respect to strict polynomial-time machines. This is easily
%overcome by having $M$ truncate the simulation if it exceeds
%$q(t(n),p(n))$ for some fixed polynomial $q(\cdot)$, where $t(n)$
%is the expected running-time of $M$ on input $G$ ($n=|G|$). The
%resulting $M$ now runs in strict polynomial-time and succeeds for
%an infinite number of $n$'s with probability that is at most
%negligibly smaller than $1/p(n)$. In particular, for an
%appropriate choice of $q(\cdot)$, it succeeds with probability
%greater than $1/2p(n)$. This contradict the binding of the
%commitment scheme.
\item
{\em Event 2 -- $\kappa(n)$ consecutive fixed points are failed:}
In order to bound the probability that this occurs, we first
consider the probability that a single fixed point is failed. Let
$\Pi^j_k$ be a potential iteration that closed first, and let
$p^j_k$ be the probability that given the transcript until the
point that $\Pi_k^j$ opens, iteration $\Pi^j_k$ closes before any
other event causing a fixed point occurs. Then, there are two
possibilities:
\BE
\item $p^j_k \geq \frac{1}{mn}$:
In this case, the probability that $\Pi^j_k$ will {\em not}\/ be
the first to close in all of the $2mn^2$ rewinding attempts is
negligible. This can be seen as follows. Denote by $\tilde p^j_k$,
the probability that given the transcript until $\Pi_k^j$ opened,
iteration $\Pi^j_k$ is the first iteration to close during a
rewinding attempt. Now, $p^j_k$ and $\tilde p^j_k$ may differ
because the perfectly-binding commitment provided by $S$ before
rewinding is to $0^n$ and after rewinding is to $r_j$. However, by
the computational hiding of the commitment, this difference can be
at most negligible. (The formal reduction to breaking the hiding
of the scheme is standard and is therefore omitted.) It therefore
follows that $\tilde p^j_k > \frac{1}{2mn}$. In other words, the
probability that $\Pi^j_k$ is not the first to close in any given
rewinding attempt is less than $(1-\frac{1}{2mn})$. Therefore, the
probability that it is not the first to close in all of the
$2mn^2$ independent rewinding attempts is upper-bounded by
$(1-\frac{1}{2mn})^{2mn^2} < e^{-n}$, which is negligible.
\item $p^j_k < \frac{1}{mn}$:
In this case, we cannot claim that with overwhelming probability,
$\Pi_k^j$ will be the first iteration to close again in at least
one of the rewinding attempts. Rather, we bound the probability
that a potential iteration $\Pi^j_k$ with $p^j_k < 1/mn$ will be
the first iteration to close {\em before}\/ any rewinding takes
place. Let $F$ denote the set of {\em potential}\/ iterations for
which the probability that the iteration is the first to close is
less than $1/mn$. Then, since there are at most $m$ potential
iterations between any two fixed points (at most one per
execution), we have that $|F| \leq m$. (The fact that every
unsatisfied execution can only have one potential iteration
between any two fixed points follows from the fact that as soon as
a potential iteration closes, a fixed point is set. Thus, a second
potential iteration can only open after the previous one closed,
meaning that a fixed point separates them.) Therefore, by the
union bound, the probability that an iteration from $F$ will be
the first to close is less than $1/n$.
\EE
Combining the above, we have that the probability of encountering
any single failed fixed point is less than $1/n + e^{-n} < 2/n$.
Now, by the simulation strategy, $S$ outputs {\sf failure} if
$\kappa(n)$ consecutive fixed points are failed. The probability
that this occurs can be bounded as follows. For any given starting
fixed point, the probability that $\kappa(n)$ consecutive failed
fixed points are encountered from this starting point is less than
$(\frac{2}{n})^{\kappa(n)}$ (this is based on the above analysis
that bounds the probability of encountering a single failed fixed
point, given the previously fixed transcript, by $2/n$). There are
at most $m\kappa(n)$ starting fixed points, and so the overall
probability of encountering $\kappa(n)$ consecutive failed fixed
points is upper bounded by $m\kappa(n)(\frac{2}{n})^{\kappa(n)}$,
which is negligible as long as $\kappa(n)$ is a super-constant
function of $n$ (e.g., $\kappa(n)=\log\log n$).
\EE
This completes the proof.
\EPF
\ni Next, we show that $S$ never outputs the special {\sf
unsatisfied} symbol.
\BCM The probability that $S$ outputs {\sf unsatisfied} equals $0$.
\label{clm:satisfy}
\ECM
\BPF
$S$ only outputs {\sf unsatisfied} in the case that it reaches
part~2 of the proof in an execution that it did not satisfy; let
$\Pi_k$ be this execution. We can assume that $S$ does not output
{\sf failure} (because if it did, then it would not output {\sf
unsatisfied}). This implies that there are at most $\kappa(n)-1$
consecutive failed fixed points and that for all other fixed
points, the execution for which the fixed point was set is
satisfied. Now, there are $m\kappa(n)$ iterations in $\Pi_k$'s
preamble. Furthermore, as we have mentioned, at most one iteration
of any unsatisfied execution, including $\Pi_k$, can be opened
between every two fixed points. Therefore, $S$ must have placed
$m\kappa(n)$ fixed points before part~2 of $\Pi_k$ is reached.
Since there are at most $\kappa(n)-1$ consecutive failed fixed
points, by a pigeon-hole argument we have that there must be $m$
{\em non-failed}\/ fixed points that were placed by $S$. However,
a non-failed fixed point is only placed when $S$ satisfies an
execution. Therefore, $m$ different executions were satisfied
between the time that $\Pi_k$ started and finished. Since the
setting here is of $m$-bounded concurrency, there can be at most
$m\!-\!1$ other executions that run simultaneously with $\Pi_k$.
We therefore conclude that $\Pi_k$ must also have been satisfied.
\EPF
\ni Having established that $S$ outputs a special symbol with at
most negligible probability, we are ready to prove that the
simulation is successful. That is,
\BCM
Let $m=m(n)$ be a polynomial. Then, for every non-uniform
probabilistic polynomial-time verifier $V^*$ who schedules the
messages according to $m$-bounded concurrency and every polynomial
$p(\cdot)$,
$$
\left\{\langle P,V^*(z,r)\rangle(\ov x)\vphantom{S^{V^*}}\right\}
_{n\in\N;\ov x\in L_n^{p(n)};z,r\in\bitset^*} \indist
\left\{S^{V^*(\ov x,z,r)}(\ov x)\right\}_{n\in\N;\ov x\in
L_n^{p(n)};z,r\in\bitset^*}
$$
where $L_n = L \cap \bitset^n$.
\label{clm:indist}
\ECM
\BPF
We assume that $S$ does not output {\sf failure} or {\sf
unsatisfied}. This is fine because these events happen with at
most negligible probability. Next, consider the following hybrid
experiment. Let $M^{V^*}$ be a machine who is given a witness to
the fact that $x\in L$ and follows the strategy of $S$ exactly
with the following exception. When $M$ reaches part~2 of the proof
for a satisfied execution, it uses its knowledge of the witness to
prove the proof in the same way as the honest prover, rather than
using the fact that $c_j={\sf Commit}(r_j)$ for some $j$.
We first claim that the distribution generated by $M^{V^*}$ is
computationally indistinguishable from the distribution generated
in a real execution between $P$ and $V^*$. Intuitively, the only
difference between the transcripts is regarding the prover
commitments, which are to $0^n$ in a real execution and to $r_j$
in the simulation by $M^{V^*}$. Therefore, the
indistinguishability is due to the hiding property of commitments.
More formally, consider the following two experiments with a
probabilistic polynomial-time distinguishing machine $D$ and a
perfectly-binding commitment scheme {\sf Commit}. In the first
experiment, $D$ adaptively chooses $n$-bit values $r_1,r_2,\ldots$
and sends these values to an oracle that returns $c_j={\sf
Commit}(r_j)$ for each $j$. Note that $D$ chooses the $r_j$ values
adaptively and therefore $r_j$ is chosen after
$c_1,\ldots,c_{j-1}$ have been received. In the second experiment,
$D$ does the same; however, for each oracle query it receives back
a commitment $c_j={\sf Commit}(0^n)$. (Of course, all commitments
are generated with independent random coins.) By the hiding
property of the commitments, these two experiments are
indistinguishable; i.e., every such $D$ outputs~1 in both
experiments with negligibly close probability. (This follows from
a standard hybrid argument.) Now, let $\tilde M^{V^*}$ be a
machine who participates in the above experiments (playing the
role of $D$). Machine $\tilde M^{V^*}$ works exactly like
$M^{V^*}$ except that instead of generating a prover-commitment to
a string $r_j$ by itself, it queries its oracle with $r_j$ and
uses the response $c_j$ as if it is a commitment to $r_j$. (Note
that we abuse notation here and ignore the fact that $r_j$ can be
from any iteration and any execution.) Now, on the one hand, if
$\tilde M^{V^*}$ is involved in the first experiment (thereby
receiving commitments to $r_j$), then it generates {\em exactly}\/
the same distribution as $M^{V^*}$. On the other hand, if $\tilde
M^{V^*}$ is involved in the second experiment (thereby receiving
commitments to $0^n$), then we claim that it generates a
distribution that is statistically close to a real execution
between $P$ and $V^*$. The fact that the distribution is
statistically close to a real execution requires justification
because $\tilde M^{V^*}$ rewinds $V^*$. Intuitively, this holds
because in both cases, the commitments received by $V^*$ are to
$0^n$; furthermore, the rewinding carried out by $\tilde M^{V^*}$
is such that it does not skew the distribution of the transcript
in any way. This is formally shown by induction over the fixed
points. The base case refers to an empty transcript (this is as if
a fixed point is placed before the simulation begins). Now, assume
that indistinguishability holds until the $i^\th$ fixed point is
set; we now prove for the $i\!+\!1^\th$ fixed point. In order to
prove this, first note that the setting of a fixed point can be
viewed as the following two-step process:
\BE
\item
Sample the event that causes the next fixed point to be set in a
real execution. There are at most $2$ such events that can occur
for every execution: 1 event for the case that the verifier
aborts, and 1 event for the case that a potential iteration is the
first to close.
\item Given the event that causes the next fixed point to be set,
sample from all real transcripts that result in this event
occurring.
\EE
The above ``sampling'' is according to the distribution defined by
$V^*(x,z,r,t)$, where $x,z,r$ are $V^*$'s common-input,
auxiliary-input and random-tape, respectively, and $t$ is the
transcript up to the $i^\th$ fixed point. Note that the
distribution in question is dependent upon the prover's coins only
($V^*$'s coins $r$ are fixed here). Now, if $\tilde M^{V^*}$ does
not carry out any rewinding in setting the $i\!+\!1^\th$ fixed
point, then the portion of the transcript between the $i^\th$ and
$i\!+\!1^\th$ fixed points that is obtained, is distributed {\em
exactly}\/ like the transcript of a real execution between $P$ and
$V^*$. (Recall that in this case $\tilde M^{V^*}$ sends
commitments to $0^n$ (just like the real prover) and furthermore,
it uses a ``real witness'' for proving the proof of part~2.) We
note that although the transcript is sampled directly, it
implicitly follows the above described two-step process. Next,
consider the case that rewinding is carried out by $\tilde
M^{V^*}$. In this case, there are two possibilities. First,
$\tilde M^{V^*}$ may output {\sf failure}; if this happens, then
the resulting transcript is different from in a real execution.
However, since this happens with at most negligible probability
(by Claim~\ref{clm:failure}), this causes at most a negligible
difference. In contrast, if $\tilde M^{V^*}$ does not output {\sf
failure}, then the transcript that is output is either the same
one as before rewinding (as when a failed fixed point is set), or
the last one after rewinding (if $\tilde M^{V^*}$ succeeds in
learning $r_j$). However, both of these transcripts are uniformly
chosen from the set of transcripts in which the potential
iteration in question is the first to close (i.e., the sampling is
done exactly according to the distribution over all transcripts,
conditioned on the event that causes the fixed point to be set).
It follows that the transcript generated by $\tilde M^{V^*}$ in
the second experiment is {\em statistically close}\/ to a real
transcript between $P$ and $V^*$ (the only difference is in the
case that {\sf failure} is output). In contrast, as shown above,
the transcript generated by $\tilde M^{V^*}$ in the first
experiment is exactly the same as by $M^{V^*}$. Therefore, by the
indistinguishability of the two experiments, we conclude that the
transcript distribution generated by $M^{V^*}$ is computationally
indistinguishable from the transcript generated in a real
execution.
Next, we consider the difference between the distribution
generated by $M^{V^*}$ and the distribution generated by $S$. The
difference between these distributions is with respect to the
witness used for proving part~2 of the proofs. Specifically, $M$
always uses the ``real'' witness for the statement being proved,
whereas $S$ always uses the witness that $c_j={\sf Commit}(r_j)$.
Thus, the indistinguishability of the distributions reduces to the
zero-knowledge (or actually witness indistinguishability) property
of the proof in part~2 of the protocol. (We also rely here on the
fact that witness indistinguishability is preserved under
concurrent composition~\cite{FS90}.) This reduction is
straightforward and is therefore omitted.
\EPF
This completes the proof of Proposition~\ref{prop:cZK}.
\EPF
\BR {\bf -- on the length of the preamble.} {\rm The number of
iterations in the preamble is chosen so that all executions are
guaranteed to be satisfied before part~2 of the proof is reached;
this is demonstrated in Claim~\ref{clm:satisfy}. Specifically, it
holds that the number of iterations needs to be greater than or
equal to $\kappa(n)$ times the number of {\em non-failed fixed
points}\/ that are placed by the simulator. In the context of
bounded concurrent zero-knowledge, the number of non-failed fixed
points equals the number of concurrent executions that take place.
We stress that we can extend the preamble to be of any polynomial
length (greater than or equal to $m\kappa(n)$), and the number of
non-failed fixed points required remains $m$ only. This will
become important when Protocol~\RK\ is used for obtaining
bounded-concurrent secure two-party computation.}
\label{remark:length}
\ER
\BR {\bf -- reducing the length of the preamble.} {\rm
We also remark that the length of the preamble can be reduced to
$m$, rather than $m\kappa(n)$ by employing an {\em expected
polynomial-time}\/ simulation strategy. The difference in the
simulator is that when a potential iteration closes first, the
verifier is rewound repeatedly until that same iteration closes
first again. (This is in contrast to the above-described strategy
where at most $2mn^2$ rewinding attempts are made.) We note that
this must be done carefully in order to make sure that the
simulator remains expected polynomial-time; techniques for such a
simulation can be found in~\cite{GoKa}. The reason that we use
strict polynomial-time simulation (at the expense of additional
rounds) is due to the fact that the zero-knowledge proof is used
as a subprotocol in a larger protocol that utilizes ``ideal
calls'' to zero-knowledge. Furthermore, this larger protocol has
been proven secure for strict polynomial-time adversaries only.
Now, if our zero-knowledge simulation ran in expected
polynomial-time, then we would encounter the following problem. In
order to prove the security of our entire protocol (i.e., the
larger protocol combined with our zero-knowledge subprotocol), we
first simulate the zero-knowledge proofs, thereby obtaining an
adversary for the larger protocol in a setting with ideal
zero-knowledge calls. Next, we derive security by applying the
proof of security for the larger protocol. Now, if the simulation
of the zero-knowledge proof takes expected polynomial-time, then
we would obtain an expected polynomial-time adversary for the
larger protocol. However, as we have mentioned, this protocol has
only been proven secure for strict polynomial-time adversaries.
The proof of security would therefore not go through.}
\ER
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Non-Black-Box Bounded Concurrent ZK}
\label{sec:boaz}
In this section, we describe the bounded concurrent zero-knowledge
protocol of Barak~\cite{B}. The high-level outline of the
construction of~\cite{B} is as follows. In the first part of the
protocol, the prover and verifier run a ``generation protocol''
with the following property. A simulator is able to obtain some
trapdoor information from the verifier during the generation
protocol (this trapdoor will be used later on in the simulation).
In contrast, in a real execution between the prover and verifier,
the prover has only a negligible chance of obtaining this trapdoor
information. Then, the second part of the zero-knowledge protocol
is designed such that given the trapdoor information, it is
possible to complete the proof (without knowing any witness to the
statement being proved). Thus, the simulator works by obtaining
the trapdoor information in the first part and then using it to
complete the proof in the second part. In contrast, in a real
interaction the prover will not obtain the trapdoor information
and thus its proof must be based on the validity of the statement
being proved.
In this section, we will only describe the generation protocol of
the first part of the protocol of~\cite{B}. This suffices for our
purposes here. For details regarding the second part, which uses
universal arguments, see~\cite{B,BG}.
\paragraph{Easy-hard relations.} The idea behind an easy-hard
relation is that in a real interaction between the prover and
verifier, it is hard for the prover to ``hit'' the relation. In
contrast, it is easy for the simulator to ``hit'' the relation.
Thus, such a relation can be used for part~1 of the proof as
described above. In the definition below, we refer to ${\sf
Ntime}(f(n))$ relations; a relation $R$ is said to be in ${\sf
Ntime}(f(n))$ if there exists a non-deterministic Turing machine
that upon input $(x,y)$, runs for at most $f(|x|)$ steps and
outputs~1 if and only if $(x,y)\in R$. We now present the
definition:
\BD {\em (easy-hard ${\sf Ntime}(n^{\log\log n})$ relations):}
Let $R \subseteq \bitset^* \times \bitset^*$ be an ${\sf
Ntime}(n^{\log\log n})$ binary
relation. We say that $R$ is an {\sf easy-hard relation}%
\footnote{Such a relation is called {\sf interactive strong
easy-hard} in \cite{B}. For the sake of brevity, we drop
``interactive'' and ``strong'' from the name.} %
if there exist two interactive probabilistic polynomial-time {\sf
generating} algorithms $P$ and $V$ that satisfy the following two
properties:
\BE
\item {\em Hardness:}
For any \(non-uniform\) probabilistic polynomial-time machine
$P^*$, if $\tau$ is the transcript of the interaction of $P^*$ and
$V$ on input $1^n$, then the probability that $P^*$ outputs
$\sigma$ such that $(\tau,\sigma)\in R$ is negligible in $n$.
\item {\em Easiness:}
There exists a polynomial-time computable transformation that
takes any \(non-uniform\) probabilistic polynomial-time machine
$V^*$ and outputs a \(non-uniform\) probabilistic polynomial-time
simulator $S^*$ such that $S^*$ outputs a pair $(v,\sigma)$ and:
\BE
\item The first string $v$ is computationally indistinguishable
from the actual view of\, $V^*$ when interacting with $P$. \(The
view of a party consists of the contents of its tapes and the
messages that it receives.\)
\item Let $\tau$ be the transcript that is derived from $v$ \(note
that the transcript of an execution can be derived
deterministically from either party's view\). Then, except with
negligible probability, $(\tau,\sigma)\in R$.
\EE
\EE
The pair $(P,V)$ is called a {\sf generation protocol for $R$}.
\label{def:easy-hard}
\ED
The witness $\sigma$ for $\tau$ is the trapdoor information that
was mentioned in the motivating discussion above. The ``hardness''
requirement says that a real prover will not be able to obtain
such a $\sigma$; whereas the ``easiness'' requirement says that
the simulator will obtain a $\sigma$ as required. In the second
part of the argument system of \cite{B}, the prover essentially
proves that either the original statement is true or that it knows
$\sigma$ from the generation protocol.
We remark that we have chosen to denote the generating algorithms
by $P$ and $V$ as they are played by the prover and verifier in
the zero-knowledge protocol. However, this notion can be defined
independently of zero-knowledge (and indeed, the above is
independent of the statement being proved).
Barak showed that assuming the existence of hash functions that
are collision-resistant against $n^{\log \log n}$-time
adversaries, it suffices to prove the existence of an ${\sf
Ntime}(n^{\log\log n})$ easy-hard relation in order to derive
zero-knowledge. Furthermore, if the hardness and easiness
properties hold in the bounded concurrent model, then bounded
concurrent zero-knowledge is obtained \cite{B}. We note that the
hardness assumption regarding the hash functions has been reduced
so that it suffices for them to be collision resistant against any
polynomial-time adversary \cite{BG}. However, this requires some
special properties in the construction of the easy-hard relation
as well. We ignore these technicalities and just remark that the
weaker hardness assumption suffices for the results presented
here. We now proceed to describe the easy-hard relation
of~\cite{B} for achieving bounded concurrent zero-knowledge.
\paragraph{The Barak easy-hard relation:} The main idea behind the
relation is to have the transcript $\tau$ contain a ``proof'' that
$P$ knows the next message function of $V$. The hardness property
follows from the fact that $P^*$ cannot know $V$'s next message
function (and in particular, $P^*$ cannot guess $V$'s
random-tape). In contrast, the simulator $S^*$ {\em does}\/ hold
$V^*$'s next message function and can therefore succeed in
generating this ``proof''. Of course, the only question remaining
is what does it mean for $P$ to ``prove'' that it holds the next
message function of $V$? This was solved by \cite{B} in the
following way: $P$ sends $V$ a commitment $c$ to a machine $M$,
and $V$ replies with some random string $r$. Then, we say that $P$
knows $V$'s next message function if $M(c)=r$ (i.e., when applying
the committed machine $M$ to the commitment value $c$, the output
is exactly the message sent by $V$ upon receiving the commitment
value $c$). The key point behind the hardness is that if $r$ is of
high enough entropy, then $P^*$ cannot succeed in committing to
the correct $M$. In contrast, since the simulator $S^*$ actually
holds the code of $V^*$, it can just set $M=V^*$ (including the
contents of all of $V^*$'s tapes). Then, of course, it holds that
$M(c)=V^*(c)$ and so the simulator indeed commits to the correct
next message function.
The above works for the stand-alone case. However, in the bounded
concurrent model, $V^*$ will not necessarily respond immediately
with $r$. In particular, it may send messages belonging to other
executions first. Therefore, it will not hold that $M(c)=r$. The
solution to this problem is to say that $P$ holds the next message
function if there exists a {\em ``short''}\/ string $y$ such that
$M(c,y)=r$. By ``short'', we mean that $y$ should be at least $n$
bits shorter than the random string $r$ sent by $V$. The length of
$r$ is fixed to be some polynomial $\ell(n)$, where $\ell(n)$
bounds the length of {\em all}\/ messages received by $V$ during
all the concurrent executions. We now describe why this fulfills
the requirements of being an easy-hard relation.
\BI
\item {\em Hardness:} In order for a cheating $P^*$ to succeed, it
must be that $M(c,y)=r$. However, $M$ and $c$ are fixed before $V$
chooses $r$. Furthermore, $|r| \geq |y|\!+\!n$. Therefore, the
Kolmogorov complexity of $r$ is too high for it to be predicted by
$y$, and $P^*$ can ``hit'' the relation with at most negligible
probability.
\item {\em Easiness:}
As in the stand-alone case, the simulator $S^*$ commits to
$M=V^*$. However, here $S^*$ must also determine the string $y$.
This string $y$ is set to be all the $P$-messages sent by $S^*$
from the time that $S^*$ committed to $M$ (in this execution)
until the time that $V^*$ replies with $r$ (in this execution).
Since $M=V^*$, it turns out that indeed $M(c,y)=r$. Notice also
that since the total length of all $P$-messages in all concurrent
executions is less than $\ell(n)$, it is possible for $S^*$ to
define $y$ in this way. Thus, the ``easiness'' requirement holds.
We note that it doesn't matter how $y$ is generated, as long as
the relation is fulfilled.
%We note that it is actually possible to extend this simulation to
%the case that $V^*$ obtains some of its messages from an external
%source (say, an oracle). Furthermore, the simulation works even
%though $S^*$ is only given access to $V^*$'s code, and not the
%oracle. This holds as long as the message $r$ is actually
%generated by $V^*$. In order to see this, note that it doesn't
%matter how $V^*$ responds to the messages that form part of $y$;
%it is only important how it responds to the entire $y$, and this
%is ``committed'' to by $S^*$. We will use this property later on.
\EI
For more details, see \cite{B}. The generation protocol and
easy-hard relation of Barak are as follows:
\BCT {\em (Barak generation protocol for an easy-hard relation $R$):}
\BI
\item {\bf Common input:} $1^n$
\item {\bf Length parameter:} $\ell(n)$
\item {\bf The protocol:}
\BE
\item $V$ chooses $h\rnd\bitset^n$ \(where $h$ defines a collision-resistant
hash function with range in $\bitset^n$\) and sends it to $P$.
\item $P$ sends $V$ a commitment $c={\sf Commit}(0^n)$.
\item $V$ chooses $r\rnd\bitset^{\ell(n)}$ and sends $r$ to $P$.
\EE
\EI
\label{const:easy-hard}
\ECT
We now define the easy-hard relation $R$ for which the above is a
generation protocol. Loosely speaking, the relation consists of
pairs $(c,r)$ and $(M,y)$ such that $c={\sf Commit}(M)$ and
$M(c,y)=r$. In other words, the relation is ``hit'' if the prover
succeeds in committing to a machine that outputs the verifier's
response to the commitment itself (as described in the motivation
above). We note that the actual relation also includes $h$ (the
hash function from the protocol) and $s$ (the random coins used in
generating the commitment $c$ to $M$). Formally, let $R$ be the
relation of pairs $\{((h,c,r),(M,s,y))\}$ for which the following
holds:
\BE
\item $M$ is a program of length at most $n^{\log \log n}$ where
$n \eqdef |h|$.
\item $c$ is a commitment to a hash of $M$ using coins $s$. That
is, $c = {\sf Commit}(h(M);s)$. Note that $|h(M)|=n$.
\item $\U(M,c,y)$ outputs $r$ within $n^{\log\log n}$ steps, where $\U$
is a universal Turing machine.
\item $|y|\leq |r|-n$.
\EE
\ni Barak has shown that Construction~\ref{const:easy-hard} is a
generation protocol in the bounded concurrent model for the
easy-hard relation $R$, as long as $\ell(n)$ (that determines the
length of $r$) fulfills the following requirement: {\em The total
length of all messages sent by $P$ between the time that $P$ sends
the commitment $c$ and $V$ replies with $r\rnd\bitset^{\ell(n)}$
is less than $\ell(n)-n$.} Thus, both the soundness and
zero-knowledge properties of the protocol of~\cite{B} hold as long
as this condition is fulfilled.
We note that the length of the messages sent by $P$ in part~2 of
the zero-knowledge protocol of~\cite{B} can be bounded by $n^2$.
Since $P$ only sends $n$ bits in the generation protocol, we can
bound the total size of all $P$-messages in the protocol
of~\cite{B} by $n^2+n$. Therefore, for $m$ concurrent executions,
one can set $\ell(n)=2mn^2$ and it is guaranteed that $r$ is long
enough.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{A Special-Purpose Composition Theorem}
\label{sec:compose}
In this section, we consider a model where the parties run a real
protocol that uses ideal calls to a zero-knowledge proof of
knowledge functionality (computed by a trusted third party). We
then present a composition theorem that demonstrates that in the
setting of $m$-bounded concurrency, a protocol in this model can
be transformed into a different protocol for the real model (with
no trusted party), such that the output distributions from the two
protocols are essentially the same. This composition theorem is
the central tool in our construction of $m$-bounded concurrent
secure two-party computation.
\subsection{The ZK-Hybrid Model}
In general, a {\em hybrid model}\/ is one where parties send
protocol messages to each other (like in the real model), but also
have access to a trusted party that computes some functionality
(like in the ideal model). Thus, this model is a hybrid of the
real and ideal models. A protocol that is designed for a hybrid
model contains two types of messages: {\sf real messages} that are
sent directly between the parties, and {\sf ideal messages} that
are sent between the parties and the trusted third party.
In the ZK-hybrid model, the parties have access to a trusted party
who computes the following ideal zero-knowledge functionality:
$$
((v,w),\lambda) \mapsto (\lambda,(v,R(v,w)))
$$
where $R$ is an NP-relation. That is, the first party (the prover)
sends a statement $v$ and a witness $w$ to the trusted party
computing the functionality. The second party (the verifier) then
receives the statement $v$ and a bit $b$ signalling whether or not
the prover provided a valid witness for $v$. We note that this
functionality is actually a proof of knowledge because the prover
must provide an actual witness to the trusted party. In order to
model a situation where the verifier may reject a proof of even a
true statement, we define a special symbol $\bot$ such that for
{\em every}\/ relation $R$ and every $v$, it holds that
$(v,\bot)\notin R$. Thus, upon receiving $(v,\bot)$, the trusted
party always sends $(v,0)$ to the verifier.
Let $p(n)$ be a polynomial and let $\ov x,\ov y \in
(\bitset^n)^{p(n)}$ and $z\in\bitset^*$. Then, we denote by
$\zkhybrid^m_{\Pi,\A}(\ov x,\ov y,z)$ the output distribution of
$p(n)$ concurrent executions of protocol $\Pi$ in the {\rm
ZK-hybrid} model, where the scheduling is according to what is
allowed in the model of $m$-bounded concurrency. We stress that
the only difference between this model and the real model, is that
the protocol $\Pi$ also contains ideal messages which are dealt
with by the trusted party who computes the above zero-knowledge
functionality.
\subsection{Motivation for the Composition Theorem}
Informally speaking, the composition theorem states that in the
setting of $m$-bounded concurrent composition, a real-model
protocol $\Pi$ can be constructed from any ZK-hybrid model
protocol $\Pi'$ so that the following holds: for any real model
adversary $\A$ running $\Pi$ there exists a ZK-hybrid model
adversary $\A'$ running $\Pi'$ so that the output distributions in
both cases are essentially the same. In other words, the ideal
zero-knowledge calls can be replaced by a real protocol, and the
result is the same. Jumping ahead, this basically means that it
suffices to obtain a secure protocol for the ZK-hybrid model, and
we can infer the existence of a secure protocol for the real
model. As we will see in Section~\ref{sec:final-prot}, such
protocols already exist, and so our goal is fulfilled.
The basic idea behind the theorem is to construct $\Pi$ from
$\Pi'$ by replacing the ideal calls to the zero-knowledge
functionality with concurrent zero-knowledge protocols. However,
two main problems arise when attempting to implement such a
strategy:
\BE
\item {\em Malleability/Soundness:}
Assume that in protocol $\Pi'$, both parties prove zero-knowledge
proofs (as is indeed the case for all known protocols for secure
two-party computation). Then, we must ensure that the adversary
cannot maul the zero-knowledge proof of the honest party in order
to cheat. On a technical level this problem arises because in the
hybrid model, the zero-knowledge proofs are simulated, and such
simulation essentially constitutes ``cheating''. Therefore, if the
adversary could cheat by mauling the simulated proofs, this would
mean that the result of a real execution (where the adversary
cannot cheat) and a hybrid execution (where it can) would be
different. In short, the problem here is of soundness and we
remark that a naive instantiation of the ideal calls with
zero-knowledge protocols would definitely suffer from this
problem.
\item {\em Simulation under concurrency with other protocols:}
In the setting of concurrent zero-knowledge, the protocol in
question is run concurrently with itself only; that is, it runs in the
context of {\em self-composition}\/ only. In contrast, here the
zero-knowledge protocol is run concurrently with itself {\em and}\/ with
other protocols, as in the case of {\em general composition}.
Therefore, one cannot automatically claim that the zero-knowledge
property holds here. In fact, rewinding techniques (used in all
black-box zero-knowledge protocols) are very problematic in this
context. In order to see this, notice that the natural way to
construct the ZK-hybrid model adversary $\A'$ from the real model
adversary $\A$ is to have $\A'$ simulate the zero-knowledge proofs
for $\A$, while all the other messages (i.e., the messages from the
ZK-hybrid model protocol $\Pi'$) are forwarded to the external
honest party. This causes a problem because messages from the
zero-knowledge proofs in some executions may be interleaved with
messages of $\Pi'$ in other executions. Since the messages of $\Pi'$
are forwarded by $\A'$ to an external party, $\A'$ cannot rewind
$\A$ at these points. This may conflict with the necessity to rewind
in order to simulate the zero-knowledge protocol (if the simulation
strategy is indeed based on rewinding).
\EE
We solve the first problem by using different zero-knowledge
protocols so that the adversary cannot maul one protocol in order
to cheat in the other. Specifically, we have one of the parties
prove statements with the non-black-box zero-knowledge protocol of
\cite{B} (see Section~\ref{sec:boaz}), while the other party
proves statements with the black-box zero-knowledge protocol of
\cite{RK} (see Section~\ref{sec:rk}). Loosely speaking, it turns
out that these protocols are both non-malleable (or more
accurately, simulation-sound) with respect to each other. That is,
both of the protocols remain sound even if the adversary proves
one protocol while simultaneously receiving a {\em simulated}\/
proof of the other. We now informally (and inexactly) explain why
this holds. If we simulate the protocol of~\cite{RK}, this makes
no difference to the soundness of the protocol of~\cite{B} because
no rewinding is carried out over the messages of~\cite{B} while
simulating~\cite{RK}. (This is guaranteed by taking the number of
rounds in the protocol of~\cite{RK} to be large enough, as we show
below.) Thus, if it is possible to cheat in this setting, it is
possible to cheat in a stand-alone execution of the protocol
of~\cite{B} (by running the simulation of~\cite{RK} internally).
On the other hand, if we simulate the protocol of~\cite{B}, then
as above the simulation of~\cite{B} can be carried out internally,
while a single execution of~\cite{RK} is carried out {\em
externally}. This means that if an adversary could cheat in this
setting, it is possible to cheat in a stand-alone execution
of~\cite{RK}. One point worth noting here is that the simulation
of the protocol of~\cite{B} {\em can}\/ be carried out, even
though some messages are generated externally. The reason for this
is that the string $y$ provided by the simulator can be generated
in any way, and in particular, can contain messages obtained
externally. See Section~\ref{sec:boaz}, just before
Construction~\ref{const:easy-hard}, for an explanation.
The second problem of concurrency with other protocols is solved
as follows. First, the simulation strategy for the protocol of
\cite{B} does not use rewinding. Therefore, no problem arises. (We
remark that there are parameters for the protocol of \cite{B} that
must be modified in this scenario, but this is reasonably
straightforward.) In contrast, the simulation strategy of the
protocol of \cite{RK} does use rewinding, which as we have
described is problematic here. We solve this problem by using the
simulation strategy demonstrated in Section~\ref{sec:rk}. Recall
that this strategy is almost ``straight-line''. That is, the
simulator deals sequentially with each execution and places fixed
points behind which it never needs to rewind. The simulation
succeeds as long as the number of iterations in the preamble is
greater than or equal to the number of non-failed fixed points
that need to be placed. Thus, we increase the length of the
preamble so that the simulator can place a fixed point for every
$\Pi'$-message that is sent. Then, since rewinding only takes
place between fixed points, this ensures that no rewinding behind
a $\Pi'$-message is necessary. This strategy is based on the
methodology used by \cite{GL} to solve the problem of simulating
zero-knowledge while external messages may be sent.
\subsection{The Composition Theorem}
\label{sec:composition-theorem}
In this section we show that any protocol $\Pi'$ that runs in the
ZK-hybrid model can be transformed into a protocol $\Pi$ in the
real model. The transformation is such that any real-model
adversary $\A$ for $\Pi$ can be simulated by a ZK-hybrid model
adversary $\A'$ for $\Pi'$ in the setting of $m$-bounded
concurrency. We remark that the protocol $\Pi$ depends both on
$\Pi'$ and on the concurrency bound~$m$.
\BT Let\, $\Pi'$ be a two-party protocol for the {\em ZK-hybrid model}
and let $m(\cdot)$ be a polynomial. Then, assuming the existence
of enhanced trapdoor permutations and collision resistant hash
functions, there exists a protocol\, $\Pi$ for the real model with
the following property. For every non-uniform probabilistic
polynomial-time adversary $\A$ running\, $\Pi$ in the real model,
there exists a non-uniform probabilistic polynomial-time adversary
$\A'$ running\, $\Pi'$ in the {\rm ZK-hybrid} model such that for
every polynomial $p(n)$,
$$
\left\{\vphantom{0^{0^0}}\zkhybrid^m_{\Pi',\A'}(\ov x,\ov
y,z)\right\}_{n\in\N;\ov x,\ov y \in(\bitset^n)^{p(n)};
z\in\bitset^*} \indist
\left\{\vphantom{0^{0^0}}\real^m_{\Pi,\A}(\ov x,\ov
y,z)\right\}_{n\in\N;\ov x,\ov y \in(\bitset^n)^{p(n)};
z\in\bitset^*}
$$
\label{thm:compose}
\ET
\BPF
We begin by showing how to construct $\Pi$. Denote the
participating parties by $P_1$ and $P_2$. Then, both $P_1$ and
$P_2$ may ``prove'' zero-knowledge proofs in $\Pi'$ (recall that
in $\Pi'$ this involves interaction with the trusted party only).
Protocol $\Pi$ is obtained by {\em replacing}\/ all the ideal
zero-knowledge calls of $\Pi'$ with the zero-knowledge arguments
of knowledge of~\cite{BL}, described in Section~\ref{sec:zkpok}.
We denote this protocol by \POK. Recall that in this system of
arguments of knowledge, both the prover and verifier play the role
of prover in zero-knowledge proofs of {\em membership}; we call
these the {\sf subproofs}. Now, all the zero-knowledge subproofs
in which $P_1$ is the prover are proven using the black-box
concurrent zero-knowledge protocol of~\cite{RK}, described in
Section~\ref{sec:rk}; denote this protocol by \RK. Furthermore,
all the zero-knowledge subproofs in which $P_2$ is the prover are
proven using the concurrent zero-knowledge protocol of~\cite{B},
described in Section~\ref{sec:boaz}; denote this protocol by
\BZK. That is, when $P_1$ plays the prover in the zero-knowledge proof
of knowledge \POK, then $P_2$ uses Protocol \BZK\ when proving its
subproof from the commit-with-extract scheme and $P_1$ uses
Protocol \RK\ when proving its subproof in part~2 of \POK.
Conversely, when $P_2$ plays the prover in the zero-knowledge
proof of knowledge \POK, then $P_1$ uses Protocol \RK\ when
proving its subproof from the commit-with-extract scheme and $P_2$
uses Protocol \BZK\ when proving its subproof in part~2 of \POK.
{\em The important thing to remember is that all the subproofs in
which $P_1$ plays the prover use Protocol\, \RK, and all the
subproofs in which $P_2$ plays the prover use Protocol\, \BZK.} We
note that the proof of knowledge \POK\ and the subproofs \RK\ and
\BZK\ can all be implemented assuming the existence of enhanced
trapdoor permutations and collision resistant hash functions.
Before continuing, we formally define what it means to ``replace''
an ideal zero-knowledge call with protocol \POK:
\BI
\item {\em Prover:}
If a party is instructed in $\Pi'$ to send $(v,w)$ to the trusted
party computing the zero-knowledge functionality, then it plays
the prover in the \POK\ zero-knowledge proof of knowledge
protocol, using the witness $w$ that it has. The messages that it
sends and receives within this proof are not recorded on its
transcript for $\Pi'$.
\item {\em Verifier:}
If in $\Pi'$ a party is to receive a message $(v,b)$ from the
trusted party computing the zero-knowledge functionality, then it
plays the verifier in the \POK\ zero-knowledge proof of knowledge
protocol. If it accepts the proof, then it writes $(v,1)$ on its
transcript for $\Pi'$. Otherwise it writes $(v,0)$. As above, the
messages within the proof are not recorded on its transcript for
$\Pi'$.
\EI
As we have described above, the \RK\ and \BZK\ protocols are used
as subproofs in \POK. Both of these protocols are parameterized:
the \RK\ protocol by the number of iterations $k$ in the preamble
and the \BZK\ protocol by the length $\ell(n)$ of the verifier
message $r$. Recall that in the scenario of $m$-bounded concurrent
zero-knowledge, in \RK\ the parameter $k$ is set to $m\kappa(n)$,
and in \BZK\ the parameter $\ell(n)$ is set to $2mn^2$. Now, we
denote by ${\sf rounds}({\sf Prot})$ the number of rounds in
Protocol {\sf Prot}, and by ${\sf length}({\sf Prot})$ the total
length (i.e., number of bits) of all the messages sent in Protocol
{\sf Prot}. Furthermore, we denote by $\zeta$ the total number of
ideal calls made by both parties to the ideal zero-knowledge
functionality in $\Pi'$. Finally, when we refer to ${\sf
rounds}(\POK)$ and ${\sf length}(\POK)$, we mean the number of
rounds and the total length of the messages in the proof of
knowledge {\em without}\/ the subproofs \RK\ and \BZK. The
parameters for the zero-knowledge subproofs are set as follows.
Let $\kappa(\cdot)$ be any super-constant (and sub-linear)
function. Then, in \RK\ we set
$$
k = \zeta m\kappa(n) + m\kappa(n)
\!\cdot\!\left(\vphantom{0^{0^0}}{\sf rounds}(\Pi') + \zeta
\!\cdot\!{\sf rounds}(\POK) + \zeta\!\cdot\! {\sf
rounds}(\BZK)\right)
$$
and in \BZK\ we set
$$
\ell(n) = 2\zeta mn^2 + m\!\cdot\!\left(\vphantom{0^{0^0}}{\sf
length}(\Pi') + \zeta \!\cdot\!{\sf length}(\POK) +
\zeta\!\cdot\!{\sf length}(\RK)\right)
$$
In other words, the number of iterations in the \RK\ preamble is
$\zeta m\kappa(n)$ plus $m\kappa(n)$ times the combined number of
rounds in $\Pi$ excluding the \RK\ subproofs; this combined number
of rounds comes to ${\sf rounds}(\Pi')+\zeta\!\cdot\!{\sf
rounds}(\POK)+\zeta\!\cdot\!{\sf rounds}(\BZK)$. (The first $\zeta m
\kappa(n)$ iterations are used to cover the $\zeta m$ executions
of \RK, as discussed in Remark~\ref{remark:length}. The number of
additional iterations is fixed so that there are $\kappa(n)$
iterations for every non-\RK\ message that is sent in all $m$
executions of the protocol.)
Likewise, the length $\ell(n)$ of the message $r$ in \BZK\ is set
to $2\zeta mn^2$ (as in $\zeta m$-bounded concurrent
zero-knowledge) plus $m$ times the length of all other messages
sent. This ensures that $r$ is at least $n$ bits longer than all
the messages sent by $P_2$ in $m$ executions of $\Pi$. We remark
that there is no problem setting the parameters in such a way.
That is, although $\ell(n)$ is dependent on ${\sf length}(\RK)$,
the number of rounds in \BZK\ is {\em not} dependent on \RK. Thus,
we can first set $k$ and then $\ell(n)$. We digress here for a
moment and comment that in our final protocol, ${\sf
rounds}(\Pi')$ and $\zeta$ are both constants and thus the final
number of rounds in $\Pi$ is $O(m\kappa(n))$, where $\kappa(n)$ is
any super-constant function (e.g., $\kappa(n)=\log\log n$).
We now show that for every real model adversary $\A$ running
$\Pi$, there exists a ZK-hybrid model adversary $\A'$ running $\Pi'$ such
that the output distributions generated by $\A$ and $\A'$ are
indistinguishable. Intuitively, this is because for $m$-bounded
concurrency the zero-knowledge arguments of knowledge as described
constitute ``good'' replacements for the ideal calls to the
zero-knowledge functionality. Loosely speaking, we construct $\A'$
from $\A$ as follows. $\A'$ internally incorporates $\A$ and
externally forwards all the $\Pi'$-messages (i.e., the messages
not belonging to the zero-knowledge proofs) between $\A$ and the
external party with which $\A'$ interacts. In contrast, $\A'$
internally simulates the zero-knowledge proofs that $\A$ expects
to see. Specifically, upon receiving a message $(v,1)$ from the
trusted party computing the ideal zero-knowledge functionality,
$\A'$ simulates the zero-knowledge proof of knowledge for $v$,
where $\A$ plays the verifier. Likewise, when $\A$ proves a proof
of knowledge for a statement $v$, the ZK-hybrid model adversary
$\A'$ runs the extractor for the proof. If $\A'$ obtains a valid
witness $w$, then it sends $(v,w)$ to the trusted third party;
otherwise, it sends $(v,\bot)$. Such a strategy ensures that the
output distribution generated by $\A'$ in a run of $\Pi'$ (in the
ZK-hybrid model) is indistinguishable from the output distribution
generated by $\A$ in a run of $\Pi$ (in the real model). Of
course, the main challenge (and one of the central contributions
of this work), is in showing how to carry out the simulation and
extraction when the zero-knowledge proofs of knowledge are
subprotocols within a larger protocol that is executed in the
setting of $m$-bounded concurrency. We stress that it does not
suffice to prove the correctness of the simulation and extraction
within the context of $m$-bounded concurrent zero-knowledge. This
is because now the zero-knowledge proof is run concurrently with
$\Pi'$, and not only concurrently with itself. We now formally
prove the existence of a ZK-hybrid model adversary $\A'$ for every
real model adversary $\A$. We separately deal with the cases that
$P_1$ and $P_2$ are corrupted.
\subsubsection{Party \boldmath{$P_1$} is Corrupted}
Let $\A$ be a real-model adversary for $\Pi$. We construct an
adversary $\A'$ who internally incorporates $\A$ and works in the
ZK-hybrid model interacting with $P_2$ in protocol $\Pi'$.
Adversary $\A'$ sends {\sf internal} and {\sf external} messages.
Internal messages are sent by $\A'$ to the internally incorporated
$\A$, whereas external messages are sent by $\A'$ to the external
honest party $P_2$ and to the external trusted party computing the
ideal zero-knowledge functionality. As we have mentioned, all the
$\Pi'$-messages are defined to be external, whereas all of the
messages belonging to \POK\ are defined to be internal. Thus,
except for the ideal zero-knowledge calls, $\A'$ forwards all of
the $\Pi'$-messages unmodified between $\A$ and $P_2$. In
addition:
\BI
\item Whenever $\A$, controlling $P_1$, proves a zero-knowledge
proof of knowledge for some statement $v$, $\A'$ runs the {\em
extraction}\/ strategy for \POK, possibly obtaining a valid
witness $w$ for $v$. (Actually, it runs the {\em witness-extended
emulator}\/ for \POK, and obtains both a transcript of an
execution along with a value that is possibly a witness. See the
short discussion in Section~\ref{sec:zkpok} on witness-extended
emulation.) This involves simulating Protocol~\BZK\ for $\A$ as
the verifier (because the subproof of part~1 that is simulated is
given by $P_2$ to $P_1$) and honestly verifying Protocol \RK\
where $\A$ is the prover (because the subproof of part~2 is given
by $P_1$). (In addition, the receiver messages of the
commit-with-extract scheme of part~1 are chosen according to the
extraction strategy; see Section~\ref{sec:zkpok}.) As we have
mentioned, from this extraction procedure (or, actually,
witness-extended emulation), $\A'$ receives some string $w$, which
is possibly a witness for $v$, and a transcript of an execution of
the proof. $\A'$ verifies that the transcript constitutes an
accepting proof (this can be publicly verified by the construction
of Protocol \POK). If yes, then $\A'$ externally sends $(v,w)$ to
the trusted party computing the ideal zero-knowledge
functionality. Otherwise, it sends $(v,\bot)$.
\item Whenever $\A'$ receives an external message $(v,1)$ from the
trusted party, it internally {\em simulates}\/ the zero-knowledge
proof of knowledge \POK\ for $\A$. This involves honestly
verifying Protocol \RK\ where $\A$ is the prover (from part~1 of
\POK) and simulating \BZK\ for $\A$ as the verifier (from part~2
of \POK). (In addition, the value committed to by $\A'$ in the
commit-with-extract scheme of part~1 is garbage; see
Section~\ref{sec:zkpok}.)
\ni (We note that $\A'$ never receives a message $(v,0)$ from the
trusted party because $P_2$ is honest and we assume that honest
provers always check that they have a correct witness before they
send anything to the trusted party or attempt to prove \POK.)
\EI
The simulation of protocol \BZK\ is carried out using the
simulator $S^*$ of~\cite{B}, as described in
Section~\ref{sec:boaz}. Notice that in the above simulation, all
of the subproofs in which $\A$ is the prover are \RK, and all of
the subproofs in which $\A$ is the verifier are \BZK. Having
described the simulation by the ZK-hybrid model adversary $\A'$,
we proceed to prove that it generates a distribution that is
indistinguishable from a real model execution.
\paragraph{\sc Proof outline.}
Intuitively, the simulation strategy by $\A'$ works as long as
Protocol \RK\ is sound and the simulator of \BZK\ succeeds in
generating a view that is indistinguishable from a real execution
of \BZK. However, in order to prove this, we need to define a
hybrid experiment that isolates the \RK\ and \BZK\ executions. We
do this by defining an ideal zero-knowledge functionality that is
a proof of membership, and not a proof of knowledge. We then show
that replacing the \RK\ and~\BZK\ subproofs by this ideal
functionality makes at most a negligible difference. This enables
us to deal with the \RK\ and
\BZK\ subproofs separately.
\medskip\ni
\paragraph{\sc The hybrid experiment.} The hybrid experiment involves
an ideal zero-knowledge {\em proof of membership}\/ functionality,
parameterized by an NP-language $L$, and defined by the following:
$$
(v,\lambda) \rightarrow (\lambda,(v,\chi_L(v)))
$$
where $\chi_L(v)=1$ if and only if $v\in L$. That is, the proving
party sends a statement $v$ to the trusted party, who then sends
$(v,1)$ to the verifying party if $v \in L$, and $(v,0)$ if $v
\notin L$. In order to reflect the possibility that a prover may
fail to prove a correct statement in a protocol execution that
replaces this ideal functionality, we define that if the statement
has the symbol $\bot$ appended to it (i.e., if it is of the form
$v \circ \bot$), then the trusted party sends $(v,0)$ to the
verifier irrespective of whether or not $v \in L$. We note that
this ideal functionality may not be efficient (in particular, if
$L$ is a hard language, then verifying whether or not $v\in L$ may
take super-polynomial time); nevertheless this suffices here. We
denote this hybrid experiment by \memberzk.
\paragraph{\sc Step 1 -- replacing \BZK\ with an ideal zero-knowledge proof of membership.}
We now define a protocol $\hpi$ that works in the memberZK-hybrid
model. $\hpi$ is the same as the real-model protocol $\Pi$ except
that instead of $P_2$ proving a statement $v$ to $P_1$ using the
subproof \BZK, it sends $v$ to the trusted party computing the
ideal zero-knowledge proof of membership functionality. Subproofs
given by $P_1$ using \RK\ are unmodified.
We now show that for every real-model adversary $\A$ for $\Pi$,
there exists an adversary $\ha$ for $\hpi$, such that $\ha$
interacts with $P_2$ in the memberZK-hybrid model and generates an
output distribution that is indistinguishable from an execution of
$\A$ with $\Pi$ in the real model. Notice that the only difference
between $\Pi$ and $\hpi$ is whether proofs given by $P_2$ (who in
this case is honest) are carried out by running~\BZK\ or by using
the zero-knowledge proof of membership functionality. Thus, the
proof of indistinguishability is based on the zero-knowledge (or
simulatability) property of \BZK\ in this scenario.
The adversary $\ha$ for $\hpi$ works as follows. $\ha$ internally
incorporates $\A$ and externally forwards all $\Pi'$, \POK\ and
\RK\ messages between $\A$ and $P_2$. However, messages of \BZK\
are internally simulated for $\A$ by $\ha$. That is, whenever
$\ha$ receives a message $(v,1)$ from the trusted party computing
the ideal zero-knowledge proof of membership functionality, it
simulates an execution of~\BZK\ with $\A$ as the verifier. We now
prove that
\begin{equation}
\left\{\vphantom{0^{0^0}}\memberzk^m_{\hpi,\ha}(\ov x,\ov
y,z)\right\} \indist \left\{\vphantom{0^{0^0}}\real^m_{\Pi,\A}(\ov
x,\ov y,z)\right\}
\label{eq:weakzk12}
\end{equation}
The difference between an execution of $\Pi$ with $\A$ and an
execution of $\hpi$ with $\ha$ is that in $\Pi$ party $P_2$ proves
real \BZK\ proofs, whereas in $\hpi$ they are simulated. Thus, as
we have mentioned \eqref{eq:weakzk12} follows from the
zero-knowledge property of \BZK. It remains to show that the
\BZK\ simulation ``works'' in this scenario (i.e., within $\hpi$).
However, as we have discussed in Section~\ref{sec:boaz}, the
simulator for \BZK\ works on the condition that the total length
of all the messages sent by the prover (i.e., $P_2$) from the time
that it sends the commitment $c$ in \BZK\ until the time that
$P_1$ returns $r\in\bitset^{\ell(n)}$ is less than $\ell(n) - n$.
In order to see that this holds, recall that in the setting of
$m$-bounded concurrency, the scheduling is such that from the time
that any given execution begins until it terminates, messages from
at most $m$ different executions are sent. Therefore, the length
of all messages sent by $P_2$ from the time that it sends $c$
until $P_1$ replies with $r$ is upper-bounded by $m$ times the
length of all the messages sent by $P_2$ in an execution of $\Pi$.
That this quantity is less than $\ell(n)-n$ follows immediately
from the way $\ell(n)$ was chosen.
\paragraph{\sc Step 2 -- replacing \RK\ with an ideal zero-knowledge proof of membership.}
We now define a protocol $\tpi$ which is the same as $\Pi$ except
that {\em both}\/ the subproofs \RK\ and \BZK\ are proven using
calls to the ideal zero-knowledge proof of membership
functionality. Thus, the only difference between $\hpi$ and $\tpi$
is whether proofs provided by the corrupted $P_1$ to the honest
$P_2$ are given by running \RK\ or by using the zero-knowledge
proof of membership functionality. Thus, the proof of
indistinguishability is based on the {\em soundness}\/ of \RK\ in
this scenario.
We show that for every adversary $\ha$ for $\hpi$, there exists an
adversary $\ta$ for $\tpi$, such that the output distributions in
the executions of $\hpi$ and $\tpi$ are essentially the same. The
adversary $\ta$ works in exactly the same way as $\ha$ except that
it internally verifies the zero-knowledge subproofs of \RK\
provided by $\ha$. Formally, $\ta$ internally incorporates $\ha$
and externally forwards all $\Pi'$, \POK\ and ideal zero-knowledge
proof of membership calls that already existed in $\hpi$. However,
messages of \RK\ are dealt with internally. That is, whenever
$\ha$ proves a statement $v$ using \RK, $\ta$ internally verifies
the proof by playing the honest verifier strategy. If $\ta$
accepts the proof, then it sends $v$ to the trusted party
computing the ideal zero-knowledge proof of membership
functionality. Otherwise, it sends $v\circ\bot$ to the trusted
party. We prove that
\begin{equation}
\left\{\vphantom{0^{0^0}}\memberzk^m_{\tpi,\ta}(\ov x,\ov
y,z)\right\}
%_{n\in\N,\ov x,\ov y \in(\bitset^n)^m, z\in\bitset^*}
\indist \left\{\vphantom{0^{0^0}}\memberzk^m_{\hpi,\ha}(\ov x,\ov
y,z)\right\}
%_{n\in\N,\ov x,\ov y \in(\bitset^n)^m, z\in\bitset^*}
\label{eq:weakzk-real}
\end{equation}
\ni First, assume that $\ta$ accepts a (false) \RK\ subproof for
$v\notin L$ from $\ha$ with at most negligible probability. In
this case, we show that the output distributions of an execution
of $\tpi$ with $\ta$ and an execution of $\hpi$ with $\ha$ are
statistically close. This can be seen as follows. In verifying the
\RK\ subproofs, both $\ta$ and $P_2$ behave identically;
specifically, they both follow the honest verifier strategy.
However, if $\ta$ accepts a subproof for a statement $v$ from
$\ha$ in $\tpi$, then it sends $v$ to the trusted party computing
the zero-knowledge proof of membership functionality. The trusted
party then sends $(v,1)$ to $P_2$ if $v\in L$, and $(v,0)$
otherwise. Finally, $P_2$ writes whatever it receives from the
trusted party directly on its transcript. This describes the
process in $\tpi$. In contrast, in $\hpi$, if $P_2$ accepts such a
subproof from $\ha$, then it immediately writes $(v,1)$ on its
transcript in $\hpi$, without any other conditions. Therefore, a
difference in the output distributions can occur if $\ta$ accepts
a subproof for $v$, but $v\not\in L$. In such a case, $P_2$ would
write $(v,1)$ on its transcript in $\hpi$, but would write $(v,0)$
on its transcript in $\tpi$. However, by the above assumption,
this event occurs with at most negligible probability. Therefore,
the output distributions are negligibly close. (Another possible
difference can occur if $\ta$ rejects a subproof for $v$, but
$v\in L$. However, if $\ta$ rejects a subproof for $v$, then it
sends $v\circ\bot$ to the trusted party, who then always forwards
$(v,0)$ to $P_2$, irrespective of whether or not $v\in L$.
Therefore, in this case, party $P_2$ writes $(v,0)$ on its
transcript in both $\tpi$ and $\hpi$.)
It remains to show that $\ta$ accepts a proof for $v\notin L$ from
$\ha$ with at most negligible probability. That is,
\eqref{eq:weakzk-real} follows from the {\em soundness}\/ of \RK.
Since the soundness of \RK\ is unconditional, it suffices for us
to construct an all-powerful cheating prover $P^*$. Specifically,
assume by contradiction that for some set of inputs $\ov x,\ov y$
and $z$, adversary $\ha$ successfully proves a false statement to
$\ta$ with non-negligible probability. Then, $P^*$ computes the
coins of $\ha$, $\ta$ and $P_2$ which maximize the probability of
$\ha$ successfully proving the false statement; actually, $P^*$
computes the coins for all messages sent by $\ta$ except for those
used in verifying the false proof from $\ha$. Then, $P^*$
internally emulates the entire execution with $\ha$, $\ta$ and
$P_2$, except for the \RK\ subproof of the false statement proven
by $\ha$; the messages of this subproof are sent externally to the
verifier $V$. (Notice that this emulation can be carried out
perfectly by $P^*$, including the ideal zero-knowledge proof of
membership calls, because $P^*$ is not computationally bounded.
The internal simulation of the \BZK\ protocol, that is part of
this emulation, can be carried out because, as we have mentioned,
the fact that some messages are generated externally makes no
difference to the \BZK\ simulator. Specifically, the messages sent
by the external $V$ can be incorporated into the string $y$ used
by the simulator; see Section~\ref{sec:boaz} for an explanation.)
Now, since $\ta$ verifies \RK\ subproofs from $\ha$ using the
honest verifier strategy and it accepts the false proof with
non-negligible probability, the probability that $V$ accepts the
false proof from $\ha$ (as forwarded by $P^*$) is also
non-negligible. This contradicts the soundness of the \RK\
subproof and thus \eqref{eq:weakzk-real} follows.
\remove{
Now, let $\ov x$ and $\ov y$ be the respective input vectors of
$P_1$ and $P_2$. Then, we construct a cheating prover $P^*$ who
interacts with an honest verifier $V$ in a {\em single}\/
execution of Protocol \RK. The probability that $P^*$ successfully
proves a false statement will be polynomially related to the
probability that $\ta$ accepts the proof of a false statement from
$\A$ in an execution of $\tpi$ (when $\ov x$ and $\ov y$ are the
parties' input vectors). $P^*$ internally incorporates $\ta$ and
works as follows. $P^*$ uniformly chooses one of the $\zeta m$
\RK\ protocol executions in the $m$ concurrent executions of $\Pi$
and internally emulates the entire simulation of $\ta$ for $\A$,
except with respect to the selected \RK\ execution. (We stress
that $P^*$ emulates the memberZK-hybrid simulation by $\ta$ for
$\A$, and does not emulate a real execution of $\Pi$. Note that
$P^*$ can run this emulation because it knows $\ov x$ and $\ov
y$.) Now, when $\A$ reaches the selected \RK\ execution, $P^*$
sends the statement being proved in this execution to $V$ and
externally forwards the messages of this execution between $\A$
and $V$. We stress that all other messages that are sent
concurrently to this \RK\ execution are internally simulated by
$P^*$. Since $\ta$ plays the honest verifier in its emulation, the
view of $\A$ in the emulation by $\ta$ is identical to its view in
this emulation by $P^*$. Noting that there are $\zeta m$ proofs of
\RK\ supplied by $\A$, we conclude that the probability that $P^*$
successfully proves a false statement to $V$ equals $1/\zeta m$
times the probability that $\ta$ accepts the proof of a false
statement from $\A$ in the emulation. By the (stand-alone)
soundness of \RK, this probability must therefore be negligible.
\eqref{eq:weakzk-real} follows. }
\medskip\ni{\em Remark:} This soundness argument is facilitated (in
part) by the fact that the simulation by $\ha$ and $\ta$ requires
no rewinding. Rewinding would cause a problem because then the
same verifier message to be sent by $V$ may appear a number of
times. However, $V$ is an external party that sends each message
only once and cannot be rewound.
\paragraph{\sc Step 3 -- conclusion.} The proof of the case that
$P_1$ is corrupted is concluded by showing that the distribution
generated by $\ta$ in $\tpi$ in the memberZK-hybrid model, is
indistinguishable from the distribution generated by $\A'$ in
$\Pi'$ in the ZK-hybrid model. (The strategy of adversary $\A'$ is
described above and is the same as $\ta$ except for the treatment
of
\POK\ messages.) That is, we show the following:%
\footnote{We stress an important subtlety in our proof here. We do
{\em not}\/ prove that for every adversary $\ta$ there exists an
adversary $\A'$ for which the output distributions are
indistinguishable. Rather, we show this for the specific adversary
$\ta$ described above. Specifically, it is crucial to our proof
that $\ta$ sends a false statement $v$ to the trusted party
computing the ideal zero-knowledge proof of membership
functionality with at most negligible probability. If this were
not the case (like with general adversaries), we would be
``stuck'' because we may not be able to verify whether or not
$v\in L$.\label{foot:nonabusing}}
\begin{equation}
\left\{\vphantom{0^{0^0}}\zkhybrid^m_{\Pi',\A'}(\ov x,\ov
y,z)\right\} \indist
\left\{\vphantom{0^{0^0}}\memberzk^m_{\tpi,\ta}(\ov x,\ov
y,z)\right\}
\label{eq:zkhybrid-weak}
\end{equation}
Before proving \eqref{eq:zkhybrid-weak}, we describe $\A'$ in
terms of $\ta$. That is, our initial description of $\A'$ is such
that it internally incorporates the real-model adversary $\A$ and
runs all the simulations. However, we have considered these same
simulations separately by $\ha$ and $\ta$. Therefore, we can
rewrite $\A'$ as follows: Adversary $\A'$ internally incorporates
$\ta$ and simulates the \POK\ messages that $\ta$ expects to
receive in $\tpi$. $\A'$ also plays the trusted party for the
zero-knowledge proof of membership functionality that replaces the
subproofs in \POK\ (we will describe below how this emulation of
the trusted party is carried out). All other $\Pi'$ messages are
forwarded by $\A'$ to the external party $P_2$. Now, when $\A'$
receives $(v,1)$ from its trusted party (recall that $\A'$ works
in the ZK-hybrid model), then it runs the simulation strategy of
\POK. Likewise, when $\ta$ proves a statement $v$ in an execution
of \POK\ in $\tpi$, then $\A'$ runs the extraction strategy (or
actually the witness-extended emulation), and obtains a witness
$w$. If the execution of \POK\ was accepting, then $\A'$ sends
$(v,w)$ to its trusted party; otherwise, it sends $(v,\bot)$.
In the above simulation and extraction, it is necessary for $\A'$
to simulate the trusted party for the zero-knowledge proof of
membership functionality. However, this trusted party may need to
run in time that is super-polynomial in order to verify if some
statement $v'$ is in the language $L$ or not. Therefore, $\A'$
simply works as follows: if $\ta$ sends $v'$ to its trusted party,
then $\A'$ assumes that $v'\in L$ and continues the
simulation/extraction strategy under this assumption. On the other
hand, if $\ta$ sends $v'\circ\bot$ to its trusted party, then
$\A'$ continues as if $v'\notin L$. (Note that $\ta$ may send
$v'\circ\bot$ even if $v'\in L$; however, the simulation and
extraction strategies are the same for this case and when
$v'\notin L$.) This completes the description of $\A'$.
Now, by Lemma~\ref{lem:subproofs}, if the subproofs of
Protocol~\POK\ have negligible soundness error and can be simulated
(i.e., are zero-knowledge), then the simulation and extraction
strategies for \POK\ succeed. The zero-knowledge property of the
subproofs is straightforward because here $\A'$ emulates the trusted
party who just sends $(v',1)$ every time. (When simulating the
subproofs, the case in question is where the honest $P_2$ is the
prover; therefore, only correct statements $v'$ are sent to the
trusted party and so $\ta$ always receives $(v',b)$ with $b=1$.) On
the other hand, the soundness property of the subproofs is due to
the fact that by the construction of $\ta$, we know that the
probability that it sends $v'$ (and not $v'\circ\bot$) to the
trusted party for $v'\notin L$ is negligible. We can therefore apply
Lemma~\ref{lem:subproofs} and we have that the simulation and
extraction strategies for \POK\ succeed. In other words, the outcome
of an execution of $\Pi'$ with $\A'$ is computationally
indistinguishable from the outcome of an execution of $\tpi$ with
$\ta$, proving \eqref{eq:zkhybrid-weak}. This proof is somewhat
lacking due to the fact that Lemma~\ref{lem:subproofs} is informally
stated, and thus so is its application. In the Appendix, we state
and prove a formal version of Lemma~\ref{lem:subproofs}, and show
how it can be used to obtain \eqref{eq:zkhybrid-weak}.
The theorem for the case that $P_1$ is corrupt follows by
combining Equations~(\ref{eq:weakzk12}),~(\ref{eq:weakzk-real})
and~(\ref{eq:zkhybrid-weak}).
\medskip\ni
Before proceeding to the case that $P_2$ is corrupted, we present
a summary of the flow of the above proof for the case that $P_1$
is corrupted; see Table~\ref{table:P1corrupted}.
%\vspace{-2ex}
\begin{table}[h]
\begin{center}
\begin{tabular}{|c|c|l|}
\hline
{ \bf Step} & {\bf Protocol} & ~~~~~~{\bf Zero-knowledge
POK and subproofs} \\ \hline
& $\Pi$ & All {\em real}: \POK, \BZK\ and \RK\ \\
1 & $\hpi$ & \BZK\ replaced with ideal memberZK; \POK\ and \RK\ real \\
2 & $\tpi$ & \BZK, \RK\ replaced with ideal memberZK, \POK\ real \\
3 & $\Pi'$ & All {\em ideal}: \POK, \BZK\ and \RK\ replaced with
ideal-ZK \\ \hline
\end{tabular}
\caption{Proof outline for the case that $P_1$ is corrupted
\label{table:P1corrupted}}
\end{center}
\end{table}
%\vspace{-3ex}
\subsubsection{Party \boldmath{$P_2$} is Corrupted}
We construct the ZK-hybrid model adversary $\A'$ in a similar way
to the previous case. Before describing $\A'$, recall that $P_1$
proves all of its subproofs with \RK, and $P_2$ proves all of its
subproofs with \BZK. Adversary $\A'$ internally incorporates $\A$
and externally forwards all $\Pi'$-messages between $\A$ and
$P_1$. In addition:
\BI
\item
Whenever $\A$, controlling $P_2$, proves a zero-knowledge proof of
knowledge for some statement $v$, $\A'$ runs the extraction
strategy for \POK, possibly obtaining a valid witness $w$ for $v$.
(Actually, it runs the witness-extended emulator for \POK, and
obtains both a transcript of an execution and a possible witness;
see Section~\ref{sec:zkpok}.) This involves simulating Protocol
\RK\ for $\A$ as verifier (because the subproof of part~1 of \POK\
that is simulated is given by $P_1$ to $P_2$), and honestly
verifying Protocol \BZK\ where $\A$ is the prover (because the
subproof of part~2 is given by $P_2$). (In addition, the receiver
messages of the commit-with-extract scheme of part~1 are chosen
according to the extraction strategy; see
Section~\ref{sec:zkpok}.) As we have mentioned, from this
extraction procedure, or actually witness-extended emulation,
$\A'$ receives some string $w$ which is possibly a witness for
$v$, and a transcript of an execution of the proof. $\A'$ verifies
that the transcript constitutes an accepting proof. If yes, then
$\A'$ externally sends $(v,w)$ to the trusted party computing the
ideal zero-knowledge functionality. Otherwise, it sends
$(v,\bot)$.
\item Whenever $\A'$ receives an external message $(v,1)$ from the
trusted party, it internally simulates the zero-knowledge proof of
knowledge \POK\ for $\A$. This involves honestly verifying
Protocol \BZK\ where $\A$ is the prover (from part~1 of \POK) and
simulating \RK\ for $\A$ as the verifier (from part~2 of \POK). In
addition, the value committed to by $\A'$ in the
commit-with-extract scheme of part~1 is garbage; see
Section~\ref{sec:zkpok}.
\EI
We now describe how $\A'$ carries out the simulation of \RK. The
simulation uses a very similar strategy to that described in the
proof of Proposition~\ref{prop:cZK} (see Section~\ref{sec:rk}). In
our description here, we will use notation and terminology taken
from the proof of Proposition~\ref{prop:cZK} (and we therefore
assume familiarity with the details of that proof). $\A'$
interacts with $\A$ in a black-box manner and externally forwards
all $\Pi'$-messages to $P_1$. Likewise, replies from $P_1$ are
returned to $\A$. Furthermore, $\A'$ sets a {\sf fixed point}
after any of the following events:
\BE
\item A non-\RK\ message is sent by $\A$; this includes all messages
belonging to $\Pi'$, \BZK\ and \POK. (Recall that when we refer to
\POK, we mean the proof of knowledge not including the \BZK\ and
\RK\ subproofs.)
\item An execution of Protocol \RK\ is aborted.
\item A potential iteration closes.
\EE
Recall that no rewinding ever takes place behind a fixed point.
Now, $\A'$ works in exactly the same way as the simulator $S$ in
the proof of Proposition~\ref{prop:cZK}. Specifically, whenever a
fixed point of type (1) or (2) above is sent, $\A'$ fixes the
transcript until (and including) the current message. Furthermore,
if the fixed point is due to an execution of \RK\ being aborted,
then this execution is satisfied. Therefore, $\A'$ removes the
execution from the set of unsatisfied executions. The treatment of
a fixed point of type (3) is different. That is, whenever a
potential iteration closes, $\A'$ carries out $2mn^2$ rewinding
attempts: in each one, $\A'$ rewinds $\A$ back to the point that
the iteration opened and provides it with a commitment to $r_j$
instead of $0^n$. $\A'$ then continues, hoping that this iteration
will once again close before any non-\RK\ messages are sent or any
executions of \RK\ are aborted. If none of the $2mn^2$ rewinding
attempts succeed, then $\A'$ sets a fixed point, records it as a
failed fixed point, and continues. If at any stage of the
simulation $\kappa(n)$ consecutive failed fixed points are set,
then $\A'$ outputs {\sf failure} and halts. Likewise, the other
instructions that cause the \RK-simulator $S$ to output {\sf
failure} or {\sf unsatisfied} are also followed by $\A'$; see
Section~\ref{sec:rk} for more details. This completes the
description of $\A'$.
We begin by noting that $\A'$ runs in time that is polynomial in
$n$. Specifically, the overall number of rewinding attempts is
bounded by $2mn^2$ times the overall number of fixed points, and
all other work also takes polynomial time. Next, we prove that the
output distribution of an execution of $\Pi'$ with $\A'$ is
computationally indistinguishable from the output distribution of
an execution of $\Pi$ with $\A$. We prove this using the same
hybrid experiment as in the case that $P_1$ is corrupted.
\paragraph{\sc Step 1 -- replacing \RK\ with an ideal zero-knowledge proof of membership.}
We define the protocol $\tpi$ for the memberZK-hybrid model in a
similar way as for the case that $P_1$ is corrupted. That is, in
this protocol, ideal calls to the zero-knowledge proof of
membership functionality are used instead of running the \RK\
subproofs, but the rest (i.e., \POK\ and \BZK) stays the same. We
now show that for every real-model adversary $\A$ for $\Pi$, there
exists an adversary $\ta$ for $\tpi$ in the memberZK-hybrid model,
such that the output distribution of an execution of $\tpi$ with
$\ta$ is indistinguishable from an execution of $\Pi$ with $\A$
(in the real model). $\ta$ internally incorporates $\A$ and
forwards all non-\RK\ messages between $\A$ and the external
$P_1$. However, when $\ta$ receives a message $(v,1)$ from the
trusted party computing the ideal zero-knowledge proof of
membership functionality, it simulates a \RK\ execution for $\A$
using exactly the same strategy described above for $\A'$. We now
prove that,
\begin{equation}
\left\{\vphantom{0^{0^0}}\memberzk^m_{\tpi,\ta}(\ov x,\ov
y,z)\right\} \indist \left\{\vphantom{0^{0^0}}\real^m_{\Pi,\A}(\ov
x,\ov y,z)\right\}
\label{eq2:weakzk-real}
\end{equation}
Proving \eqref{eq2:weakzk-real} involves showing that the
simulation by $\ta$ is indistinguishable from a real proof by
$P_1$ (this is the only difference between $\Pi$ and $\tpi$). This
follows very similar lines to the proof of
Proposition~\ref{prop:cZK}. We first claim that $\ta$ outputs {\sf
failure} with only negligible probability:
\BCM The probability that $\ta$ outputs\, {\sf failure} is
negligible.
\ECM
The proof of this claim is almost identical to the proof of
Claim~\ref{clm:failure}. (Recall that there are two events that
can cause $\ta$ to output {\sf failure}. In the first, for some
commitment $c_j$, $\A$ generates two different decommitments
$r_j\neq r_j'$ during rewinding. In the second, $\kappa(n)$
consecutive failed fixed points are set by $\ta$. Regarding the
first event, the only difference here from the proof of
Claim~\ref{clm:failure} is that there are $\zeta m$ executions of
\RK\ instead of $m$ and the reduction to the computational binding
includes emulating all of the $\Pi'$ and~\BZK\ messages as well.
However, this emulation can be carried out because the machine
attempting to break the binding property of the commitments is
given both input vectors $\ov x$ and $\ov y$. Regarding the second
event, the proof remains the same. That is, we first consider the
case that $p_k^j \geq 1/mn$; in this case the probability that
none of the $2mn^2$ rewinding attempts succeeds is negligible.
Next, we consider the case that $p_k^j < 1/mn$ and use the same
proof to show that the probability that such an iteration was
chosen is at most $1/n$. It therefore follows that $\kappa(n)$
consecutive iterations are encountered with at most negligible
probability.)
Next, we claim that $\ta$ never outputs {\sf unsatisfied}:
\BCM The probability that $\ta$ outputs\, {\sf unsatisfied} equals $0$.
\ECM
\BPF
As in the proof of Claim~\ref{clm:satisfy}, it suffices to show
that if there are not $\kappa(n)$ consecutive failed fixed points,
then every execution of \RK\ must have been satisfied. Here, this
follows from the fact that the number of iterations in the
preamble of \RK\ is $\zeta m\kappa(n)$ plus the number of rounds
in the rest of the protocol times $\kappa(n)$. Notice that $\ta$
sets at most one non-failed fixed point for every execution of
\RK\ ($\zeta m$ in total) and one non-failed fixed point for every
message from the rest of the protocol (consisting of $\Pi'$, \POK\
and \BZK). That is, the maximum number of non-failed fixed points
set by $\ta$ equals $\zeta m + m\cdot({\sf rounds}(\Pi')+{\sf
rounds}(\POK)+{\sf rounds}(\BZK))$, and if this number of
non-failed fixed points are set, then all executions of \RK\ are
guaranteed to have been satisfied.
Now, recall that at most one iteration of an unsatisfied execution
can begin between every two fixed points. Therefore, if part~2 of
an unsatisfied execution is reached, it must be that $\zeta
m\kappa(n) + m\kappa(n)\cdot({\sf rounds}(\Pi')+{\sf
rounds}(\POK)+{\sf rounds}(\BZK))$ fixed points were set. Assuming
that there are not $\kappa(n)$ consecutive failed fixed points,
this means that at least $\zeta m + m\cdot({\sf rounds}(\Pi')+{\sf
rounds}(\POK)+{\sf rounds}(\BZK))$ non-failed fixed points were
set. However, this equals the {\em total}\/ number of non-failed
fixed points that can be set, in which case, all executions of
\RK\ are guaranteed to have been satisfied. We conclude that when
part~2 of an execution of \RK\ is reached, it must already have
been satisfied.
\EPF
\ni Having established the above, the indistinguishability of the
distributions follows with an analogous hybrid argument to the
proof of Claim~\ref{clm:indist}. Specifically, we define a hybrid
adversary/simulator $M$ who receives the witnesses for each proof
being proved (from the honest $P_1$) and runs part~2 of the proof
of \RK\ as $P_1$ would (i.e., using the witness). However, all
other parts of the experiment are run exactly according to the
instructions of $\ta$ (including the simulation of the preamble of
\RK). The proof continues as for Claim~\ref{clm:indist}. This
completes the proof of \eqref{eq2:weakzk-real}.
\paragraph{\sc Step 2 -- replacing \BZK\ with an ideal zero-knowledge proof of membership.}
Again, as in the case that $P_1$ is corrupted, we define a
protocol $\hpi$ in which both the subproofs \RK\ and \BZK\ are
proven using the ideal zero-knowledge proof of membership
functionality (but the \POK\ messages remain unchanged). We show
that for every adversary $\ta$ for $\tpi$ there exists an
adversary $\ha$ for $\hpi$ such that the output distribution of
the two executions are indistinguishable. Adversary $\ha$
internally incorporates $\ta$ and externally forwards all
$\Pi'$-messages and \POK-messages between $\ta$ and $P_1$. In
addition, $\ha$ forwards all ideal messages from the trusted party
who computes the zero-knowledge proof of membership functionality
to $\ta$ (recall that $\ta$ works in a model where it only {\em
receives}\/ ideal messages from the trusted party). However, when
$\ta$ proves a~\BZK\ subproof of a statement $v$, $\ha$ verifies
this internally. If $\ha$ accepts the proof, it sends $v$ to the
trusted party computing the ideal zero-knowledge proof of
membership functionality; otherwise, it sends $v\circ\bot$. We now
prove that
\begin{equation}
\left\{\vphantom{0^{0^0}}\memberzk^m_{\hpi,\ha}(\ov x,\ov
y,z)\right\} \indist
\left\{\vphantom{0^{0^0}}\memberzk^m_{\tpi,\ta}(\ov x,\ov
y,z)\right\}
\label{eq2:weakzk12}
\end{equation}
Naturally, we prove \eqref{eq2:weakzk12} by showing that the
soundness of \BZK\ holds in the simulation by $\ha$. As we have
discussed in Section~\ref{sec:boaz}, the soundness of
\BZK\ holds as long as the string $r$ sent by the verifier is at
least $n$ bits longer than the allowed length of $y$. This is of
course true (by the definition of $y$). However, we must still
show that the soundness is not affected when \BZK\ is run
concurrently with $\Pi'$ and \RK. The key point here is that $\ha$
never rewinds $\ta$. Therefore, it is possible to use $\ta$ in
order to construct a cheating prover $P^*$.
Formally, let $\ov x$ and $\ov y$ be the respective input vectors
of $P_1$ and $P_2$. Then, we construct a cheating prover $P^*$ who
interacts with an honest verifier $V$ in a {\em single}\/
execution of Protocol \BZK. The probability that $P^*$
successfully proves a false statement will be polynomially related
to the probability that $\ha$ accepts the proof of a false
statement from $\ta$ in an execution of $\hpi$ (when $\ov x$ and
$\ov y$ are the parties' input vectors). $P^*$ internally
incorporates $\ta$ and works as follows. $P^*$ uniformly chooses
one of the $\zeta p(n)$ \BZK\ protocol executions in the $p(n)$
executions of $\Pi$ and internally emulates the entire simulation
of $\ha$ for $\ta$, except with respect to the selected
\BZK\ execution. We stress that $P^*$ emulates the memberZK-hybrid
simulation by $\ha$ for $\ta$, and does not emulate a real
execution of $\Pi$ or an execution of $\tpi$. For now, assume that
this emulation can be perfectly carried out by $P^*$ (we will show
how this is done later).%
\footnote{The problem that arises here is that the zero-knowledge
proof of membership functionality cannot be efficiently computed
for hard languages $L$. Note that any proofs provided by $P_1$ are
fine, because $P_1$ is honest and so only sends proofs of correct
statements $v\in L$. However, when $\ta$ sends a value $v$ to the
trusted party, $\ha$ cannot know whether or not $v\in L$.} %
Now, when $\ta$ reaches the selected \BZK\ execution, $P^*$ sends
the statement being proved in this execution to $V$ and externally
forwards the messages of this execution between $\ta$ and $V$. We
stress that all other messages that are sent concurrently to this
\BZK\ execution are internally simulated by $P^*$. Since $\ha$ plays
the honest verifier in its emulation, the view of $\ta$ in the
emulation by $\ha$ is identical to its view in this emulation by
$P^*$. Noting that there are $\zeta p(n)$ proofs of \BZK\ supplied
by $\ta$, we conclude that the probability that $P^*$ successfully
proves a false statement to $V$ equals $1/\zeta p(n)$ times the
probability that $\ha$ accepts the proof of a false statement from
$\ta$ in the emulation. By the (stand-alone) soundness of \BZK,
this probability must therefore be negligible.
\eqref{eq2:weakzk12} follows.
The above analysis assumes that the emulation can be carried out
perfectly by $P^*$. We now show that this is the case. First, the
emulation of parties $P_1$ and $\ha$ can be carried out because
$P^*$ knows the inputs $\ov x$ and $\ov y$. However, $P^*$ cannot
emulate the trusted party for the zero-knowledge proof of
membership functionality. This is because the functionality cannot
be efficiently computed. We solve this problem as follows. If
$\ta$ successfully proves a statement $v$, then in the emulation,
$P^*$ assumes that $v$ is correct (and emulates the trusted party
sending $(v,1)$ to $P_1$). If $v$ is a correct statement, then the
emulation is perfect. If $v$ is incorrect, then the emulation may
not be correct. However, up until the conclusion of this proof
(where the emulation is ``messed up''), the emulation was perfect.
Therefore, with probability $1/\zeta p(n)$, this statement will be
the one that $V$ is verifying, and the contradiction will be
derived.
\paragraph{\sc Step 3 -- conclusion.} The proof is concluded by
showing that the distribution generated by $\ha$ in $\hpi$ is
indistinguishable from the distribution generated by $\A'$ in
$\Pi'$. This is identical to the proof of \eqref{eq:zkhybrid-weak}
for the case that $P_1$ is corrupted. (Notice that in $\hpi$ all
subproofs are proven using the ideal zero-knowledge proof of
membership functionality. Therefore, the proof is the same if it
is $P_1$ or $P_2$ that is corrupted.) Thus the proof of the case
that $P_2$ is corrupted follows by combining
Equations~(\ref{eq2:weakzk-real}),~(\ref{eq2:weakzk12}) and an
analog of \eqref{eq:zkhybrid-weak}.
\medskip\ni This concludes the proof of Theorem~\ref{thm:compose}.
\EPF
\paragraph{Remark:} We call attention to the fact that in the
proof of Theorem~\ref{thm:compose}, the ZK-hybrid model adversary
$\A'$ either verifies \RK\ and simulates \BZK\ (when $P_1$ is
corrupt), or simulates \RK\ and verifies \BZK\ (when $P_2$ is
corrupt). That is, there is never a time that $\A'$ has to
simultaneously verify and simulate the same zero-knowledge
protocol. This is a crucial point in our proof and is what enables
us to ensure the soundness of the subproofs during the ZK-hybrid
simulation.
\subsection{Generalizing the Composition Theorem}
Theorem~\ref{thm:compose} is stated so that the {\em same}\/
ZK-hybrid model protocol $\Pi'$ is executed concurrently. However,
there is really no need to consider the same protocol. Rather, the
theorem can be stated with respect to possibly different protocols
for the ZK-hybrid model $\Pi'_1,\Pi'_2,\ldots$ The composition
theorem will still hold as long as all the protocols replace the
ideal zero-knowledge calls in the same way (as described in the
proof of Theorem~\ref{thm:compose}). Thus, we really demonstrate
the feasibility of obtaining $m$-bounded concurrent composition in
the real model of {\em arbitrary}\/ protocols that are all
designed in the ZK-hybrid model.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Obtaining Bounded Concurrent Two-Party Computation}
\label{sec:final-prot}
In this section, we show that by Theorem~\ref{thm:compose}, it
suffices to provide protocols that are secure in the ZK-hybrid
model. We then show that such protocols exist. First, however, we
formally define {\em security}\/ in the ZK-hybrid model.
\paragraph{Secure computation in the ZK-hybrid model -- definition.}
Let $\Pi'$ be a two-party protocol that is designed in the
ZK-hybrid model. That is, $\Pi'$ contains regular interaction
between the parties as well as ideal calls to the zero-knowledge
proof of knowledge functionality
$((v,w),\lambda)\mapsto(\lambda,(v,R(v,w)))$. Security in the
ZK-hybrid model is defined in the natural way. That is,
\BD {\em (security in the ZK-hybrid model):}
Let $m(\cdot)$ be a polynomial, $f$ a functionality and $\Pi'$ a
protocol for the {\rm ZK-hybrid} model. Then, we say that\, $\Pi'$
{\sf securely computes $f$ in the ZK-hybrid model under
$m$-bounded concurrent composition} if for every non-uniform
probabilistic polynomial-time {\rm ZK-hybrid} adversary $\A'$
running\, $\Pi'$ there exists a non-uniform probabilistic
polynomial-time ideal-model adversary $\S$ such that for every
polynomial $p(n)$,
\begin{equation}
\left\{\vphantom{0^{0^0}}\ideal_{f,\S}(\ov x,\ov
y,z)\right\}_{n\in\N;\ov x,\ov y \in(\bitset^n)^{p(n)};
z\in\bitset^*} \indist
\left\{\vphantom{0^{0^0}}\zkhybrid^m_{\Pi',\A'}(\ov x,\ov
y,z)\right\}_{n\in\N;\ov x,\ov y \in(\bitset^n)^{p(n)};
z\in\bitset^*}
\label{eq:def-zk-hybrid}
\end{equation}
\label{def:zk-hybrid}
\ED
\paragraph{Security in the ZK-hybrid model suffices.}
An important corollary of Theorem~\ref{thm:compose} is the fact
that in order to obtain $m$-bounded concurrent secure two-party
computation in the real model, it suffices to obtain $m$-bounded
concurrent secure two-party computation in the ZK-hybrid model.
\BCR {\em (bounded-concurrent secure two-party computation):}
Assume that there exists a two-party protocol\, $\Pi'$ that
securely computes a functionality $f$ in the\, {\em ZK-hybrid
model} under $m$-bounded concurrent composition. Then, assuming
the existence of enhanced trapdoor permutations and collision
resistant hash functions, there exists a two-party protocol\,
$\Pi$ that securely computes $f$ in the {\em real model} under
$m$-bounded concurrent composition.
\label{cor:compose}
\ECR
\BPF
The protocol $\Pi$ in the corollary statement is the one that is
guaranteed to exist by the transformation of $\Pi'$ that is stated
in Theorem~\ref{thm:compose}. The proof that the real-model
protocol $\Pi$ securely computes $f$ works by showing the
existence of an ideal-model adversary $\S$ for every real-model
adversary $\A$. Let $\A$ be a probabilistic polynomial-time
real-model adversary for $\Pi$. Then, by
Theorem~\ref{thm:compose}, there exists a probabilistic
polynomial-time adversary $\A'$ for $\Pi'$ in the ZK-hybrid model
such that
\begin{equation}
\left\{\vphantom{0^{0^0}}\zkhybrid^m_{\Pi',\A'}(\ov x,\ov
y,z)\right\} \indist \left\{\vphantom{0^{0^0}}\real^m_{\Pi,\A}(\ov
x,\ov y,z)\right\}
\label{eq:prop1}
\end{equation}
Next, by the assumption that $\Pi'$ securely computes $f$ in the
ZK-hybrid model, we have that for every probabilistic
polynomial-time adversary $\A'$ there exists a probabilistic
polynomial-time adversary $\S$ for the ideal model such that
\begin{equation}
\left\{\vphantom{0^{0^0}}\ideal_{f,\S}(\ov x,\ov y,z)\right\}
\indist \left\{\vphantom{0^{0^0}}\zkhybrid^m_{\Pi',\A'}(\ov x,\ov
y,z)\right\}
\label{eq:prop2}
\end{equation}
By combining Equations~(\ref{eq:prop1}) and~(\ref{eq:prop2}) we
obtain that the real-model protocol $\Pi$ securely computes $f$
under $m$-bounded concurrent composition.
\EPF
\paragraph{Bounded concurrent secure computation in the plain model.}
By Corollary~\ref{cor:compose}, all that remains is for us to
demonstrate the existence of a protocol that securely computes any
two-party functionality in the ZK-hybrid model under $m$-bounded
concurrent composition. However, such a protocol is already known
to exist. In particular, assuming the existence of enhanced
trapdoor permutations, it has been shown that any two-party
functionality can be securely computed in the ZK-hybrid model
under the definition of universal composability~\cite{CLOS}.%
\footnote{Technically, the results of~\cite{CLOS} do not hold for
{\em any}\/ efficient functionality, but for a subset that they
call ``well-formed''. However, in the case of static adversaries,
the only issue is that the functionality should behave
independently of the set of corrupted parties (in the setting of
universal composability, this information on who is corrupted is
provided to the functionality). In our model, the trusted party is
anyway not given this information and therefore there is no
limitation on the functionalities that can be computed.} %
Since universal composability implies $m$-bounded concurrent
composition, we obtain the following theorem:
\BT {\em (Theorem~\ref{thm:informal-upper} -- restated):}
Assume that enhanced trapdoor permutations and collision resistant
hash functions exist. Then, for any probabilistic polynomial-time
two-party functionality $f$ and for any polynomial $m(\cdot)$,
there exists a protocol\, $\Pi$ that securely computes $f$ under
$m$-bounded concurrent composition.
\label{thm:main}
\ET
\paragraph{Round complexity.} The number of rounds in our protocol
for $m$-bounded concurrent secure two-party computation (in the
real model) is in the order of $m\kappa(n)$ times the number of
rounds in the protocol of~\cite{CLOS}. We remark that the
construction of~\cite{CLOS} for the case of static adversaries can
be made constant-round by using the semi-honest protocol of
Yao~\cite{Y}. Therefore, the round complexity of our protocol is
$O(m\kappa(n))$.
%\section*{Future Work}
%
%This work leaves open a number of interesting questions. First,
%our protocol for the $m$-bounded concurrent model uses both
%black-box and non black-box techniques, whereas the lower bound
%relates only to black-box simulation. So, can a black-box secure
%protocol be achieved with $O(m)$ or more rounds (i.e., without
%using non black-box techniques)? Conversely, can a non black-box
%protocol be achieved for $m$-bounded concurrent composition that
%has less than $m$ rounds? Taking a step beyond $m$-bounded
%concurrency, is it possible to achieve {\em unbounded}\/
%concurrent secure two-party computation? Another step beyond this
%model relates to a more general setting where many pairs of
%parties run a protocol and the adversary can corrupt any subset of
%them. Very little is known for this setting in the plain model.
%Finally, we note that we do not know how to extend our techniques
%for obtaining bounded concurrent secure {\em two-party}\/
%computation to the case of {\em multi-party}\/ computation for an
%honest minority. This extension would also be of interest.
\appendix
%\newpage
\section{Appendix: A Formal Proof of Equation~(\protect\ref{eq:zkhybrid-weak})
in Section~\protect\ref{sec:composition-theorem}}
\label{apdx}
The proof of Theorem~\ref{thm:compose} relies on
\eqref{eq:zkhybrid-weak}, both for the case that $P_1$ is
corrupted and for the case that $P_2$ is corrupted. However, our
proof that \eqref{eq:zkhybrid-weak} holds relies on
Lemma~\ref{lem:subproofs} which is informally stated. A formal
statement of this lemma and its proof can be derived from the
proofs of~\cite{BL}. However, the lemma is actually not proved
there. In this appendix, we provide a formal proof of
\eqref{eq:zkhybrid-weak}. This mainly involves providing a proof
that Protocol~\ref{prot:zkpok} from~\cite{BL} is a system of
zero-knowledge arguments of knowledge in the memberZK-hybrid
model. Actually, we prove something stronger. That is, loosely
speaking, we prove that Protocol~\ref{prot:zkpok} securely
realizes the ideal zero-knowledge proof of knowledge functionality
in the memberZK-hybrid model, even when it is used as a
subprotocol in some larger protocol $\Pi$ that runs in the setting
of $m$-bounded concurrency.
In our presentation here, we recap on some of the basic concepts
used. However, we assume that the reader is familiar with
Sections~\ref{sec:zkpok} and~\ref{sec:compose} (and so much of the
motivation and high-level ideas are not repeated). We begin by
recalling the memberZK and ZK-hybrid models, as defined in
Section~\ref{sec:compose}.
\paragraph{The memberZK-hybrid model.}
Recall that in a hybrid model, the parties interact with each
other as in the real model. However, in addition to this
interaction, they have access to a trusted party who computes some
functionality for them. In the memberZK-hybrid model, this
functionality is defined as follows. Let $L$ be an NP-language.
Then, the trusted party receives a statement $v$ from the proving
party and sends $(v,\chi_L(v))$ to the verifying party, where
$\chi_L(v)=1$ if and only if $v\in L$. That is, the functionality
computed by the trusted party is:
$$
(v,\lambda) \mapsto (\lambda,(v,\chi_L(v)))
$$
Note that if $L\notin\BPP$, this functionality is not efficiently
computable. Nevertheless, it suffices for our needs here. We also
recall that if the statement received by the trusted party is of
the form $v\circ\bot$, then it always sends $(v,0)$ to the
verifying party, irrespective of whether or not $v\in L$. This is
needed in order to model the situation that a (cheating) prover
may cause the verifier to reject a proof, even if the statement is
true.
\paragraph{The ZK-hybrid model.}
The ZK-hybrid model is similar to the memberZK-hybrid model,
except that the functionality is a {\em proof of knowledge}, and
not just a proof of membership. This also means that the
functionality can be efficiently computed. Let $R$ be an
NP-relation. Then, the ideal zero-knowledge proof of knowledge
functionality is defined as follows:
$$
((v,w),\lambda) \mapsto (\lambda,(v,R(v,w)))
$$
Recall that we define a special symbol $\bot$ such that for every
relation $R$, it holds that $R(v,\bot)=0$. Again, this is used to
model the case that a verifier rejects a true statement.
\paragraph{The protocol.}
We now rewrite Protocol~\ref{prot:zkpok} so that it is cast in the
memberZK-hybrid model. We also combine the commit-with-extract
part directly into the protocol, with a slight modification.
Specifically, we have the prover choose the trapdoor permutation
after it receives the commitment from the verifier (unlike the
protocol description in Section~\ref{sec:zkpok} where the trapdoor
permutation is chosen before the ``coin-tossing'' phase). This
simplifies the proof to some extent, but is not necessary.
%\newpage
\BPR \label{prot:fullzkpok} {\em (zero-knowledge argument of knowledge for
$R \in \NP$ in the memberZK-hybrid model):}
\BI
\item {\bf Common Input:} $v$
\item {\bf Auxiliary input to prover:} $w$ such that $(v,w) \in
R$. For simplicity, assume that $|v|=|w|=n$.
\item {\bf Part 1 -- Commit-with-Extract:}
The prover commits to its auxiliary-input witness $w$.
\BE
\item {\sf The parties run a coin-tossing protocol:}
\BE
\item
The verifier chooses a random string $r_1 \rnd \bitset^n$ and
sends $c = {\sf Commit}(r_1)$ to the prover \(using any
perfectly-binding commitment scheme $\sf Commit$\).
\item
{\sf The prover chooses an enhanced trapdoor permutation:} The
prover chooses an enhanced trapdoor permutation $f$
along with its trapdoor $t$ and sends $f$ to the verifier.%
\footnote{For the sake of simplicity, we assume that the protocol
uses {\em certified}\/ \cite{FLS} enhanced trapdoor permutations
(for which the verifier can efficiently verify that $f$ is indeed
a permutation). However, actually any enhanced trapdoor
permutation can be used by having the prover prove that $f$ is a
permutation; see~\cite{BL}. (Of course, here this proof is carried
out using the zero-knowledge proof of membership functionality.)
\label{foot:cert}} %
The verifier checks that $f$ is a permutation, and aborts if not.
\item
The prover chooses a random string $r_2 \rnd \bitset^n$ and sends
$r_2$ to the verifier.
\item
The verifier sends $r_1$ to the prover {\em (}without
decommitting{\em )}.
\item
The verifier proves that $r_1$ is the value that it indeed
committed to. That is, the verifier sends $(c,r_1)$ to the trusted
party computing the zero-knowledge proof of membership
functionality for the language $\{(c,r)\ |\ c = {\sf
Commit}(r)\}$.
\item
The prover receives $((c,r_1),\sigma)$ from the trusted party and
aborts unless $\sigma=1$.
\item
The output of the coin-tossing phase is $r=r_1 \oplus r_2$.
\EE
\item {\sf The actual commitment:}
Let $b$ be a hard-core bit of $f$. Then, the prover computes the
sequence $\tilde r = b(f^{-n}(r))b(f^{-n+1}(r))\cdots
b(f^{-1}(r))$.
The prover sends the value $\tilde c = \tilde r \oplus w$ to the
verifier.
\label{step:actual-commit}
\EE
\item {\bf Part~2 -- Proof of Correctness: }
The prover proves to the verifier that it committed to a valid
witness $w$ for $v$ in the previous step:
\BE
\item Define the language
$$
L = \left\{(v,\tilde c,f,r)\ \left|\vphantom{f^N}\right. \
R(v,\tilde c \oplus \tilde r) = 1 {\rm ~where~} \tilde r =
b(f^{-n}(r))b(f^{-n+1}(r))\cdots
b(f^{-1}(r))\vphantom{f^N}\right\}
$$
Then, the prover sends $(v,\tilde c,f,r)$ to the trusted party
computing the zero-knowledge proof of membership functionality.
\item The verifier accepts if and only if it receives $((v,\tilde
c,f,r),1)$ from the trusted party, where $v$ is its common input,
and $(\tilde c,f,r)$ are as generated in part~1.
\EE
\EI
\EPR
\paragraph{Non-abusing adversaries.}
For the sake of clarity, we use the notion of non-abusing
adversaries, introduced by~\cite{PR}. A non-abusing adversary
works in the memberZK-hybrid model and is such that it sends a
false statement $v\notin L$ to the trusted party that is not of
the form $v'\circ\bot$ with at most negligible probability. That
is, such an adversary attempts to ``lie'' with at most negligible
probability. Note that when non-abusing adversaries are
considered, the functionality can be efficiently computed.
Specifically, there is no need to check whether or not $v\in L$;
all that is necessary is to check whether or not the statement is
of the form $v\circ\bot$. (We assume that honest provers also only
send correct statements.)
\medskip\ni We are now ready to present the formal analog of
Lemma~\ref{lem:subproofs} that will then be used to derive
\eqref{eq:zkhybrid-weak}.
\BL
Let\, $\Pi'$ be a two-party protocol that is designed in the {\rm
ZK-hybrid model} and let $m(\cdot)$ be a polynomial. Let\, $\Pi$
be a two-party protocol that is obtained from\, $\Pi'$ by
replacing the ideal zero-knowledge calls with
Protocol~\ref{prot:fullzkpok} that is designed in the {\rm
memberZK-hybrid model.} Then, assuming the existence of enhanced
trapdoor permutations, for every probabilistic polynomial-time
{\sf non-abusing adversary} $\A$ for\, $\Pi$ there exists a
probabilistic polynomial-time adversary $\A'$ for\, $\Pi'$ such
that
\begin{equation}
\left\{\vphantom{0^{0^0}}\zkhybrid^m_{\Pi',\A'}(\ov x,\ov
y,z)\right\} \indist
\left\{\vphantom{0^{0^0}}\memberzk^m_{\Pi,\A}(\ov x,\ov
y,z)\right\}
\label{eq:weakfull}
\end{equation}
where the ensembles in Eq.~$(\ref{eq:weakfull})$ are over
$n\in\N$, $\ov x,\ov y \in(\bitset^n)^{p(n)}$ and $z\in\bitset^*$.
\label{lem:fullzkpok}
\EL
\BPF
Much of the technical content in the proof of this lemma is the
same as in~\cite{BL}. The main differences are due to the model
and definitions used. First, Lemma~\ref{lem:fullzkpok} is stated
in a setting of composition, whereas~\cite{BL} relates to the
stand-alone setting only. Second, here we construct
Protocol~\ref{prot:fullzkpok} in the memberZK-hybrid model and
show that it securely realizes the ideal zero-knowledge proof of
knowledge functionality. In contrast, the protocol in~\cite{BL} is
constructed with zero-knowledge subproofs of membership, where
these subproofs are assumed to fulfill the standard definition of
zero-knowledge. Likewise, they prove that
Protocol~\ref{prot:fullzkpok} is a zero-knowledge proof of
knowledge, again by the standard definition and not in terms of an
ideal functionality. Finally, \cite{BL} consider any efficient
adversary, whereas here we limit ourselves to {\em non-abusing}\/
adversaries (which suffices for our purpose). Another
presentational difference is that they separately prove that
part~1 is a commit-with-extract scheme and then show that this
suffices for obtaining a zero-knowledge proof of knowledge. In
contrast, we prove the entire protocol as one unit. For
motivational discussion on the protocol and its proof of security,
see~\cite{BL}; we provide a technical proof here only.
In our proof below, we consider the case that $P_1$ is corrupted.
The case that $P_2$ is corrupted is exactly the same. Note that
the adversary $\A$ that controls $P_1$ in $\Pi$ sends three types
of messages: real messages belonging to $\Pi'$, real messages
belonging to Protocol~\ref{prot:fullzkpok} and ideal messages for
the trusted party computing the zero-knowledge proof of membership
functionality.
We now describe the adversary $\A'$ for $\Pi'$. $\A'$ works in the
ZK-hybrid model and therefore interacts with a trusted party who
computes the zero-knowledge proof of knowledge functionality. That
is, $\A'$ sends two types of messages: real messages belonging to
$\Pi'$ and ideal messages for its trusted party. $\A'$ internally
incorporates $\A$ and forwards all of the $\Pi'$-messages between
$\A$ and the external party $P_2$. In contrast, the messages of
Protocol~\ref{prot:fullzkpok} and the ideal messages that $\A$
sends to the trusted party for memberZK are internally dealt with
by $\A'$. Informally speaking, in the case that $\A$ is proving a
statement $v$, adversary $\A'$ must extract a witness $w$ in order
to send $(v,w)$ to its trusted party (who computes the
zero-knowledge proof of knowledge functionality). On the other
hand, in the case that $\A$ is verifying a statement $v$,
adversary $\A'$ must simulate an execution of
Protocol~\ref{prot:fullzkpok} for $\A$. At the conclusion of the
simulation of all $\Pi'$-messages and executions of
Protocol~\ref{prot:fullzkpok}, adversary $\A'$ outputs whatever
$\A$ does.
As we have mentioned, all $\Pi'$-messages are forwarded by $\A'$
between $\A$ and $P_2$. In contrast, the messages of
Protocol~\ref{prot:fullzkpok} are dealt with internally. We
describe the simulation (and present the analysis) separately for
the case that $\A$ is the prover and the case that $\A$ is the
verifier. In the simulation described below, there is no rewinding
or dependence on $\Pi'$-messages that are sent. Therefore, the
simulation of Protocol~\ref{prot:fullzkpok} is not affected by the
$\Pi'$-messages that $\A'$ forwards between $\A$ and the external
$P_2$. We now describe the internal simulation of
Protocol~\ref{prot:fullzkpok}.
\paragraph{Simulating Protocol~\ref{prot:fullzkpok} when $\A$ is the prover.}
In this case, $\A$ provides a proof for a statement $v$ using
Protocol~\ref{prot:fullzkpok}. Since $\A'$ works in the ZK-hybrid
model it must send a pair $(v,w)$ to its trusted party, such that
if $P_2$ would have accepted the proof from $\A$ in $\Pi$ then
$R(v,w)=1$, but if $P_2$ would have rejected the proof then
$R(v,w)=0$. (Recall that for every $R$ it holds that $R(v,\bot)=0$
and so $\A'$ can send $(v,\bot)$ to the trusted party in the case
that $P_2$ would have rejected.) We now demonstrate how this is
done.
Informally speaking, $\A'$ works by biasing the outcome of $r =
r_1 \oplus r_2$ of the coin-tossing protocol, so that it knows
$f^{-n}(r)$. In this case, it will be able to compute $\tilde r$ and
then derive $w=\tilde c \oplus \tilde r$. Note that $\A'$ must
generate a view for $\A$ that is indistinguishable from its view
in a real execution, in addition to extracting $w$. Formally,
$\A'$ works as follows:
\BE
\item
{\em $\A'$ biases the outcome of the coin-tossing protocol:}
\BE
\item
$\A'$ passes $c = {\sf Commit}(0^n)$ to $\A$ (this is a commitment
to ``garbage'').
\item
{\em $\A$ chooses a trapdoor permutation:} $\A'$ receives the
description of a permutation $f$ from $\A$. Adversary $\A'$ checks
that $f$ is indeed a permutation. If not, $\A'$ simulates $P_2$
aborting and sends $(v,\bot)$ to the trusted party (causing the
real, external $P_2$ to abort).
\item
$\A'$ obtains a string $r_2$ from $\A$.
\item
$\A'$ chooses $s\rnd\bitset^n$, computes $r=f^n(s)$ and passes
$\A$ the string $r_1 = r \oplus r_2$. (Notice that $r_1$ is chosen
irrespective of the initial commitment $c$, and that $f^{-n}(r_1
\oplus r_2) = f^{-n}(r)=s$.)
\item
$\A'$ passes $\A$ the pair $((c,r_1),1)$ as if it was sent by the
trusted party for the zero-knowledge proof of membership
functionality.
\EE
\item
{\em $\A$ sends the actual commitment:}
$\A'$ receives a string $\tilde c$ from $\A$.
\item {\em Witness extraction:}
$\A'$ computes $\tilde r$ and derives $w=\tilde r \oplus \tilde
c$. (Note that $\A'$ knows $f^{-n}(r)$ and so can compute $\tilde
r$.)
\item
{\em $\A$ ``proves'' that it committed to a valid witness:}
\BE
\item
$\A'$ obtains the ideal message that $\A$ intends to send to its
trusted party.
\item
If the ideal message was $(v,\tilde c,f,r)$, where $v$ is the
common input and $\tilde c$, $f$ and $r$ are as were generated in
the above simulation of part~1 of the protocol, then $\A'$ sends
its trusted party the pair $(v,w)$. Otherwise, if the values in
the message are incorrect or the message is concatenated with
$\bot$, then $\A'$ sends its trusted party the pair $(v,\bot)$.
\EE
\EE
\ni First, note that the view of $\A$ in the simulation by $\A'$
is computationally indistinguishable from its view in a real
execution. In fact, the only difference between the views is that
in the simulation, $\A'$ sends $r_1$ that is {\em not}\/ the value
committed to in $c$, whereas the honest $P_2$ would send $r_1$
such that $c={\sf Commit}(r_1)$. The indistinguishability
therefore follows directly from the hiding property of
commitments. In order to show this formally, the entire concurrent
execution (including the $\Pi'$ executions and all the executions
of Protocol~\ref{prot:fullzkpok}) must be simulated. However, the
machine that distinguishes commitments to~$0^n$ from commitments
to $r_1$ may be given the inputs to all of these $\Pi'$
executions. Therefore, the simulation of $\Pi'$ is
straightforward. Regarding the executions of
Protocol~\ref{prot:fullzkpok}, a standard hybrid argument is used.
Specifically, let $N$ be the total number of executions of
Protocol~\ref{prot:fullzkpok}. Then, define the $i^\th$ hybrid to
be such that the first $i$ executions of
Protocol~\ref{prot:fullzkpok} are simulated by $\A'$ following the
above instructions, and the last $N\!-\!i$ executions are run
following the honest $P_2$'s instructions. The
indistinguishability of the $i^\th$ and $(i\!+\!1)^\th$ hybrids then
follows from the hiding property of a single commitment. Details
are omitted.
Next, notice that the view of $\A$ defines a {\em unique}\/ value
$w$ that is ``committed to''. This holds because $f$ is a
permutation. Therefore $f$ and $r$ define a unique $\tilde r$, and
$\tilde c$ and $\tilde r$ define a unique $w$. Furthermore, this
view fully defines $P_2$'s output, both in a real execution of
$\Pi$ and in the simulated execution of $\Pi'$ with $\A'$. In
order to see this, let $V_\A$ be the view of $\A$. Then, we
separately consider two cases:
\BE
\item {\em Case 1 -- $\A$'s view $V_\A$ defines a valid witness $w$:}
If in $V_\A$ the message sent by $\A$ to the trusted party in
part~2 of the protocol equals $(v,\tilde c,f,r)$, where $v$ is the
common input and $(\tilde c,f,r)$ are as were generated in part~1,
then $P_2$ accepts the proof (i.e., outputs $(v,1)$) in both $\Pi$
and $\Pi'$. This holds in $\Pi$ because $(v,\tilde c,f,r)\in L$
and so the trusted party for the zero-knowledge proof of
membership functionality sends $((v,\tilde c,f,r),1)$ to $P_2$,
causing it to accept. Likewise, in the simulation by $\A'$ in
$\Pi'$, adversary $\A'$ would send $(v,w)$ to its trusted party,
where $w$ is a valid witness. This trusted party computes the
zero-knowledge proof of knowledge functionality and so sends the
message $(v,R(v,w)) = (v,1)$ to $P_2$, causing it to accept.
In contrast, if in $V_\A$, the message sent by $\A$ to the trusted
party in part~2 does not equal $(v,\tilde c,f,r)$ as above, then
$P_2$ rejects the proof (i.e., outputs $(v,0)$) in both $\Pi$ and
$\Pi'$. If the message sent by $\A$ concludes with $\bot$, then
$P_2$ would reject in $\Pi$ because the trusted party for memberZK
always sends $0$ in this case. Likewise, in this case, $\A'$ sends
$(v,\bot)$ to its trusted party and so in $\Pi'$ party $P_2$ also
rejects. On the other hand, if the message sent by $\A$ just
contains the ``wrong'' values (e.g., the wrong statement or not
consistent with part~1), then again $P_2$ always rejects.
\item {\em Case 2 -- $\A$'s view $V_\A$ defines a value $w'$ such that
$R(v,w')=0$:} In this case, $P_2$ always rejects in both $\Pi$ and
$\Pi'$. This holds in $\Pi$ because the statement sent by $\A$ in
part~2 must either be false (in which case, the trusted party for
the zero-knowledge proof of membership sends $0$ to $P_2$), or it
must contain ``wrong'' values (in which case, $P_2$ always
rejects). Likewise, in $\Pi'$, adversary $\A'$ will either send
$(v,\bot)$ or $(v,w')$ to its trusted party; in both cases $P_2$
receives $(v,0)$ and so rejects.
\EE
\ni We conclude that the view of $\A$ in a real execution of $\Pi$
is computationally indistinguishable from its view in the
simulation by $\A'$ in $\Pi'$. Furthermore, this view fully
defines the output of $P_2$ from Protocol~\ref{prot:fullzkpok}.
Therefore, the joint distribution of $\A$'s view and $P_2$'s
output in $\Pi$ where Protocol~\ref{prot:fullzkpok} is run, is
indistinguishable from the joint distribution generated by $\A'$
in $\Pi'$.%
\footnote{We remark that in the analysis of this case where $\A$
is the prover, we do not need to assume that $\A$ is non-abusing;
this is needed for the case that $\A$ is the verifier. However,
note that if {\em any}\/ trapdoor permutation is used (rather than
just certified permutations -- see Footnote~\ref{foot:cert}), then
the prover must prove that the function $f$ is a permutation. Our
proof would then need to assume that $\A$ is non-abusing, even for
the case that it plays the prover.}
\paragraph{Simulating Protocol~\ref{prot:fullzkpok} when $\A$ is the verifier.}
In this case, $\A'$ receives an ideal message $(v,1)$ from its
trusted party (that computes the zero-knowledge proof of knowledge
functionality) and must generate a simulation of an execution of
Protocol~\ref{prot:fullzkpok} for $\A$. This is achieved as
follows: $\A'$ follows the instructions for the honest prover in
part~1 of the protocol; with the exception that instead of
committing to a valid witness $w$ in
Step~\ref{step:actual-commit}, it commits to $0^n$ instead.
(Recall that $\A'$ cannot commit to $w$ because it doesn't know
the value.) Let $\tilde c$, $f$ and $r$ be the values that result
from the execution in part~1. Then, in order to simulate part~2,
$\A'$ just hands $\A$ the tuple $((v,\tilde c,f,r),1)$ as if it
was sent from the trusted party. This completes the simulation.
Intuitively, the view of $\A$ in this simulation with $\A'$ is
indistinguishable from its view in a real execution of $\Pi$ with
$P_2$. The only difference is whether the tuple $(\tilde c,f,r)$
defines a commitment to a real witness $w$ or to $0^n$. Thus,
indistinguishability follows from the {\em hiding property}\/ of
this commitment. Note that part~2 of the protocol is exactly the
same in the simulation and in a real execution because $\A'$ plays
the role of the trusted party computing the zero-knowledge proof
of membership functionality.
The hiding property of the commitment $\tilde c$ here follows from
the hiding property of the non-interactive commitment scheme of
\cite{Blum}, and the security of the coin-tossing protocol. In
particular, if $r=r_1 \oplus r_2$ is uniform (or pseudorandom),
then distinguishing between a commitment to $w\in\bitset^n$ and a
commitment to $0^n$ is essentially equivalent to distinguishing
between $\{f^n(U_n),(b(U_n)\cdots b(f^{n-1}(U_n))) \oplus w\}$ and
$\{f^n(U_n),(b(U_n)\cdots b(f^{n-1}(U_n)))\}$. Since $b$ is a
hard-core bit of $f$, it is infeasible to distinguish between
these distributions in polynomial time. The hiding property
therefore follows from the security of the coin-tossing protocol
that ensures that $r_1 \oplus r_2$ is pseudorandom.
We provide a proof for the case that a single execution of
Protocol~\ref{prot:fullzkpok} takes place. The general case
follows from a standard hybrid argument. Specifically, let $N$ be
the number of executions of Protocol~\ref{prot:fullzkpok}. Then, the
first $i$ executions are such that the real witness $w$ is
committed to in $\tilde c$, and the last $N\!-\!i$ executions are
such that $0^n$ is committed to. (Note that the simulation of the
$\Pi'$ messages and the other executions of
Protocol~\ref{prot:fullzkpok} can be carried out because the
distinguisher for the commitments may be given the inputs to all
the $\Pi'$ executions. For example, the distinguisher would
therefore know the real witnesses $w$, as required.)
Indistinguishability of the neighbouring hybrids then follows from
the proof of a single execution.
We now prove the case of a single execution. Let $\A$ be a
probabilistic polynomial-time verifier, and denote by
$V_n^{\A}(v,\alpha)$ the distribution over $\A$'s view when it
interacts with a party who when running
Protocol~\ref{prot:fullzkpok} with common input $v$, follows the
honest prover's instructions except that it sends the value
$\tilde c=\tilde r \oplus \mbox{\boldmath{$\alpha$}}$ in
Step~\ref{step:actual-commit}. Note that in a real execution with
the honest prover, $\A$'s view is $V_n^{\A}(v,w)$ where $w$ is a
valid witness for $v$, whereas in the simulation with $\A'$ its
view is $V_n^{\A}(v,0^n)$. (Recall that this is the only
difference between a real execution and the simulation by $\A'$.)
%(the distribution is over the uniform choice of random coins
%for the honest prover).
Then, we prove the following: for any probabilistic
polynomial-time verifier $\A$, it holds that
\begin{equation}
\left\{V_n^{\A}(v,w)\right\}_{v\in\bitset^n,w\in R(v)} \indist
\left\{V_n^{\A}(v,0^n)\right\}_{v\in\bitset^n}
\label{eq:views}
\end{equation}
where $R(v)$ denotes the witness set of $v$ (i.e., $R(v)=\{w \ |\
(v,w)\in R\}$). (Note that this is actually an abuse of notation
because $v$ is dependent on the inputs to $\Pi'$.)
As we have mentioned, the hiding property is derived from the
hiding property of the non-interactive commitment scheme defined
by
\begin{equation}
C_n(\alpha) \eqdef \langle f^n(U_n),\ (b(U_n)\cdots
b(f^{n-1}(U_n))) \oplus\alpha \rangle
\label{eq:C}
\end{equation}
Loosely speaking, the only difference between the
commit-with-extract part of Protocol~\ref{prot:fullzkpok} and the
commitment scheme $C_n$, is that in Protocol~\ref{prot:fullzkpok}
the value $f^n(U_n)$ is chosen jointly by both parties (and is not
determined solely by the sender). We therefore reduce
\eqref{eq:views} to the hiding property of $C_n$. Assume by
contradiction, that there exists a probabilistic polynomial-time
receiver $\A$, a polynomial-time distinguisher $D$ and a
polynomial $p(\cdot)$ such that for infinitely many $v$'s
\begin{equation}
\adv_v^{D} \eqdef \left|\prob[D(V_n^{\A}(v,w))=1] -
\prob[D(V_n^{\A}(v,0^n))=1]\right| \geq \frac{1}{p(|v|)}
\label{eq:dist-C}
\end{equation}
This implies that for infinitely many $n$'s, there exists a $v$
($|v|=n$) such that $\adv_v^D\geq 1/p(n)$. For simplicity, we will
consider a single $v$ for every such $n$. We will therefore write
$\adv_n^D$ from now on, instead of $\adv_v^D$. We will now use $D$
and $\A$ to construct a distinguisher $D'$ that contradicts the
security of the commitment scheme $C_n$ defined in \eqref{eq:C}.
Intuitively, $D'$ receives a commitment
$$
C_n(\alpha) = \langle f^n(s),\ (b(s)\cdots
b(f^{n-1}(s)))\oplus\alpha \rangle
$$
for input (where $s\rnd\bitset^n$) and works by invoking $\A$ and
obtaining an execution of Protocol~\ref{prot:fullzkpok} in which
$r_1 \oplus r_2 = f^n(s)$. Then, since $D$ has an advantage in
distinguishing $\alpha=w$ from $\alpha=0^n$ given $f,r_1,r_2$ and
$\tilde c=(b(s)\cdots b(f^{n-1}(s)))\oplus\alpha$, this translates
to $D'$ having an advantage in distinguishing $\alpha=w$ from
$\alpha=0^n$ given $C_n(\alpha)$. We first informally describe how
$D'$ obtains an execution with $\A$ where $r_1\oplus r_2=f^n(s)$.
This is achieved using the following strategy. $D'$ invokes $\A$
and obtains a commitment $c={\sf Commit}(r_1)$ from $\A$. $D'$
then attempts to learn $r_1$ by running the continuation of the
coin tossing protocol with $\A$. If it succeeds, then it rewinds
$\A$ and sends $r_2 = f^n(s)\oplus r_1$, where $f^n(s)$ is part of
its input commitment $C_n(\alpha)$. The result of the coin-tossing
protocol is thus $f^n(s)$, as desired. We now proceed with the
formal proof.
Before defining $D'$, we prove the following fact:
\BF
\label{fact:abort}
Let $\e_n^{\A}$ denote the probability that $\A$ does {\em not}\/
abort during a real execution with the honest prover. Then,
$\e_n^{\A} \geq \adv_n^D$.
\EF
\BPF
The only place in which the honest prover uses the witness $w$ is
in Step~\ref{step:actual-commit} of Part~1 of
Protocol~\ref{prot:fullzkpok}. Note that if $\A$ aborts, it must
be before this step (because after this step $\A$ only receives
messages). Therefore, in any execution where $\A$ aborts, the view
of $\A$ is {\em independent}\/ of the value $w$ that is committed
to in Step~\ref{step:actual-commit}. Therefore, the advantage of
$D$ in executions where $\A$ aborts is exactly zero. Thus,
$\adv_n^D \leq \e_n^{\A}$, and the fact follows.
\EPF
\ni By the assumption in Eq.~(\ref{eq:dist-C}), for infinitely
many $n$'s, $\adv_n^D\geq 1/p(n)$. Therefore, it also holds that
for all of these $n$'s, $\e_n^{\A} \geq 1/p(n)$. We are now ready
to describe $D'$.
Distinguisher $D'$ receives for input a one-way permutation $f$
and a commitment $\hat{c}=C_n(\alpha)=\langle f^n(s),\ \tilde
c=(b(s)\cdots b(f^{n-1}(s)))\oplus\alpha \rangle$, where
$s\rnd\bitset^n$. Then, $D'$ simulates an execution for $\A$ as
follows. All of the $\Pi'$ messages are perfectly simulated using
the inputs that are known to $D'$. In addition, the (single)
execution of Protocol~\ref{prot:fullzkpok} is simulated according
to the following instructions:
\BE
\item
{\em First simulation of the coin-tossing protocol:}
\BE
\item \label{step:commit}
$D'$ receives a commitment $c = {\sf Commit}(r_1)$ from $\A$.
\item
$D'$ follows the instructions for the honest prover and uniformly
chooses an enhanced trapdoor permutation $g$ and a random string
$r_2\rnd\bitset^n$. $D'$ passes $g$ and $r_2$ to $\A$.
\item \label{step:getr}
$D'$ obtains some string $r_1$ from $\A$, and a message $z$ that
$\A$ intended to send to the trusted party.
\item
If $z=(c,r_1)$, then $D'$ continues. Otherwise if
$z=(c,r_1)\circ\bot$ or it doesn't contain the correct values $c$
and $r_1$, then $D'$ outputs {\sf fail} and halts.
\EE
\item
{\em Second simulation of the coin-tossing protocol:}
\BE
\item
$D'$ rewinds $\A$ to the beginning and once again receives the
same commitment $c = {\sf Commit}(r_1)$ from $\A$.
\item \label{step:cont}
$D'$ sends $\A$ the permutation $f$ and the string $r_2 =
f^n(s)\oplus r_1$, where $r_1$ is the value obtained in
Step~\ref{step:getr} and $(f,f^n(s))$ are from its input.
\item
$D'$ obtains some string $r_1'$ from $\A$, and a message $z'$ that
$\A$ intended to send to the trusted party. There are three
possibilities at this point: \label{step:verify2}
\BE
\item
If $z'$ is not equal to $(c,r_1')$ (either because it has
different values or because it equals $(c,r_1')\circ\bot$), then
$D'$ jumps to Step~\ref{step:output}. (This is interpreted as an
abort from $\A$, as in a real execution.)
\item \label{step:r1check}
If $z'=(c,r_1')$ but $r_1'\neq r_1$, then $D'$ halts and outputs
{\sf bad}.
\item
If $z'=(c,r_1')$ and $r_1'=r_1$, then $D'$ proceeds to
Step~\ref{step:actual} below.
\EE
\EE
\item \label{step:actual}
{\em Simulation of the actual commitment:} $D'$ passes the string
$\tilde c$ (from its input commitment $\hat c$) to~$\A$.
\item {\em Output:} \label{step:output}
At the end of the simulation, $D'$ passes the view output by $\A$
to $D$, and outputs whatever $D$ does. (Without loss of
generality, we assume that $\A$ always outputs its view.)
\EE
\ni Informally speaking, if $D'$ does not output {\sf fail} or
{\sf bad}, then the view generated for $\A$ is identical to its
view in a real execution. Furthermore, this view is such that
$r_1\oplus r_2=f^n(s)$, as desired. Of course, this is only
helpful if $D'$'s probability of outputting {\sf fail} or {\sf
bad} is small. Now, $D'$ can output {\sf bad} if $r_1'\neq r_1$,
as in Step~\ref{step:r1check}. However, intuitively this cannot
happen with non-negligible probability, due to the binding
property of commitments and the fact that $\A$ is non-abusing.%
\footnote{In this proof, it is essential that $\A$ is non-abusing.} %
In addition to the above, $D'$ can output {\sf fail} if it does
not receive $z=(c,r_1)$ from $\A$ in Step~\ref{step:getr}. Loosely
speaking, the main observation here is that by combining
Fact~\ref{fact:abort} with Eq.~(\ref{eq:dist-C}), we have that
$\A$'s non-abort probability is at least $1/p(n)$. Therefore, with
probability at least $1/p(n)$ it must be that $\A$ convinces $D'$
in Step~\ref{step:getr} (otherwise, $\A$ has aborted).
%We begin by modifying $D'$ to a machine $D''$ who works in exactly
%the same way as $D'$ except that it never outputs {\sf bad}.
%Rather, if it receives $z'=(c,r_1')$ but $r_1'\neq r_1$, then it
%completes the commitment . (We assume that
We now formally prove a bound on $D'$'s probability of failing.
\newcommand{\fail}{{\sf fail}_n^{D'}}
\BCM
\label{clm:bound-fail}
Denote the probability that $D'$ outputs {\sf fail} upon receiving
input drawn from $C_n(\alpha)$ by\/ $\fail(\alpha)$. Then, for every
$\alpha$ and for every\/ $n$ for which\/ $\adv_n^D \geq 1/p(n)$,
it holds that\/ $\fail(\alpha) < 1-1/p(n)$.
\ECM
\BPF
First, $\fail(\alpha)$ is the same for every $\alpha$ because $D'$
only uses the value $\tilde c$ after all checks enabling {\sf
fail} have passed. Since $\tilde c$ is the only value that is
dependent on $\alpha$, we have that $D'$'s {\sf fail} probability
is independent of $\alpha$.
Next, we show that for any $\alpha\in\bitset^n$, $\fail(\alpha) <
1- 1/p(n)$. Now, machine $D'$ outputs {\sf fail} if the value $z$
that it received in Step~\ref{step:getr} is not equal to
$(c,r_1)$.
%, or if $r_1\neq r_1'$. This second event (of $r_1\neq r_1'$) can
%only occur with negligible probability. This is the case because
%the commitment $c$ provided by $\A$ in Step~\ref{step:commit} is
%perfectly binding. Therefore, the value $c$ can be a commitment to
%at most one of $r_1$ or $r_1'$. In other words, at least one of
%the statements $c={\sf Commit}(r_1)$ or $c={\sf Commit}(r_1')$ is
%false. Since $\A$ is non-abusing, the probability that it will
%send both $z=(c,r_1)$ and $z'=(c,r_1')$ is negligible.
%
%Next, we show that the probability that $D'$ outputs {\sf fail}
%due to receiving $z\neq(c,r_1)$ in Step~\ref{step:getr} is at most $1 - 1/p(n)$.
In order to bound this probability, notice that $\A$'s view in
the first simulation of the coin-tossing with $D'$ is {\em
identical}\/ to its view in a real execution. Therefore, $\A$'s
non-abort probability until this point in the simulation is the
same as in a real execution. By Fact~\ref{fact:abort}, $\A$'s
non-abort probability $\e_n^{\A}$ is greater than or equal to
$\adv_n^D$. Therefore, for any $n$ where $\adv_n^D \geq 1/p(n)$,
$\A$'s non-abort probability in a real execution is at least
$1/p(n)$. By the above, we have that for these $n$'s, $\A$'s
non-abort probability in the simulated execution with $D'$ is also
at least $1/p(n)$. $D'$ outputs {\sf fail} if $\A$ aborts, and so
the probability that $D'$ outputs {\sf fail} is at most
$1-1/p(n)$.
\EPF
\ni We now show that, conditioned on the fact that $D'$ does not
output {\sf fail}, $D'$ distinguishes commitments to~$w$ from
commitments to~$0^n$ with non-negligible probability:
\BCM \label{clm:dist} If $D'$ does not output {\sf fail}, then
$D'$ distinguishes $C_n(w)$ from $C_n(0^n)$ with advantage that is
negligibly close to $\adv_n^D$. That is, for every $n$:
$$
\left| \prob[ D'(C_n(w)) = 1\ |\ D'(C_n(w))\neq {\sf fail } ] -
\prob[ D'(C_n(0^n)) = 1\ |\ D'(C_n(0^n))\neq {\sf fail } ]\right|
> \adv_n^D - \mu(n)
$$
\ECM
\BPF
In this claim, we consider the case in which $\A$ behaves in such
a way that $D'$ never outputs {\sf fail}. There are two possible
cases. In the first case, $D'$ outputs {\sf bad}. Then, $D'$'s
advantage of distinguishing $C_n(w)$ from $C_n(0^n)$ may be~0.
However, this case can occur with at most negligible probability.
In the second case, $D'$ does not output {\sf bad}. Therefore,
when $r_1$ and $r_2$ appear in $\A$'s output view (as is the case
when $\A$ does not abort), it holds that $r_1 \oplus r_2 =
f^n(s)$, where $(f,f^n(s))$ is part of $D'$'s input commitment
$C_n(\alpha)$. Furthermore, when $\alpha=w$, the distribution over
$\A$'s final view in the simulation is identical to its view
$V_n^{\A}(v,w)$ in a real execution with the honest prover. On the
other hand, when $\alpha=0^n$, the distribution over $\A$'s final
view in the simulation with $D'$ is identical to its view
$V_n^{\A}(v,0^n)$ in a simulated execution with $\A'$. We stress
that the two simulations by $D'$ of the coin-tossing are
completely independent. Therefore, the conditioning over $D'$ not
outputting {\sf fail} has no effect over the view of $\A$.
We therefore have that for both cases of $\alpha=w$ and
$\alpha=0^n$,
$$
\left|\prob[D'(C_n(\alpha))=1\ |\ D'(C_n(\alpha))\neq {\sf fail }]
-\prob[D(V_n^{\A}(v,\alpha))=1]\right| < \mu(n)
$$
where the negligible difference of $\mu(n)$ is due to the case
that $D'$ may output {\sf bad}. Therefore,
$$
\left|\prob[D'(C_n(w))=1\ |\ D'(C_n(w))\neq {\sf fail}] -
\prob[D'(C_n(0^n))=1\ |\ D'(C_n(0^n))\neq {\sf fail}]\right| \geq
\adv_n^D - 2\mu(n)
$$
This completes the proof of the claim.
\EPF
\ni Combining Claims~\ref{clm:bound-fail} and~\ref{clm:dist}, we
have that for any $n$ for which $\adv_n^D \geq 1/p(n)$,
$$
\left| \prob[ D'(C_n(w)) = 1] - \prob[ D'(C_n(0^n)) = 1]\right| >
\frac{\adv_n^D-\mu(n)}{p(n)}
$$
By the assumption in Eq.~(\ref{eq:dist-C}), there are infinitely
many $n$'s for which $\adv_n^D \geq 1/p(n)$. Therefore, we
conclude that there are infinitely many $n$'s for which
$$
\left| \prob[ D'(C_n(w)) = 1] - \prob[ D'(C_n(0^n)) = 1]\right| >
\frac{1}{p(n)^2} -\mu(n)
$$
in contradiction to the hiding property of the commitment scheme
$C_n$. \eqref{eq:views} therefore follows, and we obtain that the
simulation by $\A'$ is indistinguishable. That is, the view of
$\A$ in a real execution of $\Pi$ is indistinguishable from its
view in $\Pi'$ with the simulation by $\A'$. This suffices because
the view of $P_2$ when it plays the prover always contains the
statement $v$ only. (Recall that honest parties do not write the
contents of Protocol~\ref{prot:fullzkpok} onto their $\Pi'$
transcript.) Therefore, the joint distribution over the view of
$\A$ and output of $P_2$ in $\Pi$ is indistinguishable from that
in $\Pi'$.
\paragraph{Conclusion.}
As we have mentioned, all of the $\Pi'$-messages are forwarded
unmodified by $\A'$ between $\A$ and $P_2$. In addition, the
executions of Protocol~\ref{prot:fullzkpok} are simulated so that
the joint distribution of $\A$'s view and $P_2$'s output in all
executions of Protocol~\ref{prot:fullzkpok} in $\Pi$ is
indistinguishable from that in $\Pi'$. This holds both when $\A$ plays
the prover and the verifier. Finally, $\A'$ outputs whatever $\A$
does. Combining all this together, we obtain that the joint
distribution over $\A$'s and $P_2$'s outputs in $\Pi$ is
indistinguishable from the joint distribution over $\A'$ and
$P_2$'s output in $\Pi'$. This completes the proof.
\EPF
\paragraph{Obtaining Equation (\ref{eq:zkhybrid-weak}).}
In order to formally prove \eqref{eq:zkhybrid-weak}, take the
protocol $\Pi$ in Lemma~\ref{lem:fullzkpok} to be Protocol~$\tpi$
from \eqref{eq:zkhybrid-weak}. Note that $\tpi$ is exactly of the
form $\Pi$ that is described in Lemma~\ref{lem:fullzkpok}. Next,
notice that adversary $\ta$ from \eqref{eq:zkhybrid-weak} is {\em
non-abusing}. This follows from the construction of $\ta$, and is
discussed in Footnote~\ref{foot:nonabusing}. We can therefore
apply Lemma~\ref{lem:fullzkpok} and so obtain
\eqref{eq:zkhybrid-weak}.
\section*{Acknowledgements}
We would like to thank Ran Canetti, Jonathan Katz, Rafael Pass, Tal
Rabin, and Alon Rosen for helpful discussions and comments.
\begin{thebibliography}{ABCD}
\bibitem{B} B.~Barak.
\newblock How to Go Beyond the Black-Box Simulation Barrier.
\newblock In {\em $42$nd FOCS}, pages 106--115, 2001.
\bibitem{B02} B.~Barak.
\newblock Constant-Round Coin-Tossing With a Man in the Middle or
Realizing the Shared Random String Model.
\newblock In {\em $43$rd FOCS}, pages 345--355, 2002.
\bibitem{BG} B.~Barak and O.~Goldreich.
\newblock Universal Arguments and their Applications.
\newblock {\em $17$th IEEE Conference on Computational Complexity},
pages 194--203, 2002.
\bibitem{BL} B.~Barak and Y.~Lindell.
\newblock Strict Polynomial-Time in Simulation and Extraction.
\newblock {\em SIAM Journal on Computing,} 33(4):783--818, 2004.
\bibitem{Be1} D.~Beaver.
\newblock Foundations of Secure Interactive Computing.
\newblock In {\em CRYPTO'91}, Springer-Verlag (LNCS 576), pages 377--391, 1991.
\bibitem{Blum} M.~Blum.
\newblock Coin Flipping by Phone.
\newblock {\em IEEE Spring COMPCOM}, pages 133--137, 1982.
\bibitem{C97} R.~Canetti.
\newblock Security and Composition of Multiparty Cryptographic Protocols.
\newblock {\em Theory of Cryptography Library}, Record 98-18,
version of June 4th, 1998 (later versions do not contain the
referenced material).
\bibitem{C00} R.~Canetti.
\newblock Security and Composition of Multiparty Cryptographic Protocols.
\newblock {\em Journal of Cryptology}, 13(1):143--202, 2000.
\bibitem{C01} R.~Canetti.
\newblock Universally Composable Security: A New Paradigm for Cryptographic
Protocols.
\newblock In {\em $42$nd FOCS}, pages 136--145, 2001.
\newblock Full version available at the Cryptology ePrint Archive
{\tt http://eprint.iacr.org/}, Report \#067, 2000.
\bibitem{C04} R.~Canetti.
\newblock On Universally Composable Notions of Security for Signature, Certification and
Authentication.
\newblock Available at the Cryptology ePrint Archive
{\tt http://eprint.iacr.org/}, Report \#2003/239, 2003.
\bibitem{CF} R.~Canetti and M.~Fischlin.
\newblock Universally Composable Commitments.
\newblock In {\em CRYPTO'01}, Springer-Verlag (LNCS 2139), pages 19--40, 2001.
\bibitem{CKPR}
R.~Canetti, J.~Kilian, E.~Petrank, and A.~Rosen.
\newblock {Black-Box Concurrent Zero-Knowledge Requires $\tilde\Omega(\log n)$ Rounds}.
\newblock In {\em $33$rd STOC}, pages 570--579, 2001.
\bibitem{CKL} R.~Canetti, E.~Kushilevitz and Y.~Lindell.
\newblock On the Limitations of Universally Composable Two-Party
Computation Without Set-Up Assumptions.
\newblock In {\em Eurocrypt 2003,} Springer-Verlag (LNCS 2656),
pages 68--86, 2003.
\bibitem{CLOS}
R.~Canetti, Y.~Lindell, R.~Ostrovsky and A.~Sahai.
\newblock Universally Composable Two-Party and Multi-Party Computation.
\newblock In {\em $34$th STOC}, pages 494--503, 2002.
\newblock Preliminary full version available from Cryptology
ePrint Archive, {\tt http://eprint.iacr.org}, Report \#14, 2002.
\bibitem{DDN}
D.~Dolev, C.~Dwork and M.~Naor.
\newblock Non-malleable cryptography.
\newblock {\em SIAM Journal on Computing,} 30(2):391--437, 2000.
\bibitem{DNS} C.~Dwork, M.~Naor, and A.~Sahai.
\newblock Concurrent Zero-Knowledge.
\newblock In {\em $30$th STOC}, pages 409--418, 1998.
\bibitem{FLS} U.~Feige, D.~Lapidot and A.~Shamir.
\newblock Multiple Non-Interactive Zero-Knowledge Proofs Under General
Assumptions.
\newblock {\em SIAM Journal on Computing}, 29(1):1--28, 1999.
\bibitem{FS90} U.~Feige and A.~Shamir.
\newblock Witness Indistinguishability and Witness Hiding Protocols.
\newblock In {\em 22nd STOC}, pages 416--426, 1990.
\bibitem{GM}
J.~Garay and P.~Mackenzie.
\newblock Concurrent Oblivious Transfer.
\newblock In {\em $41$st FOCS}, pages 314--324, 2000.
\bibitem{oded-book}
O.~Goldreich.
\newblock {\em Foundations of Cryptography: Volume~1 -- Basic Tools.}
\newblock Cambridge University Press, 2001.
\bibitem{GoldreichBook2} O.~Goldreich.
\newblock {\em Foundations of Cryptography: Volume 2 -- Basic Applications}.
\newblock Cambridge University Press, 2004.
\bibitem{GoKa} O.~Goldreich and A.~Kahan.
\newblock How To Construct Constant-Round Zero-Knowledge
Proof Systems for NP.
\newblock {\em Journal of Cryptology}, 9(3):167--190, 1996.
\bibitem{GL} O.~Goldreich and Y.~Lindell.
\newblock Session-Key Generation Using Human Passwords Only.
\newblock In {\em CRYPTO 2001}, Springer-Verlag (LNCS 2139), pages
408--432, 2001.
\bibitem{GMW2} O.~Goldreich, S.~Micali and A.~Wigderson.
\newblock How to Play any Mental Game --
A Completeness Theorem for Protocols with Honest Majority.
\newblock In {\em 19th STOC,} pages 218--229, 1987.
\newblock For details see~\cite{GoldreichBook2}.
\bibitem{GoLe}
S.~Goldwasser and L.~Levin.
\newblock Fair Computation of General Functions
in Presence of Immoral Majority.
\newblock In {\em CRYPTO'90,} Springer-Verlag (LNCS 537), pages 77--93, 1990.
\bibitem{KP} J.~Kilian and E.~Petrank.
\newblock Concurrent and Resettable Zero-Knowledge in
Poly-logarithmic Rounds.
\newblock In {\em $33$rd STOC,} pages 560--569, 2001.
\bibitem{KPR} J.~Kilian, E.~Petrank and C.~Rackoff.
\newblock Lower Bounds for Zero Knowledge on the Internet.
\newblock In {\em $39$th FOCS,} pages 484--492, 1998.
\bibitem{Lin}
Y.~Lindell.
\newblock Parallel Coin-Tossing and Constant-Round Secure
Two-Party Computation.
\newblock {\em Journal of Cryptology}, 16(3):143--184, 2003.
\bibitem{L03b} Y.~Lindell.
\newblock Bounded-Concurrent Secure Two-Party Computation Without Setup
Assumptions.
\newblock In {\em $35$th STOC}, pages 683--692, 2003.
\bibitem{L03c} Y.~Lindell.
\newblock General Composition and Universal Composability in
Secure Multi-Party Computation.
\newblock In {\em $44$th FOCS}, pages 394--403, 2003.
\bibitem{L03d} Y.~Lindell.
\newblock {\em Composition of Secure Multi-Party Protocols -- A
Comprehensive Study.}
\newblock Lecture Notes in Computer Science Vol.~2815,
Springer-Verlag, 2003.
\bibitem{L04a} Y.~Lindell.
\newblock Lower Bounds for Concurrent Self Composition.
\newblock In the {\em 1st Theory of Cryptography Conference}
(TCC), Springer-Verlag (LNCS 2951), pages 203--222, 2004.
\bibitem{L06} Y. Lindell.
\newblock Lower Bounds and Impossibility Results for Concurrent Self
Composition.
\newblock To appear in the {\em Journal of Cryptology.}
\bibitem{LLR}
Y.~Lindell, A.~Lysysanskaya and T.~Rabin.
\newblock On the Composition of Authenticated Byzantine Agreement.
\newblock In {\em $34$th STOC}, pages 514--523, 2002.
\bibitem{MR}
S.~Micali and P.~Rogaway.
\newblock Secure Computation.
\newblock Unpublished manuscript, 1992.
\newblock Preliminary version in {\em CRYPTO'91},
Springer-Verlag (LNCS 576), pages 392--404, 1991.
\bibitem{P04} R.~Pass.
\newblock Bounded-Concurrent Secure Multi-Party Computation with a
Dishonest Majority.
\newblock To appear in the {\em $36$th STOC,} 2004.
\bibitem{PR} R.~Pass and A.~Rosen.
\newblock Bounded-Concurrent Secure Two-Party Computation in a
Constant Number of Rounds.
\newblock In {\em $44$th FOCS}, pages 404--413, 2003.
\bibitem{PW00}
B.~Pfitzmann and M.~Waidner.
\newblock Composition and Integrity Preservation of Secure Reactive
Systems.
\newblock In {\em 7th ACM Conference on Computer and Communication
Security}, pages 245--254, 2000.
\bibitem{PRS} M.~Prabhakaran, A.~Rosen and A.~Sahai.
\newblock Concurrent Zero Knowledge With Logarithmic Round Complexity.
\newblock In {\em $43$rd FOCS,} pages 366--375, 2002.
\bibitem{RK} R. Richardson and J. Kilian.
\newblock On the Concurrent Composition of Zero-Knowledge Proofs.
\newblock In {\em Eurocrypt'99}, Springer-Verlag (LNCS 1592),
pages 415--431, 1999.
\bibitem{R00} A.~Rosen.
\newblock A Note on the Round-Complexity of Concurrent
Zero-Knowledge.
\newblock In {\em CRYPTO 2000,} Springer-Verlag (LNCS 1880), pages
451--468, 2000.
\bibitem{Y} A.~Yao.
\newblock How to Generate and Exchange Secrets.
\newblock In {\em $27$th FOCS}, pages 162--167, 1986.
\end{thebibliography}
\end{document}