% The Chicago Journal of Theoretical Computer Science, Volume 1997, Article 2
% Published by MIT Press, Cambridge, Massachusetts USA
% Copyright 1997 by Massachusetts Institute of Technology
%
\newtoks\rstyle
\rstyle={cjlook}
%
\documentclass[v1.1,published]{cjstruct}
%
% Local style resources for this article.
%
\usestyleresource{amstex}
%
% Local macro definitions for this article.
%
% Make @ a letter so the internal parameter \@beginparpenalty is accessible.
\catcode`\@=11
% Define \nopagebreakbeginpar only if the class has not already provided it:
% \undeftex is (presumably) itself undefined, so \ifx tests whether
% \nopagebreakbeginpar is undefined as well.
\ifx\nopagebreakbeginpar\undeftex
% Forbid a page break at the start of the following list or paragraph.
\newcommand{\nopagebreakbeginpar}{\@beginparpenalty10000}
\fi
% Restore @ to its normal "other" catcode.
\catcode`\@=12
%
% Upright font shape used for the body of proofs.
\newcommand{\prooffontshape}{\fontshape{n}\selectfont}
% Complexity classes (e.g., NP) typeset upright, as ordinary math atoms.
\mathstyleclass{\complexityclass}{\mathord}{\textup}
% Problem names (e.g., Max k-Cut) typeset in small caps, as math atoms.
\mathstyleclass{\problem}{\mathord}{\textsc}
% Calligraphic letter denoting a class of graphs.
\newcommand{\classofgraph}[1]{{\mathcal #1}}
% Cardinality of a set: |#1| with proper opening/closing delimiter spacing.
\newcommand{\setsize}[1]{\mathopen{|}#1\mathclose{|}}
% Small-caps label for problem-description items (Input:, Goal:).
\newcommand{\problemlabel}[1]{\textmd{\textsc{#1}}}
% Function space #1 -> #2, with the arrow spaced as a binary operator.
\newcommand{\funcspace}[2]{#1\mathbin{\rightarrow}#2}
% Colon used in typings such as "c : V -> [k]".
\newcommand{\oftype}{\colon}
% Asymptotic upper, tight, and lower bounds: O, Theta, Omega.
\newcommand{\orderle}[1]{O(#1)}
\newcommand{\ordereq}[1]{\Theta(#1)}
\newcommand{\orderge}[1]{\Omega(#1)}
% Optimum value opt_{#1}; \mathchoice selects a size for each of the four
% math styles (display, text, script, scriptscript).
\newcommand{\optval}[1]{%
\mathchoice{{\mbox{\textsl{opt}}_{#1}}}%
{{\mbox{\textsl{opt}}_{#1}}}%
{{\mbox{\scriptsize\textsl{opt}}_{#1}}}%
{{\mbox{\tiny\textsl{opt}}_{#1}}}%
}
% Absolute value and string length: |#1|.  Same glyphs as \setsize but kept
% as separate commands for semantic distinction in the source.
\newcommand{\absvalue}[1]{\mathopen{|}#1\mathclose{|}}
\newcommand{\strlength}[1]{\mathopen{|}#1\mathclose{|}}
% "Mono": the (weight of the) monochromatic edge set, sized per math style.
\newcommand{\monochredges}{%
\mathchoice{{\mbox{\textsc{Mono}}}}%
{{\mbox{\textsc{Mono}}}}%
{{\mbox{\scriptsize\textsc{Mono}}}}%
{{\mbox{\tiny\textsc{Mono}}}}%
}
% "Cross": the (weight of the) properly colored edge set, sized per style.
\newcommand{\crossedges}{%
\mathchoice{{\mbox{\textsc{Cross}}}}%
{{\mbox{\textsc{Cross}}}}%
{{\mbox{\scriptsize\textsc{Cross}}}}%
{{\mbox{\tiny\textsc{Cross}}}}%
}
% Probability Pr[#1], with auto-sized brackets.
\newcommand{\probability}[1]{\mathord{\text{Pr}}\left[#1\right]}
% Floor function with auto-sized delimiters.
\newcommand{\floor}[1]{\left\lfloor#1\right\rfloor}
% Shorthands for multiplication dot and set intersection.
\newcommand{\mult}{\cdot}
\newcommand{\setintsct}{\cap}
%
\hyphenation{theo-ry}
%
% Macros for the publisher's comment
%
% The circle R symbol for a registered trade name is adapted from the
% circled c copyright symbol defined in The TeXBook by Donald Knuth.
%
% Registered-trademark sign: a tiny upright "R" overlaid on a circle
% (\mathhexbox20D) via \ooalign, typeset as a math superscript.  The
% trailing % signs suppress end-of-line spaces inside the definition.
\def\registered{\({}^{\ooalign%
{\hfil\raise.07ex\hbox{{\tiny\textup{R}}}%
\hfil\crcr\scriptsize\mathhexbox20D\cr}}\)}
%
% end of local macro definitions for this article
%
\begin{document}
%
\begin{articleinfo}
%
\publisher{The MIT Press}
%
\publishercomment{%
{\Large
\begin{center}
Volume 1997, Article 2\\
\textit{3 June 1997}
\end{center}
}
\par\noindent
ISSN 1073--0486\@. MIT Press Journals, Five Cambridge Center, Cambridge, MA
02142-1493 USA; (617)253-2889;
\emph{journals-orders@mit.edu, journals-info@mit.edu}\@.
Published one article at a time in \LaTeX\ source form on the
Internet\@. Pagination varies from copy to copy\@. For more
information and other articles see:
\begin{itemize}
\item \emph{http://www-mitpress.mit.edu/jrnls-catalog/chicago.html}
\item \emph{http://www.cs.uchicago.edu/publications/cjtcs/}
\item \emph{ftp://mitpress.mit.edu/pub/CJTCS}
\item \emph{ftp://cs.uchicago.edu/pub/publications/cjtcs}
\end{itemize}
\sentence
\par\noindent
The \emph{Chicago Journal of Theoretical Computer Science} is
abstracted or indexed in \emph{Research Alert,\registered
SciSearch,\registered Current Contents\registered/Engineering
Computing \& Technology}, and \emph{CompuMath Citation Index\@.\registered}
}
%
\copyrightstmt{%
\copyright 1997 The Massachusetts Institute of Technology\@.
Subscribers are licensed to use journal articles in a variety of
ways, limited only as required to insure fair attribution to authors
and the journal, and to prohibit use in a competing commercial
product\@. See the journal's World Wide Web site for further
details\@. Address inquiries to the Subsidiary Rights Manager, MIT
Press Journals; (617)253-2864;
\emph{journals-rights@mit.edu}\@.\pagebreak[2]
}
%
\journal{Chicago\nolinebreak[1] Journal of Theoretical Computer\nolinebreak[1] Science}
\journalcomment{%
The \emph{Chicago Journal of Theoretical Computer Science}
is a peer-reviewed scholarly journal in theoretical computer
science\@. The journal is committed to providing a forum for
significant results on theoretical aspects of all topics in computer
science\@.
\par
\begin{trivlist}
\item[\emph{Editor in chief:}] Janos Simon
\item[\emph{Consulting editors:}]
Joseph Halpern, Stuart A.\ Kurtz, Raimund Seidel
\item[\emph{Editors:}]
\begin{tabular}[t]{lll}
Martin Abadi & Greg Frederickson & John Mitchell \\
Pankaj Agarwal & Andrew Goldberg & Ketan Mulmuley \\
Eric Allender & Georg Gottlob & Gil Neiger \\
Tetsuo Asano & Vassos Hadzilacos & David Peleg \\
Laszl\'o Babai & Juris Hartmanis & Andrew Pitts \\
Eric Bach & Maurice Herlihy & James Royer \\
Stephen Brookes & Ted Herman & Alan Selman \\
Jin-Yi Cai & Stephen Homer & Nir Shavit \\
Anne Condon & Neil Immerman & Eva Tardos \\
Cynthia Dwork & Howard Karloff & Sam Toueg \\
David Eppstein & Philip Klein & Moshe Vardi \\
Ronald Fagin & Phokion Kolaitis & Jennifer Welch \\
Lance Fortnow & Stephen Mahaney & Pierre Wolper \\
Steven Fortune & Michael Merritt
\end{tabular}
\vspace{1ex}
\item[\emph{Managing editor:}] Michael J.\ O'Donnell
\vspace{1ex}
\item[\emph{Electronic mail:}] \emph{chicago-journal@cs.uchicago.edu}
\end{trivlist}
\sentence
\pagebreak[0]
}
%
\bannerfile{cjtcs-banner.tex}
%
\volume{1997}
%
\articleid{2}
%
\selfcitation{
@article{cj97-02,
author={Viggo Kann and Sanjeev Khanna and Jens Lagergren and
Alessandro Panconesi},
title={On the Hardness of Approximating \protect\(\problem{Max
\protect\(k\protect\)-Cut}\protect\) and Its Dual},
journal={Chicago Journal of Theoretical Computer Science},
volume={1997},
number={2},
publisher={MIT Press},
month={June},
year={1997}
}
}
%
\begin{retrievalinfo}
\end{retrievalinfo}
%
\title{On the Hardness of Approximating
\protect\(\problem{Max \protect\(k\protect\)-Cut}\protect\) and Its Dual}
\shorttitle{Hardness of Approximating \protect\(\problem{Max
\protect\(k\protect\)-Cut}\protect\)}
%
\begin{authorinfo}
%
% Correction to error in cjstruct.sty version 1.1.
%
\def\fixversion{1.1}
\ifx\cjversion\fixversion
\renewcommand{\printname}[1]{
\global\authorstring={#1}
\global\authorstrue
\ifinfo\item Printed name: #1\fi
}
\fi
%
% End of correction.
%
\printname{Viggo Kann}
\collname{}{Kann, Viggo}
\begin{institutioninfo}
\instname{Royal Institute of Technology}
\department{Department of Numerical Analysis and Computing Science}
\address{}{Stockholm}{}{Sweden}{SE-100 44}
\end{institutioninfo}
\email{viggo@nada.kth.se}
\end{authorinfo}
%
\begin{authorinfo}
\printname{Sanjeev Khanna}
\collname{}{Khanna, Sanjeev}
\begin{institutioninfo}
\instname{Bell Laboratories}
\department{Fundamental Mathematics Research Department}
\address{700 Mountain Avenue}{Murray Hill}{NJ}{USA}{07974}
\end{institutioninfo}
\email{sanjeev@research.bell-labs.com}
\end{authorinfo}
%
\begin{authorinfo}
\printname{Jens Lagergren}
\collname{}{Lagergren, Jens}
\begin{institutioninfo}
\instname{Royal Institute of Technology}
\department{Department of Numerical Analysis and Computing Science}
\address{}{Stockholm}{}{Sweden}{SE-100 44}
\end{institutioninfo}
\email{jensl@nada.kth.se}
\end{authorinfo}
%
\begin{authorinfo}
\printname{Alessandro Panconesi}
\collname{}{Panconesi, Alessandro}
\begin{institutioninfo}
\instname{The Swedish Institute of Computer Science}
\address{Box 1263}{Kista}{}{Sweden}{S-174 28}
\end{institutioninfo}
\email{ale@nada.kth.se}
\end{authorinfo}
%
\shortauthors{Kann et al.}
%
\support{Viggo Kann and Jens Lagergren were supported by grants from
the Swedish Natural Science Research Council (NFR) and the Swedish
Research Council for Engineering Sciences (TFR)\@. Alessandro
Panconesi was supported by a postdoctoral fellowship from the
European Research Consortium in Informatics and Mathematics (ERCIM)
and by an Alexander von Humboldt research fellowship and by the
Swedish Institute of Computer Science\@. He enjoyed the hospitality
of the Swedish Institute of Computer Science and the Royal Institute
of Technology (KTH) in Stockholm while performing the research
reported in this article\@.}
%
\begin{editinfo}
\submitted{27}{2}{1996}\reported{27}{11}{1996}
\submitted{14}{1}{1997}\reported{16}{4}{1997}\published{3}{6}{1997}
\end{editinfo}
%
\end{articleinfo}
%
\begin{articletext}
%
\begin{abstract}
%
\begin{sloppypar}
\apar{Abstract-1}
We study the \(\problem{Max \(k\)-Cut}\) problem and its dual, the
\(\problem{Min \(k\)-Partition}\) problem\@. In the
\(\problem{Min \(k\)-Partition}\) problem, given a graph \(G=(V,E)\)
and positive edge weights, we want to find an edge set of minimum
weight whose removal makes \(G\) \(k\)-colorable\@. For the
\(\problem{Max \(k\)-Cut}\) problem we show that, if
\(\complexityclass{P}\neq\complexityclass{NP}\), no polynomial time
approximation algorithm can achieve a relative error better than
\(1/(34k)\)\@. It is well known that a relative error of \(1/k\) is
obtained by a naive randomized heuristic\@.
\end{sloppypar}
%
\apar{Abstract-2}
For the \(\problem{Min \(k\)-Partition}\) problem, we show that for
\(k>2\) and for every \(\epsilon>0\), there exists a constant
\(\alpha\) such that the problem cannot be approximated within
\(\alpha\setsize{V}^{2-\epsilon}\), even for dense graphs\@. Both
problems are directly related to the frequency allocation problem for
cellular (mobile) telephones, an application of industrial relevance\@.
%
\end{abstract}
\asection{1}{Introduction}
%
\apar{1-1}
To motivate the problems studied in this paper, consider the frequency
allocation problem for cellular telephones\@. We are given a
\emph{fixed} set of \(k\) frequencies, and a set of radio
transmitters\@. Moreover, for each pair of transmitters, we are given a
number that represents the percentage of local performance
deterioration incurred if the pair receives the same frequency\@. The
problem is to allocate frequencies to radio transmitters so that the
global performance deterioration is minimized\@. It is indeed natural
to model this problem by a coloring problem for edge-weighted graphs\@.
Vertices of the graph correspond to transmitters, and colors
correspond to frequencies\@. Edges indicate nonzero local performance
deterioration, and their weights indicate the percentages of local
performance deterioration\@. Formally, the graph problem can be stated
as follows:
%
\paragraph{\protect\(\problem{Min
\mathversion{bold}\protect\(k\protect\)-Partition}\protect\)}
%
\begin{description}
%
\item[\problemlabel{Input:}] A graph \(G=(V,E)\), with weighted edges\@.
%
\item[\problemlabel{Goal:}] Find a color assignment
\(c\oftype\funcspace{V}{[k]}\) that minimizes the total weight of
monochromatic edges (an edge is monochromatic if its endpoints have
the same color)\@.
%
\end{description}
%
In other words, we seek an edge set of minimum weight whose removal
makes the graph \(k\)-colorable\@. This problem is relevant in
industrial applications\@.\footnote{Many problems that appear in
applications have the problem formulated here as a special case\@.
However, in this paper we are interested in lower bounds, and
therefore our results apply directly to these problems\@.}
%
\apar{1-2}
For \(k=2\), Garg, Vazirani, and Yannakakis~\cite{cj97-02-08} use
multicommodity flow techniques to give a polynomial time
\(\orderle{\log n}\)-approximation algorithm, where \(n\) denotes the
number of vertices in the input graph \(G\)\@. The problem is
\(\complexityclass{MAX SNP}\)-hard; the best lower bound known for the
approximation of \(\problem{Max Cut}\) (i.e., \(\problem{Max
\(k\)-Cut}\) with \(k=2\)) translates into a \(1.058\) lower bound
for \(\problem{Min \(2\)-Partition}\)\@. Shrinking this gap between
upper and lower approximation bounds appears to be a challenging open
problem\@.
%
\apar{1-3}
When \(k\ge 3\), a result of Petrank implies that it is NP-hard to
approximate \(\problem{Min \(k\)-Partition}\) within
\(O(n)\)~\cite{cj97-02-13}\@. This result, however, holds only for
``sparse'' graphs, that is, graphs with \(\orderle{n}\) edges\@.
Petrank left the approximability of \(\problem{Min \(k\)-Partition}\)
for nonsparse graphs as an open question\@. Edwards~\cite{cj97-02-06}
and Arora, Karger, and Karpinski~\cite{cj97-02-01} have shown that
\(3\)-colorability is polynomial time solvable on graphs where each
vertex has degree at least \(\epsilon n\) for some constant
\(\epsilon>0\)\@.
%
\apar{1-4}
In this paper, we continue this line of research by showing that, for
\(k\geq 3\):
%
\begin{itemize}
%
\item For every fixed \(\epsilon>0\) and every \(\delta<(1-1/k)/2\),
it is \(\complexityclass{NP}\)-hard to approximate \(\problem{Min
\(k\)-Partition}\) within \(O(n^{2-\epsilon})\), even when the
problem is restricted to dense graphs with
\(\setsize{E}=\delta n^{2}\)\@. If \(\delta>(1-1/k)/2\), the graph is
certainly not \(k\)-colorable by Tur\'an's theorem~\cite{cj97-02-15}
(see a standard graph theory text such as \cite{cj97-02-16}, for
instance), and the whole graph is trivially a constant factor
approximation\@.
%
\item It is \(\complexityclass{NP}\)-hard to approximate
\(\problem{Min \(k\)-Partition}\) within \(\orderle{\setsize{E}}\),
even when restricting to graphs with
\(\setsize{E}=\orderge{n^{2-\epsilon}}\)\@.
%
\item \(\problem{Min \(3\)-Partition}\) can be approximated within
\(\epsilon n^{2}\), for every \(\epsilon>0\)\@. This completes the
picture as far as the densities are concerned, and answers
completely the open question of Petrank\@.
%
\end{itemize}
%
\apar{1-5}
In view of the above two negative results, it seems natural to
consider the dual problem:
%
\paragraph{\protect\(\problem{Max
\mathversion{bold}\protect\(k\protect\)-Cut}\protect\)}
%
\begin{description}
%
\item[\problemlabel{Input:}] A graph \(G=(V,E)\) with weighted edges\@.
%
\item[\problemlabel{Goal:}] Find a color assignment
\(c\oftype\funcspace{V}{[k]}\) that maximizes the total weight of
properly colored edges (an edge is properly colored if its endpoints
have different colors)\@.
%
\end{description}
%
There are many interesting theoretical results on this problem\@. In
particular, the \(\problem{Max Cut}\) problem has received much
attention (see \cite{cj97-02-09,cj97-02-14}, for example)\@. In a
seminal paper, Papadimitriou and Yannakakis~\cite{cj97-02-12} showed
that the problem is \(\complexityclass{MAX SNP}\)-complete\@. Recent
results in the theory of probabilistic checking of proofs provide
powerful new tools for proving hardness of approximation results,
under suitable complexity theoretic assumptions (see \cite{cj97-02-02}
for an overview)\@. The best-known lower bound on the relative error
achievable for \(\problem{Max Cut}\) is \(0.0588\), under the
assumption
\(\complexityclass{P}\neq\complexityclass{NP}\)~\cite{cj97-02-10}\@.
For \(\problem{Max \(k\)-Cut}\) it is well known that a naive
randomized heuristic achieves a relative error of \(1/k\): Each vertex
is assigned one of the \(k\) colors uniformly at random\@. This simple
procedure can be derandomized by a straightforward application of the
\emph{method of conditional probabilities} (see \cite{cj97-02-11}, for
example)\@.
%
\apar{1-6}
For the case \(k=2\), the above heuristic algorithm has a relative
error bound of \(0.5\), a bound that held for about two decades until
the recent pioneering work of Goemans and Williamson
\cite{cj97-02-09}\@. Their techniques---a beautiful blend of
mathematical programming and probabilistic methods---yield a relative
error bound of \(0.122\)~\cite{cj97-02-09}\@. More recently, Frieze and
Jerrum~\cite{cj97-02-07} have tried to generalize the Goemans and
Williamson technique to the \(\problem{Max \(k\)-Cut}\) problem\@.
Their results are quite interesting from a technical point of view but
yield only a marginal improvement---from \(1/k\) to (roughly)
\(1/k-2k^{-2}\ln k\)\@. It is natural to ask whether better
approximations are possible, say, a relative error of
\(1/k^{1+\epsilon}\), for some \(\epsilon>0\)\@. In this paper, we give
(under the assumption \(\complexityclass{P}\neq\complexityclass{NP}\))
a negative answer to this question\@. We show that if
\(\complexityclass{P}\neq\complexityclass{NP}\), for each \(k\ge 2\)
the true relative error bound for approximating
\(\problem{Max \(k\)-Cut}\) lies between \(1/k\) and \(1/\sigma k\),
where currently \(\sigma=34\) (the value of \(\sigma\) depends on the
best lower bound available on the approximation of
\(\problem{Max Cut}\))\@. Hence, unless
\(\complexityclass{P}=\complexityclass{NP}\), we cannot hope for
relative error bounds like \(1/k^{1+\epsilon}\) or even, say,
\(1/(k\log\log\dots\log k)\)\@. On the other hand, the naive randomized
heuristics achieve a relative error that is very close to the best
possible (assuming \(\complexityclass{P}\neq\complexityclass{NP}\))\@.
%
\apar{1-7}
A few words on the techniques used in this paper\@. The
\(\problem{Max \(k\)-Cut}\) result is obtained by making use of an
approximation preserving reduction from \(\problem{Max Cut}\)\@. The
difficulty in the proof is to exhibit a reduction that increases the
relative error only linearly as a function of \(k\)\@. The proof uses
\emph{the probabilistic method} and a construction involving the
Hamming distance between characteristic vectors\@. Our reduction to
\(\problem{Max \(k\)-Cut}\) increases the relative error by
approximately a factor of \(2k\)\@. In contrast, the original reduction
in \cite{cj97-02-12} increases the relative error by a factor greater
than \(1000k^{2}\)\@. The \(\problem{Min \(k\)-Partition}\) lower
bound of \(\Omega(n^{2-\epsilon})\) is obtained by amplifying the
NP-hardness of \(k\)-coloring into a zero versus
\(\Omega(n^{2-\epsilon})\) separation\@. The
\(\epsilon n^{2}\)-approximation algorithm is based on the fact that
3-colorability can be solved in polynomial time for dense
instances~\cite{cj97-02-06,cj97-02-01}\@.
%
\apar{1-8}
We emphasize that our hardness results do not depend on the existence of
exponentially large weights in the input instances; in fact, they
also hold for the unweighted case (see also~\cite{cj97-02-05})\@.
%
\apar{1-9}
This paper is organized as follows\@. Section~2 defines the
notion of approximation preserving reductions used in this paper\@. In
Section~3, we present the hardness result for
\(\problem{Max \(k\)-Cut}\) and a precise lower bound computation
using the currently best-known lower bound for \(\problem{Max Cut}\)\@.
Finally, in Section~4, we present our upper and lower
bounds for approximating \(\problem{Min \(k\)-Partition}\)\@.
%
\asection{2}{Definitions}
%
\apar{2-1}
We will use the standard approximation terminology\@. See for
example~\cite{cj97-02-03} for detailed definitions\@.
%
\apar{2-2}
Solving an optimization problem \(F\) given the input instance \(x\)
means finding a solution \(y\) such that the value of the objective
function \(m_{F}(x,y)\) is equal to the optimum (maximum or minimum)\@.
Let \(\optval{F}(x)\) denote this optimum value of \(m_{F}\)\@.
%
\apar{2-3}
Approximating an optimization problem \(F\) given the input \(x\)
means finding some feasible solution \(y\)\@. How good the approximation
is depends on the relation between \(m_{F}(x,y)\) and
\(\optval{F}(x)\)\@. There are two equivalent measures of this
relation: The performance ratio and the relative error\@. The
\emph{performance ratio} of a solution \(y\) to an optimization
problem \(F\) is defined as
%
\[R_{F}(x,y)=
\max\left\{\frac{\optval{F}(x)}{m_F(x,y)},\frac{m_F(x,y)}{\optval{F}(x)}
\right\}\]
\sentence
The \emph{relative error} is defined as
\[\mathcal{E}_{F}(x,y)=
\frac{\absvalue{\optval{F}(x)-m_{F}(x,y)}}{\optval{F}(x)}\]
\sentence
%
\apar{2-4}
These definitions are only meaningful when \(\optval{F}(x)\neq 0\)\@.
This is a problem for \(\problem{Min \(k\)-Partition}\) when the input
graph is \(k\)-colorable\@. To make the definition robust and to
simplify the statement of our results, we define the optimum value for
\(\problem{Min \(k\)-Partition}\) as \(\max\{1,\min_{c}m(G,c)\}\)
where \(G=(V,E)\) is the input graph, \(c\) ranges over all possible
color assignments \(c\oftype\funcspace{V}{\{1,\dots,k\}}\), and
\(m(G,c)\) measures the number of monochromatic edges under \(c\)\@.
%
\apar{2-5}
An optimization problem \(F\) can be \emph{approximated within
\(f(n)\)} for a function \(f\) if there exists a polynomial time
algorithm \(A\) such that for all input instances \(x\), \(A(x)\) is a
feasible solution and \(R_{F}(x,A(x))\le f(\strlength{x})\)\@. The
algorithm \(A(x)\) is called an \emph{\(f(n)\)-approximation
algorithm}\@.
%
\apar{2-6}
Although various reductions preserving approximability within
constants have been proposed (see \cite{cj97-02-04}), the L-reduction
defined in \cite{cj97-02-12} is perhaps the easiest to use\@. Given two
NP optimization problems \(F\) and \(G\), an \emph{L-reduction} from
\(F\) to \(G\) consists of two polynomial-time computable functions
\(f\) and \(g\) and two positive constants \(\alpha\) and \(\beta\),
such that \(f\) transforms instances of \(F\) into instances of \(G\),
and such that, for every instance \(x\) of \(F\),
%
\begin{itemize}
%
\item \(\optval{G}(f(x))\le\alpha\mult\optval{F}(x)\), and
%
\item for every feasible solution \(y\) of \(f(x)\) with objective value
\(m_G(f(x),y)=s_2\), \(y'=g(x,y)\) is a feasible solution of \(x\)
with \(m_{F}(x,y')=s_{1}\) such that
\(\absvalue{\optval{F}(x)-s_{1}}\leq\beta\absvalue{\optval{G}(f(x))-s_{2}}\)\@.
%
\end{itemize}
%
\apar{2-7}
The composition of L-reductions is also an L-reduction\@. If \(F\)
L-reduces to \(G\) with constants \(\alpha\) and \(\beta\) and there
is a polynomial time approximation algorithm for \(G\) with worst-case
relative error \(\epsilon\), then there is a polynomial time
approximation algorithm for \(F\) with worst-case relative error
\(\alpha\beta\epsilon\) \cite{cj97-02-12}\@. Conversely, if \(\delta\)
is a lower bound for the approximation of \(F\), then
\(\delta/(\alpha\beta)\) is a lower bound for \(G\)\@. An L-reduction
from \(F\) to \(G\), therefore, can be used both to show hardness of
approximability (for \(G\)) and to find a new approximation algorithm
(for \(F\))\@.
%
\apar{2-8}
Given two \(k\)-dimensional vectors \(\chi_{1}\) and \(\chi_{2}\), the
Hamming distance between them is denoted by \(h(\chi_{1},\chi_{2})\)\@.
For a graph \(G\) and a vertex \(v\) of \(G\), we denote the degree of
\(v\) in \(G\) by \(d_{G}(v)\)\@. If the edges of \(G\) have positive
weights, the degree of a vertex \(v\) refers to its \emph{weighted
degree}, given by the sum of the weights of the edges incident on
it\@. In this paper, a \emph{coloring} of a graph \(G\) is \emph{any}
assignment of colors to the vertices, and a \emph{\(k\)-coloring} is
any coloring using at most \(k\) colors\@.
%
\asection{3}{Hardness of Approximating
\protect\(\problem{Max
{\mathversion{bold}\protect\(k\protect\)}-Cut}\protect\)}
%
\apar{3-1}
In this section, we give an L-reduction from \(\problem{Max Cut}\) to
\(\problem{Max \(k\)-Cut}\)\@. The reduction maps a
\(\problem{Max Cut}\) instance \(G\) to a \(\problem{Max \(k\)-Cut}\)
instance \(H\), and is such that:
%
\begin{itemize}
%
\item \(\optval{\problem{Max \(k\)-Cut}}(H)\leq
k(k-1)\optval{\problem{Max Cut}}(G)\), i.e., \(\alpha=k(k-1)\), and
%
\item given any \(\problem{Max \(k\)-Cut}\) solution \(h\) with cost
\(s_{h}\), we can find in polynomial time a \(\problem{Max Cut}\)
solution \(g\) with cost \(s_{g}\) satisfying
\[\absvalue{\optval{\problem{Max Cut}}(G)-s_{g}}\leq
\frac{2}{k}\absvalue{\optval{\problem{Max \(k\)-Cut}}(H)-s_{h}}\]
i.e., \(\beta=2/k\)\@.
%
\end{itemize}
%
\apar{3-2}
This reduction together with a lower bound of the approximability of
\(\problem{Max Cut}\) gives the following theorem\@.
%
\begin{alabsubtext}{Theorem}{1}{}
%
There is a constant \(\sigma\) between \(1\) and \(34\) such that it
is \(\complexityclass{NP}\)-hard to approximate \(\problem{Max
\(k\)-Cut}\) with relative error less than \(\frac{1}{\sigma(k-1)}\)\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Theorem}{1}{}
\prooffontshape
%
\apar{Prove Theorem 1-1}
A lower bound of \(\delta\) for the relative error of
\(\problem{Max Cut}\) will, using the L-reduction, give us a lower
bound of \(\frac{\delta}{\alpha\beta}=\frac{\delta}{2(k-1)}\) for
\(\problem{Max \(k\)-Cut}\)\@. In H\aa stad~\cite{cj97-02-10} it was
shown that it is \(\complexityclass{NP}\)-hard to approximate
\(\problem{Max Cut}\) within \(17/16\), which gives us the lower bound
\(1/17\) for the relative error, and the theorem follows\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Theorem 1}}
\end{alabsubtext}
%
\begin{alabsubtext}{Remark}{1}{}
%
Better lower bounds for \(\problem{Max Cut}\) will automatically give
better lower bounds for \(\problem{Max \(k\)-Cut}\)\@.
%
\end{alabsubtext}
%
\apar{3-3}
We now give the L-reduction from \(\problem{Max Cut}\) to
\(\problem{Max \(k\)-Cut}\)\@. In the proof, we assume that \(k\) is
even, and we transform an unweighted graph \(G\) into a weighted graph
\(H\)\@. These two restrictions can be assumed without loss of
generality\@. The case when \(k\) is odd follows from the following
simple reduction from \(\problem{Max \(k\)-Cut}\) to
\(\problem{Max \((k+1)\)-Cut}\): Given an instance \(H\) of the
former, produce an instance \(H'\) of the latter by adding an extra
node \(u\) and connecting it to all vertices in \(H\)\@. All edges in
\(H'\) have weight \(1\), except edges of the form \((u,x)\),
\(x\in V(H)\), which have weight \(w(u,x)=d_{H}(u)/k\)\@. This is an
L-reduction with \(\alpha=(k+1)/(k-1)\) and \(\beta=1\)\@. The proof
is rather straightforward and hence is omitted\@. Just observe that one
can assume without loss of generality that all cuts in \(H'\) use
color \(k+1\) for \(u\) and colors \(1\) through \(k\) for the
isomorphic copy of \(H\) contained in \(H'\)\@. The second restriction
(the fact that \(H\) is weighted, while \(G\) is not) could be
eliminated by further refining the reduction to yield an unweighted
instance of the \(\problem{Max \(k\)-Cut}\) problem\@. This is not
needed, however, in view of the more general result of Crescenzi,
Silvestri, and Trevisan~\cite{cj97-02-05}\@. They show that for a
rather general class of optimization problems, including
\(\problem{Max \(k\)-Cut}\), the approximation ratios of the weighted
and unweighted cases coincide, provided the weights are polynomially
bounded\@. Our reduction below uses weights bounded by
\(\setsize{V(G)}\), and hence that result applies\@.
%
\apar{3-4}
We now turn to the task of defining the L-reduction\@. The graph \(H\)
is defined as follows\@. Assume that \(V(G)=\{v_{1},\dots,v_{n}\}\)\@.
Let \(V_{1},\dots,V_{k/2}\) be sets (of vertices) defined by
\(V_{i}=\{v_{1}^{i},\dots,v_{n}^{i}\}\)\@. That is, the sets
\(V_{1},\dots,V_{k/2}\) are ``copies'' of the vertex set of \(G\)\@.
Actually, we will say that for every \(1\leq i\leq k/2\), the vertex
\(v_{l}^{i}\) is a \emph{copy} of the vertex \(v_{l}\) of \(G\)\@.
Let \(H\) be the graph with vertex set
\(V_{1}\cup\dots\cup V_{k/2}\) and with the following edges:
%
\begin{itemize}
%
\item If \((v_{l},v_{m})\) is an edge of
\(G\), then \((v_{l}^{i},v_{m}^{j})\) is an edge of \(H\), for
\(1\leq i\leq j\leq k/2\)\@. These edges are called \emph{cross edges}\@.
%
\item All edges between all copies of the same vertex, i.e., for every
vertex \(v_{l}\) of \(G\) and \(1\leq i<j\leq k/2\), the edge
\((v_{l}^{i},v_{l}^{j})\) is an edge of \(H\)\@.
%
\end{itemize}
%
% NOTE(review): the source appears to be truncated at this point -- the
% remainder of Section 3 and the opening of Section 4 are missing (the
% original text fused "\(1\leq i" directly onto "2\), it is").  The
% \end{itemize}, the Section 4 heading, \apar{4-1}, and
% \begin{sloppypar} (matching the \end{sloppypar} below) have been
% reconstructed from context; confirm against the published article.
%
\asection{4}{Bounds for Approximating
\protect\(\problem{Min
{\mathversion{bold}\protect\(k\protect\)}-Partition}\protect\)}
%
\apar{4-1}
\begin{sloppypar}
In this section, we show that, for \(k>2\), it is
\(\complexityclass{NP}\)-hard to approximate \(\problem{Min
\(k\)-Partition}\) to within a factor of
\(\orderle{n^{2-\epsilon}}\) for every \(\epsilon>0\)\@. An interesting
aspect of this result is that it holds even when we restrict ourselves
to graphs with \(\orderge{n^{2-\epsilon}}\) edges\@.
\end{sloppypar}
%
\begin{alabsubtext}{Definition}{1}{Dense and Everywhere Dense Graphs}
%
An \(n\)-vertex graph is called \emph{dense} if it has
\(\orderge{n^{2}}\) edges, and it is called (everywhere)
\(\epsilon\)-dense if each vertex in the graph has degree at least
\(\epsilon n\) for some fixed \(\epsilon>0\)\@.
%
\end{alabsubtext}
%
\apar{4-2}
We begin with the following simple lemma\@.
%
\begin{alabsubtext}{Lemma}{6}{}
%
For each \(k\geq 3\), it is \(\complexityclass{NP}\)-hard to decide if
a graph is \(k\)-colorable over graphs with
\(\Theta(n^{\alpha})\) edges, where \(0<\alpha\leq 2\)\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Lemma}{6}{}
\prooffontshape
%
\apar{Prove Lemma 6-1}
It suffices to show that the result holds on the family of
\(n\)-vertex graphs with \(\orderge{n^{2}}\) edges (i.e., dense
graphs): We can obtain a graph with \(\ordereq{n^{\alpha}}\) edges
by adding a disjoint empty graph on \(\ordereq{n^{{2}/{\alpha}}}\)
vertices\@. Clearly, this transformation leaves the chromatic number
unchanged\@.
%
\apar{Prove Lemma 6-2}
We show only the hardness of \(3\)-coloring on dense graphs:
Edwards~\cite{cj97-02-06} proved that for \(k\geq 4\), \(k\)-coloring
a dense graph is \(\complexityclass{NP}\)-hard\@. Given a graph \(G\)
on \(n\) vertices, we construct another graph \(G'\), which is simply
a disjoint union of \(G\) and a complete \(3\)-partite graph \(H\) on
\(N\) vertices such that each part of the partition has the same size\@.
If we choose \(N\) to be the smallest multiple of 3 greater than or
equal to \(n^{2}\), then clearly the graph \(G'\) is dense\@.
Furthermore, it is immediate from the construction that \(G'\) is
3-colorable if and only if \(G\) is \(3\)-colorable\@. Hence it is
\(\complexityclass{NP}\)-hard to decide if a dense graph is
\(3\)-colorable\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Lemma 6}}
\end{alabsubtext}
%
\apar{4-3}
From here on, we use \(\gamma(G,k)\) to denote the minimum number of
edges that must be deleted from \(G\) to obtain a \(k\)-partite graph\@.
The next lemma shows that the \(\complexityclass{NP}\)-hardness of
distinguishing between \(\gamma(G,k)=0\) and \(\gamma(G,k)\geq 1\) can
be amplified to that of distinguishing between \(\gamma(H,k)=0\) and
\(\gamma(H,k)=\orderge{n^{2-\epsilon}}\) for each fixed
\(\epsilon>0\)\@. This yields the desired hardness result\@. The precise
statement is as follows\@.
%
\begin{alabsubtext}{Lemma}{7}{Amplification Lemma}
%
For all positive integers \(k\) and \(s\), given a graph \(G\), one can
in polynomial time construct a graph \(H\) such that
\(\setsize{V(H)}=s\setsize{V(G)}\),
\(\setsize{E(H)}=s^{2}\setsize{E(G)}\), and
\(\gamma(H,k)=s^2\gamma(G,k)\)\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Lemma}{7}{}
\prooffontshape
%
\apar{Prove Lemma 7-1}
The graph \(H\) in the Amplification lemma is constructed as follows:
%
\begin{itemize}
%
\item For each vertex \(u\) in \(G\), the set \(V(H)\) contains \(s\)
copies named \(u_{1},\dots,u_{s}\)\@.
%
\item \((u_{i},v_{j})\) is an edge in \(H\) if and only if \((u,v)\)
is an edge in \(G\)\@.
%
\end{itemize}
%
Notice that the set of copies of each vertex \(u\), that is,
\(\{u_{1},\dots,u_{s}\}\), is an independent set of \(H\)\@.
It is easily seen that \(\setsize{V(H)}=s\setsize{V(G)}\),
\(\setsize{E(H)}=s^{2}\setsize{E(G)}\), and that the construction can
be done in polynomial time\@.
%
In the remainder of this section, \(H\) will denote the graph obtained
from \(G\) by the construction above\@.
%
\apar{Prove Lemma 7-2}
\begin{alabsubtext}{Definition}{2}{Quasi-coloring}
%
A coloring of a graph \(G\) is called an \(\langle m\rangle\) quasi-coloring
if it induces at most \(m\) monochromatic edges\@.
%
\end{alabsubtext}
%
We say that an \(\langle m\rangle\) quasi-coloring of \(H\) is
\emph{normalized} if and only if, for each \(u \in V(G)\), all the
\(s\) copies of \(u\) in \(H\), that is, \(u_{1},\dots,u_{s}\), have
the same color\@.
%
\begin{alabsubtext}{Lemma}{8}{}
%
Each \(\langle m\rangle\) quasi-coloring of \(H\) can be transformed into a
normalized \(\langle m\rangle\) quasi-coloring in polynomial time\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Lemma}{8}{}
\prooffontshape
%
\apar{Prove Lemma 8-1}
Let \(c\) be a given \(\langle m\rangle\) quasi-coloring\@. Consider
any vertex \(u\) in \(G\) and let \(u_{\min}\) be the copy of \(u\)
in \(H\) with the smallest number of monochromatic edges incident on
it under the coloring \(c\)\@. Since the \(u_{i}\)s have the same
neighbors, the coloring \(c'\) that assigns the color of the vertex
\(u_{\min}\) to each copy \(u_{i}\), and agrees with coloring \(c\)
everywhere else, is still an \(\langle m\rangle\) quasi-coloring\@.
Iterating this process over each vertex \(u\) in \(G\), we get a
normalized \(\langle m\rangle\) quasi-coloring for \(H\)\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Lemma 8}}
\end{alabsubtext}
%
\apar{Prove Lemma 7-3}
We can now prove that the graph \(H\) constructed above satisfies
\(\gamma(H,k)=s^{2}\gamma(G,k)\)\@. Every \(\langle m\rangle\)
quasi-coloring of \(G\) can be transformed into an
\(\langle s^{2}m\rangle\) quasi-coloring for \(H\)\@. For each vertex
\(u\) in \(G\), assign its color to all the copies of \(u\) in
\(H\)\@. Thus \(\gamma(H,k)\leq s^{2}\gamma(G,k)\)\@.
%
\apar{Prove Lemma 7-4}
On the other hand, an \(\langle m\rangle\) quasi-coloring for \(H\)
implies an \(\langle\floor{m/s^{2}}\rangle\) quasi-coloring for \(G\)\@.
By Lemma~8, we can assume without loss of generality that we are given
a normalized \(\langle m \rangle\) quasi-coloring, say \(c_{H}\), of
\(H\)\@. Construct a coloring \(c_{G}\) for \(G\) by assigning each
vertex \(u\) the same color as assigned to each of its copies in \(H\)
by the coloring \(c_{H}\)\@. By our construction of the graph \(H\), it
is easy to see that each monochromatic edge in \(G\) corresponds to a
unique set of \(s^{2}\) monochromatic edges in \(H\)\@. We conclude
that \(\gamma(H,k)\geq s^{2}\gamma(G,k)\)\@. The lemma follows\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Lemma 7}}
\end{alabsubtext}
%
\apar{4-5}
Hence we have the following theorem\@.
%
\begin{alabsubtext}{Theorem}{2}{}
%
For every fixed \(\epsilon>0\) and every
\((2-\epsilon)<\alpha\leq2\), it is \(\complexityclass{NP}\)-hard to
approximate \(\gamma(\cdot,k)\) within \(\orderle{n^{2-\epsilon}}\)
over graphs with \(\ordereq{n^{\alpha}}\) edges\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Theorem}{2}{}
\prooffontshape
%
\apar{Prove Theorem 2-1}
Lemma~6 shows that it is \(\complexityclass{NP}\)-hard to
distinguish between \(\gamma(G,k)=0\) and \(\gamma(G,k)\geq 1\),
even when we are restricted to a family of graphs with
\(\ordereq{n^{\alpha}}\) edges where \(0<\alpha\leq 2\)\@.
%
\begin{sloppypar}
\apar{Prove Theorem 2-2}
To show the desired result, we start with the family of graphs with
\(\ordereq{n^{\alpha+{(\alpha-2)(2-\epsilon)}/{\epsilon}}}\) edges\@.
Given a graph \(G\) in this family, we construct the graph \(H\) using
the Amplification lemma, where we choose
\(s=n^{\frac{2}{\epsilon}-1}\)\@. Clearly, \(\gamma(H,k)\) is \(0\) if
and only if \(\gamma(G,k)=0\), and \(\gamma(H,k)\) is
\(\Omega(N^{2-\epsilon})\) otherwise, where \(N\) is the number of
vertices in \(H\)\@. This completes the proof\@.
\end{sloppypar}
%
{\nopagebreakbeginpar\markendlst{Proof of Theorem 2}}
\end{alabsubtext}
%
\begin{alabsubtext}{Remark}{2}{}
%
\begin{sloppypar}
In fact, we can show that it is \(\complexityclass{NP}\)-hard to
approximate \(\problem{Min \(k\)-Partition}\) within
\(\orderle{n^{2-\epsilon}}\), even when restricting the problem to
dense graphs with \(\setsize{E}=\delta n^{2}\) for any
\(\delta<(1-1/k)/2\)\@. This is a sharp bound, since if
\(\delta>(1-1/k)/2\), the graph cannot be \(k\)-colorable (according
to Tur\'an's theorem), and the whole graph is trivially a
\(1+2\delta/(1-1/k)\)-approximation\@.
\end{sloppypar}
%
\end{alabsubtext}
%
\apar{4-6}
The following lemma shows that the hardness result established above is
almost tight when \(k=3\)\@.
%
\begin{alabsubtext}{Lemma}{9}{}
%
\(\gamma(G,3)\) can be approximated within \(\epsilon n^{2}\) for
every fixed \(\epsilon>0\)\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Lemma}{9}{}
\prooffontshape
%
\apar{Prove Lemma 9-1}
Let \(p=1/\epsilon\)\@. If \(\gamma(G,3)>p\), then the set of all
edges constitutes a sufficiently good solution (as discussed in
Section~2, we take \(\max(\gamma(G,3),1)\) to compute the
performance ratio)\@. So we assume that \(\gamma(G,3)\leq p\)\@. Delete
from \(G\) all vertices of degree at most \((\epsilon/2)n\)\@. We
delete at most \((\epsilon/2)n^{2}\) edges in this process\@. Now we
are left with a graph \(G'\) that satisfies the property that each
vertex has degree at least \((\epsilon/2)n\)\@. In \(G'\), for every
subset \(F\) of at most \(p\) edges, we test whether the graph obtained by
deleting \(F\) from \(G'\) is 3-colorable\@. Assuming that \(n\) is
large enough, the resulting graph is still \(\epsilon'\)-dense for
some \(\epsilon'<\epsilon\)\@. Hence if the resulting graph is
3-colorable, we can indeed verify so by using the coloring algorithm
of Edwards~\cite{cj97-02-06} or Arora et al.~\cite{cj97-02-01}\@. The
result follows\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Lemma 9}}
\end{alabsubtext}
%
\apar{4-7}
Petrank showed in~\cite{cj97-02-13} that for graphs with
\(\ordereq{n}\) edges, it is \(\complexityclass{NP}\)-hard to
approximate \(\gamma(H,k)\) to within a factor of
\(\orderle{\setsize{E(H)}}\) (whenever \(k\geq 3\))\@. We can use the
Amplification lemma to strengthen this result and obtain the
following\@.
%
\begin{alabsubtext}{Theorem}{3}{}
%
For each \(k>2\) and \(0<\epsilon<1\),
it is \(\complexityclass{NP}\)-hard to approximate \(\gamma(H,k)\) within
\(\orderle{\setsize{E(H)}}\) over the graphs with
\(\orderge{n^{2-\epsilon}}\) edges\@.
%
\end{alabsubtext}
%
\begin{alabsubtext}{Proof of Theorem}{3}{}
\prooffontshape
%
\apar{Prove Theorem 3-1}
Petrank showed in~\cite{cj97-02-13} the following\@. For each
\(k\geq 3\), there are constants \(c\) and \(c'\) such that it is
\(\complexityclass{NP}\)-hard to tell whether a given graph \(G\),
from a family \(\classofgraph{F}\) of graphs with \(c'n\)
(that is, \(\ordereq{n}\)) edges, satisfies \(\gamma(G,k)=0\) or
\(\gamma(G,k)\geq cn\)\@.
Applying the Amplification lemma with \(s=n^{(1-\epsilon)/\epsilon}\)
to a graph \(G\) of \(\classofgraph{F}\) gives a graph \(H\) such
that: \(\setsize{V(H)}=sn\), \(\setsize{E(H)}=c's^{2}n\),
\(\gamma(G,k)=0\) if and only if \(\gamma(H,k)=0\), and
\(\gamma(G,k)\geq cn\) if and only if \(\gamma(H,k)\geq s^{2}cn\)\@.
That is, it is \(\complexityclass{NP}\)-hard to approximate
\(\gamma(H,k)\) within \(\orderle{\setsize{E(H)}}\)\@. Since
\(sn=n^{1/\epsilon}\) and \(c's^{2}n=c'n^{(2-\epsilon)/\epsilon}\),
the result follows\@.
%
{\nopagebreakbeginpar\markendlst{Proof of Theorem 3}}
\end{alabsubtext}
%
\asection{5}{Acknowledgments}
%
\apar{5-1}
We thank Naveen Garg and Johan H{\aa}stad for several useful discussions\@.
%
\end{articletext}
%
\begin{articlebib}
%
\bibliographystyle{\the\rbibstyle}
\bibliography{cj97-02}
%
\end{articlebib}
%
\end{document}