bzcat(1)
NAME
bzip2, bunzip2 - a block-sorting file compressor, v1.0.2
bzcat - decompresses files to stdout
bzip2recover - recovers data from damaged bzip2 files
SYNOPSIS
bzip2 [ -cdfkqstvzVL123456789 ] [ filenames ... ]
bunzip2 [ -fkvsVL ] [ filenames ... ]
bzcat [ -s ] [ filenames ... ]
bzip2recover filename
DESCRIPTION
bzip2 compresses files using the Burrows-Wheeler block sorting text compression algorithm, and Huffman coding. Compression is generally considerably better than that achieved by more conventional LZ77/LZ78-based compressors, and approaches the performance of the PPM family of statistical compressors.

The command-line options are deliberately very similar to those of GNU gzip, but they are not identical.

bzip2 expects a list of file names to accompany the command-line flags. Each file is replaced by a compressed version of itself, with the name "original_name.bz2". Each compressed file has the same modification date, permissions, and, when possible, ownership as the corresponding original, so that these properties can be correctly restored at decompression time. File name handling is naive in the sense that there is no mechanism for preserving original file names, permissions, ownerships or dates in filesystems which lack these concepts, or have serious file name length restrictions, such as MS-DOS.

bzip2 and bunzip2 will by default not overwrite existing files. If you want this to happen, specify the -f flag.

If no file names are specified, bzip2 compresses from standard input to standard output. In this case, bzip2 will decline to write compressed output to a terminal, as this would be entirely incomprehensible and therefore pointless.

bunzip2 (or bzip2 -d) decompresses all specified files. Files which were not created by bzip2 will be detected and ignored, and a warning issued. bzip2 attempts to guess the filename for the decompressed file from that of the compressed file as follows:

       filename.bz2     becomes   filename
       filename.bz      becomes   filename
       filename.tbz2    becomes   filename.tar
       filename.tbz     becomes   filename.tar
       anyothername     becomes   anyothername.out

If the file does not end in one of the recognised endings, .bz2, .bz, .tbz2 or .tbz, bzip2 complains that it cannot guess the name of the original file, and uses the original name with .out appended.

You can also compress or decompress files to the standard output by giving the -c flag. Multiple files may be compressed and decompressed like this. The resulting outputs are fed sequentially to stdout. Compression of multiple files in this manner generates a stream containing multiple compressed file representations. Such a stream can be decompressed correctly only by bzip2 version 0.9.0 or later. Earlier versions of bzip2 will stop after decompressing the first file in the stream.

bzcat (or bzip2 -dc) decompresses all specified files to the standard output.

bzip2 will read arguments from the environment variables BZIP2 and BZIP, in that order, and will process them before any arguments read from the command line. This gives a convenient way to supply default arguments.

Compression is always performed, even if the compressed file is slightly larger than the original. Files of less than about one hundred bytes tend to get larger, since the compression mechanism has a constant overhead in the region of 50 bytes. Random data (including the output of most file compressors) is coded at about 8.05 bits per byte, giving an expansion of around 0.5%.

As a self-check for your protection, bzip2 uses 32-bit CRCs to make sure that the decompressed version of a file is identical to the original. This guards against corruption of the compressed data, and against undetected bugs in bzip2 (hopefully very unlikely). The chances of data corruption going undetected are microscopic, about one chance in four billion for each file processed. Be aware, though, that the check occurs upon decompression, so it can only tell you that something is wrong. It can't help you recover the original uncompressed data. You can use bzip2recover to try to recover data from damaged files.

Return values: 0 for a normal exit, 1 for environmental problems (file not found, invalid flags, I/O errors, &c), 2 to indicate a corrupt compressed file, 3 for an internal consistency error (eg, bug) which caused bzip2 to panic.
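For illustration, some typical invocations follow as a sketch; the file and directory names are hypothetical, not part of this manual:

       bzip2 file.txt                                  # replaces file.txt with file.txt.bz2
       bunzip2 file.txt.bz2                            # restores file.txt
       tar cf - somedir | bzip2 -c > somedir.tar.bz2   # compress a stream to standard output with -c
       BZIP2=-9 bzip2 big.log                          # supply default arguments via the BZIP2 environment variable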
OPTIONS
-c --stdout
       Compress or decompress to standard output.

-d --decompress
       Force decompression. bzip2, bunzip2 and bzcat are really the same program, and the decision about what actions to take is done on the basis of which name is used. This flag overrides that mechanism, and forces bzip2 to decompress.

-z --compress
       The complement to -d: forces compression, regardless of the invocation name.

-t --test
       Check integrity of the specified file(s), but don't decompress them. This really performs a trial decompression and throws away the result.

-f --force
       Force overwrite of output files. Normally, bzip2 will not overwrite existing output files. bzip2 normally declines to decompress files which don't have the correct magic header bytes. If forced (-f), however, it will pass such files through unmodified. This is how GNU gzip behaves.

-k --keep
       Keep (don't delete) input files during compression or decompression.

-s --small
       Reduce memory usage, for compression, decompression and testing. Files are decompressed and tested using a modified algorithm which only requires 2.5 bytes per block byte. This means any file can be decompressed in 2300k of memory, albeit at about half the normal speed.

       During compression, -s selects a block size of 200k, which limits memory use to around the same figure, at the expense of your compression ratio. In short, if your machine is low on memory (8 megabytes or less), use -s for everything. See MEMORY MANAGEMENT below.

-q --quiet
       Suppress non-essential warning messages. Messages pertaining to I/O errors and other critical events will not be suppressed.

-v --verbose
       Verbose mode -- show the compression ratio for each file processed. Further -v's increase the verbosity level, spewing out lots of information which is primarily of interest for diagnostic purposes.

-L --license
-V --version
       Display the software version, license terms and conditions.

-1 (or --fast) to -9 (or --best)
       Set the block size to 100 k, 200 k .. 900 k when compressing. Has no effect when decompressing. See MEMORY MANAGEMENT below. The --fast and --best aliases are primarily for GNU gzip compatibility. In particular, --fast doesn't make things significantly faster. And --best merely selects the default behaviour.

--     Treats all subsequent arguments as file names, even if they start with a dash. This is so you can handle files with names beginning with a dash, for example: bzip2 -- -myfilename.

--repetitive-fast --repetitive-best
       These flags are redundant in versions 0.9.5 and above. They provided some coarse control over the behaviour of the sorting algorithm in earlier versions, which was sometimes useful. 0.9.5 and above have an improved algorithm which renders these flags irrelevant.
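The following sketch shows several of these flags in combination; the file names are hypothetical:

       bzip2 -k data.csv                # compress, keep data.csv, write data.csv.bz2
       bzip2 -t data.csv.bz2            # test integrity only; nothing is written
       bzip2 -dc data.csv.bz2 | head    # decompress to stdout and pipe elsewhere
       bzip2 -9 -v bigfile              # largest block size, report the compression ratio
       bzip2 -- -odd-name               # compress a file whose name begins with a dash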
MEMORY MANAGEMENT
bzip2 compresses large files in blocks. The block size affects both the compression ratio achieved, and the amount of memory needed for compression and decompression. The flags -1 through -9 specify the block size to be 100,000 bytes through 900,000 bytes (the default) respectively. Compression and decompression memory requirements, in bytes, can be estimated as:

       Compression:    400k + ( 8 x block size )

       Decompression:  100k + ( 4 x block size ), or
                       100k + ( 2.5 x block size )

Larger block sizes give rapidly diminishing marginal returns. Most of the compression comes from the first two or three hundred k of block size, a fact worth bearing in mind when using bzip2 on small machines. It is also important to appreciate that the decompression memory requirement is set at compression time by the choice of block size.

For files compressed with the default 900k block size, bunzip2 will require about 3700 kbytes to decompress. To support decompression of any file on a 4 megabyte machine, bunzip2 has an option to decompress using approximately half this amount of memory, about 2300 kbytes. Decompression speed is also halved, so you should use this option only where necessary. The relevant flag is -s.

In general, try to use the largest block size memory constraints allow, since that maximises the compression achieved. Compression and decompression speed are virtually unaffected by block size.

Another significant point applies to files which fit in a single block -- that means most files you'd encounter using a large block size. The amount of real memory touched is proportional to the size of the file, since the file is smaller than a block. For example, compressing a file 20,000 bytes long with the flag -9 will cause the compressor to allocate around 7600k of memory, but only touch 400k + 20000 * 8 = 560 kbytes of it. Similarly, the decompressor will allocate 3700k but only touch 100k + 20000 * 4 = 180 kbytes.

Here is a table which summarises the maximum memory usage for different block sizes. Also recorded is the total compressed size for 14 files of the Calgary Text Compression Corpus totalling 3,141,622 bytes. This column gives some feel for how compression varies with block size. These figures tend to understate the advantage of larger block sizes for larger files, since the Corpus is dominated by smaller files.

               Compress   Decompress   Decompress   Corpus
        Flag     usage      usage       -s usage     Size

         -1      1200k       500k         350k      914704
         -2      2000k       900k         600k      877703
         -3      2800k      1300k         850k      860338
         -4      3600k      1700k        1100k      846899
         -5      4400k      2100k        1350k      845160
         -6      5200k      2500k        1600k      838626
         -7      6100k      2900k        1850k      834096
         -8      6800k      3300k        2100k      828642
         -9      7600k      3700k        2350k      828642
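As a worked check of the estimates above against the table: for -5 the block size is 500k, so compression needs about 400k + 8 x 500k = 4400k and decompression about 100k + 4 x 500k = 2100k, matching the -5 row. A sketch of trading memory for compression on a small machine follows; the file name is hypothetical:

       bzip2 -1 big.tar           # smallest blocks: ~1200k to compress, ~500k to decompress later
       bunzip2 -s big.tar.bz2     # decompress any .bz2 file in about 2300k, at about half speed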
RECOVERING DATA FROM DAMAGED FILES
bzip2 compresses files in blocks, usually 900kbytes long. Each block is handled independently. If a media or transmission error causes a multi-block .bz2 file to become damaged, it may be possible to recover data from the undamaged blocks in the file. bzip2recover searches for blocks in .bz2 files, and writes each block out into its own .bz2 file. You can then use bzip2 -t to test the integrity of the resulting files, and decompress those which are undamaged.

bzip2recover takes a single argument, the name of the damaged file, and writes a number of files "rec00001file.bz2", "rec00002file.bz2", etc, containing the extracted blocks. The output filenames are designed so that the use of wildcards in subsequent processing -- for example, "bzip2 -dc rec*file.bz2 > recovered_data" -- processes the files in the correct order.

bzip2recover should be of most use dealing with large .bz2 files, as these will contain many blocks. It is clearly futile to use it on damaged single-block files, since a damaged block cannot be recovered. If you wish to minimise any potential data loss through media or transmission errors, you might consider compressing with a smaller block size.
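A typical recovery session might look like the following sketch; the damaged file name is hypothetical:

       bzip2recover backup.bz2                        # writes pieces named like rec00001backup.bz2, rec00002backup.bz2, ...
       bzip2 -t rec*backup.bz2                        # test each extracted block; note which ones fail
       bzip2 -dc rec*backup.bz2 > recovered_data      # after removing damaged pieces, decompress the rest in order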
PERFORMANCE NOTES
The sorting phase of compression gathers together similar strings in the file. Because of this, files containing very long runs of repeated symbols, like "aabaabaabaab ..." (repeated several hundred times) may compress more slowly than normal. Versions 0.9.5 and above fare much better than previous versions in this respect. The ratio between worst-case and average-case compression time is in the region of 10:1. For previous versions, this figure was more like 100:1. You can use the -vvvv option to monitor progress in great detail, if you want.

Decompression speed is unaffected by these phenomena.

bzip2 usually allocates several megabytes of memory to operate in, and then charges all over it in a fairly random fashion. This means that performance, both for compressing and decompressing, is largely determined by the speed at which your machine can service cache misses. Because of this, small changes to the code to reduce the miss rate have been observed to give disproportionately large performance improvements. I imagine bzip2 will perform best on machines with very large caches.
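For example, to watch the sorting phase in detail on an input you suspect is highly repetitive (the file name is hypothetical):

       bzip2 -vvvv repetitive.log      # very verbose progress output during compression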
CAVEATS
I/O error messages are not as helpful as they could be. bzip2 tries hard to detect I/O errors and exit cleanly, but the details of what the problem is sometimes seem rather misleading.

This manual page pertains to version 1.0.2 of bzip2. Compressed data created by this version is entirely forwards and backwards compatible with the previous public releases, versions 0.1pl2, 0.9.0, 0.9.5, 1.0.0 and 1.0.1, but with the following exception: 0.9.0 and above can correctly decompress multiple concatenated compressed files. 0.1pl2 cannot do this; it will stop after decompressing just the first file in the stream.

bzip2recover versions prior to 1.0.2 used 32-bit integers to represent bit positions in compressed files, so they could not handle compressed files more than 512 megabytes long. Version 1.0.2 and above uses 64-bit ints on some platforms which support them (GNU supported targets, and Windows).
AUTHOR
Julian Seward, jseward@acm.org.

http://sources.redhat.com/bzip2

The ideas embodied in bzip2 are due to (at least) the following people: Michael Burrows and David Wheeler (for the block sorting transformation), David Wheeler (again, for the Huffman coder), Peter Fenwick (for the structured coding model in the original bzip, and many refinements), and Alistair Moffat, Radford Neal and Ian Witten (for the arithmetic coder in the original bzip). I am much indebted for their help, support and advice. See the manual in the source distribution for pointers to sources of documentation. Christian von Roques encouraged me to look for faster sorting algorithms, so as to speed up compression. Bela Lubkin encouraged me to improve the worst-case compression performance. The bz* scripts are derived from those of GNU gzip. Many people sent patches, helped with portability problems, lent machines, gave advice and were generally helpful.