#!/usr/bin/env bash
#
# max limits on compression in huge qcow2 files
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
seq=$(basename $0)
echo "QA output created by $seq"
status=1 # failure is the default!

_cleanup()
{
    _cleanup_test_img
}
trap "_cleanup; exit \$status" 0 1 2 3 15
# get standard environment, filters and checks
. ./common.rc
. ./common.filter
. ./common.pattern
_supported_fmt qcow2
_supported_proto file
_supported_os Linux
# To use a refcount width other than 16 bits we need compat=1.1,
# and external data files do not support compressed clusters.
_unsupported_imgopts 'compat=0.10' data_file
echo "== Creating huge file =="
# Sanity check: We require a file system that permits the creation
# of a HUGE (but very sparse) file. tmpfs works, ext4 does not.
_require_large_file 513T
_make_test_img -o 'cluster_size=2M,refcount_bits=1' 513T
echo "== Populating refcounts =="
# We want an image with 256M refcounts * 2M clusters = 512T referenced.
# Each 2M cluster holds 16M refcounts; the refcount table initially uses
# 1 refblock, so we need to add 15 more. The refcount table lives at 2M,
# first refblock at 4M, L2 at 6M, so our remaining additions start at 8M.
# Then, for each refblock, mark it as fully populated.
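# (With refcount_bits=1, a 2M refblock holds 2M * 8 = 16M one-bit refcounts,
# covering 16M * 2M = 32T of host offsets, so 512T / 32T = 16 refblocks are
# needed in total; one already exists, hence the 15 additions below.)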
to_hex() {
    printf %016x\\n $1 | sed 's/\(..\)/\\x\1/g'
}
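# For example, to_hex $((0x800000)) (the first offset added in the loop below)
# prints the big-endian byte string \x00\x00\x00\x00\x00\x00\x80\x00, which is
# the form poke_file expects.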
truncate --size=38m "$TEST_IMG"
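# 38M leaves room for the 15 extra refblocks: they start at 8M and occupy
# 15 * 2M = 30M, so 8M + 30M = 38M.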
entry=$((0x200000))
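# Each refcount table entry is an 8-byte big-endian host offset of a refblock
# (per the qcow2 layout), so entry i of the table at 2M lives at $entry + i*8;
# the poke_file calls below fill in those entries.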
$QEMU_IO_PROG -f raw -c "w -P 0xff 4m 2m" "$TEST_IMG" | _filter_qemu_io
for i in {1..15}; do
    offs=$((0x600000 + i*0x200000))
    poke_file "$TEST_IMG" $((i*8 + entry)) $(to_hex $offs)
    $QEMU_IO_PROG -f raw -c "w -P 0xff $offs 2m" "$TEST_IMG" | _filter_qemu_io
done
echo "== Checking file before =="
# FIXME: 'qemu-img check' doesn't diagnose refcounts beyond the end of
# the file as leaked clusters
_check_test_img 2>&1 | sed '/^Leaked cluster/d'
stat -c 'image size %s' "$TEST_IMG"
echo "== Trying to write compressed cluster =="
# Given our file size, the next available cluster at 512T lies beyond the
# maximum offset that a compressed 2M cluster can reside in
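# (Per the qcow2 spec, a compressed cluster descriptor stores the host offset
# in 62 - (cluster_bits - 8) bits; with 2M clusters that is 62 - 13 = 49 bits,
# so compressed data must start below 2^49 = 512T.)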
$QEMU_IO_PROG -c 'w -c 0 2m' "$TEST_IMG" | _filter_qemu_io
# The attempt failed, but ended up allocating a new refblock
stat -c 'image size %s' "$TEST_IMG"
echo "== Writing normal cluster =="
# The failed write should not corrupt the image, so a normal write succeeds
$QEMU_IO_PROG -c 'w 0 2m' "$TEST_IMG" | _filter_qemu_io
echo "== Checking file after =="
# qemu-img now sees the millions of leaked clusters, thanks to the allocations
# at 512T. Undo many of our faked references to speed up the check.
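# Zeroing 5M-6M clears the second half of the first refblock at 4M, and zeroing
# 8M-38M clears the 15 refblocks added earlier, so far fewer clusters remain
# marked as in use.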
$QEMU_IO_PROG -f raw -c "w -z 5m 1m" -c "w -z 8m 30m" "$TEST_IMG" |
    _filter_qemu_io
_check_test_img 2>&1 | sed '/^Leaked cluster/d'

# success, all done
echo "*** done"
rm -f $seq.full
status=0