2 * Block driver for the QCOW version 2 format
4 * Copyright (c) 2004-2006 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 #include "qemu-common.h"
25 #include "block_int.h"
31 Differences with QCOW:
33 - Support for multiple incremental snapshots.
34 - Memory management by reference counts.
35 - Clusters which have a reference count of one have the bit
36 QCOW_OFLAG_COPIED set to optimize write performance.
37 - Size of compressed clusters is stored in sectors to reduce bit usage
38 in the cluster offsets.
39 - Support for storing additional data (such as the VM state) in the snapshots.
41 - If a backing store is used, the cluster size is not constrained
42 (could be backported to QCOW).
43 - L2 tables always have a size of one cluster.
47 //#define DEBUG_ALLOC2
49 #define QCOW_MAGIC (('Q' << 24) | ('F' << 16) | ('I' << 8) | 0xfb)
50 #define QCOW_VERSION 2
52 #define QCOW_CRYPT_NONE 0
53 #define QCOW_CRYPT_AES 1
55 #define QCOW_MAX_CRYPT_CLUSTERS 32
57 /* indicate that the refcount of the referenced cluster is exactly one. */
58 #define QCOW_OFLAG_COPIED (1LL << 63)
59 /* indicate that the cluster is compressed (they never have the copied flag) */
60 #define QCOW_OFLAG_COMPRESSED (1LL << 62)
62 #define REFCOUNT_SHIFT 1 /* refcount size is 2 bytes */
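/* A refcount entry is 2 bytes wide (1 << REFCOUNT_SHIFT), so a single
   refcount block of cluster_size bytes describes
   cluster_size >> REFCOUNT_SHIFT clusters (2048 clusters for the 4 KB
   clusters that qcow_create() uses). */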
64 typedef struct QCowHeader {
67 uint64_t backing_file_offset;
68 uint32_t backing_file_size;
69 uint32_t cluster_bits;
70 uint64_t size; /* in bytes */
71 uint32_t crypt_method;
72 uint32_t l1_size; /* XXX: save number of clusters instead ? */
73 uint64_t l1_table_offset;
74 uint64_t refcount_table_offset;
75 uint32_t refcount_table_clusters;
76 uint32_t nb_snapshots;
77 uint64_t snapshots_offset;
80 typedef struct __attribute__((packed)) QCowSnapshotHeader {
81 /* header is 8 byte aligned */
82 uint64_t l1_table_offset;
91 uint64_t vm_clock_nsec;
93 uint32_t vm_state_size;
94 uint32_t extra_data_size; /* for extension */
95 /* extra data follows */
100 #define L2_CACHE_SIZE 16
102 typedef struct QCowSnapshot {
103 uint64_t l1_table_offset;
107 uint32_t vm_state_size;
110 uint64_t vm_clock_nsec;
113 typedef struct BDRVQcowState {
114 BlockDriverState *hd;
121 int l1_vm_state_index;
124 uint64_t cluster_offset_mask;
125 uint64_t l1_table_offset;
128 uint64_t l2_cache_offsets[L2_CACHE_SIZE];
129 uint32_t l2_cache_counts[L2_CACHE_SIZE];
130 uint8_t *cluster_cache;
131 uint8_t *cluster_data;
132 uint64_t cluster_cache_offset;
134 uint64_t *refcount_table;
135 uint64_t refcount_table_offset;
136 uint32_t refcount_table_size;
137 uint64_t refcount_block_cache_offset;
138 uint16_t *refcount_block_cache;
139 int64_t free_cluster_index;
140 int64_t free_byte_offset;
142 uint32_t crypt_method; /* current crypt method, 0 if no key yet */
143 uint32_t crypt_method_header;
144 AES_KEY aes_encrypt_key;
145 AES_KEY aes_decrypt_key;
147 int64_t highest_alloc; /* highest cluster allocated (in clusters) */
149 uint64_t snapshots_offset;
152 QCowSnapshot *snapshots;
155 static int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset);
156 static int qcow_read(BlockDriverState *bs, int64_t sector_num,
157 uint8_t *buf, int nb_sectors);
158 static int qcow_read_snapshots(BlockDriverState *bs);
159 static void qcow_free_snapshots(BlockDriverState *bs);
160 static int refcount_init(BlockDriverState *bs);
161 static void refcount_close(BlockDriverState *bs);
162 static int get_refcount(BlockDriverState *bs, int64_t cluster_index);
163 static int update_cluster_refcount(BlockDriverState *bs,
164 int64_t cluster_index,
166 static void update_refcount(BlockDriverState *bs,
167 int64_t offset, int64_t length,
169 static int64_t alloc_clusters(BlockDriverState *bs, int64_t size);
170 static int64_t alloc_bytes(BlockDriverState *bs, int size);
171 static void free_clusters(BlockDriverState *bs,
172 int64_t offset, int64_t size);
174 static void check_refcounts(BlockDriverState *bs);
176 static void scan_refcount(BlockDriverState *bs, int64_t *high);
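/* A file is recognized as qcow2 when the probe buffer is large enough to
   hold the header and carries the big-endian "QFI\xfb" magic together with
   version number 2. */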
179 static int qcow_probe(const uint8_t *buf, int buf_size, const char *filename)
181 const QCowHeader *cow_header = (const void *)buf;
183 if (buf_size >= sizeof(QCowHeader) &&
184 be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
185 be32_to_cpu(cow_header->version) == QCOW_VERSION)
191 static int qcow_open(BlockDriverState *bs, const char *filename, int flags)
193 BDRVQcowState *s = bs->opaque;
194 int len, i, shift, ret;
197 /* Performance is terrible right now with cache=writethrough due mainly
198 * to reference count updates. If the user does not explicitly specify
199 * a caching type, force writeback caching.
201 if ((flags & BDRV_O_CACHE_DEF)) {
202 flags |= BDRV_O_CACHE_WB;
203 flags &= ~BDRV_O_CACHE_DEF;
205 ret = bdrv_file_open(&s->hd, filename, flags);
208 if (bdrv_pread(s->hd, 0, &header, sizeof(header)) != sizeof(header))
210 be32_to_cpus(&header.magic);
211 be32_to_cpus(&header.version);
212 be64_to_cpus(&header.backing_file_offset);
213 be32_to_cpus(&header.backing_file_size);
214 be64_to_cpus(&header.size);
215 be32_to_cpus(&header.cluster_bits);
216 be32_to_cpus(&header.crypt_method);
217 be64_to_cpus(&header.l1_table_offset);
218 be32_to_cpus(&header.l1_size);
219 be64_to_cpus(&header.refcount_table_offset);
220 be32_to_cpus(&header.refcount_table_clusters);
221 be64_to_cpus(&header.snapshots_offset);
222 be32_to_cpus(&header.nb_snapshots);
224 if (header.magic != QCOW_MAGIC || header.version != QCOW_VERSION)
226 if (header.size <= 1 ||
227 header.cluster_bits < 9 ||
228 header.cluster_bits > 16)
230 if (header.crypt_method > QCOW_CRYPT_AES)
232 s->crypt_method_header = header.crypt_method;
233 if (s->crypt_method_header)
235 s->cluster_bits = header.cluster_bits;
236 s->cluster_size = 1 << s->cluster_bits;
237 s->cluster_sectors = 1 << (s->cluster_bits - 9);
238 s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
239 s->l2_size = 1 << s->l2_bits;
240 bs->total_sectors = header.size / 512;
241 s->csize_shift = (62 - (s->cluster_bits - 8));
242 s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
243 s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
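/* Worked example for cluster_bits = 16 (64 KB clusters): l2_bits = 13, so an
   L2 table holds 8192 entries and one L1 entry maps 8192 * 64 KB = 512 MB of
   guest data; csize_shift = 54 and csize_mask = 0xff, i.e. a compressed L2
   entry keeps the byte offset of the data in its low 54 bits and encodes its
   length in 512-byte sectors just above it (see decompress_cluster()). */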
244 s->refcount_table_offset = header.refcount_table_offset;
245 s->refcount_table_size =
246 header.refcount_table_clusters << (s->cluster_bits - 3);
248 s->snapshots_offset = header.snapshots_offset;
249 s->nb_snapshots = header.nb_snapshots;
251 /* read the level 1 table */
252 s->l1_size = header.l1_size;
253 shift = s->cluster_bits + s->l2_bits;
254 s->l1_vm_state_index = (header.size + (1LL << shift) - 1) >> shift;
255 /* the L1 table must contain at least enough entries to cover header.size bytes */
257 if (s->l1_size < s->l1_vm_state_index)
259 s->l1_table_offset = header.l1_table_offset;
260 s->l1_table = qemu_malloc(s->l1_size * sizeof(uint64_t));
261 if (bdrv_pread(s->hd, s->l1_table_offset, s->l1_table, s->l1_size * sizeof(uint64_t)) !=
262 s->l1_size * sizeof(uint64_t))
264 for(i = 0;i < s->l1_size; i++) {
265 be64_to_cpus(&s->l1_table[i]);
268 s->l2_cache = qemu_malloc(s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
269 s->cluster_cache = qemu_malloc(s->cluster_size);
270 /* one more sector for decompressed data alignment */
271 s->cluster_data = qemu_malloc(QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
273 s->cluster_cache_offset = -1;
275 if (refcount_init(bs) < 0)
278 scan_refcount(bs, &s->highest_alloc);
280 /* read the backing file name */
281 if (header.backing_file_offset != 0) {
282 len = header.backing_file_size;
285 if (bdrv_pread(s->hd, header.backing_file_offset, bs->backing_file, len) != len)
287 bs->backing_file[len] = '\0';
289 if (qcow_read_snapshots(bs) < 0)
298 qcow_free_snapshots(bs);
300 qemu_free(s->l1_table);
301 qemu_free(s->l2_cache);
302 qemu_free(s->cluster_cache);
303 qemu_free(s->cluster_data);
308 static int qcow_set_key(BlockDriverState *bs, const char *key)
310 BDRVQcowState *s = bs->opaque;
314 memset(keybuf, 0, 16);
318 /* XXX: we could compress the chars to 7 bits to increase entropy */
320 for(i = 0;i < len;i++) {
323 s->crypt_method = s->crypt_method_header;
325 if (AES_set_encrypt_key(keybuf, 128, &s->aes_encrypt_key) != 0)
327 if (AES_set_decrypt_key(keybuf, 128, &s->aes_decrypt_key) != 0)
337 AES_encrypt(in, tmp, &s->aes_encrypt_key);
338 AES_decrypt(tmp, out, &s->aes_decrypt_key);
339 for(i = 0; i < 16; i++)
340 printf(" %02x", tmp[i]);
342 for(i = 0; i < 16; i++)
343 printf(" %02x", out[i]);
350 /* The crypt function is compatible with the Linux cryptoloop
351 algorithm for < 4 GB images. NOTE: out_buf == in_buf is supported */
353 static void encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
354 uint8_t *out_buf, const uint8_t *in_buf,
355 int nb_sectors, int enc,
364 for(i = 0; i < nb_sectors; i++) {
365 ivec.ll[0] = cpu_to_le64(sector_num);
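/* The IV is simply the little-endian sector number, zero-padded to 16 bytes,
   which is what the cryptoloop scheme mentioned above expects. */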
367 AES_cbc_encrypt(in_buf, out_buf, 512, key,
375 static int copy_sectors(BlockDriverState *bs, uint64_t start_sect,
376 uint64_t cluster_offset, int n_start, int n_end)
378 BDRVQcowState *s = bs->opaque;
384 ret = qcow_read(bs, start_sect + n_start, s->cluster_data, n);
387 if (s->crypt_method) {
388 encrypt_sectors(s, start_sect + n_start,
390 s->cluster_data, n, 1,
391 &s->aes_encrypt_key);
393 ret = bdrv_write(s->hd, (cluster_offset >> 9) + n_start,
400 static void l2_cache_reset(BlockDriverState *bs)
402 BDRVQcowState *s = bs->opaque;
404 memset(s->l2_cache, 0, s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
405 memset(s->l2_cache_offsets, 0, L2_CACHE_SIZE * sizeof(uint64_t));
406 memset(s->l2_cache_counts, 0, L2_CACHE_SIZE * sizeof(uint32_t));
409 static inline int l2_cache_new_entry(BlockDriverState *bs)
411 BDRVQcowState *s = bs->opaque;
415 /* find a slot for the new entry: pick the least used one */
417 min_count = 0xffffffff;
418 for(i = 0; i < L2_CACHE_SIZE; i++) {
419 if (s->l2_cache_counts[i] < min_count) {
420 min_count = s->l2_cache_counts[i];
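/* align_offset() rounds offset up to the next multiple of n; n must be a
   power of two (callers pass 8 or the cluster size). */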
427 static int64_t align_offset(int64_t offset, int n)
429 offset = (offset + n - 1) & ~(n - 1);
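/* Grow the active L1 table to at least min_size entries: the size grows by
   roughly 3/2 per step, the new table is written to freshly allocated,
   cluster-aligned space, the l1_size/l1_table_offset header fields are
   rewritten, and only then is the old table freed. */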
433 static int grow_l1_table(BlockDriverState *bs, int min_size)
435 BDRVQcowState *s = bs->opaque;
436 int new_l1_size, new_l1_size2, ret, i;
437 uint64_t *new_l1_table;
438 uint64_t new_l1_table_offset;
441 new_l1_size = s->l1_size;
442 if (min_size <= new_l1_size)
444 while (min_size > new_l1_size) {
445 new_l1_size = (new_l1_size * 3 + 1) / 2;
448 printf("grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
451 new_l1_size2 = sizeof(uint64_t) * new_l1_size;
452 new_l1_table = qemu_mallocz(new_l1_size2);
453 memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
455 /* write new table (align to cluster) */
456 new_l1_table_offset = alloc_clusters(bs, new_l1_size2);
458 for(i = 0; i < s->l1_size; i++)
459 new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
460 ret = bdrv_pwrite(s->hd, new_l1_table_offset, new_l1_table, new_l1_size2);
461 if (ret != new_l1_size2)
463 for(i = 0; i < s->l1_size; i++)
464 new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
467 cpu_to_be32w((uint32_t*)data, new_l1_size);
468 cpu_to_be64w((uint64_t*)(data + 4), new_l1_table_offset);
469 if (bdrv_pwrite(s->hd, offsetof(QCowHeader, l1_size), data,
470 sizeof(data)) != sizeof(data))
472 qemu_free(s->l1_table);
473 free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
474 s->l1_table_offset = new_l1_table_offset;
475 s->l1_table = new_l1_table;
476 s->l1_size = new_l1_size;
479 qemu_free(s->l1_table);
486 * seek l2_offset in the l2_cache table
487 * if not found, return NULL;
489 * if found, increment the hit count of the entry
490 * (if a counter would overflow, all counters are halved)
491 * and return the pointer to the cached L2 table
495 static uint64_t *seek_l2_table(BDRVQcowState *s, uint64_t l2_offset)
499 for(i = 0; i < L2_CACHE_SIZE; i++) {
500 if (l2_offset == s->l2_cache_offsets[i]) {
501 /* increment the hit count */
502 if (++s->l2_cache_counts[i] == 0xffffffff) {
503 for(j = 0; j < L2_CACHE_SIZE; j++) {
504 s->l2_cache_counts[j] >>= 1;
507 return s->l2_cache + (i << s->l2_bits);
516 * Loads a L2 table into memory. If the table is in the cache, the cache
517 * is used; otherwise the L2 table is loaded from the image file.
519 * Returns a pointer to the L2 table on success, or NULL if the read from
520 * the image file failed.
523 static uint64_t *l2_load(BlockDriverState *bs, uint64_t l2_offset)
525 BDRVQcowState *s = bs->opaque;
529 /* check if the table for the given offset is in the cache */
531 l2_table = seek_l2_table(s, l2_offset);
532 if (l2_table != NULL)
535 /* not found: load the table into the least used cache entry */
537 min_index = l2_cache_new_entry(bs);
538 l2_table = s->l2_cache + (min_index << s->l2_bits);
539 if (bdrv_pread(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) !=
540 s->l2_size * sizeof(uint64_t))
542 s->l2_cache_offsets[min_index] = l2_offset;
543 s->l2_cache_counts[min_index] = 1;
551 * Allocate a new l2 entry in the file. If l1_index points to an already
552 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
553 * table) copy the contents of the old L2 table into the newly allocated one.
554 * Otherwise the new table is initialized with zeros.
558 static uint64_t *l2_allocate(BlockDriverState *bs, int l1_index)
560 BDRVQcowState *s = bs->opaque;
562 uint64_t old_l2_offset, tmp;
563 uint64_t *l2_table, l2_offset;
565 old_l2_offset = s->l1_table[l1_index];
567 /* allocate a new l2 entry */
569 l2_offset = alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
571 /* update the L1 entry */
573 s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
575 tmp = cpu_to_be64(l2_offset | QCOW_OFLAG_COPIED);
576 if (bdrv_pwrite(s->hd, s->l1_table_offset + l1_index * sizeof(tmp),
577 &tmp, sizeof(tmp)) != sizeof(tmp))
580 /* allocate a new entry in the l2 cache */
582 min_index = l2_cache_new_entry(bs);
583 l2_table = s->l2_cache + (min_index << s->l2_bits);
585 if (old_l2_offset == 0) {
586 /* if there was no old l2 table, clear the new table */
587 memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
589 /* if there was an old l2 table, read it from the disk */
590 if (bdrv_pread(s->hd, old_l2_offset,
591 l2_table, s->l2_size * sizeof(uint64_t)) !=
592 s->l2_size * sizeof(uint64_t))
595 /* write the l2 table to the file */
596 if (bdrv_pwrite(s->hd, l2_offset,
597 l2_table, s->l2_size * sizeof(uint64_t)) !=
598 s->l2_size * sizeof(uint64_t))
601 /* update the l2 cache entry */
603 s->l2_cache_offsets[min_index] = l2_offset;
604 s->l2_cache_counts[min_index] = 1;
609 static int size_to_clusters(BDRVQcowState *s, int64_t size)
611 return (size + (s->cluster_size - 1)) >> s->cluster_bits;
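/* count_contiguous_clusters() counts how many L2 entries, starting at index
   'start', continue the physically contiguous run that begins at entry 0 of
   the given slice (flag bits selected by 'mask' are ignored);
   count_contiguous_free_clusters() counts a leading run of unallocated (zero)
   entries. */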
614 static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
615 uint64_t *l2_table, uint64_t start, uint64_t mask)
618 uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;
623 for (i = start; i < start + nb_clusters; i++)
624 if (offset + i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))
630 static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
634 while(nb_clusters-- && l2_table[i] == 0)
643 * For a given offset of the disk image, return the cluster offset in the qcow2 file.
646 * on entry, *num is the number of contiguous sectors we'd like to
647 * access following offset.
649 * on exit, *num is the number of contiguous sectors we can read.
651 * Returns the cluster offset if the offset is allocated,
652 * and 0 otherwise.
656 static uint64_t get_cluster_offset(BlockDriverState *bs,
657 uint64_t offset, int *num)
659 BDRVQcowState *s = bs->opaque;
660 int l1_index, l2_index;
661 uint64_t l2_offset, *l2_table, cluster_offset;
663 int index_in_cluster, nb_available, nb_needed, nb_clusters;
665 index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
666 nb_needed = *num + index_in_cluster;
668 l1_bits = s->l2_bits + s->cluster_bits;
670 /* compute how many bytes there are between the offset and
671 * the end of the l1 entry
674 nb_available = (1 << l1_bits) - (offset & ((1 << l1_bits) - 1));
676 /* compute the number of available sectors */
678 nb_available = (nb_available >> 9) + index_in_cluster;
682 /* seek the l2 offset in the l1 table */
684 l1_index = offset >> l1_bits;
685 if (l1_index >= s->l1_size)
688 l2_offset = s->l1_table[l1_index];
690 /* seek the l2 table of the given l2 offset */
695 /* load the l2 table in memory */
697 l2_offset &= ~QCOW_OFLAG_COPIED;
698 l2_table = l2_load(bs, l2_offset);
699 if (l2_table == NULL)
702 /* find the cluster offset for the given disk offset */
704 l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
705 cluster_offset = be64_to_cpu(l2_table[l2_index]);
706 nb_clusters = size_to_clusters(s, nb_needed << 9);
708 if (!cluster_offset) {
709 /* how many empty clusters ? */
710 c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
712 /* how many allocated clusters ? */
713 c = count_contiguous_clusters(nb_clusters, s->cluster_size,
714 &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);
717 nb_available = (c * s->cluster_sectors);
719 if (nb_available > nb_needed)
720 nb_available = nb_needed;
722 *num = nb_available - index_in_cluster;
724 return cluster_offset & ~QCOW_OFLAG_COPIED;
730 * free the clusters according to their type: compressed or not
734 static void free_any_clusters(BlockDriverState *bs,
735 uint64_t cluster_offset, int nb_clusters)
737 BDRVQcowState *s = bs->opaque;
739 /* free the cluster */
741 if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
743 nb_csectors = ((cluster_offset >> s->csize_shift) &
745 free_clusters(bs, (cluster_offset & s->cluster_offset_mask) & ~511,
750 free_clusters(bs, cluster_offset, nb_clusters << s->cluster_bits);
758 * for a given disk offset, load (and allocate if needed) the L2 table.
761 * the l2 table offset in the qcow2 file and the cluster index
762 * in the l2 table are given to the caller.
766 static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
767 uint64_t **new_l2_table,
768 uint64_t *new_l2_offset,
771 BDRVQcowState *s = bs->opaque;
772 int l1_index, l2_index, ret;
773 uint64_t l2_offset, *l2_table;
775 /* seek the l2 offset in the l1 table */
777 l1_index = offset >> (s->l2_bits + s->cluster_bits);
778 if (l1_index >= s->l1_size) {
779 ret = grow_l1_table(bs, l1_index + 1);
783 l2_offset = s->l1_table[l1_index];
785 /* seek the l2 table of the given l2 offset */
787 if (l2_offset & QCOW_OFLAG_COPIED) {
788 /* load the l2 table in memory */
789 l2_offset &= ~QCOW_OFLAG_COPIED;
790 l2_table = l2_load(bs, l2_offset);
791 if (l2_table == NULL)
795 free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
796 l2_table = l2_allocate(bs, l1_index);
797 if (l2_table == NULL)
799 l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
802 /* find the cluster offset for the given disk offset */
804 l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
806 *new_l2_table = l2_table;
807 *new_l2_offset = l2_offset;
808 *new_l2_index = l2_index;
814 * alloc_compressed_cluster_offset
816 * For a given offset of the disk image, return the cluster offset in the qcow2 file.
819 * If the offset is not found, allocate a new compressed cluster.
821 * Return the cluster offset if successful,
822 * Return 0, otherwise.
826 static uint64_t alloc_compressed_cluster_offset(BlockDriverState *bs,
830 BDRVQcowState *s = bs->opaque;
832 uint64_t l2_offset, *l2_table, cluster_offset;
835 ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
839 cluster_offset = be64_to_cpu(l2_table[l2_index]);
840 if (cluster_offset & QCOW_OFLAG_COPIED)
841 return cluster_offset & ~QCOW_OFLAG_COPIED;
844 free_any_clusters(bs, cluster_offset, 1);
846 cluster_offset = alloc_bytes(bs, compressed_size);
847 nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
848 (cluster_offset >> 9);
850 cluster_offset |= QCOW_OFLAG_COMPRESSED |
851 ((uint64_t)nb_csectors << s->csize_shift);
853 /* update L2 table */
855 /* compressed clusters never have the copied flag */
857 l2_table[l2_index] = cpu_to_be64(cluster_offset);
858 if (bdrv_pwrite(s->hd,
859 l2_offset + l2_index * sizeof(uint64_t),
861 sizeof(uint64_t)) != sizeof(uint64_t))
864 return cluster_offset;
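/* QCowL2Meta records what alloc_cluster_offset() must hand to
   alloc_cluster_link_l2() once the guest data has been written: the guest
   offset, the number of clusters allocated, and (via n_start/nb_available)
   which head and tail sectors still have to be copied from the old
   contents. */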
867 typedef struct QCowL2Meta
875 static int alloc_cluster_link_l2(BlockDriverState *bs, uint64_t cluster_offset,
878 BDRVQcowState *s = bs->opaque;
879 int i, j = 0, l2_index, ret;
880 uint64_t *old_cluster, start_sect, l2_offset, *l2_table;
882 if (m->nb_clusters == 0)
885 old_cluster = qemu_malloc(m->nb_clusters * sizeof(uint64_t));
887 /* copy content of unmodified sectors */
888 start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;
890 ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
895 if (m->nb_available & (s->cluster_sectors - 1)) {
896 uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
897 ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
898 m->nb_available - end, s->cluster_sectors);
904 /* update L2 table */
905 if (!get_cluster_table(bs, m->offset, &l2_table, &l2_offset, &l2_index))
908 for (i = 0; i < m->nb_clusters; i++) {
909 if(l2_table[l2_index + i] != 0)
910 old_cluster[j++] = l2_table[l2_index + i];
912 l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
913 (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
916 if (bdrv_pwrite(s->hd, l2_offset + l2_index * sizeof(uint64_t),
917 l2_table + l2_index, m->nb_clusters * sizeof(uint64_t)) !=
918 m->nb_clusters * sizeof(uint64_t))
921 for (i = 0; i < j; i++)
922 free_any_clusters(bs, old_cluster[i], 1);
926 qemu_free(old_cluster);
931 * alloc_cluster_offset
933 * For a given offset of the disk image, return the cluster offset in the qcow2 file.
936 * If the offset is not found, allocate a new cluster.
938 * Return the cluster offset if successful,
939 * Return 0, otherwise.
943 static uint64_t alloc_cluster_offset(BlockDriverState *bs,
945 int n_start, int n_end,
946 int *num, QCowL2Meta *m)
948 BDRVQcowState *s = bs->opaque;
950 uint64_t l2_offset, *l2_table, cluster_offset;
951 int nb_clusters, i = 0;
953 ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
957 nb_clusters = size_to_clusters(s, n_end << 9);
959 nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
961 cluster_offset = be64_to_cpu(l2_table[l2_index]);
963 /* We keep all QCOW_OFLAG_COPIED clusters */
965 if (cluster_offset & QCOW_OFLAG_COPIED) {
966 nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
967 &l2_table[l2_index], 0, 0);
969 cluster_offset &= ~QCOW_OFLAG_COPIED;
975 /* for the moment, multiple compressed clusters are not managed */
977 if (cluster_offset & QCOW_OFLAG_COMPRESSED)
980 /* how many available clusters ? */
982 while (i < nb_clusters) {
983 i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
984 &l2_table[l2_index], i, 0);
986 if(be64_to_cpu(l2_table[l2_index + i]))
989 i += count_contiguous_free_clusters(nb_clusters - i,
990 &l2_table[l2_index + i]);
992 cluster_offset = be64_to_cpu(l2_table[l2_index + i]);
994 if ((cluster_offset & QCOW_OFLAG_COPIED) ||
995 (cluster_offset & QCOW_OFLAG_COMPRESSED))
1000 /* allocate a new cluster */
1002 cluster_offset = alloc_clusters(bs, nb_clusters * s->cluster_size);
1004 /* save info needed for meta data update */
1006 m->n_start = n_start;
1007 m->nb_clusters = nb_clusters;
1010 m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end);
1012 *num = m->nb_available - n_start;
1014 return cluster_offset;
1017 static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num,
1018 int nb_sectors, int *pnum)
1020 uint64_t cluster_offset;
1023 cluster_offset = get_cluster_offset(bs, sector_num << 9, pnum);
1025 return (cluster_offset != 0);
1028 static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
1029 const uint8_t *buf, int buf_size)
1031 z_stream strm1, *strm = &strm1;
1034 memset(strm, 0, sizeof(*strm));
1036 strm->next_in = (uint8_t *)buf;
1037 strm->avail_in = buf_size;
1038 strm->next_out = out_buf;
1039 strm->avail_out = out_buf_size;
1041 ret = inflateInit2(strm, -12);
1044 ret = inflate(strm, Z_FINISH);
1045 out_len = strm->next_out - out_buf;
1046 if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
1047 out_len != out_buf_size) {
1055 static int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset)
1057 int ret, csize, nb_csectors, sector_offset;
1060 coffset = cluster_offset & s->cluster_offset_mask;
1061 if (s->cluster_cache_offset != coffset) {
1062 nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
1063 sector_offset = coffset & 511;
1064 csize = nb_csectors * 512 - sector_offset;
1065 ret = bdrv_read(s->hd, coffset >> 9, s->cluster_data, nb_csectors);
1069 if (decompress_buffer(s->cluster_cache, s->cluster_size,
1070 s->cluster_data + sector_offset, csize) < 0) {
1073 s->cluster_cache_offset = coffset;
1078 /* handle reading after the end of the backing file */
1079 static int backing_read1(BlockDriverState *bs,
1080 int64_t sector_num, uint8_t *buf, int nb_sectors)
1083 if ((sector_num + nb_sectors) <= bs->total_sectors)
1085 if (sector_num >= bs->total_sectors)
1088 n1 = bs->total_sectors - sector_num;
1089 memset(buf + n1 * 512, 0, 512 * (nb_sectors - n1));
1093 static int qcow_read(BlockDriverState *bs, int64_t sector_num,
1094 uint8_t *buf, int nb_sectors)
1096 BDRVQcowState *s = bs->opaque;
1097 int ret, index_in_cluster, n, n1;
1098 uint64_t cluster_offset;
1100 while (nb_sectors > 0) {
1102 cluster_offset = get_cluster_offset(bs, sector_num << 9, &n);
1103 index_in_cluster = sector_num & (s->cluster_sectors - 1);
1104 if (!cluster_offset) {
1105 if (bs->backing_hd) {
1106 /* read from the base image */
1107 n1 = backing_read1(bs->backing_hd, sector_num, buf, n);
1109 ret = bdrv_read(bs->backing_hd, sector_num, buf, n1);
1114 memset(buf, 0, 512 * n);
1116 } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
1117 if (decompress_cluster(s, cluster_offset) < 0)
1119 memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n);
1121 ret = bdrv_pread(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512);
1124 if (s->crypt_method) {
1125 encrypt_sectors(s, sector_num, buf, buf, n, 0,
1126 &s->aes_decrypt_key);
1136 static int qcow_write(BlockDriverState *bs, int64_t sector_num,
1137 const uint8_t *buf, int nb_sectors)
1139 BDRVQcowState *s = bs->opaque;
1140 int ret, index_in_cluster, n;
1141 uint64_t cluster_offset;
1145 while (nb_sectors > 0) {
1146 index_in_cluster = sector_num & (s->cluster_sectors - 1);
1147 n_end = index_in_cluster + nb_sectors;
1148 if (s->crypt_method &&
1149 n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors)
1150 n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
1151 cluster_offset = alloc_cluster_offset(bs, sector_num << 9,
1153 n_end, &n, &l2meta);
1154 if (!cluster_offset)
1156 if (s->crypt_method) {
1157 encrypt_sectors(s, sector_num, s->cluster_data, buf, n, 1,
1158 &s->aes_encrypt_key);
1159 ret = bdrv_pwrite(s->hd, cluster_offset + index_in_cluster * 512,
1160 s->cluster_data, n * 512);
1162 ret = bdrv_pwrite(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512);
1164 if (ret != n * 512 || alloc_cluster_link_l2(bs, cluster_offset, &l2meta) < 0) {
1165 free_any_clusters(bs, cluster_offset, l2meta.nb_clusters);
1172 s->cluster_cache_offset = -1; /* disable compressed cache */
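/* Per-request state for asynchronous I/O: qcow_aio_read_cb() and
   qcow_aio_write_cb() re-enter themselves one cluster run at a time,
   advancing sector_num and buf and decrementing nb_sectors until the whole
   request has completed. */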
1176 typedef struct QCowAIOCB {
1177 BlockDriverAIOCB common;
1182 uint64_t cluster_offset;
1183 uint8_t *cluster_data;
1184 BlockDriverAIOCB *hd_aiocb;
1189 static void qcow_aio_read_cb(void *opaque, int ret);
1190 static void qcow_aio_read_bh(void *opaque)
1192 QCowAIOCB *acb = opaque;
1193 qemu_bh_delete(acb->bh);
1195 qcow_aio_read_cb(opaque, 0);
1198 static int qcow_schedule_bh(QEMUBHFunc *cb, QCowAIOCB *acb)
1203 acb->bh = qemu_bh_new(cb, acb);
1207 qemu_bh_schedule(acb->bh);
1212 static void qcow_aio_read_cb(void *opaque, int ret)
1214 QCowAIOCB *acb = opaque;
1215 BlockDriverState *bs = acb->common.bs;
1216 BDRVQcowState *s = bs->opaque;
1217 int index_in_cluster, n1;
1219 acb->hd_aiocb = NULL;
1222 acb->common.cb(acb->common.opaque, ret);
1223 qemu_aio_release(acb);
1227 /* post process the read buffer */
1228 if (!acb->cluster_offset) {
1230 } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
1233 if (s->crypt_method) {
1234 encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
1236 &s->aes_decrypt_key);
1240 acb->nb_sectors -= acb->n;
1241 acb->sector_num += acb->n;
1242 acb->buf += acb->n * 512;
1244 if (acb->nb_sectors == 0) {
1245 /* request completed */
1246 acb->common.cb(acb->common.opaque, 0);
1247 qemu_aio_release(acb);
1251 /* prepare next AIO request */
1252 acb->n = acb->nb_sectors;
1253 acb->cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, &acb->n);
1254 index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
1256 if (!acb->cluster_offset) {
1257 if (bs->backing_hd) {
1258 /* read from the base image */
1259 n1 = backing_read1(bs->backing_hd, acb->sector_num,
1262 acb->hd_aiocb = bdrv_aio_read(bs->backing_hd, acb->sector_num,
1263 acb->buf, acb->n, qcow_aio_read_cb, acb);
1264 if (acb->hd_aiocb == NULL)
1267 ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
1272 /* Note: in this case, no need to wait */
1273 memset(acb->buf, 0, 512 * acb->n);
1274 ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
1278 } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
1279 /* add AIO support for compressed blocks ? */
1280 if (decompress_cluster(s, acb->cluster_offset) < 0)
1283 s->cluster_cache + index_in_cluster * 512, 512 * acb->n);
1284 ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
1288 if ((acb->cluster_offset & 511) != 0) {
1292 acb->hd_aiocb = bdrv_aio_read(s->hd,
1293 (acb->cluster_offset >> 9) + index_in_cluster,
1294 acb->buf, acb->n, qcow_aio_read_cb, acb);
1295 if (acb->hd_aiocb == NULL)
1300 static QCowAIOCB *qcow_aio_setup(BlockDriverState *bs,
1301 int64_t sector_num, uint8_t *buf, int nb_sectors,
1302 BlockDriverCompletionFunc *cb, void *opaque)
1306 acb = qemu_aio_get(bs, cb, opaque);
1309 acb->hd_aiocb = NULL;
1310 acb->sector_num = sector_num;
1312 acb->nb_sectors = nb_sectors;
1314 acb->cluster_offset = 0;
1315 acb->l2meta.nb_clusters = 0;
1319 static BlockDriverAIOCB *qcow_aio_read(BlockDriverState *bs,
1320 int64_t sector_num, uint8_t *buf, int nb_sectors,
1321 BlockDriverCompletionFunc *cb, void *opaque)
1325 acb = qcow_aio_setup(bs, sector_num, buf, nb_sectors, cb, opaque);
1329 qcow_aio_read_cb(acb, 0);
1330 return &acb->common;
1333 static void qcow_aio_write_cb(void *opaque, int ret)
1335 QCowAIOCB *acb = opaque;
1336 BlockDriverState *bs = acb->common.bs;
1337 BDRVQcowState *s = bs->opaque;
1338 int index_in_cluster;
1339 const uint8_t *src_buf;
1342 acb->hd_aiocb = NULL;
1346 acb->common.cb(acb->common.opaque, ret);
1347 qemu_aio_release(acb);
1351 if (alloc_cluster_link_l2(bs, acb->cluster_offset, &acb->l2meta) < 0) {
1352 free_any_clusters(bs, acb->cluster_offset, acb->l2meta.nb_clusters);
1356 acb->nb_sectors -= acb->n;
1357 acb->sector_num += acb->n;
1358 acb->buf += acb->n * 512;
1360 if (acb->nb_sectors == 0) {
1361 /* request completed */
1362 acb->common.cb(acb->common.opaque, 0);
1363 qemu_aio_release(acb);
1367 index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
1368 n_end = index_in_cluster + acb->nb_sectors;
1369 if (s->crypt_method &&
1370 n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors)
1371 n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
1373 acb->cluster_offset = alloc_cluster_offset(bs, acb->sector_num << 9,
1375 n_end, &acb->n, &acb->l2meta);
1376 if (!acb->cluster_offset || (acb->cluster_offset & 511) != 0) {
1380 if (s->crypt_method) {
1381 if (!acb->cluster_data) {
1382 acb->cluster_data = qemu_mallocz(QCOW_MAX_CRYPT_CLUSTERS *
1385 encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf,
1386 acb->n, 1, &s->aes_encrypt_key);
1387 src_buf = acb->cluster_data;
1391 acb->hd_aiocb = bdrv_aio_write(s->hd,
1392 (acb->cluster_offset >> 9) + index_in_cluster,
1394 qcow_aio_write_cb, acb);
1395 if (acb->hd_aiocb == NULL)
1399 static BlockDriverAIOCB *qcow_aio_write(BlockDriverState *bs,
1400 int64_t sector_num, const uint8_t *buf, int nb_sectors,
1401 BlockDriverCompletionFunc *cb, void *opaque)
1403 BDRVQcowState *s = bs->opaque;
1406 s->cluster_cache_offset = -1; /* disable compressed cache */
1408 acb = qcow_aio_setup(bs, sector_num, (uint8_t*)buf, nb_sectors, cb, opaque);
1412 qcow_aio_write_cb(acb, 0);
1413 return &acb->common;
1416 static void qcow_aio_cancel(BlockDriverAIOCB *blockacb)
1418 QCowAIOCB *acb = (QCowAIOCB *)blockacb;
1420 bdrv_aio_cancel(acb->hd_aiocb);
1421 qemu_aio_release(acb);
1424 static void qcow_close(BlockDriverState *bs)
1426 BDRVQcowState *s = bs->opaque;
1427 qemu_free(s->l1_table);
1428 qemu_free(s->l2_cache);
1429 qemu_free(s->cluster_cache);
1430 qemu_free(s->cluster_data);
1435 /* XXX: use std qcow open function ? */
1436 typedef struct QCowCreateState {
1439 uint16_t *refcount_block;
1440 uint64_t *refcount_table;
1441 int64_t l1_table_offset;
1442 int64_t refcount_table_offset;
1443 int64_t refcount_block_offset;
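/* Only used while creating an image: bump the in-memory refcount block for
   every cluster overlapped by the byte range [offset, offset + size). */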
1446 static void create_refcount_update(QCowCreateState *s,
1447 int64_t offset, int64_t size)
1450 int64_t start, last, cluster_offset;
1453 start = offset & ~(s->cluster_size - 1);
1454 last = (offset + size - 1) & ~(s->cluster_size - 1);
1455 for(cluster_offset = start; cluster_offset <= last;
1456 cluster_offset += s->cluster_size) {
1457 p = &s->refcount_block[cluster_offset >> s->cluster_bits];
1458 refcount = be16_to_cpu(*p);
1460 *p = cpu_to_be16(refcount);
1464 static int qcow_create(const char *filename, int64_t total_size,
1465 const char *backing_file, int flags)
1467 int fd, header_size, backing_filename_len, l1_size, i, shift, l2_bits;
1469 uint64_t tmp, offset;
1470 QCowCreateState s1, *s = &s1;
1472 memset(s, 0, sizeof(*s));
1474 fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0644);
1477 memset(&header, 0, sizeof(header));
1478 header.magic = cpu_to_be32(QCOW_MAGIC);
1479 header.version = cpu_to_be32(QCOW_VERSION);
1480 header.size = cpu_to_be64(total_size * 512);
1481 header_size = sizeof(header);
1482 backing_filename_len = 0;
1484 header.backing_file_offset = cpu_to_be64(header_size);
1485 backing_filename_len = strlen(backing_file);
1486 header.backing_file_size = cpu_to_be32(backing_filename_len);
1487 header_size += backing_filename_len;
1489 s->cluster_bits = 12; /* 4 KB clusters */
1490 s->cluster_size = 1 << s->cluster_bits;
1491 header.cluster_bits = cpu_to_be32(s->cluster_bits);
1492 header_size = (header_size + 7) & ~7;
1493 if (flags & BLOCK_FLAG_ENCRYPT) {
1494 header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
1496 header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
1498 l2_bits = s->cluster_bits - 3;
1499 shift = s->cluster_bits + l2_bits;
1500 l1_size = (((total_size * 512) + (1LL << shift) - 1) >> shift);
1501 offset = align_offset(header_size, s->cluster_size);
1502 s->l1_table_offset = offset;
1503 header.l1_table_offset = cpu_to_be64(s->l1_table_offset);
1504 header.l1_size = cpu_to_be32(l1_size);
1505 offset += align_offset(l1_size * sizeof(uint64_t), s->cluster_size);
1507 s->refcount_table = qemu_mallocz(s->cluster_size);
1508 s->refcount_block = qemu_mallocz(s->cluster_size);
1510 s->refcount_table_offset = offset;
1511 header.refcount_table_offset = cpu_to_be64(offset);
1512 header.refcount_table_clusters = cpu_to_be32(1);
1513 offset += s->cluster_size;
1515 s->refcount_table[0] = cpu_to_be64(offset);
1516 s->refcount_block_offset = offset;
1517 offset += s->cluster_size;
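/* Layout of a freshly created image: header plus backing file name, then the
   cluster-aligned empty L1 table, one refcount table cluster and one refcount
   block cluster; the refcounts of these metadata clusters are set below
   before everything is written out. */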
1519 /* update refcounts */
1520 create_refcount_update(s, 0, header_size);
1521 create_refcount_update(s, s->l1_table_offset, l1_size * sizeof(uint64_t));
1522 create_refcount_update(s, s->refcount_table_offset, s->cluster_size);
1523 create_refcount_update(s, s->refcount_block_offset, s->cluster_size);
1525 /* write all the data */
1526 write(fd, &header, sizeof(header));
1528 write(fd, backing_file, backing_filename_len);
1530 lseek(fd, s->l1_table_offset, SEEK_SET);
1532 for(i = 0;i < l1_size; i++) {
1533 write(fd, &tmp, sizeof(tmp));
1535 lseek(fd, s->refcount_table_offset, SEEK_SET);
1536 write(fd, s->refcount_table, s->cluster_size);
1538 lseek(fd, s->refcount_block_offset, SEEK_SET);
1539 write(fd, s->refcount_block, s->cluster_size);
1541 qemu_free(s->refcount_table);
1542 qemu_free(s->refcount_block);
1547 static int qcow_make_empty(BlockDriverState *bs)
1550 /* XXX: not correct */
1551 BDRVQcowState *s = bs->opaque;
1552 uint32_t l1_length = s->l1_size * sizeof(uint64_t);
1555 memset(s->l1_table, 0, l1_length);
1556 if (bdrv_pwrite(s->hd, s->l1_table_offset, s->l1_table, l1_length) < 0)
1558 ret = bdrv_truncate(s->hd, s->l1_table_offset + l1_length);
1567 /* XXX: put compressed sectors first, then all the cluster aligned
1568 tables to avoid losing bytes in alignment */
1569 static int qcow_write_compressed(BlockDriverState *bs, int64_t sector_num,
1570 const uint8_t *buf, int nb_sectors)
1572 BDRVQcowState *s = bs->opaque;
1576 uint64_t cluster_offset;
1578 if (nb_sectors == 0) {
1579 /* align end of file to a sector boundary to ease reading with
1580 sector based I/Os */
1581 cluster_offset = bdrv_getlength(s->hd);
1582 cluster_offset = (cluster_offset + 511) & ~511;
1583 bdrv_truncate(s->hd, cluster_offset);
1587 if (nb_sectors != s->cluster_sectors)
1590 out_buf = qemu_malloc(s->cluster_size + (s->cluster_size / 1000) + 128);
1592 /* best compression, small window, no zlib header */
1593 memset(&strm, 0, sizeof(strm));
1594 ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION,
1596 9, Z_DEFAULT_STRATEGY);
1602 strm.avail_in = s->cluster_size;
1603 strm.next_in = (uint8_t *)buf;
1604 strm.avail_out = s->cluster_size;
1605 strm.next_out = out_buf;
1607 ret = deflate(&strm, Z_FINISH);
1608 if (ret != Z_STREAM_END && ret != Z_OK) {
1613 out_len = strm.next_out - out_buf;
1617 if (ret != Z_STREAM_END || out_len >= s->cluster_size) {
1618 /* could not compress: write normal cluster */
1619 qcow_write(bs, sector_num, buf, s->cluster_sectors);
1621 cluster_offset = alloc_compressed_cluster_offset(bs, sector_num << 9,
1623 if (!cluster_offset)
1625 cluster_offset &= s->cluster_offset_mask;
1626 if (bdrv_pwrite(s->hd, cluster_offset, out_buf, out_len) != out_len) {
1636 static void qcow_flush(BlockDriverState *bs)
1638 BDRVQcowState *s = bs->opaque;
1642 static int qcow_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
1644 BDRVQcowState *s = bs->opaque;
1645 bdi->cluster_size = s->cluster_size;
1646 bdi->vm_state_offset = (int64_t)s->l1_vm_state_index <<
1647 (s->cluster_bits + s->l2_bits);
1648 bdi->highest_alloc = s->highest_alloc << s->cluster_bits;
1652 /*********************************************************/
1653 /* snapshot support */
1655 /* update the refcounts of snapshots and the copied flag */
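/* addend is +1 when a snapshot gains a reference to the clusters, -1 when it
   drops them, and 0 to only recompute the QCOW_OFLAG_COPIED flags. */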
1656 static int update_snapshot_refcount(BlockDriverState *bs,
1657 int64_t l1_table_offset,
1661 BDRVQcowState *s = bs->opaque;
1662 uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2, l1_allocated;
1663 int64_t old_offset, old_l2_offset;
1664 int l2_size, i, j, l1_modified, l2_modified, nb_csectors, refcount;
1670 l1_size2 = l1_size * sizeof(uint64_t);
1672 if (l1_table_offset != s->l1_table_offset) {
1673 l1_table = qemu_malloc(l1_size2);
1675 if (bdrv_pread(s->hd, l1_table_offset,
1676 l1_table, l1_size2) != l1_size2)
1678 for(i = 0;i < l1_size; i++)
1679 be64_to_cpus(&l1_table[i]);
1681 assert(l1_size == s->l1_size);
1682 l1_table = s->l1_table;
1686 l2_size = s->l2_size * sizeof(uint64_t);
1687 l2_table = qemu_malloc(l2_size);
1689 for(i = 0; i < l1_size; i++) {
1690 l2_offset = l1_table[i];
1692 old_l2_offset = l2_offset;
1693 l2_offset &= ~QCOW_OFLAG_COPIED;
1695 if (bdrv_pread(s->hd, l2_offset, l2_table, l2_size) != l2_size)
1697 for(j = 0; j < s->l2_size; j++) {
1698 offset = be64_to_cpu(l2_table[j]);
1700 old_offset = offset;
1701 offset &= ~QCOW_OFLAG_COPIED;
1702 if (offset & QCOW_OFLAG_COMPRESSED) {
1703 nb_csectors = ((offset >> s->csize_shift) &
1706 update_refcount(bs, (offset & s->cluster_offset_mask) & ~511,
1707 nb_csectors * 512, addend);
1708 /* compressed clusters are never modified */
1712 refcount = update_cluster_refcount(bs, offset >> s->cluster_bits, addend);
1714 refcount = get_refcount(bs, offset >> s->cluster_bits);
1718 if (refcount == 1) {
1719 offset |= QCOW_OFLAG_COPIED;
1721 if (offset != old_offset) {
1722 l2_table[j] = cpu_to_be64(offset);
1728 if (bdrv_pwrite(s->hd,
1729 l2_offset, l2_table, l2_size) != l2_size)
1734 refcount = update_cluster_refcount(bs, l2_offset >> s->cluster_bits, addend);
1736 refcount = get_refcount(bs, l2_offset >> s->cluster_bits);
1738 if (refcount == 1) {
1739 l2_offset |= QCOW_OFLAG_COPIED;
1741 if (l2_offset != old_l2_offset) {
1742 l1_table[i] = l2_offset;
1748 for(i = 0; i < l1_size; i++)
1749 cpu_to_be64s(&l1_table[i]);
1750 if (bdrv_pwrite(s->hd, l1_table_offset, l1_table,
1751 l1_size2) != l1_size2)
1753 for(i = 0; i < l1_size; i++)
1754 be64_to_cpus(&l1_table[i]);
1757 qemu_free(l1_table);
1758 qemu_free(l2_table);
1762 qemu_free(l1_table);
1763 qemu_free(l2_table);
1767 static void qcow_free_snapshots(BlockDriverState *bs)
1769 BDRVQcowState *s = bs->opaque;
1772 for(i = 0; i < s->nb_snapshots; i++) {
1773 qemu_free(s->snapshots[i].name);
1774 qemu_free(s->snapshots[i].id_str);
1776 qemu_free(s->snapshots);
1777 s->snapshots = NULL;
1778 s->nb_snapshots = 0;
1781 static int qcow_read_snapshots(BlockDriverState *bs)
1783 BDRVQcowState *s = bs->opaque;
1784 QCowSnapshotHeader h;
1786 int i, id_str_size, name_size;
1788 uint32_t extra_data_size;
1790 if (!s->nb_snapshots) {
1791 s->snapshots = NULL;
1792 s->snapshots_size = 0;
1796 offset = s->snapshots_offset;
1797 s->snapshots = qemu_mallocz(s->nb_snapshots * sizeof(QCowSnapshot));
1798 for(i = 0; i < s->nb_snapshots; i++) {
1799 offset = align_offset(offset, 8);
1800 if (bdrv_pread(s->hd, offset, &h, sizeof(h)) != sizeof(h))
1802 offset += sizeof(h);
1803 sn = s->snapshots + i;
1804 sn->l1_table_offset = be64_to_cpu(h.l1_table_offset);
1805 sn->l1_size = be32_to_cpu(h.l1_size);
1806 sn->vm_state_size = be32_to_cpu(h.vm_state_size);
1807 sn->date_sec = be32_to_cpu(h.date_sec);
1808 sn->date_nsec = be32_to_cpu(h.date_nsec);
1809 sn->vm_clock_nsec = be64_to_cpu(h.vm_clock_nsec);
1810 extra_data_size = be32_to_cpu(h.extra_data_size);
1812 id_str_size = be16_to_cpu(h.id_str_size);
1813 name_size = be16_to_cpu(h.name_size);
1815 offset += extra_data_size;
1817 sn->id_str = qemu_malloc(id_str_size + 1);
1818 if (bdrv_pread(s->hd, offset, sn->id_str, id_str_size) != id_str_size)
1820 offset += id_str_size;
1821 sn->id_str[id_str_size] = '\0';
1823 sn->name = qemu_malloc(name_size + 1);
1824 if (bdrv_pread(s->hd, offset, sn->name, name_size) != name_size)
1826 offset += name_size;
1827 sn->name[name_size] = '\0';
1829 s->snapshots_size = offset - s->snapshots_offset;
1832 qcow_free_snapshots(bs);
1836 /* append a new snapshot list at the end of the file */
1837 static int qcow_write_snapshots(BlockDriverState *bs)
1839 BDRVQcowState *s = bs->opaque;
1841 QCowSnapshotHeader h;
1842 int i, name_size, id_str_size, snapshots_size;
1845 int64_t offset, snapshots_offset;
1847 /* compute the size of the snapshots */
1849 for(i = 0; i < s->nb_snapshots; i++) {
1850 sn = s->snapshots + i;
1851 offset = align_offset(offset, 8);
1852 offset += sizeof(h);
1853 offset += strlen(sn->id_str);
1854 offset += strlen(sn->name);
1856 snapshots_size = offset;
1858 snapshots_offset = alloc_clusters(bs, snapshots_size);
1859 offset = snapshots_offset;
1861 for(i = 0; i < s->nb_snapshots; i++) {
1862 sn = s->snapshots + i;
1863 memset(&h, 0, sizeof(h));
1864 h.l1_table_offset = cpu_to_be64(sn->l1_table_offset);
1865 h.l1_size = cpu_to_be32(sn->l1_size);
1866 h.vm_state_size = cpu_to_be32(sn->vm_state_size);
1867 h.date_sec = cpu_to_be32(sn->date_sec);
1868 h.date_nsec = cpu_to_be32(sn->date_nsec);
1869 h.vm_clock_nsec = cpu_to_be64(sn->vm_clock_nsec);
1871 id_str_size = strlen(sn->id_str);
1872 name_size = strlen(sn->name);
1873 h.id_str_size = cpu_to_be16(id_str_size);
1874 h.name_size = cpu_to_be16(name_size);
1875 offset = align_offset(offset, 8);
1876 if (bdrv_pwrite(s->hd, offset, &h, sizeof(h)) != sizeof(h))
1878 offset += sizeof(h);
1879 if (bdrv_pwrite(s->hd, offset, sn->id_str, id_str_size) != id_str_size)
1881 offset += id_str_size;
1882 if (bdrv_pwrite(s->hd, offset, sn->name, name_size) != name_size)
1884 offset += name_size;
1887 /* update the various header fields */
1888 data64 = cpu_to_be64(snapshots_offset);
1889 if (bdrv_pwrite(s->hd, offsetof(QCowHeader, snapshots_offset),
1890 &data64, sizeof(data64)) != sizeof(data64))
1892 data32 = cpu_to_be32(s->nb_snapshots);
1893 if (bdrv_pwrite(s->hd, offsetof(QCowHeader, nb_snapshots),
1894 &data32, sizeof(data32)) != sizeof(data32))
1897 /* free the old snapshot table */
1898 free_clusters(bs, s->snapshots_offset, s->snapshots_size);
1899 s->snapshots_offset = snapshots_offset;
1900 s->snapshots_size = snapshots_size;
1906 static void find_new_snapshot_id(BlockDriverState *bs,
1907 char *id_str, int id_str_size)
1909 BDRVQcowState *s = bs->opaque;
1911 int i, id, id_max = 0;
1913 for(i = 0; i < s->nb_snapshots; i++) {
1914 sn = s->snapshots + i;
1915 id = strtoul(sn->id_str, NULL, 10);
1919 snprintf(id_str, id_str_size, "%d", id_max + 1);
1922 static int find_snapshot_by_id(BlockDriverState *bs, const char *id_str)
1924 BDRVQcowState *s = bs->opaque;
1927 for(i = 0; i < s->nb_snapshots; i++) {
1928 if (!strcmp(s->snapshots[i].id_str, id_str))
1934 static int find_snapshot_by_id_or_name(BlockDriverState *bs, const char *name)
1936 BDRVQcowState *s = bs->opaque;
1939 ret = find_snapshot_by_id(bs, name);
1942 for(i = 0; i < s->nb_snapshots; i++) {
1943 if (!strcmp(s->snapshots[i].name, name))
1949 /* if no id is provided, a new one is constructed */
1950 static int qcow_snapshot_create(BlockDriverState *bs,
1951 QEMUSnapshotInfo *sn_info)
1953 BDRVQcowState *s = bs->opaque;
1954 QCowSnapshot *snapshots1, sn1, *sn = &sn1;
1956 uint64_t *l1_table = NULL;
1958 memset(sn, 0, sizeof(*sn));
1960 if (sn_info->id_str[0] == '\0') {
1961 /* compute a new id */
1962 find_new_snapshot_id(bs, sn_info->id_str, sizeof(sn_info->id_str));
1965 /* check that the ID is unique */
1966 if (find_snapshot_by_id(bs, sn_info->id_str) >= 0)
1969 sn->id_str = qemu_strdup(sn_info->id_str);
1972 sn->name = qemu_strdup(sn_info->name);
1975 sn->vm_state_size = sn_info->vm_state_size;
1976 sn->date_sec = sn_info->date_sec;
1977 sn->date_nsec = sn_info->date_nsec;
1978 sn->vm_clock_nsec = sn_info->vm_clock_nsec;
1980 ret = update_snapshot_refcount(bs, s->l1_table_offset, s->l1_size, 1);
1984 /* create the L1 table of the snapshot */
1985 sn->l1_table_offset = alloc_clusters(bs, s->l1_size * sizeof(uint64_t));
1986 sn->l1_size = s->l1_size;
1988 l1_table = qemu_malloc(s->l1_size * sizeof(uint64_t));
1989 for(i = 0; i < s->l1_size; i++) {
1990 l1_table[i] = cpu_to_be64(s->l1_table[i]);
1992 if (bdrv_pwrite(s->hd, sn->l1_table_offset,
1993 l1_table, s->l1_size * sizeof(uint64_t)) !=
1994 (s->l1_size * sizeof(uint64_t)))
1996 qemu_free(l1_table);
1999 snapshots1 = qemu_malloc((s->nb_snapshots + 1) * sizeof(QCowSnapshot));
2001 memcpy(snapshots1, s->snapshots, s->nb_snapshots * sizeof(QCowSnapshot));
2002 qemu_free(s->snapshots);
2004 s->snapshots = snapshots1;
2005 s->snapshots[s->nb_snapshots++] = *sn;
2007 if (qcow_write_snapshots(bs) < 0)
2010 check_refcounts(bs);
2014 qemu_free(sn->name);
2015 qemu_free(l1_table);
2019 /* copy the snapshot 'snapshot_id' into the current disk image */
2020 static int qcow_snapshot_goto(BlockDriverState *bs,
2021 const char *snapshot_id)
2023 BDRVQcowState *s = bs->opaque;
2025 int i, snapshot_index, l1_size2;
2027 snapshot_index = find_snapshot_by_id_or_name(bs, snapshot_id);
2028 if (snapshot_index < 0)
2030 sn = &s->snapshots[snapshot_index];
2032 if (update_snapshot_refcount(bs, s->l1_table_offset, s->l1_size, -1) < 0)
2035 if (grow_l1_table(bs, sn->l1_size) < 0)
2038 s->l1_size = sn->l1_size;
2039 l1_size2 = s->l1_size * sizeof(uint64_t);
2040 /* copy the snapshot l1 table to the current l1 table */
2041 if (bdrv_pread(s->hd, sn->l1_table_offset,
2042 s->l1_table, l1_size2) != l1_size2)
2044 if (bdrv_pwrite(s->hd, s->l1_table_offset,
2045 s->l1_table, l1_size2) != l1_size2)
2047 for(i = 0;i < s->l1_size; i++) {
2048 be64_to_cpus(&s->l1_table[i]);
2051 if (update_snapshot_refcount(bs, s->l1_table_offset, s->l1_size, 1) < 0)
2055 check_refcounts(bs);
2062 static int qcow_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)
2064 BDRVQcowState *s = bs->opaque;
2066 int snapshot_index, ret;
2068 snapshot_index = find_snapshot_by_id_or_name(bs, snapshot_id);
2069 if (snapshot_index < 0)
2071 sn = &s->snapshots[snapshot_index];
2073 ret = update_snapshot_refcount(bs, sn->l1_table_offset, sn->l1_size, -1);
2076 /* must update the copied flag on the current cluster offsets */
2077 ret = update_snapshot_refcount(bs, s->l1_table_offset, s->l1_size, 0);
2080 free_clusters(bs, sn->l1_table_offset, sn->l1_size * sizeof(uint64_t));
2082 qemu_free(sn->id_str);
2083 qemu_free(sn->name);
2084 memmove(sn, sn + 1, (s->nb_snapshots - snapshot_index - 1) * sizeof(*sn));
2086 ret = qcow_write_snapshots(bs);
2088 /* XXX: restore snapshot if error ? */
2092 check_refcounts(bs);
2097 static int qcow_snapshot_list(BlockDriverState *bs,
2098 QEMUSnapshotInfo **psn_tab)
2100 BDRVQcowState *s = bs->opaque;
2101 QEMUSnapshotInfo *sn_tab, *sn_info;
2105 sn_tab = qemu_mallocz(s->nb_snapshots * sizeof(QEMUSnapshotInfo));
2106 for(i = 0; i < s->nb_snapshots; i++) {
2107 sn_info = sn_tab + i;
2108 sn = s->snapshots + i;
2109 pstrcpy(sn_info->id_str, sizeof(sn_info->id_str),
2111 pstrcpy(sn_info->name, sizeof(sn_info->name),
2113 sn_info->vm_state_size = sn->vm_state_size;
2114 sn_info->date_sec = sn->date_sec;
2115 sn_info->date_nsec = sn->date_nsec;
2116 sn_info->vm_clock_nsec = sn->vm_clock_nsec;
2119 return s->nb_snapshots;
2122 /*********************************************************/
2123 /* refcount handling */
2125 static int refcount_init(BlockDriverState *bs)
2127 BDRVQcowState *s = bs->opaque;
2128 int ret, refcount_table_size2, i;
2130 s->refcount_block_cache = qemu_malloc(s->cluster_size);
2131 refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
2132 s->refcount_table = qemu_malloc(refcount_table_size2);
2133 if (s->refcount_table_size > 0) {
2134 ret = bdrv_pread(s->hd, s->refcount_table_offset,
2135 s->refcount_table, refcount_table_size2);
2136 if (ret != refcount_table_size2)
2138 for(i = 0; i < s->refcount_table_size; i++)
2139 be64_to_cpus(&s->refcount_table[i]);
2146 static void refcount_close(BlockDriverState *bs)
2148 BDRVQcowState *s = bs->opaque;
2149 qemu_free(s->refcount_block_cache);
2150 qemu_free(s->refcount_table);
2154 static int load_refcount_block(BlockDriverState *bs,
2155 int64_t refcount_block_offset)
2157 BDRVQcowState *s = bs->opaque;
2159 ret = bdrv_pread(s->hd, refcount_block_offset, s->refcount_block_cache,
2161 if (ret != s->cluster_size)
2163 s->refcount_block_cache_offset = refcount_block_offset;
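/* Walk all allocated refcount blocks and find the highest cluster that
   currently has a non-zero refcount; qcow_open() uses the result to seed
   s->highest_alloc. */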
2167 static void scan_refcount(BlockDriverState *bs, int64_t *high)
2169 BDRVQcowState *s = bs->opaque;
2170 int64_t refcnt_index, cluster_index, cluster_end, h = 0;
2172 for (refcnt_index=0; refcnt_index < s->refcount_table_size; refcnt_index++){
2173 if (s->refcount_table[refcnt_index] == 0) {
2176 cluster_index = refcnt_index << (s->cluster_bits - REFCOUNT_SHIFT);
2177 cluster_end = (refcnt_index + 1) << (s->cluster_bits - REFCOUNT_SHIFT);
2178 for ( ; cluster_index < cluster_end; cluster_index++) {
2179 if (get_refcount(bs, cluster_index) == 0)
2180 /* do nothing -- reserved for free counting */;
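/* Return the refcount of the given cluster, or 0 if its refcount block has
   not been allocated yet; lookups go through the one-entry refcount block
   cache. */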
2190 static int get_refcount(BlockDriverState *bs, int64_t cluster_index)
2192 BDRVQcowState *s = bs->opaque;
2193 int refcount_table_index, block_index;
2194 int64_t refcount_block_offset;
2196 refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
2197 if (refcount_table_index >= s->refcount_table_size)
2199 refcount_block_offset = s->refcount_table[refcount_table_index];
2200 if (!refcount_block_offset)
2202 if (refcount_block_offset != s->refcount_block_cache_offset) {
2203 /* better than nothing: return allocated if read error */
2204 if (load_refcount_block(bs, refcount_block_offset) < 0)
2207 block_index = cluster_index &
2208 ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
2209 return be16_to_cpu(s->refcount_block_cache[block_index]);
2212 /* return < 0 if error */
2213 static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size)
2215 BDRVQcowState *s = bs->opaque;
2218 nb_clusters = size_to_clusters(s, size);
2220 for(i = 0; i < nb_clusters; i++) {
2221 int64_t i = s->free_cluster_index++;
2222 if (get_refcount(bs, i) != 0)
2226 printf("alloc_clusters: size=%lld -> %lld\n",
2228 (s->free_cluster_index - nb_clusters) << s->cluster_bits);
2231 if (s->highest_alloc < s->free_cluster_index)
2232 s->highest_alloc = s->free_cluster_index;
2234 return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
2237 static int64_t alloc_clusters(BlockDriverState *bs, int64_t size)
2241 offset = alloc_clusters_noref(bs, size);
2242 update_refcount(bs, offset, size, 1);
2246 /* only used to allocate compressed sectors. We try to allocate
2247 contiguous sectors. size must be <= cluster_size */
2248 static int64_t alloc_bytes(BlockDriverState *bs, int size)
2250 BDRVQcowState *s = bs->opaque;
2251 int64_t offset, cluster_offset;
2252 int free_in_cluster;
2254 assert(size > 0 && size <= s->cluster_size);
2255 if (s->free_byte_offset == 0) {
2256 s->free_byte_offset = alloc_clusters(bs, s->cluster_size);
2259 free_in_cluster = s->cluster_size -
2260 (s->free_byte_offset & (s->cluster_size - 1));
2261 if (size <= free_in_cluster) {
2262 /* enough space in current cluster */
2263 offset = s->free_byte_offset;
2264 s->free_byte_offset += size;
2265 free_in_cluster -= size;
2266 if (free_in_cluster == 0)
2267 s->free_byte_offset = 0;
2268 if ((offset & (s->cluster_size - 1)) != 0)
2269 update_cluster_refcount(bs, offset >> s->cluster_bits, 1);
2271 offset = alloc_clusters(bs, s->cluster_size);
2272 cluster_offset = s->free_byte_offset & ~(s->cluster_size - 1);
2273 if ((cluster_offset + s->cluster_size) == offset) {
2274 /* we are lucky: contiguous data */
2275 offset = s->free_byte_offset;
2276 update_cluster_refcount(bs, offset >> s->cluster_bits, 1);
2277 s->free_byte_offset += size;
2279 s->free_byte_offset = offset;
2286 static void free_clusters(BlockDriverState *bs,
2287 int64_t offset, int64_t size)
2289 update_refcount(bs, offset, size, -1);
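/* Grow the refcount table until it has at least min_size entries, growing by
   roughly 3/2 per step. The new table is placed in clusters obtained with
   alloc_clusters_noref(); its own refcounts are only added after the header
   has been updated, to avoid recursing into this function. */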
static int grow_refcount_table(BlockDriverState *bs, int min_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_table_size, new_table_size2, refcount_table_clusters, i, ret;
    uint64_t *new_table;
    int64_t table_offset;
    uint8_t data[12];
    int old_table_size;
    int64_t old_table_offset;

    if (min_size <= s->refcount_table_size)
        return 0;
    /* compute new table size */
    refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3);
    for(;;) {
        if (refcount_table_clusters == 0) {
            refcount_table_clusters = 1;
        } else {
            refcount_table_clusters = (refcount_table_clusters * 3 + 1) / 2;
        }
        new_table_size = refcount_table_clusters << (s->cluster_bits - 3);
        if (min_size <= new_table_size)
            break;
    }
#ifdef DEBUG_ALLOC2
    printf("grow_refcount_table from %d to %d\n",
           s->refcount_table_size,
           new_table_size);
#endif
    new_table_size2 = new_table_size * sizeof(uint64_t);
    new_table = qemu_mallocz(new_table_size2);
    memcpy(new_table, s->refcount_table,
           s->refcount_table_size * sizeof(uint64_t));
    for(i = 0; i < s->refcount_table_size; i++)
        cpu_to_be64s(&new_table[i]);
    /* Note: we cannot update the refcount now to avoid recursion */
    table_offset = alloc_clusters_noref(bs, new_table_size2);
    ret = bdrv_pwrite(s->hd, table_offset, new_table, new_table_size2);
    if (ret != new_table_size2)
        goto fail;
    for(i = 0; i < s->refcount_table_size; i++)
        be64_to_cpus(&new_table[i]);

    cpu_to_be64w((uint64_t*)data, table_offset);
    cpu_to_be32w((uint32_t*)(data + 8), refcount_table_clusters);
    if (bdrv_pwrite(s->hd, offsetof(QCowHeader, refcount_table_offset),
                    data, sizeof(data)) != sizeof(data))
        goto fail;
    qemu_free(s->refcount_table);
    old_table_offset = s->refcount_table_offset;
    old_table_size = s->refcount_table_size;
    s->refcount_table = new_table;
    s->refcount_table_size = new_table_size;
    s->refcount_table_offset = table_offset;

    update_refcount(bs, table_offset, new_table_size2, 1);
    free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t));
    return 0;
 fail:
    free_clusters(bs, table_offset, new_table_size2);
    qemu_free(new_table);
    return -EIO;
}

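/* Refcounts are stored as big-endian 16 bit values in refcount blocks
   of one cluster each. A cluster index is split in two parts: the high
   bits select the refcount block through s->refcount_table[], the low
   (cluster_bits - REFCOUNT_SHIFT) bits select the entry within that
   block. */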
/* addend must be 1 or -1 */
/* XXX: cache several refcount block clusters ? */
static int update_cluster_refcount(BlockDriverState *bs,
                                   int64_t cluster_index,
                                   int addend)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset, refcount_block_offset;
    int ret, refcount_table_index, block_index, refcount;
    uint64_t data64;

    refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
    if (refcount_table_index >= s->refcount_table_size) {
        if (addend < 0)
            return -EINVAL;
        ret = grow_refcount_table(bs, refcount_table_index + 1);
        if (ret < 0)
            return ret;
    }
    refcount_block_offset = s->refcount_table[refcount_table_index];
    if (!refcount_block_offset) {
        if (addend < 0)
            return -EINVAL;
        /* create a new refcount block */
        /* Note: we cannot update the refcount now to avoid recursion */
        offset = alloc_clusters_noref(bs, s->cluster_size);
        memset(s->refcount_block_cache, 0, s->cluster_size);
        ret = bdrv_pwrite(s->hd, offset, s->refcount_block_cache, s->cluster_size);
        if (ret != s->cluster_size)
            return -EINVAL;
        s->refcount_table[refcount_table_index] = offset;
        data64 = cpu_to_be64(offset);
        ret = bdrv_pwrite(s->hd, s->refcount_table_offset +
                          refcount_table_index * sizeof(uint64_t),
                          &data64, sizeof(data64));
        if (ret != sizeof(data64))
            return -EINVAL;

        refcount_block_offset = offset;
        s->refcount_block_cache_offset = offset;
        update_refcount(bs, offset, s->cluster_size, 1);
    } else {
        if (refcount_block_offset != s->refcount_block_cache_offset) {
            if (load_refcount_block(bs, refcount_block_offset) < 0)
                return -EIO;
        }
    }
    /* we can update the count and save it */
    block_index = cluster_index &
        ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
    refcount = be16_to_cpu(s->refcount_block_cache[block_index]);
    refcount += addend;
    if (refcount < 0 || refcount > 0xffff)
        return -EINVAL;
    if (refcount == 0 && cluster_index < s->free_cluster_index) {
        s->free_cluster_index = cluster_index;
    }
    s->refcount_block_cache[block_index] = cpu_to_be16(refcount);
    if (bdrv_pwrite(s->hd,
                    refcount_block_offset + (block_index << REFCOUNT_SHIFT),
                    &s->refcount_block_cache[block_index], 2) != 2)
        return -EIO;
    return refcount;
}

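/* Apply 'addend' to the refcount of every cluster touched by the byte
   range [offset, offset + length). */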
static void update_refcount(BlockDriverState *bs,
                            int64_t offset, int64_t length,
                            int addend)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;

#ifdef DEBUG_ALLOC2
    printf("update_refcount: offset=%lld size=%lld addend=%d\n",
           offset, length, addend);
#endif
    if (length <= 0)
        return;
    start = offset & ~(s->cluster_size - 1);
    last = (offset + length - 1) & ~(s->cluster_size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size) {
        update_cluster_refcount(bs, cluster_offset >> s->cluster_bits, addend);
    }
}

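/* Refcount checking: the functions below recompute the refcount of
   every cluster from the image metadata into an in-memory array and
   compare the result with the refcounts stored in the image.
   inc_refcounts() accounts for one byte range; invalid offsets and
   refcount overflows are reported on stdout. */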
static void inc_refcounts(BlockDriverState *bs,
                          uint16_t *refcount_table,
                          int refcount_table_size,
                          int64_t offset, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    int k;

    if (size <= 0)
        return;

    start = offset & ~(s->cluster_size - 1);
    last = (offset + size - 1) & ~(s->cluster_size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size) {
        k = cluster_offset >> s->cluster_bits;
        if (k < 0 || k >= refcount_table_size) {
            printf("ERROR: invalid cluster offset=0x%llx\n", cluster_offset);
        } else {
            if (++refcount_table[k] == 0) {
                printf("ERROR: overflow cluster offset=0x%llx\n", cluster_offset);
            }
        }
    }
}

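/* Account for all clusters referenced by one L1 table and its L2
   tables. If 'check_copied' is set, also verify that QCOW_OFLAG_COPIED
   is set exactly on entries whose refcount is one. */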
static int check_refcounts_l1(BlockDriverState *bs,
                              uint16_t *refcount_table,
                              int refcount_table_size,
                              int64_t l1_table_offset, int l1_size,
                              int check_copied)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2;
    int l2_size, i, j, nb_csectors, refcount;

    l2_table = NULL;
    l1_size2 = l1_size * sizeof(uint64_t);

    inc_refcounts(bs, refcount_table, refcount_table_size,
                  l1_table_offset, l1_size2);

    l1_table = qemu_malloc(l1_size2);
    if (bdrv_pread(s->hd, l1_table_offset,
                   l1_table, l1_size2) != l1_size2)
        goto fail;
    for(i = 0; i < l1_size; i++)
        be64_to_cpus(&l1_table[i]);

    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = qemu_malloc(l2_size);
    for(i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            if (check_copied) {
                refcount = get_refcount(bs, (l2_offset & ~QCOW_OFLAG_COPIED) >> s->cluster_bits);
                if ((refcount == 1) != ((l2_offset & QCOW_OFLAG_COPIED) != 0)) {
                    printf("ERROR OFLAG_COPIED: l2_offset=%llx refcount=%d\n",
                           l2_offset, refcount);
                }
            }
            l2_offset &= ~QCOW_OFLAG_COPIED;
            if (bdrv_pread(s->hd, l2_offset, l2_table, l2_size) != l2_size)
                goto fail;
            for(j = 0; j < s->l2_size; j++) {
                offset = be64_to_cpu(l2_table[j]);
                if (offset != 0) {
                    if (offset & QCOW_OFLAG_COMPRESSED) {
                        if (offset & QCOW_OFLAG_COPIED) {
                            printf("ERROR: cluster %lld: copied flag must never be set for compressed clusters\n",
                                   offset >> s->cluster_bits);
                            offset &= ~QCOW_OFLAG_COPIED;
                        }
                        nb_csectors = ((offset >> s->csize_shift) &
                                       s->csize_mask) + 1;
                        offset &= s->cluster_offset_mask;
                        inc_refcounts(bs, refcount_table,
                                      refcount_table_size,
                                      offset & ~511, nb_csectors * 512);
                    } else {
                        if (check_copied) {
                            refcount = get_refcount(bs, (offset & ~QCOW_OFLAG_COPIED) >> s->cluster_bits);
                            if ((refcount == 1) != ((offset & QCOW_OFLAG_COPIED) != 0)) {
                                printf("ERROR OFLAG_COPIED: offset=%llx refcount=%d\n",
                                       offset, refcount);
                            }
                        }
                        offset &= ~QCOW_OFLAG_COPIED;
                        inc_refcounts(bs, refcount_table,
                                      refcount_table_size,
                                      offset, s->cluster_size);
                    }
                }
            }
            inc_refcounts(bs, refcount_table,
                          refcount_table_size,
                          l2_offset, s->cluster_size);
        }
    }
    qemu_free(l1_table);
    qemu_free(l2_table);
    return 0;
 fail:
    printf("ERROR: I/O error in check_refcounts_l1\n");
    qemu_free(l1_table);
    qemu_free(l2_table);
    return -EIO;
}

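/* Check the refcounts of the whole image: header, active L1/L2 tables,
   snapshot L1 tables, snapshot table and refcount structures are all
   accounted for, then the recomputed counts are compared against the
   values stored in the image. */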
static void check_refcounts(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int64_t size;
    int nb_clusters, refcount1, refcount2, i;
    QCowSnapshot *sn;
    uint16_t *refcount_table;

    size = bdrv_getlength(s->hd);
    nb_clusters = size_to_clusters(s, size);
    refcount_table = qemu_mallocz(nb_clusters * sizeof(uint16_t));

    /* header */
    inc_refcounts(bs, refcount_table, nb_clusters,
                  0, s->cluster_size);

    check_refcounts_l1(bs, refcount_table, nb_clusters,
                       s->l1_table_offset, s->l1_size, 1);

    /* snapshots */
    for(i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        check_refcounts_l1(bs, refcount_table, nb_clusters,
                           sn->l1_table_offset, sn->l1_size, 0);
    }
    inc_refcounts(bs, refcount_table, nb_clusters,
                  s->snapshots_offset, s->snapshots_size);

    /* refcount data */
    inc_refcounts(bs, refcount_table, nb_clusters,
                  s->refcount_table_offset,
                  s->refcount_table_size * sizeof(uint64_t));
    for(i = 0; i < s->refcount_table_size; i++) {
        int64_t offset;
        offset = s->refcount_table[i];
        if (offset != 0) {
            inc_refcounts(bs, refcount_table, nb_clusters,
                          offset, s->cluster_size);
        }
    }

    /* compare ref counts */
    for(i = 0; i < nb_clusters; i++) {
        refcount1 = get_refcount(bs, i);
        refcount2 = refcount_table[i];
        if (refcount1 != refcount2)
            printf("ERROR cluster %d refcount=%d reference=%d\n",
                   i, refcount1, refcount2);
    }

    qemu_free(refcount_table);
}

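/* Debug helper: print one line per run of clusters sharing the same
   refcount. */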
static void dump_refcounts(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int64_t nb_clusters, k, k1, size;
    int refcount;

    size = bdrv_getlength(s->hd);
    nb_clusters = size_to_clusters(s, size);
    for(k = 0; k < nb_clusters;) {
        k1 = k;
        refcount = get_refcount(bs, k);
        k++;
        while (k < nb_clusters && get_refcount(bs, k) == refcount)
            k++;
        printf("%lld: refcount=%d nb=%lld\n", k1, refcount, k - k1);
    }
}

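/* Driver table handed to the generic block layer; the callbacks refer
   to the qcow_* functions defined earlier in this file. */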
BlockDriver bdrv_qcow2 = {
    .format_name = "qcow2",
    .instance_size = sizeof(BDRVQcowState),
    .bdrv_probe = qcow_probe,
    .bdrv_open = qcow_open,
    .bdrv_close = qcow_close,
    .bdrv_create = qcow_create,
    .bdrv_flush = qcow_flush,
    .bdrv_is_allocated = qcow_is_allocated,
    .bdrv_set_key = qcow_set_key,
    .bdrv_make_empty = qcow_make_empty,

    .bdrv_aio_read = qcow_aio_read,
    .bdrv_aio_write = qcow_aio_write,
    .bdrv_aio_cancel = qcow_aio_cancel,
    .aiocb_size = sizeof(QCowAIOCB),
    .bdrv_write_compressed = qcow_write_compressed,

    .bdrv_snapshot_create = qcow_snapshot_create,
    .bdrv_snapshot_goto = qcow_snapshot_goto,
    .bdrv_snapshot_delete = qcow_snapshot_delete,
    .bdrv_snapshot_list = qcow_snapshot_list,
    .bdrv_get_info = qcow_get_info,
};