/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "block_int.h"
#include <zlib.h>
#include "aes.h"
/*
  Differences with QCOW:

  - Support for multiple incremental snapshots.
  - Memory management by reference counts.
  - Clusters which have a reference count of one have the bit
    QCOW_OFLAG_COPIED set to optimize write performance.
  - The size of compressed clusters is stored in sectors to reduce bit
    usage in the cluster offsets.
  - Support for storing additional data (such as the VM state) in the
    snapshots.
  - If a backing store is used, the cluster size is not constrained
    (could be backported to QCOW).
  - L2 tables always have a size of one cluster.
*/
//#define DEBUG_ALLOC2

#define QCOW_MAGIC (('Q' << 24) | ('F' << 16) | ('I' << 8) | 0xfb)
#define QCOW_VERSION 2

#define QCOW_CRYPT_NONE 0
#define QCOW_CRYPT_AES  1

#define QCOW_MAX_CRYPT_CLUSTERS 32

/* indicates that the refcount of the referenced cluster is exactly one. */
#define QCOW_OFLAG_COPIED     (1LL << 63)
/* indicates that the cluster is compressed (compressed clusters never have
   the copied flag) */
#define QCOW_OFLAG_COMPRESSED (1LL << 62)

#define REFCOUNT_SHIFT 1 /* refcount size is 2 bytes */
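
/*
 * Illustrative note (editor's addition, not part of the original source):
 * an L2 entry is a 64-bit value whose top two bits are the flags above.
 * A minimal sketch of the bit arithmetic, assuming 4 KB clusters
 * (cluster_bits = 12):
 *
 *   uint64_t entry   = offset | QCOW_OFLAG_COPIED;    // refcount == 1
 *   int is_copied     = (entry & QCOW_OFLAG_COPIED) != 0;
 *   int is_compressed = (entry & QCOW_OFLAG_COMPRESSED) != 0;
 *   uint64_t cluster  = entry & ~(QCOW_OFLAG_COPIED | QCOW_OFLAG_COMPRESSED);
 *
 * With REFCOUNT_SHIFT == 1 each refcount entry is 2 bytes, so one 4 KB
 * refcount block holds 4096 >> REFCOUNT_SHIFT == 2048 refcounts and thus
 * describes 2048 * 4 KB == 8 MB of image data.
 */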
typedef struct QCowHeader {
    uint32_t magic;
    uint32_t version;
    uint64_t backing_file_offset;
    uint32_t backing_file_size;
    uint32_t cluster_bits;
    uint64_t size; /* in bytes */
    uint32_t crypt_method;
    uint32_t l1_size; /* XXX: save number of clusters instead ? */
    uint64_t l1_table_offset;
    uint64_t refcount_table_offset;
    uint32_t refcount_table_clusters;
    uint32_t nb_snapshots;
    uint64_t snapshots_offset;
} QCowHeader;
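
/*
 * Editor's sketch (not in the original source): all header fields are
 * stored big-endian on disk.  QCOW_MAGIC spells the bytes 'Q' 'F' 'I' 0xfb
 * when serialized in big-endian order, e.g.:
 *
 *   uint8_t buf[4];
 *   buf[0] = QCOW_MAGIC >> 24;          // 'Q'
 *   buf[1] = (QCOW_MAGIC >> 16) & 0xff; // 'F'
 *   buf[2] = (QCOW_MAGIC >> 8) & 0xff;  // 'I'
 *   buf[3] = QCOW_MAGIC & 0xff;         // 0xfb
 *
 * which is why qcow_open() below byte-swaps every field with the
 * be32_to_cpus()/be64_to_cpus() helpers before using it.
 */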
typedef struct __attribute__((packed)) QCowSnapshotHeader {
    /* header is 8 byte aligned */
    uint64_t l1_table_offset;

    uint32_t l1_size;
    uint16_t id_str_size;
    uint16_t name_size;

    uint32_t date_sec;
    uint32_t date_nsec;

    uint64_t vm_clock_nsec;

    uint32_t vm_state_size;
    uint32_t extra_data_size; /* for extension */
    /* extra data follows */
    /* id_str follows */
    /* name follows */
} QCowSnapshotHeader;
#define L2_CACHE_SIZE 16
typedef struct QCowSnapshot {
    uint64_t l1_table_offset;
    uint32_t l1_size;
    char *id_str;
    char *name;
    uint32_t vm_state_size;
    uint32_t date_sec;
    uint32_t date_nsec;
    uint64_t vm_clock_nsec;
} QCowSnapshot;
typedef struct BDRVQcowState {
    BlockDriverState *hd;
    int cluster_bits;
    int cluster_size;
    int cluster_sectors;
    int l2_bits;
    int l2_size;
    int l1_size;
    int l1_vm_state_index;
    int csize_shift;
    int csize_mask;
    uint64_t cluster_offset_mask;
    uint64_t l1_table_offset;
    uint64_t *l1_table;
    uint64_t *l2_cache;
    uint64_t l2_cache_offsets[L2_CACHE_SIZE];
    uint32_t l2_cache_counts[L2_CACHE_SIZE];
    uint8_t *cluster_cache;
    uint8_t *cluster_data;
    uint64_t cluster_cache_offset;

    uint64_t *refcount_table;
    uint64_t refcount_table_offset;
    uint32_t refcount_table_size;
    uint64_t refcount_block_cache_offset;
    uint16_t *refcount_block_cache;
    int64_t free_cluster_index;
    int64_t free_byte_offset;

    uint32_t crypt_method; /* current crypt method, 0 if no key yet */
    uint32_t crypt_method_header;
    AES_KEY aes_encrypt_key;
    AES_KEY aes_decrypt_key;

    int64_t highest_alloc; /* highest cluster allocated (in clusters) */

    uint64_t snapshots_offset;
    int snapshots_size;
    int nb_snapshots;
    QCowSnapshot *snapshots;
} BDRVQcowState;
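
/*
 * Illustrative sketch (editor's addition): a guest offset is resolved
 * through two levels of tables.  Using the derived fields above, the
 * decomposition is:
 *
 *   l1_index          = offset >> (s->l2_bits + s->cluster_bits);
 *   l2_index          = (offset >> s->cluster_bits) & (s->l2_size - 1);
 *   offset_in_cluster = offset & (s->cluster_size - 1);
 *
 * For cluster_bits = 12 (so l2_bits = 9 and l2_size = 512), each L2 table
 * maps 512 * 4 KB = 2 MB of guest data, and one L1 entry covers exactly
 * one L2 table.
 */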
static int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset);
static int qcow_read(BlockDriverState *bs, int64_t sector_num,
                     uint8_t *buf, int nb_sectors);
static int qcow_read_snapshots(BlockDriverState *bs);
static void qcow_free_snapshots(BlockDriverState *bs);
static int refcount_init(BlockDriverState *bs);
static void refcount_close(BlockDriverState *bs);
static int get_refcount(BlockDriverState *bs, int64_t cluster_index);
static int update_cluster_refcount(BlockDriverState *bs,
                                   int64_t cluster_index,
                                   int addend);
static void update_refcount(BlockDriverState *bs,
                            int64_t offset, int64_t length,
                            int addend);
static int64_t alloc_clusters(BlockDriverState *bs, int64_t size);
static int64_t alloc_bytes(BlockDriverState *bs, int size);
static void free_clusters(BlockDriverState *bs,
                          int64_t offset, int64_t size);
static void check_refcounts(BlockDriverState *bs);
static void scan_refcount(BlockDriverState *bs, int64_t *high);
static int qcow_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    const QCowHeader *cow_header = (const void *)buf;

    if (buf_size >= sizeof(QCowHeader) &&
        be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
        be32_to_cpu(cow_header->version) == QCOW_VERSION)
        return 100;
    else
        return 0;
}
static int qcow_open(BlockDriverState *bs, const char *filename, int flags)
{
    BDRVQcowState *s = bs->opaque;
    int len, i, shift, ret;
    QCowHeader header;

    /* Performance is terrible right now with cache=writethrough due mainly
     * to reference count updates.  If the user does not explicitly specify
     * a caching type, force to writeback caching.
     */
    if ((flags & BDRV_O_CACHE_DEF)) {
        flags |= BDRV_O_CACHE_WB;
        flags &= ~BDRV_O_CACHE_DEF;
    }
    ret = bdrv_file_open(&s->hd, filename, flags);
    if (ret < 0)
        return ret;
    if (bdrv_pread(s->hd, 0, &header, sizeof(header)) != sizeof(header))
        goto fail;
    be32_to_cpus(&header.magic);
    be32_to_cpus(&header.version);
    be64_to_cpus(&header.backing_file_offset);
    be32_to_cpus(&header.backing_file_size);
    be64_to_cpus(&header.size);
    be32_to_cpus(&header.cluster_bits);
    be32_to_cpus(&header.crypt_method);
    be64_to_cpus(&header.l1_table_offset);
    be32_to_cpus(&header.l1_size);
    be64_to_cpus(&header.refcount_table_offset);
    be32_to_cpus(&header.refcount_table_clusters);
    be64_to_cpus(&header.snapshots_offset);
    be32_to_cpus(&header.nb_snapshots);

    if (header.magic != QCOW_MAGIC || header.version != QCOW_VERSION)
        goto fail;
    if (header.size <= 1 ||
        header.cluster_bits < 9 ||
        header.cluster_bits > 16)
        goto fail;
    if (header.crypt_method > QCOW_CRYPT_AES)
        goto fail;
    s->crypt_method_header = header.crypt_method;
    if (s->crypt_method_header)
        bs->encrypted = 1;
    s->cluster_bits = header.cluster_bits;
    s->cluster_size = 1 << s->cluster_bits;
    s->cluster_sectors = 1 << (s->cluster_bits - 9);
    s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
    s->l2_size = 1 << s->l2_bits;
    bs->total_sectors = header.size / 512;
    s->csize_shift = (62 - (s->cluster_bits - 8));
    s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
    s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
    s->refcount_table_offset = header.refcount_table_offset;
    s->refcount_table_size =
        header.refcount_table_clusters << (s->cluster_bits - 3);

    s->snapshots_offset = header.snapshots_offset;
    s->nb_snapshots = header.nb_snapshots;

    /* read the level 1 table */
    s->l1_size = header.l1_size;
    shift = s->cluster_bits + s->l2_bits;
    s->l1_vm_state_index = (header.size + (1LL << shift) - 1) >> shift;
    /* the L1 table must contain at least enough entries to put
       header.size bytes */
    if (s->l1_size < s->l1_vm_state_index)
        goto fail;
    s->l1_table_offset = header.l1_table_offset;
    s->l1_table = qemu_malloc(s->l1_size * sizeof(uint64_t));
    if (!s->l1_table)
        goto fail;
    if (bdrv_pread(s->hd, s->l1_table_offset, s->l1_table, s->l1_size * sizeof(uint64_t)) !=
        s->l1_size * sizeof(uint64_t))
        goto fail;
    for(i = 0; i < s->l1_size; i++) {
        be64_to_cpus(&s->l1_table[i]);
    }
    /* alloc L2 cache */
    s->l2_cache = qemu_malloc(s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
    if (!s->l2_cache)
        goto fail;
    s->cluster_cache = qemu_malloc(s->cluster_size);
    if (!s->cluster_cache)
        goto fail;
    /* one more sector for decompressed data alignment */
    s->cluster_data = qemu_malloc(QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
                                  + 512);
    if (!s->cluster_data)
        goto fail;
    s->cluster_cache_offset = -1;

    if (refcount_init(bs) < 0)
        goto fail;

    scan_refcount(bs, &s->highest_alloc);

    /* read the backing file name */
    if (header.backing_file_offset != 0) {
        len = header.backing_file_size;
        if (len > 1023)
            len = 1023;
        if (bdrv_pread(s->hd, header.backing_file_offset, bs->backing_file, len) != len)
            goto fail;
        bs->backing_file[len] = '\0';
    }
    if (qcow_read_snapshots(bs) < 0)
        goto fail;

#ifdef DEBUG_ALLOC
    check_refcounts(bs);
#endif
    return 0;

 fail:
    qcow_free_snapshots(bs);
    refcount_close(bs);
    qemu_free(s->l1_table);
    qemu_free(s->l2_cache);
    qemu_free(s->cluster_cache);
    qemu_free(s->cluster_data);
    bdrv_delete(s->hd);
    return -1;
}
static int qcow_set_key(BlockDriverState *bs, const char *key)
{
    BDRVQcowState *s = bs->opaque;
    uint8_t keybuf[16];
    int len, i;

    memset(keybuf, 0, 16);
    len = strlen(key);
    if (len > 16)
        len = 16;
    /* XXX: we could compress the chars to 7 bits to increase
       entropy */
    for(i = 0; i < len; i++) {
        keybuf[i] = key[i];
    }
    s->crypt_method = s->crypt_method_header;

    if (AES_set_encrypt_key(keybuf, 128, &s->aes_encrypt_key) != 0)
        return -1;
    if (AES_set_decrypt_key(keybuf, 128, &s->aes_decrypt_key) != 0)
        return -1;
#if 0
    /* test */
    {
        uint8_t in[16];
        uint8_t out[16];
        uint8_t tmp[16];
        for(i = 0; i < 16; i++)
            in[i] = i;
        AES_encrypt(in, tmp, &s->aes_encrypt_key);
        AES_decrypt(tmp, out, &s->aes_decrypt_key);
        for(i = 0; i < 16; i++)
            printf(" %02x", tmp[i]);
        printf("\n");
        for(i = 0; i < 16; i++)
            printf(" %02x", out[i]);
        printf("\n");
    }
#endif
    return 0;
}
/* The crypt function is compatible with the Linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
static void encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                            uint8_t *out_buf, const uint8_t *in_buf,
                            int nb_sectors, int enc,
                            const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}
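
/*
 * Editor's sketch (illustrative, not from the original source): the IV for
 * each 512-byte sector is simply the little-endian sector number padded
 * with zeroes, which is what makes this scheme compatible with cryptoloop.
 * Decrypting a single sector by hand would look roughly like:
 *
 *   union { uint64_t ll[2]; uint8_t b[16]; } ivec;
 *   ivec.ll[0] = cpu_to_le64(sector_num);
 *   ivec.ll[1] = 0;
 *   AES_cbc_encrypt(buf, buf, 512, &s->aes_decrypt_key, ivec.b, 0);
 *
 * Because the IV depends only on the sector number, identical plaintext
 * sectors at different offsets still produce different ciphertext.
 */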
static int copy_sectors(BlockDriverState *bs, uint64_t start_sect,
                        uint64_t cluster_offset, int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    int n, ret;

    n = n_end - n_start;
    if (n <= 0)
        return 0;
    ret = qcow_read(bs, start_sect + n_start, s->cluster_data, n);
    if (ret < 0)
        return ret;
    if (s->crypt_method) {
        encrypt_sectors(s, start_sect + n_start,
                        s->cluster_data,
                        s->cluster_data, n, 1,
                        &s->aes_encrypt_key);
    }
    ret = bdrv_write(s->hd, (cluster_offset >> 9) + n_start,
                     s->cluster_data, n);
    if (ret < 0)
        return ret;
    return 0;
}
static void l2_cache_reset(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;

    memset(s->l2_cache, 0, s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
    memset(s->l2_cache_offsets, 0, L2_CACHE_SIZE * sizeof(uint64_t));
    memset(s->l2_cache_counts, 0, L2_CACHE_SIZE * sizeof(uint32_t));
}
static inline int l2_cache_new_entry(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    uint32_t min_count;
    int min_index, i;

    /* find a new entry in the least used one */
    min_index = 0;
    min_count = 0xffffffff;
    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (s->l2_cache_counts[i] < min_count) {
            min_count = s->l2_cache_counts[i];
            min_index = i;
        }
    }
    return min_index;
}
static int64_t align_offset(int64_t offset, int n)
{
    /* round offset up to a multiple of n (n must be a power of two) */
    offset = (offset + n - 1) & ~(n - 1);
    return offset;
}
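
/*
 * Illustrative example (editor's addition): align_offset(5, 512) == 512
 * and align_offset(1024, 512) == 1024.  The mask trick only works when n
 * is a power of two; the callers in this file pass 8 or the cluster size.
 */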
static int grow_l1_table(BlockDriverState *bs, int min_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size, new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    uint64_t new_l1_table_offset;
    uint8_t data[12];

    new_l1_size = s->l1_size;
    if (min_size <= new_l1_size)
        return 0;
    while (min_size > new_l1_size) {
        new_l1_size = (new_l1_size * 3 + 1) / 2;
    }
#ifdef DEBUG_ALLOC2
    printf("grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = qemu_mallocz(new_l1_size2);
    if (!new_l1_table)
        return -ENOMEM;
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    new_l1_table_offset = alloc_clusters(bs, new_l1_size2);

    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite(s->hd, new_l1_table_offset, new_l1_table, new_l1_size2);
    if (ret != new_l1_size2)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

    /* set new table */
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64w((uint64_t*)(data + 4), new_l1_table_offset);
    if (bdrv_pwrite(s->hd, offsetof(QCowHeader, l1_size), data,
                    sizeof(data)) != sizeof(data))
        goto fail;
    qemu_free(s->l1_table);
    free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
 fail:
    qemu_free(s->l1_table);
    return -EIO;
}
/*
 * seek l2_offset in the l2_cache table
 * if not found, return NULL;
 * if found,
 *   increment the l2 cache hit count of the entry,
 *   halve all counters if the hit counter overflows,
 *   and return the pointer to the l2 cache entry
 */
static uint64_t *seek_l2_table(BDRVQcowState *s, uint64_t l2_offset)
{
    int i, j;

    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (l2_offset == s->l2_cache_offsets[i]) {
            /* increment the hit count */
            if (++s->l2_cache_counts[i] == 0xffffffff) {
                for(j = 0; j < L2_CACHE_SIZE; j++) {
                    s->l2_cache_counts[j] >>= 1;
                }
            }
            return s->l2_cache + (i << s->l2_bits);
        }
    }
    return NULL;
}
/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns a pointer to the L2 table on success, or NULL if the read from
 * the image file failed.
 */
static uint64_t *l2_load(BlockDriverState *bs, uint64_t l2_offset)
{
    BDRVQcowState *s = bs->opaque;
    int min_index;
    uint64_t *l2_table;

    /* seek if the table for the given offset is in the cache */

    l2_table = seek_l2_table(s, l2_offset);
    if (l2_table != NULL)
        return l2_table;

    /* not found: load a new entry in the least used one */

    min_index = l2_cache_new_entry(bs);
    l2_table = s->l2_cache + (min_index << s->l2_bits);
    if (bdrv_pread(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) !=
        s->l2_size * sizeof(uint64_t))
        return NULL;
    s->l2_cache_offsets[min_index] = l2_offset;
    s->l2_cache_counts[min_index] = 1;

    return l2_table;
}
/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L2 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated
 * one. Otherwise the new table is initialized with zeros.
 */
static uint64_t *l2_allocate(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    int min_index;
    uint64_t old_l2_offset, tmp;
    uint64_t *l2_table, l2_offset;

    old_l2_offset = s->l1_table[l1_index];

    /* allocate a new l2 entry */

    l2_offset = alloc_clusters(bs, s->l2_size * sizeof(uint64_t));

    /* update the L1 entry */

    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;

    tmp = cpu_to_be64(l2_offset | QCOW_OFLAG_COPIED);
    if (bdrv_pwrite(s->hd, s->l1_table_offset + l1_index * sizeof(tmp),
                    &tmp, sizeof(tmp)) != sizeof(tmp))
        return NULL;

    /* allocate a new entry in the l2 cache */

    min_index = l2_cache_new_entry(bs);
    l2_table = s->l2_cache + (min_index << s->l2_bits);

    if (old_l2_offset == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        /* if there was an old l2 table, read it from the disk */
        if (bdrv_pread(s->hd, old_l2_offset,
                       l2_table, s->l2_size * sizeof(uint64_t)) !=
            s->l2_size * sizeof(uint64_t))
            return NULL;
    }
    /* write the l2 table to the file */
    if (bdrv_pwrite(s->hd, l2_offset,
                    l2_table, s->l2_size * sizeof(uint64_t)) !=
        s->l2_size * sizeof(uint64_t))
        return NULL;

    /* update the l2 cache entry */

    s->l2_cache_offsets[min_index] = l2_offset;
    s->l2_cache_counts[min_index] = 1;

    return l2_table;
}
static int size_to_clusters(BDRVQcowState *s, int64_t size)
{
    return (size + (s->cluster_size - 1)) >> s->cluster_bits;
}
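
/*
 * Worked example (editor's addition): with 4 KB clusters
 * (cluster_bits = 12), size_to_clusters(s, 4096) == 1 and
 * size_to_clusters(s, 4097) == 2 -- adding cluster_size - 1 rounds any
 * partial tail cluster up before the shift.
 */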
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t mask)
{
    int i;
    uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;

    if (!offset)
        return 0;

    for (i = start; i < start + nb_clusters; i++)
        if (offset + i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))
            break;

    return (i - start);
}

static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i = 0;

    while(nb_clusters-- && l2_table[i] == 0)
        i++;

    return i;
}
/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * on entry, *num is the number of contiguous clusters we'd like to
 * access following offset.
 *
 * on exit, *num is the number of contiguous clusters we can read.
 *
 * Returns the cluster offset if the offset is found,
 * and 0 otherwise.
 */
static uint64_t get_cluster_offset(BlockDriverState *bs,
                                   uint64_t offset, int *num)
{
    BDRVQcowState *s = bs->opaque;
    int l1_index, l2_index;
    uint64_t l2_offset, *l2_table, cluster_offset;
    int l1_bits, c;
    int index_in_cluster, nb_available, nb_needed, nb_clusters;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1 << l1_bits) - (offset & ((1 << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size)
        goto out;

    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (!l2_offset)
        goto out;

    /* load the l2 table in memory */

    l2_offset &= ~QCOW_OFLAG_COPIED;
    l2_table = l2_load(bs, l2_offset);
    if (l2_table == NULL)
        goto out;

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    if (!cluster_offset) {
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
    } else {
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);
    }

    nb_available = (c * s->cluster_sectors);
out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    return cluster_offset & ~QCOW_OFLAG_COPIED;
}
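
/*
 * Usage sketch (editor's addition; the caller shown is hypothetical):
 * translating guest sectors to a host file offset with
 * get_cluster_offset() follows this pattern, assuming an uncompressed,
 * allocated cluster:
 *
 *   int n = nb_sectors;
 *   uint64_t co = get_cluster_offset(bs, sector_num << 9, &n);
 *   int idx = sector_num & (s->cluster_sectors - 1);
 *   if (co && !(co & QCOW_OFLAG_COMPRESSED)) {
 *       bdrv_pread(s->hd, co + idx * 512, buf, n * 512);
 *   }
 *
 * which is exactly what qcow_read() does below; n comes back clamped to
 * the number of contiguous sectors that share this mapping.
 */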
/*
 * free_any_clusters
 *
 * free clusters according to their type: compressed or not
 */
static void free_any_clusters(BlockDriverState *bs,
                              uint64_t cluster_offset, int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;

    /* free the cluster */

    if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
        int nb_csectors;
        nb_csectors = ((cluster_offset >> s->csize_shift) &
                       s->csize_mask) + 1;
        free_clusters(bs, (cluster_offset & s->cluster_offset_mask) & ~511,
                      nb_csectors * 512);
        return;
    }

    free_clusters(bs, cluster_offset, nb_clusters << s->cluster_bits);

    return;
}
/*
 * get_cluster_table
 *
 * for a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * the l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are given to the caller.
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             uint64_t *new_l2_offset,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    int l1_index, l2_index, ret;
    uint64_t l2_offset, *l2_table;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = grow_l1_table(bs, l1_index + 1);
        if (ret < 0)
            return 0;
    }
    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

    if (l2_offset & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        l2_offset &= ~QCOW_OFLAG_COPIED;
        l2_table = l2_load(bs, l2_offset);
        if (l2_table == NULL)
            return 0;
    } else {
        if (l2_offset)
            free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        l2_table = l2_allocate(bs, l1_index);
        if (l2_table == NULL)
            return 0;
        l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_offset = l2_offset;
    *new_l2_index = l2_index;

    return 1;
}
/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Return the cluster offset if successful,
 * and 0 otherwise.
 */
static uint64_t alloc_compressed_cluster_offset(BlockDriverState *bs,
                                                uint64_t offset,
                                                int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table, cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret == 0)
        return 0;

    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & QCOW_OFLAG_COPIED)
        return cluster_offset & ~QCOW_OFLAG_COPIED;

    if (cluster_offset)
        free_any_clusters(bs, cluster_offset, 1);

    cluster_offset = alloc_bytes(bs, compressed_size);
    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    if (bdrv_pwrite(s->hd,
                    l2_offset + l2_index * sizeof(uint64_t),
                    l2_table + l2_index,
                    sizeof(uint64_t)) != sizeof(uint64_t))
        return 0;

    return cluster_offset;
}
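
/*
 * Editor's note (illustrative, using desc as a hypothetical name for the
 * 64-bit entry built above): the descriptor packs the byte offset and the
 * sector span of the compressed data into one word.  With
 * cluster_bits = 12, csize_shift = 62 - 4 = 58 and csize_mask = 15, so up
 * to 16 sectors (8 KB) can be described.  Decoding, as
 * decompress_cluster() does later:
 *
 *   uint64_t coffset = desc & s->cluster_offset_mask;
 *   int nb_csectors  = ((desc >> s->csize_shift) & s->csize_mask) + 1;
 *
 * The stored count is the number of sector boundaries crossed by the
 * compressed data, hence the + 1 on the read side.
 */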
typedef struct QCowL2Meta
{
    uint64_t offset;
    int n_start;
    int nb_available;
    int nb_clusters;
} QCowL2Meta;

static int alloc_cluster_link_l2(BlockDriverState *bs, uint64_t cluster_offset,
                                 QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, start_sect, l2_offset, *l2_table;

    if (m->nb_clusters == 0)
        return 0;

    if (!(old_cluster = qemu_malloc(m->nb_clusters * sizeof(uint64_t))))
        return -ENOMEM;

    /* copy content of unmodified sectors */
    start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;
    if (m->n_start) {
        ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
        if (ret < 0)
            goto err;
    }

    if (m->nb_available & (s->cluster_sectors - 1)) {
        uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
        ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
                m->nb_available - end, s->cluster_sectors);
        if (ret < 0)
            goto err;
    }

    ret = -EIO;
    /* update L2 table */
    if (!get_cluster_table(bs, m->offset, &l2_table, &l2_offset, &l2_index))
        goto err;

    for (i = 0; i < m->nb_clusters; i++) {
        if(l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    if (bdrv_pwrite(s->hd, l2_offset + l2_index * sizeof(uint64_t),
                l2_table + l2_index, m->nb_clusters * sizeof(uint64_t)) !=
            m->nb_clusters * sizeof(uint64_t))
        goto err;

    for (i = 0; i < j; i++)
        free_any_clusters(bs, old_cluster[i], 1);

    ret = 0;
err:
    qemu_free(old_cluster);
    return ret;
}
/*
 * alloc_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new cluster.
 *
 * Return the cluster offset if successful,
 * and 0 otherwise.
 */
static uint64_t alloc_cluster_offset(BlockDriverState *bs,
                                     uint64_t offset,
                                     int n_start, int n_end,
                                     int *num, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t l2_offset, *l2_table, cluster_offset;
    int nb_clusters, i = 0;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
    if (ret == 0)
        return 0;

    nb_clusters = size_to_clusters(s, n_end << 9);

    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /* We keep all QCOW_OFLAG_COPIED clusters */

    if (cluster_offset & QCOW_OFLAG_COPIED) {
        nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, 0);

        cluster_offset &= ~QCOW_OFLAG_COPIED;
        m->nb_clusters = 0;

        goto out;
    }

    /* for the moment, multiple compressed clusters are not managed */

    if (cluster_offset & QCOW_OFLAG_COMPRESSED)
        nb_clusters = 1;

    /* how many available clusters ? */

    while (i < nb_clusters) {
        i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
                &l2_table[l2_index], i, 0);

        if(be64_to_cpu(l2_table[l2_index + i]))
            break;

        i += count_contiguous_free_clusters(nb_clusters - i,
                &l2_table[l2_index + i]);

        cluster_offset = be64_to_cpu(l2_table[l2_index + i]);

        if ((cluster_offset & QCOW_OFLAG_COPIED) ||
                (cluster_offset & QCOW_OFLAG_COMPRESSED))
            break;
    }
    nb_clusters = i;

    /* allocate a new cluster */

    cluster_offset = alloc_clusters(bs, nb_clusters * s->cluster_size);

    /* save info needed for meta data update */
    m->offset = offset;
    m->n_start = n_start;
    m->nb_clusters = nb_clusters;

out:
    m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end);

    *num = m->nb_available - n_start;

    return cluster_offset;
}
static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num,
                             int nb_sectors, int *pnum)
{
    uint64_t cluster_offset;

    *pnum = nb_sectors;
    cluster_offset = get_cluster_offset(bs, sector_num << 9, pnum);

    return (cluster_offset != 0);
}
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}
static int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset)
{
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        ret = bdrv_read(s->hd, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return -1;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -1;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}
/* handle reading after the end of the backing file */
static int backing_read1(BlockDriverState *bs,
                         int64_t sector_num, uint8_t *buf, int nb_sectors)
{
    int n1;

    if ((sector_num + nb_sectors) <= bs->total_sectors)
        return nb_sectors;
    if (sector_num >= bs->total_sectors)
        n1 = 0;
    else
        n1 = bs->total_sectors - sector_num;
    memset(buf + n1 * 512, 0, 512 * (nb_sectors - n1));
    return n1;
}
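
/*
 * Worked example (editor's addition): for a backing file of 100 sectors,
 * a request for sectors 90..109 (nb_sectors = 20) returns n1 = 10 and
 * zero-fills the last 10 sectors of the buffer; a request entirely past
 * the end returns n1 = 0 and zero-fills everything.  The caller then only
 * submits the first n1 sectors to the backing device.
 */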
static int qcow_read(BlockDriverState *bs, int64_t sector_num,
                     uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    int ret, index_in_cluster, n, n1;
    uint64_t cluster_offset;

    while (nb_sectors > 0) {
        n = nb_sectors;
        cluster_offset = get_cluster_offset(bs, sector_num << 9, &n);
        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        if (!cluster_offset) {
            if (bs->backing_hd) {
                /* read from the base image */
                n1 = backing_read1(bs->backing_hd, sector_num, buf, n);
                if (n1 > 0) {
                    ret = bdrv_read(bs->backing_hd, sector_num, buf, n1);
                    if (ret < 0)
                        return -1;
                }
            } else {
                memset(buf, 0, 512 * n);
            }
        } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            if (decompress_cluster(s, cluster_offset) < 0)
                return -1;
            memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n);
        } else {
            ret = bdrv_pread(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512);
            if (ret != n * 512)
                return -1;
            if (s->crypt_method) {
                encrypt_sectors(s, sector_num, buf, buf, n, 0,
                                &s->aes_decrypt_key);
            }
        }
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;
    }
    return 0;
}
static int qcow_write(BlockDriverState *bs, int64_t sector_num,
                      const uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    int ret, index_in_cluster, n;
    uint64_t cluster_offset;
    int n_end;
    QCowL2Meta l2meta;

    while (nb_sectors > 0) {
        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        n_end = index_in_cluster + nb_sectors;
        if (s->crypt_method &&
            n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors)
            n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
        cluster_offset = alloc_cluster_offset(bs, sector_num << 9,
                                              index_in_cluster,
                                              n_end, &n, &l2meta);
        if (!cluster_offset)
            return -1;
        if (s->crypt_method) {
            encrypt_sectors(s, sector_num, s->cluster_data, buf, n, 1,
                            &s->aes_encrypt_key);
            ret = bdrv_pwrite(s->hd, cluster_offset + index_in_cluster * 512,
                              s->cluster_data, n * 512);
        } else {
            ret = bdrv_pwrite(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512);
        }
        if (ret != n * 512 || alloc_cluster_link_l2(bs, cluster_offset, &l2meta) < 0) {
            free_any_clusters(bs, cluster_offset, l2meta.nb_clusters);
            return -1;
        }
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;
    }
    s->cluster_cache_offset = -1; /* disable compressed cache */
    return 0;
}
typedef struct QCowAIOCB {
    BlockDriverAIOCB common;
    int64_t sector_num;
    uint8_t *buf;
    int nb_sectors;
    int n;
    uint64_t cluster_offset;
    uint8_t *cluster_data;
    BlockDriverAIOCB *hd_aiocb;
    QEMUBH *bh;
    QCowL2Meta l2meta;
} QCowAIOCB;

static void qcow_aio_read_cb(void *opaque, int ret);
static void qcow_aio_read_bh(void *opaque)
{
    QCowAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qcow_aio_read_cb(opaque, 0);
}

static int qcow_schedule_bh(QEMUBHFunc *cb, QCowAIOCB *acb)
{
    if (acb->bh)
        return -EIO;

    acb->bh = qemu_bh_new(cb, acb);
    if (!acb->bh)
        return -EIO;

    qemu_bh_schedule(acb->bh);

    return 0;
}
static void qcow_aio_read_cb(void *opaque, int ret)
{
    QCowAIOCB *acb = opaque;
    BlockDriverState *bs = acb->common.bs;
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster, n1;

    acb->hd_aiocb = NULL;
    if (ret < 0) {
fail:
        acb->common.cb(acb->common.opaque, ret);
        qemu_aio_release(acb);
        return;
    }

    /* post process the read buffer */
    if (!acb->cluster_offset) {
        /* nothing to do */
    } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
        /* nothing to do */
    } else {
        if (s->crypt_method) {
            encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
                            acb->n, 0,
                            &s->aes_decrypt_key);
        }
    }

    acb->nb_sectors -= acb->n;
    acb->sector_num += acb->n;
    acb->buf += acb->n * 512;

    if (acb->nb_sectors == 0) {
        /* request completed */
        acb->common.cb(acb->common.opaque, 0);
        qemu_aio_release(acb);
        return;
    }

    /* prepare next AIO request */
    acb->n = acb->nb_sectors;
    acb->cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, &acb->n);
    index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);

    if (!acb->cluster_offset) {
        if (bs->backing_hd) {
            /* read from the base image */
            n1 = backing_read1(bs->backing_hd, acb->sector_num,
                               acb->buf, acb->n);
            if (n1 > 0) {
                acb->hd_aiocb = bdrv_aio_read(bs->backing_hd, acb->sector_num,
                                              acb->buf, acb->n, qcow_aio_read_cb, acb);
                if (acb->hd_aiocb == NULL)
                    goto fail;
            } else {
                ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
                if (ret < 0)
                    goto fail;
            }
        } else {
            /* Note: in this case, no need to wait */
            memset(acb->buf, 0, 512 * acb->n);
            ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
            if (ret < 0)
                goto fail;
        }
    } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
        /* add AIO support for compressed blocks ? */
        if (decompress_cluster(s, acb->cluster_offset) < 0)
            goto fail;
        memcpy(acb->buf,
               s->cluster_cache + index_in_cluster * 512, 512 * acb->n);
        ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
        if (ret < 0)
            goto fail;
    } else {
        if ((acb->cluster_offset & 511) != 0) {
            ret = -EIO;
            goto fail;
        }
        acb->hd_aiocb = bdrv_aio_read(s->hd,
                                      (acb->cluster_offset >> 9) + index_in_cluster,
                                      acb->buf, acb->n, qcow_aio_read_cb, acb);
        if (acb->hd_aiocb == NULL)
            goto fail;
    }
}
static QCowAIOCB *qcow_aio_setup(BlockDriverState *bs,
        int64_t sector_num, uint8_t *buf, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    QCowAIOCB *acb;

    acb = qemu_aio_get(bs, cb, opaque);
    if (!acb)
        return NULL;
    acb->hd_aiocb = NULL;
    acb->sector_num = sector_num;
    acb->buf = buf;
    acb->nb_sectors = nb_sectors;
    acb->n = 0;
    acb->cluster_offset = 0;
    acb->l2meta.nb_clusters = 0;
    return acb;
}

static BlockDriverAIOCB *qcow_aio_read(BlockDriverState *bs,
        int64_t sector_num, uint8_t *buf, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    QCowAIOCB *acb;

    acb = qcow_aio_setup(bs, sector_num, buf, nb_sectors, cb, opaque);
    if (!acb)
        return NULL;

    qcow_aio_read_cb(acb, 0);
    return &acb->common;
}
static void qcow_aio_write_cb(void *opaque, int ret)
{
    QCowAIOCB *acb = opaque;
    BlockDriverState *bs = acb->common.bs;
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster;
    const uint8_t *src_buf;
    int n_end;

    acb->hd_aiocb = NULL;

    if (ret < 0) {
    fail:
        acb->common.cb(acb->common.opaque, ret);
        qemu_aio_release(acb);
        return;
    }

    if (alloc_cluster_link_l2(bs, acb->cluster_offset, &acb->l2meta) < 0) {
        free_any_clusters(bs, acb->cluster_offset, acb->l2meta.nb_clusters);
        goto fail;
    }

    acb->nb_sectors -= acb->n;
    acb->sector_num += acb->n;
    acb->buf += acb->n * 512;

    if (acb->nb_sectors == 0) {
        /* request completed */
        acb->common.cb(acb->common.opaque, 0);
        qemu_aio_release(acb);
        return;
    }

    index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
    n_end = index_in_cluster + acb->nb_sectors;
    if (s->crypt_method &&
        n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors)
        n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;

    acb->cluster_offset = alloc_cluster_offset(bs, acb->sector_num << 9,
                                               index_in_cluster,
                                               n_end, &acb->n, &acb->l2meta);
    if (!acb->cluster_offset || (acb->cluster_offset & 511) != 0) {
        ret = -EIO;
        goto fail;
    }
    if (s->crypt_method) {
        if (!acb->cluster_data) {
            acb->cluster_data = qemu_mallocz(QCOW_MAX_CRYPT_CLUSTERS *
                                             s->cluster_size);
            if (!acb->cluster_data) {
                ret = -ENOMEM;
                goto fail;
            }
        }
        encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf,
                        acb->n, 1, &s->aes_encrypt_key);
        src_buf = acb->cluster_data;
    } else {
        src_buf = acb->buf;
    }
    acb->hd_aiocb = bdrv_aio_write(s->hd,
                                   (acb->cluster_offset >> 9) + index_in_cluster,
                                   src_buf, acb->n,
                                   qcow_aio_write_cb, acb);
    if (acb->hd_aiocb == NULL)
        goto fail;
}

static BlockDriverAIOCB *qcow_aio_write(BlockDriverState *bs,
        int64_t sector_num, const uint8_t *buf, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BDRVQcowState *s = bs->opaque;
    QCowAIOCB *acb;

    s->cluster_cache_offset = -1; /* disable compressed cache */

    acb = qcow_aio_setup(bs, sector_num, (uint8_t*)buf, nb_sectors, cb, opaque);
    if (!acb)
        return NULL;

    qcow_aio_write_cb(acb, 0);
    return &acb->common;
}
static void qcow_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QCowAIOCB *acb = (QCowAIOCB *)blockacb;
    if (acb->hd_aiocb)
        bdrv_aio_cancel(acb->hd_aiocb);
    qemu_aio_release(acb);
}

static void qcow_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    qemu_free(s->l1_table);
    qemu_free(s->l2_cache);
    qemu_free(s->cluster_cache);
    qemu_free(s->cluster_data);
    refcount_close(bs);
    bdrv_delete(s->hd);
}
/* XXX: use std qcow open function ? */
typedef struct QCowCreateState {
    int cluster_size;
    int cluster_bits;
    uint16_t *refcount_block;
    uint64_t *refcount_table;
    int64_t l1_table_offset;
    int64_t refcount_table_offset;
    int64_t refcount_block_offset;
} QCowCreateState;

static void create_refcount_update(QCowCreateState *s,
                                   int64_t offset, int64_t size)
{
    int refcount;
    int64_t start, last, cluster_offset;
    uint16_t *p;

    start = offset & ~(s->cluster_size - 1);
    last = (offset + size - 1) & ~(s->cluster_size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size) {
        p = &s->refcount_block[cluster_offset >> s->cluster_bits];
        refcount = be16_to_cpu(*p);
        refcount++;
        *p = cpu_to_be16(refcount);
    }
}
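
/*
 * Worked example (editor's addition): with the 4 KB clusters that
 * qcow_create() uses below, create_refcount_update(s, 0, header_size)
 * touches only cluster 0, since header_size is well under one cluster.
 * A range crossing a boundary, say offset 4000 with size 200, would bump
 * clusters 0 and 1: start and last are both rounded down to cluster
 * boundaries before the loop.
 */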
static int qcow_create(const char *filename, int64_t total_size,
                       const char *backing_file, int flags)
{
    int fd, header_size, backing_filename_len, l1_size, i, shift, l2_bits;
    QCowHeader header;
    uint64_t tmp, offset;
    QCowCreateState s1, *s = &s1;

    memset(s, 0, sizeof(*s));

    fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0644);
    if (fd < 0)
        return -1;
    memset(&header, 0, sizeof(header));
    header.magic = cpu_to_be32(QCOW_MAGIC);
    header.version = cpu_to_be32(QCOW_VERSION);
    header.size = cpu_to_be64(total_size * 512);
    header_size = sizeof(header);
    backing_filename_len = 0;
    if (backing_file) {
        header.backing_file_offset = cpu_to_be64(header_size);
        backing_filename_len = strlen(backing_file);
        header.backing_file_size = cpu_to_be32(backing_filename_len);
        header_size += backing_filename_len;
    }
    s->cluster_bits = 12;  /* 4 KB clusters */
    s->cluster_size = 1 << s->cluster_bits;
    header.cluster_bits = cpu_to_be32(s->cluster_bits);
    header_size = (header_size + 7) & ~7;
    if (flags & BLOCK_FLAG_ENCRYPT) {
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
    } else {
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
    }
    l2_bits = s->cluster_bits - 3;
    shift = s->cluster_bits + l2_bits;
    l1_size = (((total_size * 512) + (1LL << shift) - 1) >> shift);
    offset = align_offset(header_size, s->cluster_size);
    s->l1_table_offset = offset;
    header.l1_table_offset = cpu_to_be64(s->l1_table_offset);
    header.l1_size = cpu_to_be32(l1_size);
    offset += align_offset(l1_size * sizeof(uint64_t), s->cluster_size);

    s->refcount_table = qemu_mallocz(s->cluster_size);
    if (!s->refcount_table)
        goto fail;
    s->refcount_block = qemu_mallocz(s->cluster_size);
    if (!s->refcount_block)
        goto fail;

    s->refcount_table_offset = offset;
    header.refcount_table_offset = cpu_to_be64(offset);
    header.refcount_table_clusters = cpu_to_be32(1);
    offset += s->cluster_size;

    s->refcount_table[0] = cpu_to_be64(offset);
    s->refcount_block_offset = offset;
    offset += s->cluster_size;

    /* update refcounts */
    create_refcount_update(s, 0, header_size);
    create_refcount_update(s, s->l1_table_offset, l1_size * sizeof(uint64_t));
    create_refcount_update(s, s->refcount_table_offset, s->cluster_size);
    create_refcount_update(s, s->refcount_block_offset, s->cluster_size);

    /* write all the data */
    write(fd, &header, sizeof(header));
    if (backing_file) {
        write(fd, backing_file, backing_filename_len);
    }
    lseek(fd, s->l1_table_offset, SEEK_SET);
    tmp = 0;
    for(i = 0; i < l1_size; i++) {
        write(fd, &tmp, sizeof(tmp));
    }
    lseek(fd, s->refcount_table_offset, SEEK_SET);
    write(fd, s->refcount_table, s->cluster_size);

    lseek(fd, s->refcount_block_offset, SEEK_SET);
    write(fd, s->refcount_block, s->cluster_size);

    qemu_free(s->refcount_table);
    qemu_free(s->refcount_block);
    close(fd);
    return 0;
 fail:
    qemu_free(s->refcount_table);
    qemu_free(s->refcount_block);
    close(fd);
    return -ENOMEM;
}
static int qcow_make_empty(BlockDriverState *bs)
{
#if 0
    /* XXX: not correct */
    BDRVQcowState *s = bs->opaque;
    uint32_t l1_length = s->l1_size * sizeof(uint64_t);
    int ret;

    memset(s->l1_table, 0, l1_length);
    if (bdrv_pwrite(s->hd, s->l1_table_offset, s->l1_table, l1_length) < 0)
        return -1;
    ret = bdrv_truncate(s->hd, s->l1_table_offset + l1_length);
    if (ret < 0)
        return ret;

    l2_cache_reset(bs);
#endif
    return 0;
}
/* XXX: put compressed sectors first, then all the cluster aligned
   tables to avoid losing bytes in alignment */
static int qcow_write_compressed(BlockDriverState *bs, int64_t sector_num,
                                 const uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    z_stream strm;
    int ret, out_len;
    uint8_t *out_buf;
    uint64_t cluster_offset;

    if (nb_sectors == 0) {
        /* align end of file to a sector boundary to ease reading with
           sector based I/Os */
        cluster_offset = bdrv_getlength(s->hd);
        cluster_offset = (cluster_offset + 511) & ~511;
        bdrv_truncate(s->hd, cluster_offset);
        return 0;
    }

    if (nb_sectors != s->cluster_sectors)
        return -EINVAL;

    out_buf = qemu_malloc(s->cluster_size + (s->cluster_size / 1000) + 128);
    if (!out_buf)
        return -ENOMEM;

    /* best compression, small window, no zlib header */
    memset(&strm, 0, sizeof(strm));
    ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION,
                       Z_DEFLATED, -12,
                       9, Z_DEFAULT_STRATEGY);
    if (ret != 0) {
        qemu_free(out_buf);
        return -1;
    }

    strm.avail_in = s->cluster_size;
    strm.next_in = (uint8_t *)buf;
    strm.avail_out = s->cluster_size;
    strm.next_out = out_buf;

    ret = deflate(&strm, Z_FINISH);
    if (ret != Z_STREAM_END && ret != Z_OK) {
        qemu_free(out_buf);
        deflateEnd(&strm);
        return -1;
    }
    out_len = strm.next_out - out_buf;

    deflateEnd(&strm);

    if (ret != Z_STREAM_END || out_len >= s->cluster_size) {
        /* could not compress: write normal cluster */
        qcow_write(bs, sector_num, buf, s->cluster_sectors);
    } else {
        cluster_offset = alloc_compressed_cluster_offset(bs, sector_num << 9,
                                                         out_len);
        if (!cluster_offset)
            return -1;
        cluster_offset &= s->cluster_offset_mask;
        if (bdrv_pwrite(s->hd, cluster_offset, out_buf, out_len) != out_len) {
            qemu_free(out_buf);
            return -1;
        }
    }

    qemu_free(out_buf);
    return 0;
}
static void qcow_flush(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    bdrv_flush(s->hd);
}

static int qcow_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQcowState *s = bs->opaque;
    bdi->cluster_size = s->cluster_size;
    bdi->vm_state_offset = (int64_t)s->l1_vm_state_index <<
                           (s->cluster_bits + s->l2_bits);
    bdi->highest_alloc = s->highest_alloc << s->cluster_bits;
    return 0;
}
/*********************************************************/
/* snapshot support */

/* update the refcounts of snapshots and the copied flag */
static int update_snapshot_refcount(BlockDriverState *bs,
                                    int64_t l1_table_offset,
                                    int l1_size,
                                    int addend)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2, l1_allocated;
    int64_t old_offset, old_l2_offset;
    int l2_size, i, j, l1_modified, l2_modified, nb_csectors, refcount;

    l2_cache_reset(bs);

    l2_table = NULL;
    l1_table = NULL;
    l1_size2 = l1_size * sizeof(uint64_t);
    l1_allocated = 0;
    if (l1_table_offset != s->l1_table_offset) {
        l1_table = qemu_malloc(l1_size2);
        if (!l1_table)
            goto fail;
        l1_allocated = 1;
        if (bdrv_pread(s->hd, l1_table_offset,
                       l1_table, l1_size2) != l1_size2)
            goto fail;
        for(i = 0; i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);
    } else {
        assert(l1_size == s->l1_size);
        l1_table = s->l1_table;
        l1_allocated = 0;
    }

    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = qemu_malloc(l2_size);
    if (!l2_table)
        goto fail;
    l1_modified = 0;
    for(i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            old_l2_offset = l2_offset;
            l2_offset &= ~QCOW_OFLAG_COPIED;
            l2_modified = 0;
            if (bdrv_pread(s->hd, l2_offset, l2_table, l2_size) != l2_size)
                goto fail;
            for(j = 0; j < s->l2_size; j++) {
                offset = be64_to_cpu(l2_table[j]);
                if (offset != 0) {
                    old_offset = offset;
                    offset &= ~QCOW_OFLAG_COPIED;
                    if (offset & QCOW_OFLAG_COMPRESSED) {
                        nb_csectors = ((offset >> s->csize_shift) &
                                       s->csize_mask) + 1;
                        if (addend != 0)
                            update_refcount(bs, (offset & s->cluster_offset_mask) & ~511,
                                            nb_csectors * 512, addend);
                        /* compressed clusters are never modified */
                        refcount = 2;
                    } else {
                        if (addend != 0) {
                            refcount = update_cluster_refcount(bs, offset >> s->cluster_bits, addend);
                        } else {
                            refcount = get_refcount(bs, offset >> s->cluster_bits);
                        }
                    }

                    if (refcount == 1) {
                        offset |= QCOW_OFLAG_COPIED;
                    }
                    if (offset != old_offset) {
                        l2_table[j] = cpu_to_be64(offset);
                        l2_modified = 1;
                    }
                }
            }
            if (l2_modified) {
                if (bdrv_pwrite(s->hd,
                                l2_offset, l2_table, l2_size) != l2_size)
                    goto fail;
            }

            if (addend != 0) {
                refcount = update_cluster_refcount(bs, l2_offset >> s->cluster_bits, addend);
            } else {
                refcount = get_refcount(bs, l2_offset >> s->cluster_bits);
            }
            if (refcount == 1) {
                l2_offset |= QCOW_OFLAG_COPIED;
            }
            if (l2_offset != old_l2_offset) {
                l1_table[i] = l2_offset;
                l1_modified = 1;
            }
        }
    }
    if (l1_modified) {
        for(i = 0; i < l1_size; i++)
            cpu_to_be64s(&l1_table[i]);
        if (bdrv_pwrite(s->hd, l1_table_offset, l1_table,
                        l1_size2) != l1_size2)
            goto fail;
        for(i = 0; i < l1_size; i++)
            be64_to_cpus(&l1_table[i]);
    }
    if (l1_allocated)
        qemu_free(l1_table);
    qemu_free(l2_table);
    return 0;
 fail:
    if (l1_allocated)
        qemu_free(l1_table);
    qemu_free(l2_table);
    return -EIO;
}
static void qcow_free_snapshots(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int i;

    for(i = 0; i < s->nb_snapshots; i++) {
        qemu_free(s->snapshots[i].name);
        qemu_free(s->snapshots[i].id_str);
    }
    qemu_free(s->snapshots);
    s->snapshots = NULL;
    s->nb_snapshots = 0;
}
static int qcow_read_snapshots(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    QCowSnapshotHeader h;
    QCowSnapshot *sn;
    int i, id_str_size, name_size;
    int64_t offset;
    uint32_t extra_data_size;

    if (!s->nb_snapshots) {
        s->snapshots = NULL;
        s->snapshots_size = 0;
        return 0;
    }

    offset = s->snapshots_offset;
    s->snapshots = qemu_mallocz(s->nb_snapshots * sizeof(QCowSnapshot));
    if (!s->snapshots)
        goto fail;
    for(i = 0; i < s->nb_snapshots; i++) {
        offset = align_offset(offset, 8);
        if (bdrv_pread(s->hd, offset, &h, sizeof(h)) != sizeof(h))
            goto fail;
        offset += sizeof(h);
        sn = s->snapshots + i;
        sn->l1_table_offset = be64_to_cpu(h.l1_table_offset);
        sn->l1_size = be32_to_cpu(h.l1_size);
        sn->vm_state_size = be32_to_cpu(h.vm_state_size);
        sn->date_sec = be32_to_cpu(h.date_sec);
        sn->date_nsec = be32_to_cpu(h.date_nsec);
        sn->vm_clock_nsec = be64_to_cpu(h.vm_clock_nsec);
        extra_data_size = be32_to_cpu(h.extra_data_size);

        id_str_size = be16_to_cpu(h.id_str_size);
        name_size = be16_to_cpu(h.name_size);

        offset += extra_data_size;

        sn->id_str = qemu_malloc(id_str_size + 1);
        if (!sn->id_str)
            goto fail;
        if (bdrv_pread(s->hd, offset, sn->id_str, id_str_size) != id_str_size)
            goto fail;
        offset += id_str_size;
        sn->id_str[id_str_size] = '\0';

        sn->name = qemu_malloc(name_size + 1);
        if (!sn->name)
            goto fail;
        if (bdrv_pread(s->hd, offset, sn->name, name_size) != name_size)
            goto fail;
        offset += name_size;
        sn->name[name_size] = '\0';
    }
    s->snapshots_size = offset - s->snapshots_offset;
    return 0;
 fail:
    qcow_free_snapshots(bs);
    return -1;
}
/* add at the end of the file a new list of snapshots */
static int qcow_write_snapshots(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    QCowSnapshot *sn;
    QCowSnapshotHeader h;
    int i, name_size, id_str_size, snapshots_size;
    uint64_t data64;
    uint32_t data32;
    int64_t offset, snapshots_offset;

    /* compute the size of the snapshots */
    offset = 0;
    for(i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        offset = align_offset(offset, 8);
        offset += sizeof(h);
        offset += strlen(sn->id_str);
        offset += strlen(sn->name);
    }
    snapshots_size = offset;

    snapshots_offset = alloc_clusters(bs, snapshots_size);
    offset = snapshots_offset;

    for(i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        memset(&h, 0, sizeof(h));
        h.l1_table_offset = cpu_to_be64(sn->l1_table_offset);
        h.l1_size = cpu_to_be32(sn->l1_size);
        h.vm_state_size = cpu_to_be32(sn->vm_state_size);
        h.date_sec = cpu_to_be32(sn->date_sec);
        h.date_nsec = cpu_to_be32(sn->date_nsec);
        h.vm_clock_nsec = cpu_to_be64(sn->vm_clock_nsec);

        id_str_size = strlen(sn->id_str);
        name_size = strlen(sn->name);
        h.id_str_size = cpu_to_be16(id_str_size);
        h.name_size = cpu_to_be16(name_size);
        offset = align_offset(offset, 8);
        if (bdrv_pwrite(s->hd, offset, &h, sizeof(h)) != sizeof(h))
            goto fail;
        offset += sizeof(h);
        if (bdrv_pwrite(s->hd, offset, sn->id_str, id_str_size) != id_str_size)
            goto fail;
        offset += id_str_size;
        if (bdrv_pwrite(s->hd, offset, sn->name, name_size) != name_size)
            goto fail;
        offset += name_size;
    }

    /* update the various header fields */
    data64 = cpu_to_be64(snapshots_offset);
    if (bdrv_pwrite(s->hd, offsetof(QCowHeader, snapshots_offset),
                    &data64, sizeof(data64)) != sizeof(data64))
        goto fail;
    data32 = cpu_to_be32(s->nb_snapshots);
    if (bdrv_pwrite(s->hd, offsetof(QCowHeader, nb_snapshots),
                    &data32, sizeof(data32)) != sizeof(data32))
        goto fail;

    /* free the old snapshot table */
    free_clusters(bs, s->snapshots_offset, s->snapshots_size);
    s->snapshots_offset = snapshots_offset;
    s->snapshots_size = snapshots_size;
    return 0;
 fail:
    return -1;
}
static void find_new_snapshot_id(BlockDriverState *bs,
                                 char *id_str, int id_str_size)
{
    BDRVQcowState *s = bs->opaque;
    QCowSnapshot *sn;
    int i, id, id_max = 0;

    for(i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        id = strtoul(sn->id_str, NULL, 10);
        if (id > id_max)
            id_max = id;
    }
    snprintf(id_str, id_str_size, "%d", id_max + 1);
}

static int find_snapshot_by_id(BlockDriverState *bs, const char *id_str)
{
    BDRVQcowState *s = bs->opaque;
    int i;

    for(i = 0; i < s->nb_snapshots; i++) {
        if (!strcmp(s->snapshots[i].id_str, id_str))
            return i;
    }
    return -1;
}

static int find_snapshot_by_id_or_name(BlockDriverState *bs, const char *name)
{
    BDRVQcowState *s = bs->opaque;
    int i, ret;

    ret = find_snapshot_by_id(bs, name);
    if (ret >= 0)
        return ret;
    for(i = 0; i < s->nb_snapshots; i++) {
        if (!strcmp(s->snapshots[i].name, name))
            return i;
    }
    return -1;
}
/* if no id is provided, a new one is constructed */
static int qcow_snapshot_create(BlockDriverState *bs,
                                QEMUSnapshotInfo *sn_info)
{
    BDRVQcowState *s = bs->opaque;
    QCowSnapshot *snapshots1, sn1, *sn = &sn1;
    int i, ret;
    uint64_t *l1_table = NULL;

    memset(sn, 0, sizeof(*sn));

    if (sn_info->id_str[0] == '\0') {
        /* compute a new id */
        find_new_snapshot_id(bs, sn_info->id_str, sizeof(sn_info->id_str));
    }

    /* check that the ID is unique */
    if (find_snapshot_by_id(bs, sn_info->id_str) >= 0)
        return -ENOENT;

    sn->id_str = qemu_strdup(sn_info->id_str);
    if (!sn->id_str)
        goto fail;
    sn->name = qemu_strdup(sn_info->name);
    if (!sn->name)
        goto fail;
    sn->vm_state_size = sn_info->vm_state_size;
    sn->date_sec = sn_info->date_sec;
    sn->date_nsec = sn_info->date_nsec;
    sn->vm_clock_nsec = sn_info->vm_clock_nsec;

    ret = update_snapshot_refcount(bs, s->l1_table_offset, s->l1_size, 1);
    if (ret < 0)
        goto fail;

    /* create the L1 table of the snapshot */
    sn->l1_table_offset = alloc_clusters(bs, s->l1_size * sizeof(uint64_t));
    sn->l1_size = s->l1_size;

    l1_table = qemu_malloc(s->l1_size * sizeof(uint64_t));
    if (!l1_table)
        goto fail;
    for(i = 0; i < s->l1_size; i++) {
        l1_table[i] = cpu_to_be64(s->l1_table[i]);
    }
    if (bdrv_pwrite(s->hd, sn->l1_table_offset,
                    l1_table, s->l1_size * sizeof(uint64_t)) !=
        (s->l1_size * sizeof(uint64_t)))
        goto fail;
    qemu_free(l1_table);
    l1_table = NULL;

    snapshots1 = qemu_malloc((s->nb_snapshots + 1) * sizeof(QCowSnapshot));
    if (!snapshots1)
        goto fail;
    memcpy(snapshots1, s->snapshots, s->nb_snapshots * sizeof(QCowSnapshot));
    qemu_free(s->snapshots);
    s->snapshots = snapshots1;
    s->snapshots[s->nb_snapshots++] = *sn;

    if (qcow_write_snapshots(bs) < 0)
        goto fail;
#ifdef DEBUG_ALLOC
    check_refcounts(bs);
#endif
    return 0;
 fail:
    qemu_free(sn->name);
    qemu_free(l1_table);
    return -1;
}
/* copy the snapshot 'snapshot_name' into the current disk image */
static int qcow_snapshot_goto(BlockDriverState *bs,
                              const char *snapshot_id)
{
    BDRVQcowState *s = bs->opaque;
    QCowSnapshot *sn;
    int i, snapshot_index, l1_size2;

    snapshot_index = find_snapshot_by_id_or_name(bs, snapshot_id);
    if (snapshot_index < 0)
        return -ENOENT;
    sn = &s->snapshots[snapshot_index];

    if (update_snapshot_refcount(bs, s->l1_table_offset, s->l1_size, -1) < 0)
        goto fail;

    if (grow_l1_table(bs, sn->l1_size) < 0)
        goto fail;

    s->l1_size = sn->l1_size;
    l1_size2 = s->l1_size * sizeof(uint64_t);
    /* copy the snapshot l1 table to the current l1 table */
    if (bdrv_pread(s->hd, sn->l1_table_offset,
                   s->l1_table, l1_size2) != l1_size2)
        goto fail;
    if (bdrv_pwrite(s->hd, s->l1_table_offset,
                    s->l1_table, l1_size2) != l1_size2)
        goto fail;
    for(i = 0; i < s->l1_size; i++) {
        be64_to_cpus(&s->l1_table[i]);
    }

    if (update_snapshot_refcount(bs, s->l1_table_offset, s->l1_size, 1) < 0)
        goto fail;

#ifdef DEBUG_ALLOC
    check_refcounts(bs);
#endif
    return 0;
 fail:
    return -EIO;
}
static int qcow_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)
{
    BDRVQcowState *s = bs->opaque;
    QCowSnapshot *sn;
    int snapshot_index, ret;

    snapshot_index = find_snapshot_by_id_or_name(bs, snapshot_id);
    if (snapshot_index < 0)
        return -ENOENT;
    sn = &s->snapshots[snapshot_index];

    ret = update_snapshot_refcount(bs, sn->l1_table_offset, sn->l1_size, -1);
    if (ret < 0)
        return ret;
    /* must update the copied flag on the current cluster offsets */
    ret = update_snapshot_refcount(bs, s->l1_table_offset, s->l1_size, 0);
    if (ret < 0)
        return ret;
    free_clusters(bs, sn->l1_table_offset, sn->l1_size * sizeof(uint64_t));

    qemu_free(sn->id_str);
    qemu_free(sn->name);
    memmove(sn, sn + 1, (s->nb_snapshots - snapshot_index - 1) * sizeof(*sn));
    s->nb_snapshots--;
    ret = qcow_write_snapshots(bs);
    if (ret < 0) {
        /* XXX: restore snapshot if error ? */
        return ret;
    }
#ifdef DEBUG_ALLOC
    check_refcounts(bs);
#endif
    return 0;
}
static int qcow_snapshot_list(BlockDriverState *bs,
                              QEMUSnapshotInfo **psn_tab)
{
    BDRVQcowState *s = bs->opaque;
    QEMUSnapshotInfo *sn_tab, *sn_info;
    QCowSnapshot *sn;
    int i;

    sn_tab = qemu_mallocz(s->nb_snapshots * sizeof(QEMUSnapshotInfo));
    if (!sn_tab)
        goto fail;
    for(i = 0; i < s->nb_snapshots; i++) {
        sn_info = sn_tab + i;
        sn = s->snapshots + i;
        pstrcpy(sn_info->id_str, sizeof(sn_info->id_str),
                sn->id_str);
        pstrcpy(sn_info->name, sizeof(sn_info->name),
                sn->name);
        sn_info->vm_state_size = sn->vm_state_size;
        sn_info->date_sec = sn->date_sec;
        sn_info->date_nsec = sn->date_nsec;
        sn_info->vm_clock_nsec = sn->vm_clock_nsec;
    }
    *psn_tab = sn_tab;
    return s->nb_snapshots;
 fail:
    qemu_free(sn_tab);
    *psn_tab = NULL;
    return -ENOMEM;
}
/*********************************************************/
/* refcount handling */
static int refcount_init(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int ret, refcount_table_size2, i;

    s->refcount_block_cache = qemu_malloc(s->cluster_size);
    if (!s->refcount_block_cache)
        goto fail;
    refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
    s->refcount_table = qemu_malloc(refcount_table_size2);
    if (!s->refcount_table)
        goto fail;
    if (s->refcount_table_size > 0) {
        ret = bdrv_pread(s->hd, s->refcount_table_offset,
                         s->refcount_table, refcount_table_size2);
        if (ret != refcount_table_size2)
            goto fail;
        for(i = 0; i < s->refcount_table_size; i++)
            be64_to_cpus(&s->refcount_table[i]);
    }
    return 0;
 fail:
    return -ENOMEM;
}

static void refcount_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    qemu_free(s->refcount_block_cache);
    qemu_free(s->refcount_table);
}
static int load_refcount_block(BlockDriverState *bs,
                               int64_t refcount_block_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = bdrv_pread(s->hd, refcount_block_offset, s->refcount_block_cache,
                     s->cluster_size);
    if (ret != s->cluster_size)
        return -EIO;
    s->refcount_block_cache_offset = refcount_block_offset;
    return 0;
}
static void scan_refcount(BlockDriverState *bs, int64_t *high)
{
    BDRVQcowState *s = bs->opaque;
    int64_t refcnt_index, cluster_index, cluster_end, h = 0;

    for (refcnt_index = 0; refcnt_index < s->refcount_table_size; refcnt_index++) {
        if (s->refcount_table[refcnt_index] == 0) {
            continue;
        }
        cluster_index = refcnt_index << (s->cluster_bits - REFCOUNT_SHIFT);
        cluster_end = (refcnt_index + 1) << (s->cluster_bits - REFCOUNT_SHIFT);
        for ( ; cluster_index < cluster_end; cluster_index++) {
            if (get_refcount(bs, cluster_index) == 0)
                /* do nothing -- reserved for free counting */;
            else
                h = cluster_index;
        }
    }

    if (high)
        *high = (h + 1);
}
static int get_refcount(BlockDriverState *bs, int64_t cluster_index)
{
    BDRVQcowState *s = bs->opaque;
    int refcount_table_index, block_index;
    int64_t refcount_block_offset;

    refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
    if (refcount_table_index >= s->refcount_table_size)
        return 0;
    refcount_block_offset = s->refcount_table[refcount_table_index];
    if (!refcount_block_offset)
        return 0;
    if (refcount_block_offset != s->refcount_block_cache_offset) {
        /* better than nothing: return allocated if read error */
        if (load_refcount_block(bs, refcount_block_offset) < 0)
            return 1;
    }
    block_index = cluster_index &
        ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
    return be16_to_cpu(s->refcount_block_cache[block_index]);
}
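
/*
 * Index arithmetic (editor's illustration): with cluster_bits = 12 and
 * REFCOUNT_SHIFT = 1, each refcount block holds 1 << 11 == 2048 entries,
 * so for cluster_index = 5000:
 *
 *   refcount_table_index = 5000 >> 11   = 2
 *   block_index          = 5000 & 2047  = 904
 *
 * i.e. the refcount lives at byte offset 904 << REFCOUNT_SHIFT inside the
 * third refcount block.
 */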
/* return < 0 if error */
static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int i, nb_clusters;

    nb_clusters = size_to_clusters(s, size);
retry:
    for(i = 0; i < nb_clusters; i++) {
        /* the inner i deliberately shadows the loop counter: it names the
           candidate cluster being probed */
        int64_t i = s->free_cluster_index++;
        if (get_refcount(bs, i) != 0)
            goto retry;
    }
#ifdef DEBUG_ALLOC2
    printf("alloc_clusters: size=%lld -> %lld\n",
           size,
           (s->free_cluster_index - nb_clusters) << s->cluster_bits);
#endif

    if (s->highest_alloc < s->free_cluster_index)
        s->highest_alloc = s->free_cluster_index;

    return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
}
static int64_t alloc_clusters(BlockDriverState *bs, int64_t size)
{
    int64_t offset;

    offset = alloc_clusters_noref(bs, size);
    update_refcount(bs, offset, size, 1);
    return offset;
}
/* only used to allocate compressed sectors. We try to allocate
   contiguous sectors. size must be <= cluster_size */
static int64_t alloc_bytes(BlockDriverState *bs, int size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset, cluster_offset;
    int free_in_cluster;

    assert(size > 0 && size <= s->cluster_size);
    if (s->free_byte_offset == 0) {
        s->free_byte_offset = alloc_clusters(bs, s->cluster_size);
    }
 redo:
    free_in_cluster = s->cluster_size -
        (s->free_byte_offset & (s->cluster_size - 1));
    if (size <= free_in_cluster) {
        /* enough space in current cluster */
        offset = s->free_byte_offset;
        s->free_byte_offset += size;
        free_in_cluster -= size;
        if (free_in_cluster == 0)
            s->free_byte_offset = 0;
        if ((offset & (s->cluster_size - 1)) != 0)
            update_cluster_refcount(bs, offset >> s->cluster_bits, 1);
    } else {
        offset = alloc_clusters(bs, s->cluster_size);
        cluster_offset = s->free_byte_offset & ~(s->cluster_size - 1);
        if ((cluster_offset + s->cluster_size) == offset) {
            /* we are lucky: contiguous data */
            offset = s->free_byte_offset;
            update_cluster_refcount(bs, offset >> s->cluster_bits, 1);
            s->free_byte_offset += size;
        } else {
            s->free_byte_offset = offset;
            goto redo;
        }
    }
    return offset;
}
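
/*
 * Behaviour sketch (editor's addition): alloc_bytes() hands out
 * sub-cluster chunks from the tail of the most recently allocated cluster.
 * Two back-to-back calls of 1 KB with 4 KB clusters return offsets X and
 * X + 1024 within the same cluster, and every call after the first bumps
 * that cluster's refcount, so the cluster is only freed once every
 * compressed chunk inside it has been freed.
 */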
static void free_clusters(BlockDriverState *bs,
                          int64_t offset, int64_t size)
{
    update_refcount(bs, offset, size, -1);
}
static int grow_refcount_table(BlockDriverState *bs, int min_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_table_size, new_table_size2, refcount_table_clusters, i, ret;
    uint64_t *new_table;
    int64_t table_offset;
    uint8_t data[12];
    int old_table_size;
    int64_t old_table_offset;

    if (min_size <= s->refcount_table_size)
        return 0;
    /* compute new table size */
    refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3);
    for(;;) {
        if (refcount_table_clusters == 0) {
            refcount_table_clusters = 1;
        } else {
            refcount_table_clusters = (refcount_table_clusters * 3 + 1) / 2;
        }
        new_table_size = refcount_table_clusters << (s->cluster_bits - 3);
        if (min_size <= new_table_size)
            break;
    }
#ifdef DEBUG_ALLOC2
    printf("grow_refcount_table from %d to %d\n",
           s->refcount_table_size,
           new_table_size);
#endif
    new_table_size2 = new_table_size * sizeof(uint64_t);
    new_table = qemu_mallocz(new_table_size2);
    if (!new_table)
        return -ENOMEM;
    memcpy(new_table, s->refcount_table,
           s->refcount_table_size * sizeof(uint64_t));
    for(i = 0; i < s->refcount_table_size; i++)
        cpu_to_be64s(&new_table[i]);
    /* Note: we cannot update the refcount now to avoid recursion */
    table_offset = alloc_clusters_noref(bs, new_table_size2);
    ret = bdrv_pwrite(s->hd, table_offset, new_table, new_table_size2);
    if (ret != new_table_size2)
        goto fail;
    for(i = 0; i < s->refcount_table_size; i++)
        be64_to_cpus(&new_table[i]);

    /* update the header in place: table offset, then size in clusters
       (the two fields are adjacent in QCowHeader) */
    cpu_to_be64w((uint64_t*)data, table_offset);
    cpu_to_be32w((uint32_t*)(data + 8), refcount_table_clusters);
    if (bdrv_pwrite(s->hd, offsetof(QCowHeader, refcount_table_offset),
                    data, sizeof(data)) != sizeof(data))
        goto fail;
    qemu_free(s->refcount_table);
    old_table_offset = s->refcount_table_offset;
    old_table_size = s->refcount_table_size;
    s->refcount_table = new_table;
    s->refcount_table_size = new_table_size;
    s->refcount_table_offset = table_offset;

    update_refcount(bs, table_offset, new_table_size2, 1);
    free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t));
    return 0;
 fail:
    free_clusters(bs, table_offset, new_table_size2);
    qemu_free(new_table);
    return -EIO;
}
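
/* Growth policy above, worked through (64 KiB clusters assumed for the
 * arithmetic, not mandated by this file): the table grows by roughly 150%
 * per iteration (n -> (3n + 1) / 2 clusters), i.e. 1, 2, 3, 5, 8, 12, ...
 * clusters until min_size entries fit. One 64 KiB table cluster holds
 * 8192 eight-byte entries, each naming a refcount block that itself covers
 * 32768 clusters (2 GiB of data), so a single table cluster already
 * describes 16 TiB. */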
/* addend must be 1 or -1 */
/* XXX: cache several refcount block clusters ? */
static int update_cluster_refcount(BlockDriverState *bs,
                                   int64_t cluster_index,
                                   int addend)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset, refcount_block_offset;
    int ret, refcount_table_index, block_index, refcount;
    uint64_t data64;

    refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
    if (refcount_table_index >= s->refcount_table_size) {
        if (addend < 0)
            return -EINVAL;
        ret = grow_refcount_table(bs, refcount_table_index + 1);
        if (ret < 0)
            return ret;
    }
    refcount_block_offset = s->refcount_table[refcount_table_index];
    if (!refcount_block_offset) {
        if (addend < 0)
            return -EINVAL;
        /* create a new refcount block */
        /* Note: we cannot update the refcount now to avoid recursion */
        offset = alloc_clusters_noref(bs, s->cluster_size);
        memset(s->refcount_block_cache, 0, s->cluster_size);
        ret = bdrv_pwrite(s->hd, offset, s->refcount_block_cache, s->cluster_size);
        if (ret != s->cluster_size)
            return -EINVAL;
        s->refcount_table[refcount_table_index] = offset;
        data64 = cpu_to_be64(offset);
        ret = bdrv_pwrite(s->hd, s->refcount_table_offset +
                          refcount_table_index * sizeof(uint64_t),
                          &data64, sizeof(data64));
        if (ret != sizeof(data64))
            return -EINVAL;

        refcount_block_offset = offset;
        s->refcount_block_cache_offset = offset;
        update_refcount(bs, offset, s->cluster_size, 1);
    } else {
        if (refcount_block_offset != s->refcount_block_cache_offset) {
            if (load_refcount_block(bs, refcount_block_offset) < 0)
                return -EIO;
        }
    }
    /* we can update the count and save it */
    block_index = cluster_index &
        ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
    refcount = be16_to_cpu(s->refcount_block_cache[block_index]);
    refcount += addend;
    if (refcount < 0 || refcount > 0xffff)
        return -EINVAL;
    if (refcount == 0 && cluster_index < s->free_cluster_index) {
        s->free_cluster_index = cluster_index;
    }
    s->refcount_block_cache[block_index] = cpu_to_be16(refcount);
    if (bdrv_pwrite(s->hd,
                    refcount_block_offset + (block_index << REFCOUNT_SHIFT),
                    &s->refcount_block_cache[block_index], 2) != 2)
        return -EIO;
    return refcount;
}
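
/* Recursion note for update_cluster_refcount(): updating a refcount may
 * itself require new clusters (a grown refcount table or a fresh refcount
 * block). Those are taken with alloc_clusters_noref() and accounted for
 * only afterwards through update_refcount(), which breaks the otherwise
 * circular update_refcount -> alloc_clusters -> update_refcount chain. */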
static void update_refcount(BlockDriverState *bs,
                            int64_t offset, int64_t length,
                            int addend)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;

#ifdef DEBUG_ALLOC2
    printf("update_refcount: offset=%" PRId64 " size=%" PRId64 " addend=%d\n",
           offset, length, addend);
#endif
    if (length <= 0)
        return;
    start = offset & ~(s->cluster_size - 1);
    last = (offset + length - 1) & ~(s->cluster_size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size) {
        update_cluster_refcount(bs, cluster_offset >> s->cluster_bits, addend);
    }
}
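
/* Example of the cluster rounding above (illustrative, with 64 KiB
 * clusters): update_refcount(bs, 0x1fff0, 0x20, 1) computes
 *   start = 0x1fff0 & ~0xffff              = 0x10000
 *   last  = (0x1fff0 + 0x20 - 1) & ~0xffff = 0x20000
 * so the loop visits clusters 1 and 2: a byte range that straddles a
 * cluster boundary changes the refcount of every cluster it overlaps. */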
static void inc_refcounts(BlockDriverState *bs,
                          uint16_t *refcount_table,
                          int refcount_table_size,
                          int64_t offset, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    int k;

    if (size <= 0)
        return;

    start = offset & ~(s->cluster_size - 1);
    last = (offset + size - 1) & ~(s->cluster_size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size) {
        k = cluster_offset >> s->cluster_bits;
        if (k < 0 || k >= refcount_table_size) {
            printf("ERROR: invalid cluster offset=0x%" PRIx64 "\n",
                   cluster_offset);
        } else {
            if (++refcount_table[k] == 0) {
                printf("ERROR: overflow cluster offset=0x%" PRIx64 "\n",
                       cluster_offset);
            }
        }
    }
}
static int check_refcounts_l1(BlockDriverState *bs,
                              uint16_t *refcount_table,
                              int refcount_table_size,
                              int64_t l1_table_offset, int l1_size,
                              int check_copied)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2;
    int l2_size, i, j, nb_csectors, refcount;

    l2_table = NULL;
    l1_size2 = l1_size * sizeof(uint64_t);

    inc_refcounts(bs, refcount_table, refcount_table_size,
                  l1_table_offset, l1_size2);

    l1_table = qemu_malloc(l1_size2);
    if (!l1_table)
        goto fail;
    if (bdrv_pread(s->hd, l1_table_offset,
                   l1_table, l1_size2) != l1_size2)
        goto fail;
    for(i = 0; i < l1_size; i++)
        be64_to_cpus(&l1_table[i]);

    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = qemu_malloc(l2_size);
    if (!l2_table)
        goto fail;
    for(i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            if (check_copied) {
                refcount = get_refcount(bs, (l2_offset & ~QCOW_OFLAG_COPIED) >> s->cluster_bits);
                if ((refcount == 1) != ((l2_offset & QCOW_OFLAG_COPIED) != 0)) {
                    printf("ERROR OFLAG_COPIED: l2_offset=%" PRIx64 " refcount=%d\n",
                           l2_offset, refcount);
                }
            }
            l2_offset &= ~QCOW_OFLAG_COPIED;
            if (bdrv_pread(s->hd, l2_offset, l2_table, l2_size) != l2_size)
                goto fail;
            for(j = 0; j < s->l2_size; j++) {
                offset = be64_to_cpu(l2_table[j]);
                if (offset != 0) {
                    if (offset & QCOW_OFLAG_COMPRESSED) {
                        if (offset & QCOW_OFLAG_COPIED) {
                            printf("ERROR: cluster %" PRIu64 ": copied flag must never be set for compressed clusters\n",
                                   offset >> s->cluster_bits);
                            offset &= ~QCOW_OFLAG_COPIED;
                        }
                        nb_csectors = ((offset >> s->csize_shift) &
                                       s->csize_mask) + 1;
                        offset &= s->cluster_offset_mask;
                        inc_refcounts(bs, refcount_table,
                                      refcount_table_size,
                                      offset & ~511, nb_csectors * 512);
                    } else {
                        if (check_copied) {
                            refcount = get_refcount(bs, (offset & ~QCOW_OFLAG_COPIED) >> s->cluster_bits);
                            if ((refcount == 1) != ((offset & QCOW_OFLAG_COPIED) != 0)) {
                                printf("ERROR OFLAG_COPIED: offset=%" PRIx64 " refcount=%d\n",
                                       offset, refcount);
                            }
                        }
                        offset &= ~QCOW_OFLAG_COPIED;
                        inc_refcounts(bs, refcount_table,
                                      refcount_table_size,
                                      offset, s->cluster_size);
                    }
                }
            }
            inc_refcounts(bs, refcount_table,
                          refcount_table_size,
                          l2_offset,
                          s->cluster_size);
        }
    }
    qemu_free(l1_table);
    qemu_free(l2_table);
    return 0;
 fail:
    printf("ERROR: I/O error in check_refcounts_l1\n");
    qemu_free(l1_table);
    qemu_free(l2_table);
    return -EIO;
}
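
/* Invariants verified above: QCOW_OFLAG_COPIED must be set on an L1/L2
 * entry exactly when the referenced cluster has refcount 1 (it may then be
 * rewritten in place), and never on compressed clusters. For compressed
 * entries the size is encoded in 512-byte sectors, so the checker charges
 * nb_csectors * 512 bytes starting at the sector-aligned host offset
 * instead of a whole cluster. */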
static void check_refcounts(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int64_t size;
    int nb_clusters, refcount1, refcount2, i;
    QCowSnapshot *sn;
    uint16_t *refcount_table;

    size = bdrv_getlength(s->hd);
    nb_clusters = size_to_clusters(s, size);
    refcount_table = qemu_mallocz(nb_clusters * sizeof(uint16_t));

    /* header */
    inc_refcounts(bs, refcount_table, nb_clusters,
                  0, s->cluster_size);

    check_refcounts_l1(bs, refcount_table, nb_clusters,
                       s->l1_table_offset, s->l1_size, 1);

    /* snapshots */
    for(i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        check_refcounts_l1(bs, refcount_table, nb_clusters,
                           sn->l1_table_offset, sn->l1_size, 0);
    }
    inc_refcounts(bs, refcount_table, nb_clusters,
                  s->snapshots_offset, s->snapshots_size);

    /* refcount data */
    inc_refcounts(bs, refcount_table, nb_clusters,
                  s->refcount_table_offset,
                  s->refcount_table_size * sizeof(uint64_t));
    for(i = 0; i < s->refcount_table_size; i++) {
        int64_t offset;
        offset = s->refcount_table[i];
        if (offset != 0) {
            inc_refcounts(bs, refcount_table, nb_clusters,
                          offset, s->cluster_size);
        }
    }

    /* compare ref counts */
    for(i = 0; i < nb_clusters; i++) {
        refcount1 = get_refcount(bs, i);
        refcount2 = refcount_table[i];
        if (refcount1 != refcount2)
            printf("ERROR cluster %d refcount=%d reference=%d\n",
                   i, refcount1, refcount2);
    }

    qemu_free(refcount_table);
}
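
/* Reading the output above: a stored refcount greater than the recomputed
 * reference count points at leaked clusters -- wasted space, but the data
 * is still consistent; the opposite direction means a cluster is referenced
 * more often than its refcount records, which becomes real corruption as
 * soon as the cluster is handed out again. */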
#if 0
static void dump_refcounts(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int64_t nb_clusters, k, k1, size;
    int refcount;

    size = bdrv_getlength(s->hd);
    nb_clusters = size_to_clusters(s, size);
    for(k = 0; k < nb_clusters;) {
        k1 = k;
        refcount = get_refcount(bs, k);
        k++;
        while (k < nb_clusters && get_refcount(bs, k) == refcount)
            k++;
        printf("%" PRId64 ": refcount=%d nb=%" PRId64 "\n",
               k, refcount, k - k1);
    }
}
#endif
BlockDriver bdrv_qcow2 = {
    "qcow2",
    sizeof(BDRVQcowState),
    qcow_probe,
    qcow_open,
    NULL,
    NULL,
    qcow_close,
    qcow_create,
    qcow_flush,
    qcow_is_allocated,
    qcow_set_key,
    qcow_make_empty,

    .bdrv_aio_read = qcow_aio_read,
    .bdrv_aio_write = qcow_aio_write,
    .bdrv_aio_cancel = qcow_aio_cancel,
    .aiocb_size = sizeof(QCowAIOCB),
    .bdrv_write_compressed = qcow_write_compressed,

    .bdrv_snapshot_create = qcow_snapshot_create,
    .bdrv_snapshot_goto = qcow_snapshot_goto,
    .bdrv_snapshot_delete = qcow_snapshot_delete,
    .bdrv_snapshot_list = qcow_snapshot_list,
    .bdrv_get_info = qcow_get_info,
};