2 * Block driver for the QCOW version 2 format
4 * Copyright (c) 2004-2006 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 #include "qemu-common.h"
25 #include "block_int.h"
31 Differences with QCOW:
33 - Support for multiple incremental snapshots.
34 - Memory management by reference counts.
35 - Clusters which have a reference count of one have the
36 QCOW_OFLAG_COPIED bit set to optimize write performance.
37 - Size of compressed clusters is stored in sectors to reduce bit usage
38 in the cluster offsets.
39 - Support for storing additional data (such as the VM state) in the snapshots.
41 - If a backing store is used, the cluster size is not constrained
42 (could be backported to QCOW).
43 - L2 tables always have a size of one cluster.
47 //#define DEBUG_ALLOC2
50 #define QCOW_MAGIC (('Q' << 24) | ('F' << 16) | ('I' << 8) | 0xfb)
51 #define QCOW_VERSION 2
53 #define QCOW_CRYPT_NONE 0
54 #define QCOW_CRYPT_AES 1
56 #define QCOW_MAX_CRYPT_CLUSTERS 32
58 /* indicate that the refcount of the referenced cluster is exactly one. */
59 #define QCOW_OFLAG_COPIED (1LL << 63)
60 /* indicate that the cluster is compressed (compressed clusters never have the copied flag) */
61 #define QCOW_OFLAG_COMPRESSED (1LL << 62)
63 #define REFCOUNT_SHIFT 1 /* refcount size is 2 bytes */
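/* A refcount entry is (1 << REFCOUNT_SHIFT) bytes wide, i.e. 16 bits, so a
 * refcount block of cluster_size bytes holds cluster_size >> REFCOUNT_SHIFT
 * entries.  With the 4 KiB clusters created by qcow_create2() below that is
 * 2048 entries, so one refcount block tracks 8 MiB of image data. */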
65 typedef struct QCowHeader {
68 uint64_t backing_file_offset;
69 uint32_t backing_file_size;
70 uint32_t cluster_bits;
71 uint64_t size; /* in bytes */
72 uint32_t crypt_method;
73 uint32_t l1_size; /* XXX: save number of clusters instead ? */
74 uint64_t l1_table_offset;
75 uint64_t refcount_table_offset;
76 uint32_t refcount_table_clusters;
77 uint32_t nb_snapshots;
78 uint64_t snapshots_offset;
86 #define QCOW_EXT_MAGIC_END 0
87 #define QCOW_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA
90 typedef struct __attribute__((packed)) QCowSnapshotHeader {
91 /* header is 8 byte aligned */
92 uint64_t l1_table_offset;
101 uint64_t vm_clock_nsec;
103 uint32_t vm_state_size;
104 uint32_t extra_data_size; /* for extension */
105 /* extra data follows */
108 } QCowSnapshotHeader;
110 #define L2_CACHE_SIZE 16
112 typedef struct QCowSnapshot {
113 uint64_t l1_table_offset;
117 uint32_t vm_state_size;
120 uint64_t vm_clock_nsec;
123 typedef struct BDRVQcowState {
124 BlockDriverState *hd;
131 int l1_vm_state_index;
134 uint64_t cluster_offset_mask;
135 uint64_t l1_table_offset;
138 uint64_t l2_cache_offsets[L2_CACHE_SIZE];
139 uint32_t l2_cache_counts[L2_CACHE_SIZE];
140 uint8_t *cluster_cache;
141 uint8_t *cluster_data;
142 uint64_t cluster_cache_offset;
144 uint64_t *refcount_table;
145 uint64_t refcount_table_offset;
146 uint32_t refcount_table_size;
147 uint64_t refcount_block_cache_offset;
148 uint16_t *refcount_block_cache;
149 int64_t free_cluster_index;
150 int64_t free_byte_offset;
152 uint32_t crypt_method; /* current crypt method, 0 if no key yet */
153 uint32_t crypt_method_header;
154 AES_KEY aes_encrypt_key;
155 AES_KEY aes_decrypt_key;
156 uint64_t snapshots_offset;
159 QCowSnapshot *snapshots;
162 static int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset);
163 static int qcow_read(BlockDriverState *bs, int64_t sector_num,
164 uint8_t *buf, int nb_sectors);
165 static int qcow_read_snapshots(BlockDriverState *bs);
166 static void qcow_free_snapshots(BlockDriverState *bs);
167 static int refcount_init(BlockDriverState *bs);
168 static void refcount_close(BlockDriverState *bs);
169 static int get_refcount(BlockDriverState *bs, int64_t cluster_index);
170 static int update_cluster_refcount(BlockDriverState *bs,
171 int64_t cluster_index,
173 static void update_refcount(BlockDriverState *bs,
174 int64_t offset, int64_t length,
176 static int64_t alloc_clusters(BlockDriverState *bs, int64_t size);
177 static int64_t alloc_bytes(BlockDriverState *bs, int size);
178 static void free_clusters(BlockDriverState *bs,
179 int64_t offset, int64_t size);
181 static void check_refcounts(BlockDriverState *bs);
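/* Illustrative sketch, not called by the driver: how a (host-endian) L2 entry
 * breaks down into its flag bits and payload.  The arithmetic mirrors what
 * decompress_cluster() and free_any_clusters() do below; the helper name and
 * signature are invented for the example. */
static inline uint64_t example_decode_l2_entry(BDRVQcowState *s, uint64_t entry,
                                               int *compressed, int *csize)
{
    if (entry & QCOW_OFLAG_COMPRESSED) {
        /* compressed cluster: the low bits hold a byte offset into the image
           file, the bits above csize_shift hold the sector count minus one */
        uint64_t coffset = entry & s->cluster_offset_mask;
        int nb_csectors = ((entry >> s->csize_shift) & s->csize_mask) + 1;
        *compressed = 1;
        *csize = nb_csectors * 512 - (coffset & 511);
        return coffset;
    }
    /* normal cluster: strip the COPIED flag to get the host cluster offset */
    *compressed = 0;
    *csize = 0;
    return entry & ~QCOW_OFLAG_COPIED;
}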
184 static int qcow_probe(const uint8_t *buf, int buf_size, const char *filename)
186 const QCowHeader *cow_header = (const void *)buf;
188 if (buf_size >= sizeof(QCowHeader) &&
189 be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
190 be32_to_cpu(cow_header->version) == QCOW_VERSION)
198 * read the qcow2 extensions and fill bs
199 * start reading from start_offset
200 * finish reading upon a magic value of 0 or when end_offset is reached
201 * an unknown magic is skipped (a future extension this version knows nothing about)
202 * return 0 upon success, non-0 otherwise
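 * Each extension record is a QCowExtension header (a 32-bit magic followed by
 * a 32-bit byte length, both big-endian on disk) and then ext.len bytes of
 * payload; the next record starts at the following 8-byte boundary, which is
 * what the ((ext.len + 7) & ~7) rounding below implements.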
204 static int qcow_read_extensions(BlockDriverState *bs, uint64_t start_offset,
207 BDRVQcowState *s = bs->opaque;
212 printf("qcow_read_extensions: start=%ld end=%ld\n", start_offset, end_offset);
214 offset = start_offset;
215 while (offset < end_offset) {
219 if (offset > s->cluster_size)
220 printf("qcow_handle_extension: suspicious offset %lu\n", offset);
222 printf("attemting to read extended header in offset %lu\n", offset);
225 if (bdrv_pread(s->hd, offset, &ext, sizeof(ext)) != sizeof(ext)) {
226 fprintf(stderr, "qcow_handle_extension: ERROR: pread fail from offset %llu\n",
227 (unsigned long long)offset);
230 be32_to_cpus(&ext.magic);
231 be32_to_cpus(&ext.len);
232 offset += sizeof(ext);
234 printf("ext.magic = 0x%x\n", ext.magic);
237 case QCOW_EXT_MAGIC_END:
240 case QCOW_EXT_MAGIC_BACKING_FORMAT:
241 if (ext.len >= sizeof(bs->backing_format)) {
242 fprintf(stderr, "ERROR: ext_backing_format: len=%u too large"
244 ext.len, sizeof(bs->backing_format));
247 if (bdrv_pread(s->hd, offset , bs->backing_format,
250 bs->backing_format[ext.len] = '\0';
252 printf("Qcow2: Got format extension %s\n", bs->backing_format);
254 offset += ((ext.len + 7) & ~7);
258 /* unknown magic -- just skip it */
259 offset += ((ext.len + 7) & ~7);
268 static int qcow_open(BlockDriverState *bs, const char *filename, int flags)
270 BDRVQcowState *s = bs->opaque;
271 int len, i, shift, ret;
275 /* Performance is terrible right now with cache=writethrough due mainly
276 * to reference count updates. If the user does not explicitly specify
277 * a caching type, force writeback caching.
279 if ((flags & BDRV_O_CACHE_DEF)) {
280 flags |= BDRV_O_CACHE_WB;
281 flags &= ~BDRV_O_CACHE_DEF;
283 ret = bdrv_file_open(&s->hd, filename, flags);
286 if (bdrv_pread(s->hd, 0, &header, sizeof(header)) != sizeof(header))
288 be32_to_cpus(&header.magic);
289 be32_to_cpus(&header.version);
290 be64_to_cpus(&header.backing_file_offset);
291 be32_to_cpus(&header.backing_file_size);
292 be64_to_cpus(&header.size);
293 be32_to_cpus(&header.cluster_bits);
294 be32_to_cpus(&header.crypt_method);
295 be64_to_cpus(&header.l1_table_offset);
296 be32_to_cpus(&header.l1_size);
297 be64_to_cpus(&header.refcount_table_offset);
298 be32_to_cpus(&header.refcount_table_clusters);
299 be64_to_cpus(&header.snapshots_offset);
300 be32_to_cpus(&header.nb_snapshots);
302 if (header.magic != QCOW_MAGIC || header.version != QCOW_VERSION)
304 if (header.size <= 1 ||
305 header.cluster_bits < 9 ||
306 header.cluster_bits > 16)
308 if (header.crypt_method > QCOW_CRYPT_AES)
310 s->crypt_method_header = header.crypt_method;
311 if (s->crypt_method_header)
313 s->cluster_bits = header.cluster_bits;
314 s->cluster_size = 1 << s->cluster_bits;
315 s->cluster_sectors = 1 << (s->cluster_bits - 9);
316 s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
317 s->l2_size = 1 << s->l2_bits;
318 bs->total_sectors = header.size / 512;
319 s->csize_shift = (62 - (s->cluster_bits - 8));
320 s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
321 s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
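/* Worked example with the 4 KiB clusters created by qcow_create2() below
 * (cluster_bits == 12): cluster_sectors == 8 and l2_bits == 9, so an L2 table
 * holds 512 entries and maps 2 MiB of guest data; csize_shift == 58, leaving
 * a 4-bit compressed-sector-count field and a 58-bit offset field in
 * compressed L2 entries. */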
322 s->refcount_table_offset = header.refcount_table_offset;
323 s->refcount_table_size =
324 header.refcount_table_clusters << (s->cluster_bits - 3);
326 s->snapshots_offset = header.snapshots_offset;
327 s->nb_snapshots = header.nb_snapshots;
329 /* read the level 1 table */
330 s->l1_size = header.l1_size;
331 shift = s->cluster_bits + s->l2_bits;
332 s->l1_vm_state_index = (header.size + (1LL << shift) - 1) >> shift;
333 /* the L1 table must contain at least enough entries to map header.size bytes */
335 if (s->l1_size < s->l1_vm_state_index)
337 s->l1_table_offset = header.l1_table_offset;
338 s->l1_table = qemu_malloc(s->l1_size * sizeof(uint64_t));
339 if (bdrv_pread(s->hd, s->l1_table_offset, s->l1_table, s->l1_size * sizeof(uint64_t)) !=
340 s->l1_size * sizeof(uint64_t))
342 for(i = 0;i < s->l1_size; i++) {
343 be64_to_cpus(&s->l1_table[i]);
346 s->l2_cache = qemu_malloc(s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
347 s->cluster_cache = qemu_malloc(s->cluster_size);
348 /* one more sector for decompressed data alignment */
349 s->cluster_data = qemu_malloc(QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
351 s->cluster_cache_offset = -1;
353 if (refcount_init(bs) < 0)
356 /* read qcow2 extensions */
357 if (header.backing_file_offset)
358 ext_end = header.backing_file_offset;
360 ext_end = s->cluster_size;
361 if (qcow_read_extensions(bs, sizeof(header), ext_end))
364 /* read the backing file name */
365 if (header.backing_file_offset != 0) {
366 len = header.backing_file_size;
369 if (bdrv_pread(s->hd, header.backing_file_offset, bs->backing_file, len) != len)
371 bs->backing_file[len] = '\0';
373 if (qcow_read_snapshots(bs) < 0)
382 qcow_free_snapshots(bs);
384 qemu_free(s->l1_table);
385 qemu_free(s->l2_cache);
386 qemu_free(s->cluster_cache);
387 qemu_free(s->cluster_data);
392 static int qcow_set_key(BlockDriverState *bs, const char *key)
394 BDRVQcowState *s = bs->opaque;
398 memset(keybuf, 0, 16);
402 /* XXX: we could compress the chars to 7 bits to increase
404 for(i = 0;i < len;i++) {
407 s->crypt_method = s->crypt_method_header;
409 if (AES_set_encrypt_key(keybuf, 128, &s->aes_encrypt_key) != 0)
411 if (AES_set_decrypt_key(keybuf, 128, &s->aes_decrypt_key) != 0)
421 AES_encrypt(in, tmp, &s->aes_encrypt_key);
422 AES_decrypt(tmp, out, &s->aes_decrypt_key);
423 for(i = 0; i < 16; i++)
424 printf(" %02x", tmp[i]);
426 for(i = 0; i < 16; i++)
427 printf(" %02x", out[i]);
434 /* The crypt function is compatible with the linux cryptoloop
435 algorithm for < 4 GB images. NOTE: out_buf == in_buf is supported */
437 static void encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
438 uint8_t *out_buf, const uint8_t *in_buf,
439 int nb_sectors, int enc,
448 for(i = 0; i < nb_sectors; i++) {
449 ivec.ll[0] = cpu_to_le64(sector_num);
451 AES_cbc_encrypt(in_buf, out_buf, 512, key,
459 static int copy_sectors(BlockDriverState *bs, uint64_t start_sect,
460 uint64_t cluster_offset, int n_start, int n_end)
462 BDRVQcowState *s = bs->opaque;
468 ret = qcow_read(bs, start_sect + n_start, s->cluster_data, n);
471 if (s->crypt_method) {
472 encrypt_sectors(s, start_sect + n_start,
474 s->cluster_data, n, 1,
475 &s->aes_encrypt_key);
477 ret = bdrv_write(s->hd, (cluster_offset >> 9) + n_start,
484 static void l2_cache_reset(BlockDriverState *bs)
486 BDRVQcowState *s = bs->opaque;
488 memset(s->l2_cache, 0, s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
489 memset(s->l2_cache_offsets, 0, L2_CACHE_SIZE * sizeof(uint64_t));
490 memset(s->l2_cache_counts, 0, L2_CACHE_SIZE * sizeof(uint32_t));
493 static inline int l2_cache_new_entry(BlockDriverState *bs)
495 BDRVQcowState *s = bs->opaque;
499 /* reuse the least used cache entry */
501 min_count = 0xffffffff;
502 for(i = 0; i < L2_CACHE_SIZE; i++) {
503 if (s->l2_cache_counts[i] < min_count) {
504 min_count = s->l2_cache_counts[i];
511 static int64_t align_offset(int64_t offset, int n)
513 offset = (offset + n - 1) & ~(n - 1);
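/* Note: the rounding above only works when n is a power of two; every caller
 * in this file passes either 8 or the cluster size, which both qualify. */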
517 static int grow_l1_table(BlockDriverState *bs, int min_size)
519 BDRVQcowState *s = bs->opaque;
520 int new_l1_size, new_l1_size2, ret, i;
521 uint64_t *new_l1_table;
522 uint64_t new_l1_table_offset;
525 new_l1_size = s->l1_size;
526 if (min_size <= new_l1_size)
528 while (min_size > new_l1_size) {
529 new_l1_size = (new_l1_size * 3 + 1) / 2;
532 printf("grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
535 new_l1_size2 = sizeof(uint64_t) * new_l1_size;
536 new_l1_table = qemu_mallocz(new_l1_size2);
537 memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
539 /* write new table (align to cluster) */
540 new_l1_table_offset = alloc_clusters(bs, new_l1_size2);
542 for(i = 0; i < s->l1_size; i++)
543 new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
544 ret = bdrv_pwrite(s->hd, new_l1_table_offset, new_l1_table, new_l1_size2);
545 if (ret != new_l1_size2)
547 for(i = 0; i < s->l1_size; i++)
548 new_l1_table[i] = be64_to_cpu(new_l1_table[i]);
551 cpu_to_be32w((uint32_t*)data, new_l1_size);
552 cpu_to_be64w((uint64_t*)(data + 4), new_l1_table_offset);
553 if (bdrv_pwrite(s->hd, offsetof(QCowHeader, l1_size), data,
554 sizeof(data)) != sizeof(data))
556 qemu_free(s->l1_table);
557 free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
558 s->l1_table_offset = new_l1_table_offset;
559 s->l1_table = new_l1_table;
560 s->l1_size = new_l1_size;
563 qemu_free(s->l1_table);
570 * seek l2_offset in the l2_cache table
571 * if not found, return NULL
573 * if found, increment the l2 cache hit count of the entry,
574 * if the counter overflows, divide all counters by two, and
575 * return the pointer to the l2 cache entry
579 static uint64_t *seek_l2_table(BDRVQcowState *s, uint64_t l2_offset)
583 for(i = 0; i < L2_CACHE_SIZE; i++) {
584 if (l2_offset == s->l2_cache_offsets[i]) {
585 /* increment the hit count */
586 if (++s->l2_cache_counts[i] == 0xffffffff) {
587 for(j = 0; j < L2_CACHE_SIZE; j++) {
588 s->l2_cache_counts[j] >>= 1;
591 return s->l2_cache + (i << s->l2_bits);
600 * Loads a L2 table into memory. If the table is in the cache, the cache
601 * is used; otherwise the L2 table is loaded from the image file.
603 * Returns a pointer to the L2 table on success, or NULL if the read from
604 * the image file failed.
607 static uint64_t *l2_load(BlockDriverState *bs, uint64_t l2_offset)
609 BDRVQcowState *s = bs->opaque;
613 /* seek if the table for the given offset is in the cache */
615 l2_table = seek_l2_table(s, l2_offset);
616 if (l2_table != NULL)
619 /* not found: load the table into the least used cache entry */
621 min_index = l2_cache_new_entry(bs);
622 l2_table = s->l2_cache + (min_index << s->l2_bits);
623 if (bdrv_pread(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) !=
624 s->l2_size * sizeof(uint64_t))
626 s->l2_cache_offsets[min_index] = l2_offset;
627 s->l2_cache_counts[min_index] = 1;
635 * Allocate a new l2 entry in the file. If l1_index points to an already
636 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
637 * table) copy the contents of the old L2 table into the newly allocated one.
638 * Otherwise the new table is initialized with zeros.
642 static uint64_t *l2_allocate(BlockDriverState *bs, int l1_index)
644 BDRVQcowState *s = bs->opaque;
646 uint64_t old_l2_offset, tmp;
647 uint64_t *l2_table, l2_offset;
649 old_l2_offset = s->l1_table[l1_index];
651 /* allocate a new l2 entry */
653 l2_offset = alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
655 /* update the L1 entry */
657 s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
659 tmp = cpu_to_be64(l2_offset | QCOW_OFLAG_COPIED);
660 if (bdrv_pwrite(s->hd, s->l1_table_offset + l1_index * sizeof(tmp),
661 &tmp, sizeof(tmp)) != sizeof(tmp))
664 /* allocate a new entry in the l2 cache */
666 min_index = l2_cache_new_entry(bs);
667 l2_table = s->l2_cache + (min_index << s->l2_bits);
669 if (old_l2_offset == 0) {
670 /* if there was no old l2 table, clear the new table */
671 memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
673 /* if there was an old l2 table, read it from the disk */
674 if (bdrv_pread(s->hd, old_l2_offset,
675 l2_table, s->l2_size * sizeof(uint64_t)) !=
676 s->l2_size * sizeof(uint64_t))
679 /* write the l2 table to the file */
680 if (bdrv_pwrite(s->hd, l2_offset,
681 l2_table, s->l2_size * sizeof(uint64_t)) !=
682 s->l2_size * sizeof(uint64_t))
685 /* update the l2 cache entry */
687 s->l2_cache_offsets[min_index] = l2_offset;
688 s->l2_cache_counts[min_index] = 1;
693 static int size_to_clusters(BDRVQcowState *s, int64_t size)
695 return (size + (s->cluster_size - 1)) >> s->cluster_bits;
698 static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
699 uint64_t *l2_table, uint64_t start, uint64_t mask)
702 uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;
707 for (i = start; i < start + nb_clusters; i++)
708 if (offset + i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))
714 static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
718 while(nb_clusters-- && l2_table[i] == 0)
727 * For a given offset of the disk image, return the cluster offset in the qcow2 file.
730 * on entry, *num is the number of contiguous sectors we'd like to
731 * access following offset.
733 * on exit, *num is the number of contiguous sectors we can read.
735 * Return the cluster offset if the offset is mapped,
736 * return 0 otherwise.
740 static uint64_t get_cluster_offset(BlockDriverState *bs,
741 uint64_t offset, int *num)
743 BDRVQcowState *s = bs->opaque;
744 int l1_index, l2_index;
745 uint64_t l2_offset, *l2_table, cluster_offset;
747 int index_in_cluster, nb_available, nb_needed, nb_clusters;
749 index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
750 nb_needed = *num + index_in_cluster;
752 l1_bits = s->l2_bits + s->cluster_bits;
754 /* compute how many bytes there are between the offset and
755 * the end of the l1 entry
758 nb_available = (1 << l1_bits) - (offset & ((1 << l1_bits) - 1));
760 /* compute the number of available sectors */
762 nb_available = (nb_available >> 9) + index_in_cluster;
766 /* seek the l2 offset in the l1 table */
768 l1_index = offset >> l1_bits;
769 if (l1_index >= s->l1_size)
772 l2_offset = s->l1_table[l1_index];
774 /* seek the l2 table of the given l2 offset */
779 /* load the l2 table in memory */
781 l2_offset &= ~QCOW_OFLAG_COPIED;
782 l2_table = l2_load(bs, l2_offset);
783 if (l2_table == NULL)
786 /* find the cluster offset for the given disk offset */
788 l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
789 cluster_offset = be64_to_cpu(l2_table[l2_index]);
790 nb_clusters = size_to_clusters(s, nb_needed << 9);
792 if (!cluster_offset) {
793 /* how many empty clusters ? */
794 c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
796 /* how many allocated clusters ? */
797 c = count_contiguous_clusters(nb_clusters, s->cluster_size,
798 &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);
801 nb_available = (c * s->cluster_sectors);
803 if (nb_available > nb_needed)
804 nb_available = nb_needed;
806 *num = nb_available - index_in_cluster;
808 return cluster_offset & ~QCOW_OFLAG_COPIED;
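/* Illustrative sketch, not called anywhere: how a guest byte offset is split
 * into the indices used by get_cluster_offset() above and get_cluster_table()
 * below.  The helper name is invented for the example. */
static inline void example_split_guest_offset(BDRVQcowState *s, uint64_t offset,
                                              int *l1_index, int *l2_index,
                                              int *bytes_in_cluster)
{
    *l1_index = offset >> (s->l2_bits + s->cluster_bits);
    *l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *bytes_in_cluster = offset & (s->cluster_size - 1);
}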
814 * free clusters according to their type: compressed or not
818 static void free_any_clusters(BlockDriverState *bs,
819 uint64_t cluster_offset, int nb_clusters)
821 BDRVQcowState *s = bs->opaque;
823 /* free the cluster */
825 if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
827 nb_csectors = ((cluster_offset >> s->csize_shift) &
829 free_clusters(bs, (cluster_offset & s->cluster_offset_mask) & ~511,
834 free_clusters(bs, cluster_offset, nb_clusters << s->cluster_bits);
842 * for a given disk offset, load (and allocate if needed) the corresponding L2 table.
845 * the l2 table offset in the qcow2 file and the cluster index
846 * in the l2 table are given to the caller.
850 static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
851 uint64_t **new_l2_table,
852 uint64_t *new_l2_offset,
855 BDRVQcowState *s = bs->opaque;
856 int l1_index, l2_index, ret;
857 uint64_t l2_offset, *l2_table;
859 /* seek the l2 offset in the l1 table */
861 l1_index = offset >> (s->l2_bits + s->cluster_bits);
862 if (l1_index >= s->l1_size) {
863 ret = grow_l1_table(bs, l1_index + 1);
867 l2_offset = s->l1_table[l1_index];
869 /* seek the l2 table of the given l2 offset */
871 if (l2_offset & QCOW_OFLAG_COPIED) {
872 /* load the l2 table in memory */
873 l2_offset &= ~QCOW_OFLAG_COPIED;
874 l2_table = l2_load(bs, l2_offset);
875 if (l2_table == NULL)
879 free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
880 l2_table = l2_allocate(bs, l1_index);
881 if (l2_table == NULL)
883 l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
886 /* find the cluster offset for the given disk offset */
888 l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
890 *new_l2_table = l2_table;
891 *new_l2_offset = l2_offset;
892 *new_l2_index = l2_index;
898 * alloc_compressed_cluster_offset
900 * For a given offset of the disk image, return the cluster offset in the qcow2 file.
903 * If the offset is not found, allocate a new compressed cluster.
905 * Return the cluster offset if successful,
906 * Return 0, otherwise.
910 static uint64_t alloc_compressed_cluster_offset(BlockDriverState *bs,
914 BDRVQcowState *s = bs->opaque;
916 uint64_t l2_offset, *l2_table, cluster_offset;
919 ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
923 cluster_offset = be64_to_cpu(l2_table[l2_index]);
924 if (cluster_offset & QCOW_OFLAG_COPIED)
925 return cluster_offset & ~QCOW_OFLAG_COPIED;
928 free_any_clusters(bs, cluster_offset, 1);
930 cluster_offset = alloc_bytes(bs, compressed_size);
931 nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
932 (cluster_offset >> 9);
934 cluster_offset |= QCOW_OFLAG_COMPRESSED |
935 ((uint64_t)nb_csectors << s->csize_shift);
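/* Note: the nb_csectors computed above is the number of 512-byte sectors the
 * compressed data spans minus one (0 when it fits in a single sector);
 * decompress_cluster() adds the 1 back when decoding the field. */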
937 /* update L2 table */
939 /* compressed clusters never have the copied flag */
941 l2_table[l2_index] = cpu_to_be64(cluster_offset);
942 if (bdrv_pwrite(s->hd,
943 l2_offset + l2_index * sizeof(uint64_t),
945 sizeof(uint64_t)) != sizeof(uint64_t))
948 return cluster_offset;
951 typedef struct QCowL2Meta
959 static int alloc_cluster_link_l2(BlockDriverState *bs, uint64_t cluster_offset,
962 BDRVQcowState *s = bs->opaque;
963 int i, j = 0, l2_index, ret;
964 uint64_t *old_cluster, start_sect, l2_offset, *l2_table;
966 if (m->nb_clusters == 0)
969 old_cluster = qemu_malloc(m->nb_clusters * sizeof(uint64_t));
971 /* copy content of unmodified sectors */
972 start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;
974 ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
979 if (m->nb_available & (s->cluster_sectors - 1)) {
980 uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
981 ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
982 m->nb_available - end, s->cluster_sectors);
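/* Example of the copy-on-write above, with 4 KiB clusters (8 sectors): a guest
 * write covering sectors 3..5 of a cluster triggers two copy_sectors() calls,
 * one for sectors 0..2 before the write and one for sectors 6..7 after it, so
 * the newly allocated cluster ends up fully populated. */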
988 /* update L2 table */
989 if (!get_cluster_table(bs, m->offset, &l2_table, &l2_offset, &l2_index))
992 for (i = 0; i < m->nb_clusters; i++) {
993 if(l2_table[l2_index + i] != 0)
994 old_cluster[j++] = l2_table[l2_index + i];
996 l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
997 (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
1000 if (bdrv_pwrite(s->hd, l2_offset + l2_index * sizeof(uint64_t),
1001 l2_table + l2_index, m->nb_clusters * sizeof(uint64_t)) !=
1002 m->nb_clusters * sizeof(uint64_t))
1005 for (i = 0; i < j; i++)
1006 free_any_clusters(bs, old_cluster[i], 1);
1010 qemu_free(old_cluster);
1015 * alloc_cluster_offset
1017 * For a given offset of the disk image, return the cluster offset in the qcow2 file.
1020 * If the offset is not found, allocate a new cluster.
1022 * Return the cluster offset if successful,
1023 * Return 0, otherwise.
1027 static uint64_t alloc_cluster_offset(BlockDriverState *bs,
1029 int n_start, int n_end,
1030 int *num, QCowL2Meta *m)
1032 BDRVQcowState *s = bs->opaque;
1034 uint64_t l2_offset, *l2_table, cluster_offset;
1035 int nb_clusters, i = 0;
1037 ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
1041 nb_clusters = size_to_clusters(s, n_end << 9);
1043 nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
1045 cluster_offset = be64_to_cpu(l2_table[l2_index]);
1047 /* We keep all QCOW_OFLAG_COPIED clusters */
1049 if (cluster_offset & QCOW_OFLAG_COPIED) {
1050 nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
1051 &l2_table[l2_index], 0, 0);
1053 cluster_offset &= ~QCOW_OFLAG_COPIED;
1059 /* for the moment, multiple compressed clusters are not managed */
1061 if (cluster_offset & QCOW_OFLAG_COMPRESSED)
1064 /* how many available clusters ? */
1066 while (i < nb_clusters) {
1067 i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
1068 &l2_table[l2_index], i, 0);
1070 if(be64_to_cpu(l2_table[l2_index + i]))
1073 i += count_contiguous_free_clusters(nb_clusters - i,
1074 &l2_table[l2_index + i]);
1076 cluster_offset = be64_to_cpu(l2_table[l2_index + i]);
1078 if ((cluster_offset & QCOW_OFLAG_COPIED) ||
1079 (cluster_offset & QCOW_OFLAG_COMPRESSED))
1084 /* allocate a new cluster */
1086 cluster_offset = alloc_clusters(bs, nb_clusters * s->cluster_size);
1088 /* save info needed for metadata update */
1090 m->n_start = n_start;
1091 m->nb_clusters = nb_clusters;
1094 m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end);
1096 *num = m->nb_available - n_start;
1098 return cluster_offset;
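/* Note on ordering in the writers below (qcow_write() and the AIO write path):
 * the caller first allocates clusters here, then writes the guest data into
 * them, and only afterwards calls alloc_cluster_link_l2() to publish the new
 * L2 entries.  Presumably this is so that an interrupted write leaves the
 * previous mapping, and therefore the old data, intact. */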
1101 static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num,
1102 int nb_sectors, int *pnum)
1104 uint64_t cluster_offset;
1107 cluster_offset = get_cluster_offset(bs, sector_num << 9, pnum);
1109 return (cluster_offset != 0);
1112 static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
1113 const uint8_t *buf, int buf_size)
1115 z_stream strm1, *strm = &strm1;
1118 memset(strm, 0, sizeof(*strm));
1120 strm->next_in = (uint8_t *)buf;
1121 strm->avail_in = buf_size;
1122 strm->next_out = out_buf;
1123 strm->avail_out = out_buf_size;
1125 ret = inflateInit2(strm, -12);
1128 ret = inflate(strm, Z_FINISH);
1129 out_len = strm->next_out - out_buf;
1130 if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
1131 out_len != out_buf_size) {
1139 static int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset)
1141 int ret, csize, nb_csectors, sector_offset;
1144 coffset = cluster_offset & s->cluster_offset_mask;
1145 if (s->cluster_cache_offset != coffset) {
1146 nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
1147 sector_offset = coffset & 511;
1148 csize = nb_csectors * 512 - sector_offset;
1149 ret = bdrv_read(s->hd, coffset >> 9, s->cluster_data, nb_csectors);
1153 if (decompress_buffer(s->cluster_cache, s->cluster_size,
1154 s->cluster_data + sector_offset, csize) < 0) {
1157 s->cluster_cache_offset = coffset;
1162 /* handle reading after the end of the backing file */
1163 static int backing_read1(BlockDriverState *bs,
1164 int64_t sector_num, uint8_t *buf, int nb_sectors)
1167 if ((sector_num + nb_sectors) <= bs->total_sectors)
1169 if (sector_num >= bs->total_sectors)
1172 n1 = bs->total_sectors - sector_num;
1173 memset(buf + n1 * 512, 0, 512 * (nb_sectors - n1));
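/* Example: with a 100-sector backing file, a request for sectors 90..109
 * yields n1 == 10; the caller reads the first 10 sectors from the backing
 * file and the remaining 10 are zero-filled here. */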
1177 static int qcow_read(BlockDriverState *bs, int64_t sector_num,
1178 uint8_t *buf, int nb_sectors)
1180 BDRVQcowState *s = bs->opaque;
1181 int ret, index_in_cluster, n, n1;
1182 uint64_t cluster_offset;
1184 while (nb_sectors > 0) {
1186 cluster_offset = get_cluster_offset(bs, sector_num << 9, &n);
1187 index_in_cluster = sector_num & (s->cluster_sectors - 1);
1188 if (!cluster_offset) {
1189 if (bs->backing_hd) {
1190 /* read from the base image */
1191 n1 = backing_read1(bs->backing_hd, sector_num, buf, n);
1193 ret = bdrv_read(bs->backing_hd, sector_num, buf, n1);
1198 memset(buf, 0, 512 * n);
1200 } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
1201 if (decompress_cluster(s, cluster_offset) < 0)
1203 memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n);
1205 ret = bdrv_pread(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512);
1208 if (s->crypt_method) {
1209 encrypt_sectors(s, sector_num, buf, buf, n, 0,
1210 &s->aes_decrypt_key);
1220 static int qcow_write(BlockDriverState *bs, int64_t sector_num,
1221 const uint8_t *buf, int nb_sectors)
1223 BDRVQcowState *s = bs->opaque;
1224 int ret, index_in_cluster, n;
1225 uint64_t cluster_offset;
1229 while (nb_sectors > 0) {
1230 index_in_cluster = sector_num & (s->cluster_sectors - 1);
1231 n_end = index_in_cluster + nb_sectors;
1232 if (s->crypt_method &&
1233 n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors)
1234 n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
1235 cluster_offset = alloc_cluster_offset(bs, sector_num << 9,
1237 n_end, &n, &l2meta);
1238 if (!cluster_offset)
1240 if (s->crypt_method) {
1241 encrypt_sectors(s, sector_num, s->cluster_data, buf, n, 1,
1242 &s->aes_encrypt_key);
1243 ret = bdrv_pwrite(s->hd, cluster_offset + index_in_cluster * 512,
1244 s->cluster_data, n * 512);
1246 ret = bdrv_pwrite(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512);
1248 if (ret != n * 512 || alloc_cluster_link_l2(bs, cluster_offset, &l2meta) < 0) {
1249 free_any_clusters(bs, cluster_offset, l2meta.nb_clusters);
1256 s->cluster_cache_offset = -1; /* disable compressed cache */
1260 typedef struct QCowAIOCB {
1261 BlockDriverAIOCB common;
1266 uint64_t cluster_offset;
1267 uint8_t *cluster_data;
1268 BlockDriverAIOCB *hd_aiocb;
1273 static void qcow_aio_read_cb(void *opaque, int ret);
1274 static void qcow_aio_read_bh(void *opaque)
1276 QCowAIOCB *acb = opaque;
1277 qemu_bh_delete(acb->bh);
1279 qcow_aio_read_cb(opaque, 0);
1282 static int qcow_schedule_bh(QEMUBHFunc *cb, QCowAIOCB *acb)
1287 acb->bh = qemu_bh_new(cb, acb);
1291 qemu_bh_schedule(acb->bh);
1296 static void qcow_aio_read_cb(void *opaque, int ret)
1298 QCowAIOCB *acb = opaque;
1299 BlockDriverState *bs = acb->common.bs;
1300 BDRVQcowState *s = bs->opaque;
1301 int index_in_cluster, n1;
1303 acb->hd_aiocb = NULL;
1306 acb->common.cb(acb->common.opaque, ret);
1307 qemu_aio_release(acb);
1311 /* post process the read buffer */
1312 if (!acb->cluster_offset) {
1314 } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
1317 if (s->crypt_method) {
1318 encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
1320 &s->aes_decrypt_key);
1324 acb->nb_sectors -= acb->n;
1325 acb->sector_num += acb->n;
1326 acb->buf += acb->n * 512;
1328 if (acb->nb_sectors == 0) {
1329 /* request completed */
1330 acb->common.cb(acb->common.opaque, 0);
1331 qemu_aio_release(acb);
1335 /* prepare next AIO request */
1336 acb->n = acb->nb_sectors;
1337 acb->cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, &acb->n);
1338 index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
1340 if (!acb->cluster_offset) {
1341 if (bs->backing_hd) {
1342 /* read from the base image */
1343 n1 = backing_read1(bs->backing_hd, acb->sector_num,
1346 acb->hd_aiocb = bdrv_aio_read(bs->backing_hd, acb->sector_num,
1347 acb->buf, acb->n, qcow_aio_read_cb, acb);
1348 if (acb->hd_aiocb == NULL)
1351 ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
1356 /* Note: in this case, no need to wait */
1357 memset(acb->buf, 0, 512 * acb->n);
1358 ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
1362 } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
1363 /* add AIO support for compressed blocks ? */
1364 if (decompress_cluster(s, acb->cluster_offset) < 0)
1367 s->cluster_cache + index_in_cluster * 512, 512 * acb->n);
1368 ret = qcow_schedule_bh(qcow_aio_read_bh, acb);
1372 if ((acb->cluster_offset & 511) != 0) {
1376 acb->hd_aiocb = bdrv_aio_read(s->hd,
1377 (acb->cluster_offset >> 9) + index_in_cluster,
1378 acb->buf, acb->n, qcow_aio_read_cb, acb);
1379 if (acb->hd_aiocb == NULL)
1384 static QCowAIOCB *qcow_aio_setup(BlockDriverState *bs,
1385 int64_t sector_num, uint8_t *buf, int nb_sectors,
1386 BlockDriverCompletionFunc *cb, void *opaque)
1390 acb = qemu_aio_get(bs, cb, opaque);
1393 acb->hd_aiocb = NULL;
1394 acb->sector_num = sector_num;
1396 acb->nb_sectors = nb_sectors;
1398 acb->cluster_offset = 0;
1399 acb->l2meta.nb_clusters = 0;
1403 static BlockDriverAIOCB *qcow_aio_read(BlockDriverState *bs,
1404 int64_t sector_num, uint8_t *buf, int nb_sectors,
1405 BlockDriverCompletionFunc *cb, void *opaque)
1409 acb = qcow_aio_setup(bs, sector_num, buf, nb_sectors, cb, opaque);
1413 qcow_aio_read_cb(acb, 0);
1414 return &acb->common;
1417 static void qcow_aio_write_cb(void *opaque, int ret)
1419 QCowAIOCB *acb = opaque;
1420 BlockDriverState *bs = acb->common.bs;
1421 BDRVQcowState *s = bs->opaque;
1422 int index_in_cluster;
1423 const uint8_t *src_buf;
1426 acb->hd_aiocb = NULL;
1430 acb->common.cb(acb->common.opaque, ret);
1431 qemu_aio_release(acb);
1435 if (alloc_cluster_link_l2(bs, acb->cluster_offset, &acb->l2meta) < 0) {
1436 free_any_clusters(bs, acb->cluster_offset, acb->l2meta.nb_clusters);
1440 acb->nb_sectors -= acb->n;
1441 acb->sector_num += acb->n;
1442 acb->buf += acb->n * 512;
1444 if (acb->nb_sectors == 0) {
1445 /* request completed */
1446 acb->common.cb(acb->common.opaque, 0);
1447 qemu_aio_release(acb);
1451 index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
1452 n_end = index_in_cluster + acb->nb_sectors;
1453 if (s->crypt_method &&
1454 n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors)
1455 n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
1457 acb->cluster_offset = alloc_cluster_offset(bs, acb->sector_num << 9,
1459 n_end, &acb->n, &acb->l2meta);
1460 if (!acb->cluster_offset || (acb->cluster_offset & 511) != 0) {
1464 if (s->crypt_method) {
1465 if (!acb->cluster_data) {
1466 acb->cluster_data = qemu_mallocz(QCOW_MAX_CRYPT_CLUSTERS *
1469 encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf,
1470 acb->n, 1, &s->aes_encrypt_key);
1471 src_buf = acb->cluster_data;
1475 acb->hd_aiocb = bdrv_aio_write(s->hd,
1476 (acb->cluster_offset >> 9) + index_in_cluster,
1478 qcow_aio_write_cb, acb);
1479 if (acb->hd_aiocb == NULL)
1483 static BlockDriverAIOCB *qcow_aio_write(BlockDriverState *bs,
1484 int64_t sector_num, const uint8_t *buf, int nb_sectors,
1485 BlockDriverCompletionFunc *cb, void *opaque)
1487 BDRVQcowState *s = bs->opaque;
1490 s->cluster_cache_offset = -1; /* disable compressed cache */
1492 acb = qcow_aio_setup(bs, sector_num, (uint8_t*)buf, nb_sectors, cb, opaque);
1496 qcow_aio_write_cb(acb, 0);
1497 return &acb->common;
1500 static void qcow_aio_cancel(BlockDriverAIOCB *blockacb)
1502 QCowAIOCB *acb = (QCowAIOCB *)blockacb;
1504 bdrv_aio_cancel(acb->hd_aiocb);
1505 qemu_aio_release(acb);
1508 static void qcow_close(BlockDriverState *bs)
1510 BDRVQcowState *s = bs->opaque;
1511 qemu_free(s->l1_table);
1512 qemu_free(s->l2_cache);
1513 qemu_free(s->cluster_cache);
1514 qemu_free(s->cluster_data);
1519 /* XXX: use std qcow open function ? */
1520 typedef struct QCowCreateState {
1523 uint16_t *refcount_block;
1524 uint64_t *refcount_table;
1525 int64_t l1_table_offset;
1526 int64_t refcount_table_offset;
1527 int64_t refcount_block_offset;
1530 static void create_refcount_update(QCowCreateState *s,
1531 int64_t offset, int64_t size)
1534 int64_t start, last, cluster_offset;
1537 start = offset & ~(s->cluster_size - 1);
1538 last = (offset + size - 1) & ~(s->cluster_size - 1);
1539 for(cluster_offset = start; cluster_offset <= last;
1540 cluster_offset += s->cluster_size) {
1541 p = &s->refcount_block[cluster_offset >> s->cluster_bits];
1542 refcount = be16_to_cpu(*p);
1544 *p = cpu_to_be16(refcount);
1548 static int qcow_create2(const char *filename, int64_t total_size,
1549 const char *backing_file, const char *backing_format,
1553 int fd, header_size, backing_filename_len, l1_size, i, shift, l2_bits;
1554 int backing_format_len = 0;
1556 uint64_t tmp, offset;
1557 QCowCreateState s1, *s = &s1;
1558 QCowExtension ext_bf = {0, 0};
1561 memset(s, 0, sizeof(*s));
1563 fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0644);
1566 memset(&header, 0, sizeof(header));
1567 header.magic = cpu_to_be32(QCOW_MAGIC);
1568 header.version = cpu_to_be32(QCOW_VERSION);
1569 header.size = cpu_to_be64(total_size * 512);
1570 header_size = sizeof(header);
1571 backing_filename_len = 0;
1573 if (backing_format) {
1574 ext_bf.magic = QCOW_EXT_MAGIC_BACKING_FORMAT;
1575 backing_format_len = strlen(backing_format);
1576 ext_bf.len = (backing_format_len + 7) & ~7;
1577 header_size += ((sizeof(ext_bf) + ext_bf.len + 7) & ~7);
1579 header.backing_file_offset = cpu_to_be64(header_size);
1580 backing_filename_len = strlen(backing_file);
1581 header.backing_file_size = cpu_to_be32(backing_filename_len);
1582 header_size += backing_filename_len;
1584 s->cluster_bits = 12; /* 4 KB clusters */
1585 s->cluster_size = 1 << s->cluster_bits;
1586 header.cluster_bits = cpu_to_be32(s->cluster_bits);
1587 header_size = (header_size + 7) & ~7;
1588 if (flags & BLOCK_FLAG_ENCRYPT) {
1589 header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
1591 header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
1593 l2_bits = s->cluster_bits - 3;
1594 shift = s->cluster_bits + l2_bits;
1595 l1_size = (((total_size * 512) + (1LL << shift) - 1) >> shift);
1596 offset = align_offset(header_size, s->cluster_size);
1597 s->l1_table_offset = offset;
1598 header.l1_table_offset = cpu_to_be64(s->l1_table_offset);
1599 header.l1_size = cpu_to_be32(l1_size);
1600 offset += align_offset(l1_size * sizeof(uint64_t), s->cluster_size);
1602 s->refcount_table = qemu_mallocz(s->cluster_size);
1603 s->refcount_block = qemu_mallocz(s->cluster_size);
1605 s->refcount_table_offset = offset;
1606 header.refcount_table_offset = cpu_to_be64(offset);
1607 header.refcount_table_clusters = cpu_to_be32(1);
1608 offset += s->cluster_size;
1610 s->refcount_table[0] = cpu_to_be64(offset);
1611 s->refcount_block_offset = offset;
1612 offset += s->cluster_size;
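/* At this point the initial file layout is: header, header extensions and the
 * backing file name, then (cluster aligned) the empty L1 table, one refcount
 * table cluster whose first entry points at the single refcount block placed
 * right after it.  The create_refcount_update() calls below mark all of these
 * metadata clusters as allocated. */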
1614 /* update refcounts */
1615 create_refcount_update(s, 0, header_size);
1616 create_refcount_update(s, s->l1_table_offset, l1_size * sizeof(uint64_t));
1617 create_refcount_update(s, s->refcount_table_offset, s->cluster_size);
1618 create_refcount_update(s, s->refcount_block_offset, s->cluster_size);
1620 /* write all the data */
1621 write(fd, &header, sizeof(header));
1623 if (backing_format_len) {
1625 int d = ext_bf.len - backing_format_len;
1627 memset(zero, 0, sizeof(zero));
1628 cpu_to_be32s(&ext_bf.magic);
1629 cpu_to_be32s(&ext_bf.len);
1630 write(fd, &ext_bf, sizeof(ext_bf));
1631 write(fd, backing_format, backing_format_len);
1636 write(fd, backing_file, backing_filename_len);
1638 lseek(fd, s->l1_table_offset, SEEK_SET);
1640 for(i = 0;i < l1_size; i++) {
1641 write(fd, &tmp, sizeof(tmp));
1643 lseek(fd, s->refcount_table_offset, SEEK_SET);
1644 write(fd, s->refcount_table, s->cluster_size);
1646 lseek(fd, s->refcount_block_offset, SEEK_SET);
1647 write(fd, s->refcount_block, s->cluster_size);
1649 qemu_free(s->refcount_table);
1650 qemu_free(s->refcount_block);
1655 static int qcow_create(const char *filename, int64_t total_size,
1656 const char *backing_file, int flags)
1658 return qcow_create2(filename, total_size, backing_file, NULL, flags);
1661 static int qcow_make_empty(BlockDriverState *bs)
1664 /* XXX: not correct */
1665 BDRVQcowState *s = bs->opaque;
1666 uint32_t l1_length = s->l1_size * sizeof(uint64_t);
1669 memset(s->l1_table, 0, l1_length);
1670 if (bdrv_pwrite(s->hd, s->l1_table_offset, s->l1_table, l1_length) < 0)
1672 ret = bdrv_truncate(s->hd, s->l1_table_offset + l1_length);
1681 /* XXX: put compressed sectors first, then all the cluster aligned
1682 tables to avoid losing bytes in alignment */
1683 static int qcow_write_compressed(BlockDriverState *bs, int64_t sector_num,
1684 const uint8_t *buf, int nb_sectors)
1686 BDRVQcowState *s = bs->opaque;
1690 uint64_t cluster_offset;
1692 if (nb_sectors == 0) {
1693 /* align end of file to a sector boundary to ease reading with
1694 sector based I/Os */
1695 cluster_offset = bdrv_getlength(s->hd);
1696 cluster_offset = (cluster_offset + 511) & ~511;
1697 bdrv_truncate(s->hd, cluster_offset);
1701 if (nb_sectors != s->cluster_sectors)
1704 out_buf = qemu_malloc(s->cluster_size + (s->cluster_size / 1000) + 128);
1706 /* best compression, small window, no zlib header */
1707 memset(&strm, 0, sizeof(strm));
1708 ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION,
1710 9, Z_DEFAULT_STRATEGY);
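/* Compression uses a raw deflate stream without a zlib header, which is why
 * decompress_buffer() above initialises zlib with inflateInit2(strm, -12):
 * negative window bits select raw deflate. */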
1716 strm.avail_in = s->cluster_size;
1717 strm.next_in = (uint8_t *)buf;
1718 strm.avail_out = s->cluster_size;
1719 strm.next_out = out_buf;
1721 ret = deflate(&strm, Z_FINISH);
1722 if (ret != Z_STREAM_END && ret != Z_OK) {
1727 out_len = strm.next_out - out_buf;
1731 if (ret != Z_STREAM_END || out_len >= s->cluster_size) {
1732 /* could not compress: write normal cluster */
1733 qcow_write(bs, sector_num, buf, s->cluster_sectors);
1735 cluster_offset = alloc_compressed_cluster_offset(bs, sector_num << 9,
1737 if (!cluster_offset)
1739 cluster_offset &= s->cluster_offset_mask;
1740 if (bdrv_pwrite(s->hd, cluster_offset, out_buf, out_len) != out_len) {
1750 static void qcow_flush(BlockDriverState *bs)
1752 BDRVQcowState *s = bs->opaque;
1756 static int qcow_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
1758 BDRVQcowState *s = bs->opaque;
1759 bdi->cluster_size = s->cluster_size;
1760 bdi->vm_state_offset = (int64_t)s->l1_vm_state_index <<
1761 (s->cluster_bits + s->l2_bits);
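/* The VM state saved with an internal snapshot lives past the end of the
 * guest-visible disk: l1_vm_state_index (computed in qcow_open()) is the first
 * L1 entry not needed for the disk itself, so vm_state_offset is the first
 * guest offset reserved for that extra data. */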
1765 /*********************************************************/
1766 /* snapshot support */
1768 /* update the refcounts of snapshots and the copied flag */
1769 static int update_snapshot_refcount(BlockDriverState *bs,
1770 int64_t l1_table_offset,
1774 BDRVQcowState *s = bs->opaque;
1775 uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2, l1_allocated;
1776 int64_t old_offset, old_l2_offset;
1777 int l2_size, i, j, l1_modified, l2_modified, nb_csectors, refcount;
1783 l1_size2 = l1_size * sizeof(uint64_t);
1785 if (l1_table_offset != s->l1_table_offset) {
1786 l1_table = qemu_malloc(l1_size2);
1788 if (bdrv_pread(s->hd, l1_table_offset,
1789 l1_table, l1_size2) != l1_size2)
1791 for(i = 0;i < l1_size; i++)
1792 be64_to_cpus(&l1_table[i]);
1794 assert(l1_size == s->l1_size);
1795 l1_table = s->l1_table;
1799 l2_size = s->l2_size * sizeof(uint64_t);
1800 l2_table = qemu_malloc(l2_size);
1802 for(i = 0; i < l1_size; i++) {
1803 l2_offset = l1_table[i];
1805 old_l2_offset = l2_offset;
1806 l2_offset &= ~QCOW_OFLAG_COPIED;
1808 if (bdrv_pread(s->hd, l2_offset, l2_table, l2_size) != l2_size)
1810 for(j = 0; j < s->l2_size; j++) {
1811 offset = be64_to_cpu(l2_table[j]);
1813 old_offset = offset;
1814 offset &= ~QCOW_OFLAG_COPIED;
1815 if (offset & QCOW_OFLAG_COMPRESSED) {
1816 nb_csectors = ((offset >> s->csize_shift) &
1819 update_refcount(bs, (offset & s->cluster_offset_mask) & ~511,
1820 nb_csectors * 512, addend);
1821 /* compressed clusters are never modified */
1825 refcount = update_cluster_refcount(bs, offset >> s->cluster_bits, addend);
1827 refcount = get_refcount(bs, offset >> s->cluster_bits);
1831 if (refcount == 1) {
1832 offset |= QCOW_OFLAG_COPIED;
1834 if (offset != old_offset) {
1835 l2_table[j] = cpu_to_be64(offset);
1841 if (bdrv_pwrite(s->hd,
1842 l2_offset, l2_table, l2_size) != l2_size)
1847 refcount = update_cluster_refcount(bs, l2_offset >> s->cluster_bits, addend);
1849 refcount = get_refcount(bs, l2_offset >> s->cluster_bits);
1851 if (refcount == 1) {
1852 l2_offset |= QCOW_OFLAG_COPIED;
1854 if (l2_offset != old_l2_offset) {
1855 l1_table[i] = l2_offset;
1861 for(i = 0; i < l1_size; i++)
1862 cpu_to_be64s(&l1_table[i]);
1863 if (bdrv_pwrite(s->hd, l1_table_offset, l1_table,
1864 l1_size2) != l1_size2)
1866 for(i = 0; i < l1_size; i++)
1867 be64_to_cpus(&l1_table[i]);
1870 qemu_free(l1_table);
1871 qemu_free(l2_table);
1875 qemu_free(l1_table);
1876 qemu_free(l2_table);
1880 static void qcow_free_snapshots(BlockDriverState *bs)
1882 BDRVQcowState *s = bs->opaque;
1885 for(i = 0; i < s->nb_snapshots; i++) {
1886 qemu_free(s->snapshots[i].name);
1887 qemu_free(s->snapshots[i].id_str);
1889 qemu_free(s->snapshots);
1890 s->snapshots = NULL;
1891 s->nb_snapshots = 0;
1894 static int qcow_read_snapshots(BlockDriverState *bs)
1896 BDRVQcowState *s = bs->opaque;
1897 QCowSnapshotHeader h;
1899 int i, id_str_size, name_size;
1901 uint32_t extra_data_size;
1903 if (!s->nb_snapshots) {
1904 s->snapshots = NULL;
1905 s->snapshots_size = 0;
1909 offset = s->snapshots_offset;
1910 s->snapshots = qemu_mallocz(s->nb_snapshots * sizeof(QCowSnapshot));
1911 for(i = 0; i < s->nb_snapshots; i++) {
1912 offset = align_offset(offset, 8);
1913 if (bdrv_pread(s->hd, offset, &h, sizeof(h)) != sizeof(h))
1915 offset += sizeof(h);
1916 sn = s->snapshots + i;
1917 sn->l1_table_offset = be64_to_cpu(h.l1_table_offset);
1918 sn->l1_size = be32_to_cpu(h.l1_size);
1919 sn->vm_state_size = be32_to_cpu(h.vm_state_size);
1920 sn->date_sec = be32_to_cpu(h.date_sec);
1921 sn->date_nsec = be32_to_cpu(h.date_nsec);
1922 sn->vm_clock_nsec = be64_to_cpu(h.vm_clock_nsec);
1923 extra_data_size = be32_to_cpu(h.extra_data_size);
1925 id_str_size = be16_to_cpu(h.id_str_size);
1926 name_size = be16_to_cpu(h.name_size);
1928 offset += extra_data_size;
1930 sn->id_str = qemu_malloc(id_str_size + 1);
1931 if (bdrv_pread(s->hd, offset, sn->id_str, id_str_size) != id_str_size)
1933 offset += id_str_size;
1934 sn->id_str[id_str_size] = '\0';
1936 sn->name = qemu_malloc(name_size + 1);
1937 if (bdrv_pread(s->hd, offset, sn->name, name_size) != name_size)
1939 offset += name_size;
1940 sn->name[name_size] = '\0';
1942 s->snapshots_size = offset - s->snapshots_offset;
1945 qcow_free_snapshots(bs);
1949 /* write a new list of snapshots at the end of the file */
1950 static int qcow_write_snapshots(BlockDriverState *bs)
1952 BDRVQcowState *s = bs->opaque;
1954 QCowSnapshotHeader h;
1955 int i, name_size, id_str_size, snapshots_size;
1958 int64_t offset, snapshots_offset;
1960 /* compute the size of the snapshots */
1962 for(i = 0; i < s->nb_snapshots; i++) {
1963 sn = s->snapshots + i;
1964 offset = align_offset(offset, 8);
1965 offset += sizeof(h);
1966 offset += strlen(sn->id_str);
1967 offset += strlen(sn->name);
1969 snapshots_size = offset;
1971 snapshots_offset = alloc_clusters(bs, snapshots_size);
1972 offset = snapshots_offset;
1974 for(i = 0; i < s->nb_snapshots; i++) {
1975 sn = s->snapshots + i;
1976 memset(&h, 0, sizeof(h));
1977 h.l1_table_offset = cpu_to_be64(sn->l1_table_offset);
1978 h.l1_size = cpu_to_be32(sn->l1_size);
1979 h.vm_state_size = cpu_to_be32(sn->vm_state_size);
1980 h.date_sec = cpu_to_be32(sn->date_sec);
1981 h.date_nsec = cpu_to_be32(sn->date_nsec);
1982 h.vm_clock_nsec = cpu_to_be64(sn->vm_clock_nsec);
1984 id_str_size = strlen(sn->id_str);
1985 name_size = strlen(sn->name);
1986 h.id_str_size = cpu_to_be16(id_str_size);
1987 h.name_size = cpu_to_be16(name_size);
1988 offset = align_offset(offset, 8);
1989 if (bdrv_pwrite(s->hd, offset, &h, sizeof(h)) != sizeof(h))
1991 offset += sizeof(h);
1992 if (bdrv_pwrite(s->hd, offset, sn->id_str, id_str_size) != id_str_size)
1994 offset += id_str_size;
1995 if (bdrv_pwrite(s->hd, offset, sn->name, name_size) != name_size)
1997 offset += name_size;
2000 /* update the various header fields */
2001 data64 = cpu_to_be64(snapshots_offset);
2002 if (bdrv_pwrite(s->hd, offsetof(QCowHeader, snapshots_offset),
2003 &data64, sizeof(data64)) != sizeof(data64))
2005 data32 = cpu_to_be32(s->nb_snapshots);
2006 if (bdrv_pwrite(s->hd, offsetof(QCowHeader, nb_snapshots),
2007 &data32, sizeof(data32)) != sizeof(data32))
2010 /* free the old snapshot table */
2011 free_clusters(bs, s->snapshots_offset, s->snapshots_size);
2012 s->snapshots_offset = snapshots_offset;
2013 s->snapshots_size = snapshots_size;
2019 static void find_new_snapshot_id(BlockDriverState *bs,
2020 char *id_str, int id_str_size)
2022 BDRVQcowState *s = bs->opaque;
2024 int i, id, id_max = 0;
2026 for(i = 0; i < s->nb_snapshots; i++) {
2027 sn = s->snapshots + i;
2028 id = strtoul(sn->id_str, NULL, 10);
2032 snprintf(id_str, id_str_size, "%d", id_max + 1);
2035 static int find_snapshot_by_id(BlockDriverState *bs, const char *id_str)
2037 BDRVQcowState *s = bs->opaque;
2040 for(i = 0; i < s->nb_snapshots; i++) {
2041 if (!strcmp(s->snapshots[i].id_str, id_str))
2047 static int find_snapshot_by_id_or_name(BlockDriverState *bs, const char *name)
2049 BDRVQcowState *s = bs->opaque;
2052 ret = find_snapshot_by_id(bs, name);
2055 for(i = 0; i < s->nb_snapshots; i++) {
2056 if (!strcmp(s->snapshots[i].name, name))
2062 /* if no id is provided, a new one is constructed */
2063 static int qcow_snapshot_create(BlockDriverState *bs,
2064 QEMUSnapshotInfo *sn_info)
2066 BDRVQcowState *s = bs->opaque;
2067 QCowSnapshot *snapshots1, sn1, *sn = &sn1;
2069 uint64_t *l1_table = NULL;
2071 memset(sn, 0, sizeof(*sn));
2073 if (sn_info->id_str[0] == '\0') {
2074 /* compute a new id */
2075 find_new_snapshot_id(bs, sn_info->id_str, sizeof(sn_info->id_str));
2078 /* check that the ID is unique */
2079 if (find_snapshot_by_id(bs, sn_info->id_str) >= 0)
2082 sn->id_str = qemu_strdup(sn_info->id_str);
2085 sn->name = qemu_strdup(sn_info->name);
2088 sn->vm_state_size = sn_info->vm_state_size;
2089 sn->date_sec = sn_info->date_sec;
2090 sn->date_nsec = sn_info->date_nsec;
2091 sn->vm_clock_nsec = sn_info->vm_clock_nsec;
2093 ret = update_snapshot_refcount(bs, s->l1_table_offset, s->l1_size, 1);
2097 /* create the L1 table of the snapshot */
2098 sn->l1_table_offset = alloc_clusters(bs, s->l1_size * sizeof(uint64_t));
2099 sn->l1_size = s->l1_size;
2101 l1_table = qemu_malloc(s->l1_size * sizeof(uint64_t));
2102 for(i = 0; i < s->l1_size; i++) {
2103 l1_table[i] = cpu_to_be64(s->l1_table[i]);
2105 if (bdrv_pwrite(s->hd, sn->l1_table_offset,
2106 l1_table, s->l1_size * sizeof(uint64_t)) !=
2107 (s->l1_size * sizeof(uint64_t)))
2109 qemu_free(l1_table);
2112 snapshots1 = qemu_malloc((s->nb_snapshots + 1) * sizeof(QCowSnapshot));
2114 memcpy(snapshots1, s->snapshots, s->nb_snapshots * sizeof(QCowSnapshot));
2115 qemu_free(s->snapshots);
2117 s->snapshots = snapshots1;
2118 s->snapshots[s->nb_snapshots++] = *sn;
2120 if (qcow_write_snapshots(bs) < 0)
2123 check_refcounts(bs);
2127 qemu_free(sn->name);
2128 qemu_free(l1_table);
2132 /* copy the snapshot 'snapshot_id' into the current disk image */
2133 static int qcow_snapshot_goto(BlockDriverState *bs,
2134 const char *snapshot_id)
2136 BDRVQcowState *s = bs->opaque;
2138 int i, snapshot_index, l1_size2;
2140 snapshot_index = find_snapshot_by_id_or_name(bs, snapshot_id);
2141 if (snapshot_index < 0)
2143 sn = &s->snapshots[snapshot_index];
2145 if (update_snapshot_refcount(bs, s->l1_table_offset, s->l1_size, -1) < 0)
2148 if (grow_l1_table(bs, sn->l1_size) < 0)
2151 s->l1_size = sn->l1_size;
2152 l1_size2 = s->l1_size * sizeof(uint64_t);
2153 /* copy the snapshot l1 table to the current l1 table */
2154 if (bdrv_pread(s->hd, sn->l1_table_offset,
2155 s->l1_table, l1_size2) != l1_size2)
2157 if (bdrv_pwrite(s->hd, s->l1_table_offset,
2158 s->l1_table, l1_size2) != l1_size2)
2160 for(i = 0;i < s->l1_size; i++) {
2161 be64_to_cpus(&s->l1_table[i]);
2164 if (update_snapshot_refcount(bs, s->l1_table_offset, s->l1_size, 1) < 0)
2168 check_refcounts(bs);
2175 static int qcow_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)
2177 BDRVQcowState *s = bs->opaque;
2179 int snapshot_index, ret;
2181 snapshot_index = find_snapshot_by_id_or_name(bs, snapshot_id);
2182 if (snapshot_index < 0)
2184 sn = &s->snapshots[snapshot_index];
2186 ret = update_snapshot_refcount(bs, sn->l1_table_offset, sn->l1_size, -1);
2189 /* must update the copied flag on the current cluster offsets */
2190 ret = update_snapshot_refcount(bs, s->l1_table_offset, s->l1_size, 0);
2193 free_clusters(bs, sn->l1_table_offset, sn->l1_size * sizeof(uint64_t));
2195 qemu_free(sn->id_str);
2196 qemu_free(sn->name);
2197 memmove(sn, sn + 1, (s->nb_snapshots - snapshot_index - 1) * sizeof(*sn));
2199 ret = qcow_write_snapshots(bs);
2201 /* XXX: restore snapshot if error ? */
2205 check_refcounts(bs);
2210 static int qcow_snapshot_list(BlockDriverState *bs,
2211 QEMUSnapshotInfo **psn_tab)
2213 BDRVQcowState *s = bs->opaque;
2214 QEMUSnapshotInfo *sn_tab, *sn_info;
2218 sn_tab = qemu_mallocz(s->nb_snapshots * sizeof(QEMUSnapshotInfo));
2219 for(i = 0; i < s->nb_snapshots; i++) {
2220 sn_info = sn_tab + i;
2221 sn = s->snapshots + i;
2222 pstrcpy(sn_info->id_str, sizeof(sn_info->id_str),
2224 pstrcpy(sn_info->name, sizeof(sn_info->name),
2226 sn_info->vm_state_size = sn->vm_state_size;
2227 sn_info->date_sec = sn->date_sec;
2228 sn_info->date_nsec = sn->date_nsec;
2229 sn_info->vm_clock_nsec = sn->vm_clock_nsec;
2232 return s->nb_snapshots;
2235 /*********************************************************/
2236 /* refcount handling */
2238 static int refcount_init(BlockDriverState *bs)
2240 BDRVQcowState *s = bs->opaque;
2241 int ret, refcount_table_size2, i;
2243 s->refcount_block_cache = qemu_malloc(s->cluster_size);
2244 refcount_table_size2 = s->refcount_table_size * sizeof(uint64_t);
2245 s->refcount_table = qemu_malloc(refcount_table_size2);
2246 if (s->refcount_table_size > 0) {
2247 ret = bdrv_pread(s->hd, s->refcount_table_offset,
2248 s->refcount_table, refcount_table_size2);
2249 if (ret != refcount_table_size2)
2251 for(i = 0; i < s->refcount_table_size; i++)
2252 be64_to_cpus(&s->refcount_table[i]);
2259 static void refcount_close(BlockDriverState *bs)
2261 BDRVQcowState *s = bs->opaque;
2262 qemu_free(s->refcount_block_cache);
2263 qemu_free(s->refcount_table);
2267 static int load_refcount_block(BlockDriverState *bs,
2268 int64_t refcount_block_offset)
2270 BDRVQcowState *s = bs->opaque;
2272 ret = bdrv_pread(s->hd, refcount_block_offset, s->refcount_block_cache,
2274 if (ret != s->cluster_size)
2276 s->refcount_block_cache_offset = refcount_block_offset;
2280 static int get_refcount(BlockDriverState *bs, int64_t cluster_index)
2282 BDRVQcowState *s = bs->opaque;
2283 int refcount_table_index, block_index;
2284 int64_t refcount_block_offset;
2286 refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
2287 if (refcount_table_index >= s->refcount_table_size)
2289 refcount_block_offset = s->refcount_table[refcount_table_index];
2290 if (!refcount_block_offset)
2292 if (refcount_block_offset != s->refcount_block_cache_offset) {
2293 /* better than nothing: return allocated if read error */
2294 if (load_refcount_block(bs, refcount_block_offset) < 0)
2297 block_index = cluster_index &
2298 ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
2299 return be16_to_cpu(s->refcount_block_cache[block_index]);
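/* Illustrative sketch, not used by the driver: the index split performed by
 * get_refcount() above and update_cluster_refcount() below.  The helper name
 * is invented for the example. */
static inline void example_refcount_split(BDRVQcowState *s, int64_t cluster_index,
                                          int *refcount_table_index,
                                          int *block_index)
{
    *refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
    *block_index = cluster_index & ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
}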
2302 /* return < 0 if error */
2303 static int64_t alloc_clusters_noref(BlockDriverState *bs, int64_t size)
2305 BDRVQcowState *s = bs->opaque;
2308 nb_clusters = size_to_clusters(s, size);
2310 for(i = 0; i < nb_clusters; i++) {
2311 int64_t i = s->free_cluster_index++;
2312 if (get_refcount(bs, i) != 0)
2316 printf("alloc_clusters: size=%lld -> %lld\n",
2318 (s->free_cluster_index - nb_clusters) << s->cluster_bits);
2320 return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
2323 static int64_t alloc_clusters(BlockDriverState *bs, int64_t size)
2327 offset = alloc_clusters_noref(bs, size);
2328 update_refcount(bs, offset, size, 1);

/* only used to allocate compressed sectors. We try to allocate
   contiguous sectors. size must be <= cluster_size */
static int64_t alloc_bytes(BlockDriverState *bs, int size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset, cluster_offset;
    int free_in_cluster;

    assert(size > 0 && size <= s->cluster_size);
    if (s->free_byte_offset == 0) {
        s->free_byte_offset = alloc_clusters(bs, s->cluster_size);
    }
 redo:
    free_in_cluster = s->cluster_size -
        (s->free_byte_offset & (s->cluster_size - 1));
    if (size <= free_in_cluster) {
        /* enough space in current cluster */
        offset = s->free_byte_offset;
        s->free_byte_offset += size;
        free_in_cluster -= size;
        if (free_in_cluster == 0)
            s->free_byte_offset = 0;
        if ((offset & (s->cluster_size - 1)) != 0)
            update_cluster_refcount(bs, offset >> s->cluster_bits, 1);
    } else {
        offset = alloc_clusters(bs, s->cluster_size);
        cluster_offset = s->free_byte_offset & ~(s->cluster_size - 1);
        if ((cluster_offset + s->cluster_size) == offset) {
            /* we are lucky: contiguous data */
            offset = s->free_byte_offset;
            update_cluster_refcount(bs, offset >> s->cluster_bits, 1);
            s->free_byte_offset += size;
        } else {
            s->free_byte_offset = offset;
            goto redo;
        }
    }
    return offset;
}
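
/*
 * Example of how alloc_bytes() packs compressed data (hypothetical
 * sizes): two writes of 1500 and 3000 bytes land back to back in the same
 * cluster, and the second one bumps that cluster's refcount because its
 * offset is no longer cluster aligned. When the remaining space is too
 * small, a fresh cluster is allocated; if it happens to follow the
 * current one directly, the allocation may straddle the boundary,
 * otherwise the tail of the old cluster is simply abandoned and the new
 * cluster is used from its start.
 */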

static void free_clusters(BlockDriverState *bs,
                          int64_t offset, int64_t size)
{
    update_refcount(bs, offset, size, -1);
}

static int grow_refcount_table(BlockDriverState *bs, int min_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_table_size, new_table_size2, refcount_table_clusters, i, ret;
    uint64_t *new_table;
    int64_t table_offset;
    uint8_t data[12];
    int old_table_size;
    int64_t old_table_offset;

    if (min_size <= s->refcount_table_size)
        return 0;
    /* compute new table size */
    refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3);
    for(;;) {
        if (refcount_table_clusters == 0) {
            refcount_table_clusters = 1;
        } else {
            refcount_table_clusters = (refcount_table_clusters * 3 + 1) / 2;
        }
        new_table_size = refcount_table_clusters << (s->cluster_bits - 3);
        if (min_size <= new_table_size)
            break;
    }
#ifdef DEBUG_ALLOC2
    printf("grow_refcount_table from %d to %d\n",
           s->refcount_table_size,
           new_table_size);
#endif
    new_table_size2 = new_table_size * sizeof(uint64_t);
    new_table = qemu_mallocz(new_table_size2);
    memcpy(new_table, s->refcount_table,
           s->refcount_table_size * sizeof(uint64_t));
    for(i = 0; i < s->refcount_table_size; i++)
        cpu_to_be64s(&new_table[i]);
    /* Note: we cannot update the refcount now to avoid recursion */
    table_offset = alloc_clusters_noref(bs, new_table_size2);
    ret = bdrv_pwrite(s->hd, table_offset, new_table, new_table_size2);
    if (ret != new_table_size2)
        goto fail;
    for(i = 0; i < s->refcount_table_size; i++)
        be64_to_cpus(&new_table[i]);

    /* update the header with the new table location and size */
    cpu_to_be64w((uint64_t*)data, table_offset);
    cpu_to_be32w((uint32_t*)(data + 8), refcount_table_clusters);
    if (bdrv_pwrite(s->hd, offsetof(QCowHeader, refcount_table_offset),
                    data, sizeof(data)) != sizeof(data))
        goto fail;
    qemu_free(s->refcount_table);
    old_table_offset = s->refcount_table_offset;
    old_table_size = s->refcount_table_size;
    s->refcount_table = new_table;
    s->refcount_table_size = new_table_size;
    s->refcount_table_offset = table_offset;

    update_refcount(bs, table_offset, new_table_size2, 1);
    free_clusters(bs, old_table_offset, old_table_size * sizeof(uint64_t));
    return 0;
 fail:
    free_clusters(bs, table_offset, new_table_size2);
    qemu_free(new_table);
    return -EIO;
}
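
/*
 * Growth arithmetic for grow_refcount_table(), with illustrative numbers
 * (64 KiB clusters, i.e. cluster_bits = 16): the table is sized in whole
 * clusters and grows by roughly 3/2, so refcount_table_clusters goes
 * 1, 2, 3, 5, 8, 12, ...  One table cluster holds 1 << (16 - 3) = 8192
 * refcount block pointers, and each block covers 2 GiB of clusters (see
 * the example near get_refcount()), so even a single-cluster table
 * already addresses 16 TiB of image data.
 */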

/* addend must be 1 or -1 */
/* XXX: cache several refcount block clusters ? */
static int update_cluster_refcount(BlockDriverState *bs,
                                   int64_t cluster_index,
                                   int addend)
{
    BDRVQcowState *s = bs->opaque;
    int64_t offset, refcount_block_offset;
    int ret, refcount_table_index, block_index, refcount;
    uint64_t data64;

    refcount_table_index = cluster_index >> (s->cluster_bits - REFCOUNT_SHIFT);
    if (refcount_table_index >= s->refcount_table_size) {
        if (addend < 0)
            return -EINVAL;
        ret = grow_refcount_table(bs, refcount_table_index + 1);
        if (ret < 0)
            return ret;
    }
    refcount_block_offset = s->refcount_table[refcount_table_index];
    if (!refcount_block_offset) {
        if (addend < 0)
            return -EINVAL;
        /* create a new refcount block */
        /* Note: we cannot update the refcount now to avoid recursion */
        offset = alloc_clusters_noref(bs, s->cluster_size);
        memset(s->refcount_block_cache, 0, s->cluster_size);
        ret = bdrv_pwrite(s->hd, offset, s->refcount_block_cache, s->cluster_size);
        if (ret != s->cluster_size)
            return -EIO;
        s->refcount_table[refcount_table_index] = offset;
        data64 = cpu_to_be64(offset);
        ret = bdrv_pwrite(s->hd, s->refcount_table_offset +
                          refcount_table_index * sizeof(uint64_t),
                          &data64, sizeof(data64));
        if (ret != sizeof(data64))
            return -EIO;

        refcount_block_offset = offset;
        s->refcount_block_cache_offset = offset;
        update_refcount(bs, offset, s->cluster_size, 1);
    } else {
        if (refcount_block_offset != s->refcount_block_cache_offset) {
            if (load_refcount_block(bs, refcount_block_offset) < 0)
                return -EIO;
        }
    }
    /* we can update the count and save it */
    block_index = cluster_index &
        ((1 << (s->cluster_bits - REFCOUNT_SHIFT)) - 1);
    refcount = be16_to_cpu(s->refcount_block_cache[block_index]);
    refcount += addend;
    if (refcount < 0 || refcount > 0xffff)
        return -EINVAL;
    if (refcount == 0 && cluster_index < s->free_cluster_index) {
        s->free_cluster_index = cluster_index;
    }
    s->refcount_block_cache[block_index] = cpu_to_be16(refcount);
    if (bdrv_pwrite(s->hd,
                    refcount_block_offset + (block_index << REFCOUNT_SHIFT),
                    &s->refcount_block_cache[block_index], 2) != 2)
        return -EIO;
    return refcount;
}
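
/*
 * Note on the recursion avoidance in update_cluster_refcount(): a new
 * refcount block (or a grown refcount table) is obtained through
 * alloc_clusters_noref() rather than alloc_clusters(), because the latter
 * would immediately call update_refcount() and hence re-enter this
 * function before the new block is reachable from the table. The refcount
 * of the freshly allocated metadata is only updated at the end, once its
 * table entry has been written to disk.
 */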

static void update_refcount(BlockDriverState *bs,
                            int64_t offset, int64_t length,
                            int addend)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;

#ifdef DEBUG_ALLOC2
    printf("update_refcount: offset=%lld size=%lld addend=%d\n",
           offset, length, addend);
#endif
    if (length <= 0)
        return;
    start = offset & ~(s->cluster_size - 1);
    last = (offset + length - 1) & ~(s->cluster_size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size) {
        update_cluster_refcount(bs, cluster_offset >> s->cluster_bits, addend);
    }
}
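
/*
 * Worked example for update_refcount() (hypothetical offsets, 64 KiB
 * clusters): update_refcount(bs, 0x18000, 0x12000, 1) rounds the range
 * to cluster boundaries, giving start = 0x10000 and last = 0x20000, so
 * clusters 1 and 2 each have their refcount incremented even though the
 * byte range only partially covers them.
 */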

/* Image consistency checks (only compiled in when DEBUG_ALLOC is defined) */
#ifdef DEBUG_ALLOC
static void inc_refcounts(BlockDriverState *bs,
                          uint16_t *refcount_table,
                          int refcount_table_size,
                          int64_t offset, int64_t size)
{
    BDRVQcowState *s = bs->opaque;
    int64_t start, last, cluster_offset;
    int k;

    if (size <= 0)
        return;
    start = offset & ~(s->cluster_size - 1);
    last = (offset + size - 1) & ~(s->cluster_size - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size) {
        k = cluster_offset >> s->cluster_bits;
        if (k < 0 || k >= refcount_table_size) {
            printf("ERROR: invalid cluster offset=0x%llx\n", cluster_offset);
        } else {
            if (++refcount_table[k] == 0) {
                printf("ERROR: overflow cluster offset=0x%llx\n", cluster_offset);
            }
        }
    }
}

static int check_refcounts_l1(BlockDriverState *bs,
                              uint16_t *refcount_table,
                              int refcount_table_size,
                              int64_t l1_table_offset, int l1_size,
                              int check_copied)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l1_table, *l2_table, l2_offset, offset, l1_size2;
    int l2_size, i, j, nb_csectors, refcount;

    l2_table = NULL;
    l1_size2 = l1_size * sizeof(uint64_t);

    inc_refcounts(bs, refcount_table, refcount_table_size,
                  l1_table_offset, l1_size2);

    l1_table = qemu_malloc(l1_size2);
    if (bdrv_pread(s->hd, l1_table_offset,
                   l1_table, l1_size2) != l1_size2)
        goto fail;
    for(i = 0; i < l1_size; i++)
        be64_to_cpus(&l1_table[i]);

    l2_size = s->l2_size * sizeof(uint64_t);
    l2_table = qemu_malloc(l2_size);
    for(i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            if (check_copied) {
                refcount = get_refcount(bs, (l2_offset & ~QCOW_OFLAG_COPIED) >> s->cluster_bits);
                if ((refcount == 1) != ((l2_offset & QCOW_OFLAG_COPIED) != 0)) {
                    printf("ERROR OFLAG_COPIED: l2_offset=%llx refcount=%d\n",
                           l2_offset, refcount);
                }
            }
            l2_offset &= ~QCOW_OFLAG_COPIED;
            if (bdrv_pread(s->hd, l2_offset, l2_table, l2_size) != l2_size)
                goto fail;
            for(j = 0; j < s->l2_size; j++) {
                offset = be64_to_cpu(l2_table[j]);
                if (offset != 0) {
                    if (offset & QCOW_OFLAG_COMPRESSED) {
                        if (offset & QCOW_OFLAG_COPIED) {
                            printf("ERROR: cluster %lld: copied flag must never be set for compressed clusters\n",
                                   offset >> s->cluster_bits);
                            offset &= ~QCOW_OFLAG_COPIED;
                        }
                        nb_csectors = ((offset >> s->csize_shift) &
                                       s->csize_mask) + 1;
                        offset &= s->cluster_offset_mask;
                        inc_refcounts(bs, refcount_table,
                                      refcount_table_size,
                                      offset & ~511, nb_csectors * 512);
                    } else {
                        if (check_copied) {
                            refcount = get_refcount(bs, (offset & ~QCOW_OFLAG_COPIED) >> s->cluster_bits);
                            if ((refcount == 1) != ((offset & QCOW_OFLAG_COPIED) != 0)) {
                                printf("ERROR OFLAG_COPIED: offset=%llx refcount=%d\n",
                                       offset, refcount);
                            }
                        }
                        offset &= ~QCOW_OFLAG_COPIED;
                        inc_refcounts(bs, refcount_table,
                                      refcount_table_size,
                                      offset, s->cluster_size);
                    }
                }
            }
            inc_refcounts(bs, refcount_table,
                          refcount_table_size,
                          l2_offset, s->cluster_size);
        }
    }
    qemu_free(l1_table);
    qemu_free(l2_table);
    return 0;
 fail:
    printf("ERROR: I/O error in check_refcounts_l1\n");
    qemu_free(l1_table);
    qemu_free(l2_table);
    return -EIO;
}

/* recompute all refcounts from the metadata and compare with the image */
static void check_refcounts(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int64_t size, offset;
    int nb_clusters, refcount1, refcount2, i;
    QCowSnapshot *sn;
    uint16_t *refcount_table;

    size = bdrv_getlength(s->hd);
    nb_clusters = size_to_clusters(s, size);
    refcount_table = qemu_mallocz(nb_clusters * sizeof(uint16_t));

    /* header */
    inc_refcounts(bs, refcount_table, nb_clusters,
                  0, s->cluster_size);

    /* current L1 table */
    check_refcounts_l1(bs, refcount_table, nb_clusters,
                       s->l1_table_offset, s->l1_size, 1);

    /* snapshots */
    for(i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        check_refcounts_l1(bs, refcount_table, nb_clusters,
                           sn->l1_table_offset, sn->l1_size, 0);
    }
    inc_refcounts(bs, refcount_table, nb_clusters,
                  s->snapshots_offset, s->snapshots_size);

    /* refcount data */
    inc_refcounts(bs, refcount_table, nb_clusters,
                  s->refcount_table_offset,
                  s->refcount_table_size * sizeof(uint64_t));
    for(i = 0; i < s->refcount_table_size; i++) {
        offset = s->refcount_table[i];
        if (offset != 0) {
            inc_refcounts(bs, refcount_table, nb_clusters,
                          offset, s->cluster_size);
        }
    }

    /* compare ref counts */
    for(i = 0; i < nb_clusters; i++) {
        refcount1 = get_refcount(bs, i);
        refcount2 = refcount_table[i];
        if (refcount1 != refcount2)
            printf("ERROR cluster %d refcount=%d reference=%d\n",
                   i, refcount1, refcount2);
    }

    qemu_free(refcount_table);
}
#endif /* DEBUG_ALLOC */

#if 0
static void dump_refcounts(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    int64_t nb_clusters, k, k1, size;
    int refcount;

    size = bdrv_getlength(s->hd);
    nb_clusters = size_to_clusters(s, size);
    for(k = 0; k < nb_clusters;) {
        k1 = k;
        refcount = get_refcount(bs, k);
        k++;
        while (k < nb_clusters && get_refcount(bs, k) == refcount)
            k++;
        printf("%lld: refcount=%d nb=%lld\n", k1, refcount, k - k1);
    }
}
#endif
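
/*
 * The checker above recomputes a reference count for every host cluster
 * from the header, the active and snapshot L1/L2 tables and the refcount
 * structures themselves, then compares the result with what
 * get_refcount() reports; presumably it is meant to be run right after
 * opening an image, guarded by the same DEBUG_ALLOC define.
 */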

BlockDriver bdrv_qcow2 = {
    .format_name = "qcow2",
    .instance_size = sizeof(BDRVQcowState),
    .bdrv_probe = qcow_probe,
    .bdrv_open = qcow_open,
    .bdrv_close = qcow_close,
    .bdrv_create = qcow_create,
    .bdrv_flush = qcow_flush,
    .bdrv_is_allocated = qcow_is_allocated,
    .bdrv_set_key = qcow_set_key,
    .bdrv_make_empty = qcow_make_empty,

    .bdrv_aio_read = qcow_aio_read,
    .bdrv_aio_write = qcow_aio_write,
    .bdrv_aio_cancel = qcow_aio_cancel,
    .aiocb_size = sizeof(QCowAIOCB),
    .bdrv_write_compressed = qcow_write_compressed,

    .bdrv_snapshot_create = qcow_snapshot_create,
    .bdrv_snapshot_goto = qcow_snapshot_goto,
    .bdrv_snapshot_delete = qcow_snapshot_delete,
    .bdrv_snapshot_list = qcow_snapshot_list,
    .bdrv_get_info = qcow_get_info,

    .bdrv_create2 = qcow_create2,
};
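
/*
 * Nothing in this table runs by itself: in QEMU of this vintage the
 * driver is picked up by the generic block layer, which is assumed to
 * call bdrv_register(&bdrv_qcow2) during its own initialization (see
 * block.c); the struct only wires the qcow_* callbacks to the generic
 * BlockDriver interface.
 */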