/*
 * Block driver for the QCOW format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "block_int.h"
#include <zlib.h>
#include "aes.h"
/**************************************************************/
/* QEMU COW block driver with compression and encryption support */

#define QCOW_MAGIC (('Q' << 24) | ('F' << 16) | ('I' << 8) | 0xfb)
#define QCOW_VERSION 1

#define QCOW_CRYPT_NONE 0
#define QCOW_CRYPT_AES  1

#define QCOW_OFLAG_COMPRESSED (1LL << 63)
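
/* In an L2 table entry, bit 63 marks a compressed cluster.  For such entries
   the low (63 - cluster_bits) bits hold the file offset of the compressed
   data and the bits directly above them hold its byte size; this is how
   get_cluster_offset() and decompress_cluster() below pack and unpack
   compressed cluster descriptors. */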
typedef struct QCowHeader {
    uint32_t magic;
    uint32_t version;
    uint64_t backing_file_offset;
    uint32_t backing_file_size;
    uint32_t mtime;
    uint64_t size; /* in bytes */
    uint8_t cluster_bits;
    uint8_t l2_bits;
    uint32_t crypt_method;
    uint64_t l1_table_offset;
} QCowHeader;
#define L2_CACHE_SIZE 16
typedef struct BDRVQcowState {
    BlockDriverState *hd;
    int cluster_bits;
    int cluster_size;
    int cluster_sectors;
    int l2_bits;
    int l2_size;
    int l1_size;
    uint64_t cluster_offset_mask;
    uint64_t l1_table_offset;
    uint64_t *l1_table;
    uint64_t *l2_cache;
    uint64_t l2_cache_offsets[L2_CACHE_SIZE];
    uint32_t l2_cache_counts[L2_CACHE_SIZE];
    uint8_t *cluster_cache;
    uint8_t *cluster_data;
    uint64_t cluster_cache_offset;
    uint32_t crypt_method; /* current crypt method, 0 if no key yet */
    uint32_t crypt_method_header;
    AES_KEY aes_encrypt_key;
    AES_KEY aes_decrypt_key;
} BDRVQcowState;
static int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset);
static int qcow_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    const QCowHeader *cow_header = (const void *)buf;

    if (buf_size >= sizeof(QCowHeader) &&
        be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
        be32_to_cpu(cow_header->version) == QCOW_VERSION)
        return 100;
    else
        return 0;
}
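
/* Open an existing image: read and byte-swap the header, validate it, load
   the L1 table into memory and allocate the L2/cluster caches.  The backing
   file name, if present, is copied into bs->backing_file. */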
static int qcow_open(BlockDriverState *bs, const char *filename, int flags)
{
    BDRVQcowState *s = bs->opaque;
    int len, i, shift, ret;
    QCowHeader header;

    ret = bdrv_file_open(&s->hd, filename, flags);
    if (ret < 0)
        return ret;
    if (bdrv_pread(s->hd, 0, &header, sizeof(header)) != sizeof(header))
        goto fail;
    be32_to_cpus(&header.magic);
    be32_to_cpus(&header.version);
    be64_to_cpus(&header.backing_file_offset);
    be32_to_cpus(&header.backing_file_size);
    be32_to_cpus(&header.mtime);
    be64_to_cpus(&header.size);
    be32_to_cpus(&header.crypt_method);
    be64_to_cpus(&header.l1_table_offset);

    if (header.magic != QCOW_MAGIC || header.version != QCOW_VERSION)
        goto fail;
    if (header.size <= 1 || header.cluster_bits < 9)
        goto fail;
    if (header.crypt_method > QCOW_CRYPT_AES)
        goto fail;
    s->crypt_method_header = header.crypt_method;
    if (s->crypt_method_header)
        bs->encrypted = 1;
    s->cluster_bits = header.cluster_bits;
    s->cluster_size = 1 << s->cluster_bits;
    s->cluster_sectors = 1 << (s->cluster_bits - 9);
    s->l2_bits = header.l2_bits;
    s->l2_size = 1 << s->l2_bits;
    bs->total_sectors = header.size / 512;
    s->cluster_offset_mask = (1LL << (63 - s->cluster_bits)) - 1;

    /* read the level 1 table */
    shift = s->cluster_bits + s->l2_bits;
    s->l1_size = (header.size + (1LL << shift) - 1) >> shift;

    s->l1_table_offset = header.l1_table_offset;
    s->l1_table = qemu_malloc(s->l1_size * sizeof(uint64_t));
    if (!s->l1_table)
        goto fail;
    if (bdrv_pread(s->hd, s->l1_table_offset, s->l1_table, s->l1_size * sizeof(uint64_t)) !=
        s->l1_size * sizeof(uint64_t))
        goto fail;
    for(i = 0;i < s->l1_size; i++) {
        be64_to_cpus(&s->l1_table[i]);
    }

    s->l2_cache = qemu_malloc(s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
    if (!s->l2_cache)
        goto fail;
    s->cluster_cache = qemu_malloc(s->cluster_size);
    if (!s->cluster_cache)
        goto fail;
    s->cluster_data = qemu_malloc(s->cluster_size);
    if (!s->cluster_data)
        goto fail;
    s->cluster_cache_offset = -1;

    /* read the backing file name */
    if (header.backing_file_offset != 0) {
        len = header.backing_file_size;
        if (len > 1023)
            len = 1023;
        if (bdrv_pread(s->hd, header.backing_file_offset, bs->backing_file, len) != len)
            goto fail;
        bs->backing_file[len] = '\0';
    }
    return 0;

 fail:
    qemu_free(s->l1_table);
    qemu_free(s->l2_cache);
    qemu_free(s->cluster_cache);
    qemu_free(s->cluster_data);
    bdrv_delete(s->hd);
    return -1;
}
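
/* The encryption key is derived from the passphrase by copying at most 16
   bytes of it into a zero-padded 128-bit AES key; separate key schedules
   are expanded for encryption and decryption. */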
static int qcow_set_key(BlockDriverState *bs, const char *key)
{
    BDRVQcowState *s = bs->opaque;
    uint8_t keybuf[16];
    int len, i;

    memset(keybuf, 0, 16);
    len = strlen(key);
    if (len > 16)
        len = 16;
    /* XXX: we could compress the chars to 7 bits to increase
       the key space */
    for(i = 0;i < len;i++) {
        keybuf[i] = key[i];
    }
    s->crypt_method = s->crypt_method_header;

    if (AES_set_encrypt_key(keybuf, 128, &s->aes_encrypt_key) != 0)
        return -1;
    if (AES_set_decrypt_key(keybuf, 128, &s->aes_decrypt_key) != 0)
        return -1;
#if 0
    /* test */
    {
        uint8_t in[16];
        uint8_t out[16];
        uint8_t tmp[16];
        for(i = 0; i < 16; i++)
            in[i] = i;
        AES_encrypt(in, tmp, &s->aes_encrypt_key);
        AES_decrypt(tmp, out, &s->aes_decrypt_key);
        for(i = 0; i < 16; i++)
            printf(" %02x", tmp[i]);
        printf("\n");
        for(i = 0; i < 16; i++)
            printf(" %02x", out[i]);
        printf("\n");
    }
#endif
    return 0;
}
/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
static void encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                            uint8_t *out_buf, const uint8_t *in_buf,
                            int nb_sectors, int enc,
                            const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}
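
/* Each 512-byte sector above is encrypted as an independent AES-CBC unit
   whose IV is simply the little-endian sector number (high 64 bits zero);
   this is what makes the on-disk format interchangeable with cryptoloop
   images of less than 4 GB. */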
/* 'allocate' is:
 *
 * 0 to not allocate.
 *
 * 1 to allocate a normal cluster (for sector indexes 'n_start' to
 * 'n_end')
 *
 * 2 to allocate a compressed cluster of size
 * 'compressed_size'. 'compressed_size' must be > 0 and <
 * cluster_size
 *
 * return 0 if not allocated.
 */
static uint64_t get_cluster_offset(BlockDriverState *bs,
                                   uint64_t offset, int allocate,
                                   int compressed_size,
                                   int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    int min_index, i, j, l1_index, l2_index;
    uint64_t l2_offset, *l2_table, cluster_offset, tmp;
    uint32_t min_count;
    int new_l2_table;

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    l2_offset = s->l1_table[l1_index];
    new_l2_table = 0;
    if (!l2_offset) {
        if (!allocate)
            return 0;
        /* allocate a new l2 entry */
        l2_offset = bdrv_getlength(s->hd);
        /* round to cluster size */
        l2_offset = (l2_offset + s->cluster_size - 1) & ~(s->cluster_size - 1);
        /* update the L1 entry */
        s->l1_table[l1_index] = l2_offset;
        tmp = cpu_to_be64(l2_offset);
        if (bdrv_pwrite(s->hd, s->l1_table_offset + l1_index * sizeof(tmp),
                        &tmp, sizeof(tmp)) != sizeof(tmp))
            return 0;
        new_l2_table = 1;
    }
    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (l2_offset == s->l2_cache_offsets[i]) {
            /* increment the hit count */
            if (++s->l2_cache_counts[i] == 0xffffffff) {
                for(j = 0; j < L2_CACHE_SIZE; j++) {
                    s->l2_cache_counts[j] >>= 1;
                }
            }
            l2_table = s->l2_cache + (i << s->l2_bits);
            goto found;
        }
    }
    /* not found: load a new entry in the least used one */
    min_index = 0;
    min_count = 0xffffffff;
    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (s->l2_cache_counts[i] < min_count) {
            min_count = s->l2_cache_counts[i];
            min_index = i;
        }
    }
    l2_table = s->l2_cache + (min_index << s->l2_bits);
    if (new_l2_table) {
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
        if (bdrv_pwrite(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) !=
            s->l2_size * sizeof(uint64_t))
            return 0;
    } else {
        if (bdrv_pread(s->hd, l2_offset, l2_table, s->l2_size * sizeof(uint64_t)) !=
            s->l2_size * sizeof(uint64_t))
            return 0;
    }
    s->l2_cache_offsets[min_index] = l2_offset;
    s->l2_cache_counts[min_index] = 1;
 found:
    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (!cluster_offset ||
        ((cluster_offset & QCOW_OFLAG_COMPRESSED) && allocate == 1)) {
        if (!allocate)
            return 0;
        /* allocate a new cluster */
        if ((cluster_offset & QCOW_OFLAG_COMPRESSED) &&
            (n_end - n_start) < s->cluster_sectors) {
            /* if the cluster is already compressed, we must
               decompress it in the case it is not completely
               overwritten */
            if (decompress_cluster(s, cluster_offset) < 0)
                return 0;
            cluster_offset = bdrv_getlength(s->hd);
            cluster_offset = (cluster_offset + s->cluster_size - 1) &
                ~(s->cluster_size - 1);
            /* write the cluster content */
            if (bdrv_pwrite(s->hd, cluster_offset, s->cluster_cache, s->cluster_size) !=
                s->cluster_size)
                return -1;
        } else {
            cluster_offset = bdrv_getlength(s->hd);
            if (allocate == 1) {
                /* round to cluster size */
                cluster_offset = (cluster_offset + s->cluster_size - 1) &
                    ~(s->cluster_size - 1);
                bdrv_truncate(s->hd, cluster_offset + s->cluster_size);
                /* if encrypted, we must initialize the cluster
                   content which won't be written */
                if (s->crypt_method &&
                    (n_end - n_start) < s->cluster_sectors) {
                    uint64_t start_sect;
                    start_sect = (offset & ~(s->cluster_size - 1)) >> 9;
                    memset(s->cluster_data + 512, 0x00, 512);
                    for(i = 0; i < s->cluster_sectors; i++) {
                        if (i < n_start || i >= n_end) {
                            encrypt_sectors(s, start_sect + i,
                                            s->cluster_data,
                                            s->cluster_data + 512, 1, 1,
                                            &s->aes_encrypt_key);
                            if (bdrv_pwrite(s->hd, cluster_offset + i * 512,
                                            s->cluster_data, 512) != 512)
                                return -1;
                        }
                    }
                }
            } else if (allocate == 2) {
                cluster_offset |= QCOW_OFLAG_COMPRESSED |
                    (uint64_t)compressed_size << (63 - s->cluster_bits);
            }
        }
        /* update L2 table */
        tmp = cpu_to_be64(cluster_offset);
        l2_table[l2_index] = tmp;
        if (bdrv_pwrite(s->hd,
                        l2_offset + l2_index * sizeof(tmp), &tmp, sizeof(tmp)) != sizeof(tmp))
            return 0;
    }
    return cluster_offset;
}
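
/* bdrv_is_allocated callback: *pnum is set to the number of contiguous
   sectors the answer covers (at most up to the end of the current cluster);
   sectors that are not allocated here must be read from the backing file,
   if any. */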
static int qcow_is_allocated(BlockDriverState *bs, int64_t sector_num,
                             int nb_sectors, int *pnum)
{
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster, n;
    uint64_t cluster_offset;

    cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0);
    index_in_cluster = sector_num & (s->cluster_sectors - 1);
    n = s->cluster_sectors - index_in_cluster;
    if (n > nb_sectors)
        n = nb_sectors;
    *pnum = n;
    return (cluster_offset != 0);
}
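
/* Compressed clusters are raw deflate streams (no zlib header, 2^12-byte
   window, hence the -12 passed to inflateInit2 below); a stream is only
   accepted if it inflates to exactly one cluster. */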
static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}
static int decompress_cluster(BDRVQcowState *s, uint64_t cluster_offset)
{
    int ret, csize;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        csize = cluster_offset >> (63 - s->cluster_bits);
        csize &= (s->cluster_size - 1);
        ret = bdrv_pread(s->hd, coffset, s->cluster_data, csize);
        if (ret != csize)
            return -1;
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data, csize) < 0) {
            return -1;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}
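
/* Synchronous read path (also used by the fallback in
   qcow_write_compressed()): each iteration handles one cluster-bounded run,
   reading from the backing file for holes, zero-filling when there is no
   backing file, decompressing through the one-cluster cache, or reading and
   decrypting in place. */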
static int qcow_read(BlockDriverState *bs, int64_t sector_num,
                     uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    int ret, index_in_cluster, n;
    uint64_t cluster_offset;

    while (nb_sectors > 0) {
        cluster_offset = get_cluster_offset(bs, sector_num << 9, 0, 0, 0, 0);
        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        n = s->cluster_sectors - index_in_cluster;
        if (n > nb_sectors)
            n = nb_sectors;
        if (!cluster_offset) {
            if (bs->backing_hd) {
                /* read from the base image */
                ret = bdrv_read(bs->backing_hd, sector_num, buf, n);
                if (ret < 0)
                    return -1;
            } else {
                memset(buf, 0, 512 * n);
            }
        } else if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            if (decompress_cluster(s, cluster_offset) < 0)
                return -1;
            memcpy(buf, s->cluster_cache + index_in_cluster * 512, 512 * n);
        } else {
            ret = bdrv_pread(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512);
            if (ret != n * 512)
                return -1;
            if (s->crypt_method) {
                encrypt_sectors(s, sector_num, buf, buf, n, 0,
                                &s->aes_decrypt_key);
            }
        }
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;
    }
    return 0;
}
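
/* Synchronous write path: clusters are allocated on demand by
   get_cluster_offset(); when a key is set the data is first encrypted into
   s->cluster_data so the caller's buffer is left untouched. */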
static int qcow_write(BlockDriverState *bs, int64_t sector_num,
                      const uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    int ret, index_in_cluster, n;
    uint64_t cluster_offset;

    while (nb_sectors > 0) {
        index_in_cluster = sector_num & (s->cluster_sectors - 1);
        n = s->cluster_sectors - index_in_cluster;
        if (n > nb_sectors)
            n = nb_sectors;
        cluster_offset = get_cluster_offset(bs, sector_num << 9, 1, 0,
                                            index_in_cluster,
                                            index_in_cluster + n);
        if (!cluster_offset)
            return -1;
        if (s->crypt_method) {
            encrypt_sectors(s, sector_num, s->cluster_data, buf, n, 1,
                            &s->aes_encrypt_key);
            ret = bdrv_pwrite(s->hd, cluster_offset + index_in_cluster * 512,
                              s->cluster_data, n * 512);
        } else {
            ret = bdrv_pwrite(s->hd, cluster_offset + index_in_cluster * 512, buf, n * 512);
        }
        if (ret != n * 512)
            return -1;
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;
    }
    s->cluster_cache_offset = -1; /* disable compressed cache */
    return 0;
}
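
/* Asynchronous I/O state: qcow_aio_readv()/qcow_aio_writev() fill in a
   QCowAIOCB and the callbacks below re-submit one cluster-bounded chunk at a
   time until nb_sectors reaches zero, then signal completion. */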
typedef struct QCowAIOCB {
    BlockDriverAIOCB common;
    int64_t sector_num;
    QEMUIOVector *qiov;
    uint8_t *buf;
    void *orig_buf;
    int nb_sectors;
    int n;
    uint64_t cluster_offset;
    uint8_t *cluster_data;
    struct iovec hd_iov;
    QEMUIOVector hd_qiov;
    BlockDriverAIOCB *hd_aiocb;
} QCowAIOCB;
static void qcow_aio_read_cb(void *opaque, int ret)
{
    QCowAIOCB *acb = opaque;
    BlockDriverState *bs = acb->common.bs;
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster;

    acb->hd_aiocb = NULL;
    if (ret < 0)
        goto done;

 redo:
    /* post process the read buffer */
    if (!acb->cluster_offset) {
        /* nothing to do */
    } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
        /* nothing to do */
    } else {
        if (s->crypt_method) {
            encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
                            acb->n, 0,
                            &s->aes_decrypt_key);
        }
    }

    acb->nb_sectors -= acb->n;
    acb->sector_num += acb->n;
    acb->buf += acb->n * 512;

    if (acb->nb_sectors == 0) {
        /* request completed */
        ret = 0;
        goto done;
    }

    /* prepare next AIO request */
    acb->cluster_offset = get_cluster_offset(bs, acb->sector_num << 9,
                                             0, 0, 0, 0);
    index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
    acb->n = s->cluster_sectors - index_in_cluster;
    if (acb->n > acb->nb_sectors)
        acb->n = acb->nb_sectors;

    if (!acb->cluster_offset) {
        if (bs->backing_hd) {
            /* read from the base image */
            acb->hd_iov.iov_base = (void *)acb->buf;
            acb->hd_iov.iov_len = acb->n * 512;
            qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
            acb->hd_aiocb = bdrv_aio_readv(bs->backing_hd, acb->sector_num,
                &acb->hd_qiov, acb->n, qcow_aio_read_cb, acb);
            if (acb->hd_aiocb == NULL)
                goto done;
        } else {
            /* Note: in this case, no need to wait */
            memset(acb->buf, 0, 512 * acb->n);
            goto redo;
        }
    } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
        /* add AIO support for compressed blocks ? */
        if (decompress_cluster(s, acb->cluster_offset) < 0)
            goto done;
        memcpy(acb->buf,
               s->cluster_cache + index_in_cluster * 512, 512 * acb->n);
        goto redo;
    } else {
        if ((acb->cluster_offset & 511) != 0) {
            ret = -EIO;
            goto done;
        }
        acb->hd_iov.iov_base = (void *)acb->buf;
        acb->hd_iov.iov_len = acb->n * 512;
        qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
        acb->hd_aiocb = bdrv_aio_readv(s->hd,
                            (acb->cluster_offset >> 9) + index_in_cluster,
                            &acb->hd_qiov, acb->n, qcow_aio_read_cb, acb);
        if (acb->hd_aiocb == NULL)
            goto done;
    }
    return;

done:
    if (acb->qiov->niov > 1) {
        qemu_iovec_from_buffer(acb->qiov, acb->orig_buf, acb->qiov->size);
        qemu_vfree(acb->orig_buf);
    }
    acb->common.cb(acb->common.opaque, ret);
    qemu_aio_release(acb);
}
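
/* bdrv_aio_readv entry point: a multi-element QEMUIOVector is bounced
   through a single linear buffer (orig_buf), which is copied back into the
   caller's vector on completion in the callback above. */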
static BlockDriverAIOCB *qcow_aio_readv(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    QCowAIOCB *acb;

    acb = qemu_aio_get(bs, cb, opaque);
    if (!acb)
        return NULL;
    acb->hd_aiocb = NULL;
    acb->sector_num = sector_num;
    acb->qiov = qiov;
    if (qiov->niov > 1)
        acb->buf = acb->orig_buf = qemu_blockalign(bs, qiov->size);
    else
        acb->buf = (uint8_t *)qiov->iov->iov_base;
    acb->nb_sectors = nb_sectors;
    acb->n = 0;
    acb->cluster_offset = 0;

    qcow_aio_read_cb(acb, 0);
    return &acb->common;
}
static void qcow_aio_write_cb(void *opaque, int ret)
{
    QCowAIOCB *acb = opaque;
    BlockDriverState *bs = acb->common.bs;
    BDRVQcowState *s = bs->opaque;
    int index_in_cluster;
    uint64_t cluster_offset;
    const uint8_t *src_buf;

    acb->hd_aiocb = NULL;

    if (ret < 0)
        goto done;

    acb->nb_sectors -= acb->n;
    acb->sector_num += acb->n;
    acb->buf += acb->n * 512;

    if (acb->nb_sectors == 0) {
        /* request completed */
        ret = 0;
        goto done;
    }

    index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
    acb->n = s->cluster_sectors - index_in_cluster;
    if (acb->n > acb->nb_sectors)
        acb->n = acb->nb_sectors;
    cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, 1, 0,
                                        index_in_cluster,
                                        index_in_cluster + acb->n);
    if (!cluster_offset || (cluster_offset & 511) != 0) {
        ret = -EIO;
        goto done;
    }
    if (s->crypt_method) {
        if (!acb->cluster_data) {
            acb->cluster_data = qemu_mallocz(s->cluster_size);
            if (!acb->cluster_data) {
                ret = -ENOMEM;
                goto done;
            }
        }
        encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf,
                        acb->n, 1, &s->aes_encrypt_key);
        src_buf = acb->cluster_data;
    } else {
        src_buf = acb->buf;
    }

    acb->hd_iov.iov_base = (void *)src_buf;
    acb->hd_iov.iov_len = acb->n * 512;
    qemu_iovec_init_external(&acb->hd_qiov, &acb->hd_iov, 1);
    acb->hd_aiocb = bdrv_aio_writev(s->hd,
                                    (cluster_offset >> 9) + index_in_cluster,
                                    &acb->hd_qiov, acb->n,
                                    qcow_aio_write_cb, acb);
    if (acb->hd_aiocb == NULL)
        goto done;
    return;

done:
    if (acb->qiov->niov > 1)
        qemu_vfree(acb->orig_buf);
    acb->common.cb(acb->common.opaque, ret);
    qemu_aio_release(acb);
}
static BlockDriverAIOCB *qcow_aio_writev(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    BDRVQcowState *s = bs->opaque;
    QCowAIOCB *acb;

    s->cluster_cache_offset = -1; /* disable compressed cache */

    acb = qemu_aio_get(bs, cb, opaque);
    if (!acb)
        return NULL;
    acb->hd_aiocb = NULL;
    acb->sector_num = sector_num;
    acb->qiov = qiov;
    if (qiov->niov > 1) {
        acb->buf = acb->orig_buf = qemu_blockalign(bs, qiov->size);
        qemu_iovec_to_buffer(qiov, acb->buf);
    } else {
        acb->buf = (uint8_t *)qiov->iov->iov_base;
    }
    acb->nb_sectors = nb_sectors;
    acb->n = 0;

    qcow_aio_write_cb(acb, 0);
    return &acb->common;
}
static void qcow_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QCowAIOCB *acb = (QCowAIOCB *)blockacb;
    if (acb->hd_aiocb)
        bdrv_aio_cancel(acb->hd_aiocb);
    qemu_aio_release(acb);
}
static void qcow_close(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    qemu_free(s->l1_table);
    qemu_free(s->l2_cache);
    qemu_free(s->cluster_cache);
    qemu_free(s->cluster_data);
    bdrv_delete(s->hd);
}
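
/* Create a new image: write the header, the backing file name (if any) and a
   zero-filled L1 table; clusters and L2 tables are allocated lazily at the
   end of the file as the image is written to. */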
static int qcow_create(const char *filename, QEMUOptionParameter *options)
{
    int fd, header_size, backing_filename_len, l1_size, i, shift;
    QCowHeader header;
    uint64_t tmp;
    int64_t total_size = 0;
    const char *backing_file = NULL;
    int flags = 0;

    /* Read out options */
    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            total_size = options->value.n / 512;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_ENCRYPT)) {
            flags |= options->value.n ? BLOCK_FLAG_ENCRYPT : 0;
        }
        options++;
    }

    fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0644);
    if (fd < 0)
        return -1;
    memset(&header, 0, sizeof(header));
    header.magic = cpu_to_be32(QCOW_MAGIC);
    header.version = cpu_to_be32(QCOW_VERSION);
    header.size = cpu_to_be64(total_size * 512);
    header_size = sizeof(header);
    backing_filename_len = 0;
    if (backing_file) {
        if (strcmp(backing_file, "fat:")) {
            header.backing_file_offset = cpu_to_be64(header_size);
            backing_filename_len = strlen(backing_file);
            header.backing_file_size = cpu_to_be32(backing_filename_len);
            header_size += backing_filename_len;
        } else {
            /* special backing file for vvfat */
            backing_file = NULL;
        }
        header.cluster_bits = 9; /* 512 byte cluster to avoid copying
                                    unmodified sectors */
        header.l2_bits = 12; /* 32 KB L2 tables */
    } else {
        header.cluster_bits = 12; /* 4 KB clusters */
        header.l2_bits = 9; /* 4 KB L2 tables */
    }
    header_size = (header_size + 7) & ~7;
    shift = header.cluster_bits + header.l2_bits;
    l1_size = ((total_size * 512) + (1LL << shift) - 1) >> shift;

    header.l1_table_offset = cpu_to_be64(header_size);
    if (flags & BLOCK_FLAG_ENCRYPT) {
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES);
    } else {
        header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
    }

    /* write all the data */
    write(fd, &header, sizeof(header));
    if (backing_file) {
        write(fd, backing_file, backing_filename_len);
    }
    lseek(fd, header_size, SEEK_SET);
    tmp = 0;
    for(i = 0;i < l1_size; i++) {
        write(fd, &tmp, sizeof(tmp));
    }
    close(fd);
    return 0;
}
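
/* bdrv_make_empty callback: drop all guest data by zeroing the L1 table and
   truncating the image right after it, then invalidate the L2 cache. */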
static int qcow_make_empty(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    uint32_t l1_length = s->l1_size * sizeof(uint64_t);
    int ret;

    memset(s->l1_table, 0, l1_length);
    if (bdrv_pwrite(s->hd, s->l1_table_offset, s->l1_table, l1_length) < 0)
        return -1;
    ret = bdrv_truncate(s->hd, s->l1_table_offset + l1_length);
    if (ret < 0)
        return ret;

    memset(s->l2_cache, 0, s->l2_size * L2_CACHE_SIZE * sizeof(uint64_t));
    memset(s->l2_cache_offsets, 0, L2_CACHE_SIZE * sizeof(uint64_t));
    memset(s->l2_cache_counts, 0, L2_CACHE_SIZE * sizeof(uint32_t));

    return 0;
}
/* XXX: put compressed sectors first, then all the cluster aligned
   tables to avoid losing bytes in alignment */
static int qcow_write_compressed(BlockDriverState *bs, int64_t sector_num,
                                 const uint8_t *buf, int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    z_stream strm;
    int ret, out_len;
    uint8_t *out_buf;
    uint64_t cluster_offset;

    if (nb_sectors != s->cluster_sectors)
        return -EINVAL;

    out_buf = qemu_malloc(s->cluster_size + (s->cluster_size / 1000) + 128);
    if (!out_buf)
        return -1;

    /* best compression, small window, no zlib header */
    memset(&strm, 0, sizeof(strm));
    ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION,
                       Z_DEFLATED, -12,
                       9, Z_DEFAULT_STRATEGY);
    if (ret != 0) {
        qemu_free(out_buf);
        return -1;
    }

    strm.avail_in = s->cluster_size;
    strm.next_in = (uint8_t *)buf;
    strm.avail_out = s->cluster_size;
    strm.next_out = out_buf;

    ret = deflate(&strm, Z_FINISH);
    if (ret != Z_STREAM_END && ret != Z_OK) {
        qemu_free(out_buf);
        deflateEnd(&strm);
        return -1;
    }
    out_len = strm.next_out - out_buf;

    deflateEnd(&strm);

    if (ret != Z_STREAM_END || out_len >= s->cluster_size) {
        /* could not compress: write normal cluster */
        qcow_write(bs, sector_num, buf, s->cluster_sectors);
    } else {
        cluster_offset = get_cluster_offset(bs, sector_num << 9, 2,
                                            out_len, 0, 0);
        cluster_offset &= s->cluster_offset_mask;
        if (bdrv_pwrite(s->hd, cluster_offset, out_buf, out_len) != out_len) {
            qemu_free(out_buf);
            return -1;
        }
    }

    qemu_free(out_buf);
    return 0;
}
static void qcow_flush(BlockDriverState *bs)
{
    BDRVQcowState *s = bs->opaque;
    bdrv_flush(s->hd);
}

static int qcow_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQcowState *s = bs->opaque;
    bdi->cluster_size = s->cluster_size;
    return 0;
}
static QEMUOptionParameter qcow_create_options[] = {
    { BLOCK_OPT_SIZE, OPT_SIZE },
    { BLOCK_OPT_BACKING_FILE, OPT_STRING },
    { BLOCK_OPT_ENCRYPT, OPT_FLAG },
    { NULL }
};
static BlockDriver bdrv_qcow = {
    .format_name = "qcow",
    .instance_size = sizeof(BDRVQcowState),
    .bdrv_probe = qcow_probe,
    .bdrv_open = qcow_open,
    .bdrv_close = qcow_close,
    .bdrv_create = qcow_create,
    .bdrv_flush = qcow_flush,
    .bdrv_is_allocated = qcow_is_allocated,
    .bdrv_set_key = qcow_set_key,
    .bdrv_make_empty = qcow_make_empty,
    .bdrv_aio_readv = qcow_aio_readv,
    .bdrv_aio_writev = qcow_aio_writev,
    .bdrv_aio_cancel = qcow_aio_cancel,
    .aiocb_size = sizeof(QCowAIOCB),
    .bdrv_write_compressed = qcow_write_compressed,
    .bdrv_get_info = qcow_get_info,

    .create_options = qcow_create_options,
};

static void bdrv_qcow_init(void)
{
    bdrv_register(&bdrv_qcow);
}

block_init(bdrv_qcow_init);