From 7f111df0280b73ecaf3370e35a7e2dd7c92ba596 Mon Sep 17 00:00:00 2001
From: kientzle
Date: Sun, 12 Nov 2006 23:45:40 +0000
Subject: [PATCH] Correctly handle writing very large blocks (>1M) through to
 a disk file.

This doesn't happen in normal use, because the file I/O and
decompression layers only pass through smaller blocks. It can happen
with custom read functions that block I/O in larger blocks.
---
 lib/libarchive/archive_read_data_into_fd.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/lib/libarchive/archive_read_data_into_fd.c b/lib/libarchive/archive_read_data_into_fd.c
index 579174eb89c9..509094ba7979 100644
--- a/lib/libarchive/archive_read_data_into_fd.c
+++ b/lib/libarchive/archive_read_data_into_fd.c
@@ -63,6 +63,7 @@ archive_read_data_into_fd(struct archive *a, int fd)
 	while ((r = archive_read_data_block(a, &buff, &size, &offset)) ==
 	    ARCHIVE_OK) {
+		const char *p = buff;
 		if (offset > output_offset) {
 			lseek(fd, offset - output_offset, SEEK_CUR);
 			output_offset = offset;
 		}
@@ -71,13 +72,14 @@ archive_read_data_into_fd(struct archive *a, int fd)
 			bytes_to_write = size;
 			if (bytes_to_write > MAX_WRITE)
 				bytes_to_write = MAX_WRITE;
-			bytes_written = write(fd, buff, bytes_to_write);
+			bytes_written = write(fd, p, bytes_to_write);
 			if (bytes_written < 0) {
 				archive_set_error(a, errno, "Write error");
 				return (-1);
 			}
 			output_offset += bytes_written;
 			total_written += bytes_written;
+			p += bytes_written;
 			size -= bytes_written;
 			if (a->extract_progress != NULL)
 				(*a->extract_progress)(a->extract_progress_user_data);
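
Note: the bug fixed here is that the chunking loop capped each write at
MAX_WRITE but kept passing `buff`, the start of the block, to write(2).
For a block larger than MAX_WRITE, every chunk after the first rewrote
the block's leading bytes instead of the next ones. The patch introduces
a cursor `p` that advances by the number of bytes actually written.

The sketch below shows the same pattern in isolation. It is a minimal
illustration, not libarchive code: the helper name `write_fully`, the
EINTR retry, and the 1 MB cap are assumptions standing in for the
file's MAX_WRITE and error handling.

#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

#define CHUNK_MAX (1024 * 1024)	/* illustrative stand-in for MAX_WRITE */

/* Write `size` bytes from `buff` to `fd` in bounded chunks. */
static ssize_t
write_fully(int fd, const void *buff, size_t size)
{
	const char *p = buff;	/* cursor into the block; this is the fix */
	ssize_t total = 0;

	while (size > 0) {
		size_t n = size > CHUNK_MAX ? CHUNK_MAX : size;
		ssize_t written = write(fd, p, n);

		if (written < 0) {
			if (errno == EINTR)
				continue;	/* retry interrupted writes */
			return (-1);		/* caller inspects errno */
		}
		p += written;		/* advance past what was written */
		size -= (size_t)written;
		total += written;
	}
	return (total);
}

Advancing by `written` rather than by `n` also handles short writes,
since write(2) may accept fewer bytes than requested; the loop simply
retries from the new cursor position.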