Update vendor/libarchive to git 299c6bf136b9bc328b498505f24f87e732b73ff6

Vendor issues fixed:
Issue #731: Reject tar entries >= INT64_MAX
Issue #744 (part of Issue #743): Enforce sandbox with very long pathnames
Issue #748: Zip decompression failure with highly-compressed data
Issue #767: Buffer overflow printing a filename
Issue #770: Be more careful about extra_length
Author: Martin Matuska
Date:   2016-08-26 22:02:37 +00:00
Commit: 5c8a8484e3 (parent 5b0ba62993)

5 changed files with 52 additions and 18 deletions
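All of the issues above sit on libarchive's public read / write-to-disk path. For orientation (this is not part of the commit), here is a minimal sketch of an extraction loop that exercises that path; the helper name extract_archive, the block size and the error handling are illustrative choices, while the archive_read_* calls, archive_read_extract() and the ARCHIVE_EXTRACT_SECURE_* flags are the stock public API:

#include <archive.h>
#include <archive_entry.h>
#include <stdio.h>

/* Extract an untrusted archive with libarchive's security checks enabled.
 * With this update, oversized tar entries (#731), malformed zip extra data
 * (#770) and unverifiable long pathnames (#744) surface as ARCHIVE_FATAL /
 * ARCHIVE_FAILED here instead of being partially accepted. */
int
extract_archive(const char *filename)	/* illustrative helper, not vendor code */
{
	struct archive *a = archive_read_new();
	struct archive_entry *entry;
	int flags = ARCHIVE_EXTRACT_SECURE_SYMLINKS |
	    ARCHIVE_EXTRACT_SECURE_NODOTDOT;
	int r, ret = 0;

	archive_read_support_format_all(a);
	archive_read_support_filter_all(a);
	if (archive_read_open_filename(a, filename, 10240) != ARCHIVE_OK) {
		fprintf(stderr, "%s\n", archive_error_string(a));
		archive_read_free(a);
		return (-1);
	}
	for (;;) {
		r = archive_read_next_header(a, &entry);
		if (r == ARCHIVE_EOF)
			break;
		if (r != ARCHIVE_OK) {	/* e.g. "Tar entry size overflow" */
			fprintf(stderr, "%s\n", archive_error_string(a));
			ret = -1;
			break;
		}
		if (archive_read_extract(a, entry, flags) != ARCHIVE_OK) {
			fprintf(stderr, "%s\n", archive_error_string(a));
			ret = -1;
		}
	}
	archive_read_free(a);
	return (ret);
}

With the hardened checks, a crafted archive shows up as an error return from this loop rather than as out-of-bounds reads in the parsers or writes outside the extraction directory.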

@@ -1128,8 +1128,15 @@ header_common(struct archive_read *a, struct tar *tar,
 	if (tar->entry_bytes_remaining < 0) {
 		tar->entry_bytes_remaining = 0;
 		archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
-		    "Tar entry has negative size?");
-		err = ARCHIVE_WARN;
+		    "Tar entry has negative size");
+		return (ARCHIVE_FATAL);
+	}
+	if (tar->entry_bytes_remaining == INT64_MAX) {
+		/* Note: tar_atol returns INT64_MAX on overflow */
+		tar->entry_bytes_remaining = 0;
+		archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
+		    "Tar entry size overflow");
+		return (ARCHIVE_FATAL);
 	}
 	tar->realsize = tar->entry_bytes_remaining;
 	archive_entry_set_size(entry, tar->entry_bytes_remaining);
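The new sentinel check works because the size parser saturates rather than wraps: tar_atol() returns INT64_MAX when the octal or base-256 size field does not fit in 64 bits, so comparing against INT64_MAX is enough to detect overflow. A rough illustration of that saturating-parse idea for the octal case (a sketch only, not the vendor's tar_atol):

#include <stdint.h>
#include <stddef.h>

/* Illustrative: parse an octal size field, clamping to INT64_MAX on
 * overflow so the caller can treat INT64_MAX as "value did not fit". */
static int64_t
parse_octal_saturating(const char *p, size_t n)
{
	int64_t v = 0;

	while (n-- > 0 && *p >= '0' && *p <= '7') {
		int digit = *p++ - '0';

		if (v > (INT64_MAX - digit) / 8)
			return (INT64_MAX);	/* saturate instead of wrapping */
		v = v * 8 + digit;
	}
	return (v);
}

Because the value saturates, an oversized size field cannot wrap around and masquerade as a small, harmless entry.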

@@ -418,18 +418,30 @@ zip_time(const char *p)
  * id1+size1+data1 + id2+size2+data2 ...
  * triplets. id and size are 2 bytes each.
  */
-static void
-process_extra(const char *p, size_t extra_length, struct zip_entry* zip_entry)
+static int
+process_extra(struct archive_read *a, const char *p, size_t extra_length, struct zip_entry* zip_entry)
 {
 	unsigned offset = 0;
 
-	while (offset < extra_length - 4) {
+	if (extra_length == 0) {
+		return ARCHIVE_OK;
+	}
+
+	if (extra_length < 4) {
+		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
+		    "Too-small extra data: Need at least 4 bytes, but only found %d bytes", (int)extra_length);
+		return ARCHIVE_FAILED;
+	}
+	while (offset <= extra_length - 4) {
 		unsigned short headerid = archive_le16dec(p + offset);
 		unsigned short datasize = archive_le16dec(p + offset + 2);
 
 		offset += 4;
 		if (offset + datasize > extra_length) {
-			break;
+			archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
+			    "Extra data overflow: Need %d bytes but only found %d bytes",
+			    (int)datasize, (int)(extra_length - offset));
+			return ARCHIVE_FAILED;
 		}
 #ifdef DEBUG
 		fprintf(stderr, "Header id 0x%04x, length %d\n",
@@ -715,13 +727,13 @@ process_extra(const char *p, size_t extra_length, struct zip_entry* zip_entry)
 		}
 		offset += datasize;
 	}
-#ifdef DEBUG
-	if (offset != extra_length)
-	{
-		fprintf(stderr,
-		    "Extra data field contents do not match reported size!\n");
+	if (offset != extra_length) {
+		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
+		    "Malformed extra data: Consumed %d bytes of %d bytes",
+		    (int)offset, (int)extra_length);
+		return ARCHIVE_FAILED;
 	}
-#endif
+	return ARCHIVE_OK;
 }
 
 /*
@@ -840,7 +852,9 @@ zip_read_local_file_header(struct archive_read *a, struct archive_entry *entry,
 		return (ARCHIVE_FATAL);
 	}
 
-	process_extra(h, extra_length, zip_entry);
+	if (ARCHIVE_OK != process_extra(a, h, extra_length, zip_entry)) {
+		return ARCHIVE_FATAL;
+	}
 	__archive_read_consume(a, extra_length);
 
 	/* Work around a bug in Info-Zip: When reading from a pipe, it
@@ -1293,7 +1307,7 @@ zip_read_data_deflate(struct archive_read *a, const void **buff,
 	    && bytes_avail > zip->entry_bytes_remaining) {
 		bytes_avail = (ssize_t)zip->entry_bytes_remaining;
 	}
-	if (bytes_avail <= 0) {
+	if (bytes_avail < 0) {
 		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
 		    "Truncated ZIP file body");
 		return (ARCHIVE_FATAL);
@@ -2691,7 +2705,9 @@ slurp_central_directory(struct archive_read *a, struct zip *zip)
 			    "Truncated ZIP file header");
 			return ARCHIVE_FATAL;
 		}
-		process_extra(p + filename_length, extra_length, zip_entry);
+		if (ARCHIVE_OK != process_extra(a, p + filename_length, extra_length, zip_entry)) {
+			return ARCHIVE_FATAL;
+		}
 
 		/*
 		 * Mac resource fork files are stored under the
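The up-front extra_length checks added to process_extra() matter because offset and extra_length are unsigned: with fewer than 4 bytes of extra data, the old loop bound extra_length - 4 wraps around to a huge value and the scan walks past the end of the buffer. A standalone illustration of the wrap (not vendor code; assumes a 64-bit size_t):

#include <stdio.h>
#include <stddef.h>

int
main(void)
{
	size_t extra_length = 2;	/* fewer than the 4 header bytes */

	/* Unsigned subtraction wraps: on a 64-bit system this prints
	 * 18446744073709551614, so "offset < extra_length - 4" no longer
	 * bounds the loop at all. */
	printf("extra_length - 4 = %zu\n", extra_length - 4);
	return (0);
}

Rejecting extra_length < 4 before any subtraction, and erroring out when a declared datasize does not fit in the remaining bytes, keeps the scan strictly inside the mapped extra data.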

@@ -138,6 +138,7 @@ set_acl(struct archive *a, int fd, const char *name,
 	acl_permset_t acl_permset;
 #ifdef ACL_TYPE_NFS4
 	acl_flagset_t acl_flagset;
+	int r;
 #endif
 	int ret;
 	int ae_type, ae_permset, ae_tag, ae_id;
@@ -145,7 +146,7 @@
 	gid_t ae_gid;
 	const char *ae_name;
 	int entries;
-	int i, r;
+	int i;
 
 	ret = ARCHIVE_OK;
 	entries = archive_acl_reset(abstract_acl, ae_requested_type);

@@ -2401,8 +2401,18 @@ check_symlinks(struct archive_write_disk *a)
 		r = lstat(a->name, &st);
 		if (r != 0) {
 			/* We've hit a dir that doesn't exist; stop now. */
-			if (errno == ENOENT)
+			if (errno == ENOENT) {
 				break;
+			} else {
+				/* Note: This effectively disables deep directory
+				 * support when security checks are enabled.
+				 * Otherwise, very long pathnames that trigger
+				 * an error here could evade the sandbox.
+				 * TODO: We could do better, but it would probably
+				 * require merging the symlink checks with the
+				 * deep-directory editing. */
+				return (ARCHIVE_FAILED);
+			}
 		} else if (S_ISLNK(st.st_mode)) {
 			if (c == '\0') {
 				/*
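The new else branch changes what the sandbox does when a path component cannot be verified: with the security options enabled, any lstat() failure other than ENOENT (for example ENAMETOOLONG on a very long pathname) now refuses the entry instead of silently trusting the unverified prefix. The sketch below mirrors that prefix-walk idea in isolation; verify_prefixes is a hypothetical helper, not the vendor's check_symlinks():

#include <sys/stat.h>
#include <errno.h>
#include <string.h>

/* Illustrative: check every directory prefix of `path`, refusing the entry
 * when a component is a symlink or cannot be verified for any reason other
 * than "does not exist yet". Modifies path in place while walking. */
static int
verify_prefixes(char *path)
{
	struct stat st;
	char *p = path;

	while ((p = strchr(p + 1, '/')) != NULL) {
		*p = '\0';			/* truncate to the current prefix */
		if (lstat(path, &st) != 0) {
			*p = '/';
			if (errno == ENOENT)
				return (0);	/* rest will be created later */
			return (-1);		/* e.g. ENAMETOOLONG: refuse */
		}
		*p = '/';
		if (S_ISLNK(st.st_mode))
			return (-1);		/* symlink in the path: refuse */
	}
	return (0);
}

The trade-off the comment notes is that entries which would need libarchive's deep-directory handling (paths longer than the system limit) are now rejected outright whenever the security checks are active.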

@@ -182,7 +182,7 @@ safe_fprintf(FILE *f, const char *fmt, ...)
 		}
 
 		/* If our output buffer is full, dump it and keep going. */
-		if (i > (sizeof(outbuff) - 20)) {
+		if (i > (sizeof(outbuff) - 128)) {
 			outbuff[i] = '\0';
 			fprintf(f, "%s", outbuff);
 			i = 0;
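The larger reserve reflects how safe_fprintf() fills its buffer: a single source character can expand to several output bytes once it is escaped (an unprintable byte rendered as a backslash-octal escape takes four bytes, and a multibyte character can need several such escapes), so flushing only when fewer than 20 bytes remained could be too late. A minimal sketch of the same flush-with-headroom pattern (illustrative only; print_escaped is not the vendor function):

#include <stdio.h>

/* Illustrative: copy `src` to `f`, escaping non-printable bytes as \ooo,
 * and flush whenever the remaining headroom might not hold one more
 * worst-case expansion plus the terminating NUL. */
static void
print_escaped(FILE *f, const char *src)
{
	char outbuff[256];
	size_t i = 0;

	for (; *src != '\0'; src++) {
		unsigned char c = (unsigned char)*src;

		if (c >= 0x20 && c < 0x7f)
			outbuff[i++] = (char)c;
		else
			i += snprintf(outbuff + i, sizeof(outbuff) - i,
			    "\\%03o", (unsigned int)c);
		/* Flush early: keep at least 8 bytes of headroom. */
		if (i > sizeof(outbuff) - 8) {
			outbuff[i] = '\0';
			fprintf(f, "%s", outbuff);
			i = 0;
		}
	}
	outbuff[i] = '\0';
	fprintf(f, "%s", outbuff);
}

The headroom only has to cover the worst-case growth of one iteration plus the NUL terminator; the vendor code now reserves 128 bytes for the same reason.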