path: root/src/lib/util/chd.cpp
Diffstat (limited to 'src/lib/util/chd.cpp')
-rw-r--r--    src/lib/util/chd.cpp    1351
1 file changed, 759 insertions, 592 deletions
diff --git a/src/lib/util/chd.cpp b/src/lib/util/chd.cpp
index 194884bfd9a..c05977a84eb 100644
--- a/src/lib/util/chd.cpp
+++ b/src/lib/util/chd.cpp
@@ -2,8 +2,6 @@
// copyright-holders:Aaron Giles
/***************************************************************************
- chd.c
-
MAME Compressed Hunks of Data file format
***************************************************************************/
@@ -22,11 +20,13 @@
#include <zlib.h>
+#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <ctime>
#include <new>
+#include <tuple>
//**************************************************************************
@@ -134,7 +134,7 @@ struct chd_file::metadata_hash
// stream in bigendian order
//-------------------------------------------------
-inline util::sha1_t chd_file::be_read_sha1(const uint8_t *base)const
+inline util::sha1_t chd_file::be_read_sha1(const uint8_t *base) const noexcept
{
util::sha1_t result;
memcpy(&result.m_raw[0], base, sizeof(result.m_raw));
@@ -147,7 +147,7 @@ inline util::sha1_t chd_file::be_read_sha1(const uint8_t *base)const
// stream in bigendian order
//-------------------------------------------------
-inline void chd_file::be_write_sha1(uint8_t *base, util::sha1_t value)
+inline void chd_file::be_write_sha1(uint8_t *base, util::sha1_t value) noexcept
{
memcpy(base, &value.m_raw[0], sizeof(value.m_raw));
}
@@ -155,45 +155,45 @@ inline void chd_file::be_write_sha1(uint8_t *base, util::sha1_t value)
//-------------------------------------------------
// file_read - read from the file at the given
-// offset; on failure throw an error
+// offset.
//-------------------------------------------------
-inline void chd_file::file_read(uint64_t offset, void *dest, uint32_t length) const
+inline std::error_condition chd_file::file_read(uint64_t offset, void *dest, uint32_t length) const noexcept
{
// no file = failure
- if (!m_file)
- throw std::error_condition(error::NOT_OPEN);
+ if (UNEXPECTED(!m_file))
+ return std::error_condition(error::NOT_OPEN);
// seek and read
- m_file->seek(offset, SEEK_SET);
+ std::error_condition err;
+ err = m_file->seek(offset, SEEK_SET);
+ if (UNEXPECTED(err))
+ return err;
size_t count;
- std::error_condition err = m_file->read(dest, length, count);
- if (err)
- throw err;
- else if (count != length)
- throw std::error_condition(std::errc::io_error); // TODO: revisit this error code (happens if file is cut off)
+ std::tie(err, count) = read(*m_file, dest, length);
+ if (UNEXPECTED(!err && (count != length)))
+ return std::error_condition(std::errc::io_error); // TODO: revisit this error code (happens if file is truncated)
+ return err;
}
//-------------------------------------------------
// file_write - write to the file at the given
-// offset; on failure throw an error
+// offset.
//-------------------------------------------------
-inline void chd_file::file_write(uint64_t offset, const void *source, uint32_t length)
+inline std::error_condition chd_file::file_write(uint64_t offset, const void *source, uint32_t length) noexcept
{
// no file = failure
- if (!m_file)
- throw std::error_condition(error::NOT_OPEN);
+ if (UNEXPECTED(!m_file))
+ return std::error_condition(error::NOT_OPEN);
// seek and write
- m_file->seek(offset, SEEK_SET);
- size_t count;
- std::error_condition err = m_file->write(source, length, count);
- if (err)
- throw err;
- else if (count != length)
- throw std::error_condition(std::errc::interrupted); // can theoretically happen if write is inuterrupted by a signal
+ std::error_condition err;
+ err = m_file->seek(offset, SEEK_SET);
+ if (UNEXPECTED(err))
+ return err;
+ return write(*m_file, source, length).first;
}
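
The hunk above replaces throwing helpers with noexcept members that return std::error_condition. A minimal sketch of the resulting caller-side pattern, reusing the util read helper seen in the hunk; read_exact is a hypothetical name, not part of this change:

    // read exactly 'length' bytes or report an error, mirroring file_read above
    std::error_condition read_exact(util::random_read &file, void *dest, std::size_t length) noexcept
    {
        std::error_condition err;
        std::size_t actual;
        std::tie(err, actual) = read(file, dest, length);      // util helper returns (error, count)
        if (!err && (actual != length))
            return std::error_condition(std::errc::io_error);  // short read, e.g. truncated file
        return err;
    }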
@@ -205,21 +205,20 @@ inline void chd_file::file_write(uint64_t offset, const void *source, uint32_t l
inline uint64_t chd_file::file_append(const void *source, uint32_t length, uint32_t alignment)
{
- std::error_condition err;
-
// no file = failure
- if (!m_file)
+ if (UNEXPECTED(!m_file))
throw std::error_condition(error::NOT_OPEN);
// seek to the end and align if necessary
+ std::error_condition err;
err = m_file->seek(0, SEEK_END);
- if (err)
+ if (UNEXPECTED(err))
throw err;
if (alignment != 0)
{
uint64_t offset;
err = m_file->tell(offset);
- if (err)
+ if (UNEXPECTED(err))
throw err;
uint32_t delta = offset % alignment;
if (delta != 0)
@@ -232,8 +231,8 @@ inline uint64_t chd_file::file_append(const void *source, uint32_t length, uint3
{
uint32_t bytes_to_write = std::min<std::size_t>(sizeof(buffer), delta);
size_t count;
- err = m_file->write(buffer, bytes_to_write, count);
- if (err)
+ std::tie(err, count) = write(*m_file, buffer, bytes_to_write);
+ if (UNEXPECTED(err))
throw err;
delta -= count;
}
@@ -243,14 +242,11 @@ inline uint64_t chd_file::file_append(const void *source, uint32_t length, uint3
// write the real data
uint64_t offset;
err = m_file->tell(offset);
- if (err)
+ if (UNEXPECTED(err))
throw err;
- size_t count;
- err = m_file->write(source, length, count);
- if (err)
+ std::tie(err, std::ignore) = write(*m_file, source, length);
+ if (UNEXPECTED(err))
throw err;
- else if (count != length)
- throw std::error_condition(std::errc::interrupted); // can theoretically happen if write is interrupted by a signal
return offset;
}
@@ -260,11 +256,14 @@ inline uint64_t chd_file::file_append(const void *source, uint32_t length, uint3
// necessary to represent all numbers 0..value
//-------------------------------------------------
-inline uint8_t chd_file::bits_for_value(uint64_t value)
+inline uint8_t chd_file::bits_for_value(uint64_t value) noexcept
{
uint8_t result = 0;
while (value != 0)
- value >>= 1, result++;
+ {
+ value >>= 1;
+ result++;
+ }
return result;
}
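
As a reference point for the loop above, bits_for_value(v) yields the number of bits needed to represent every value in 0..v, which for unsigned arguments matches C++20 std::bit_width; an illustrative sketch only, the change itself keeps the explicit loop:

    #include <bit>
    static_assert(std::bit_width(0u) == 0);                  // zero needs no bits
    static_assert(std::bit_width(5u) == 3);                  // 5 = 0b101 -> 3 bits
    static_assert(std::bit_width(0xFFFFFFFFFFFFull) == 48);  // a 48-bit v5 map offset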
@@ -338,20 +337,14 @@ bool chd_file::parent_missing() const noexcept
* @return A sha1_t.
*/
-util::sha1_t chd_file::sha1()
+util::sha1_t chd_file::sha1() const noexcept
{
- try
- {
- // read the big-endian version
- uint8_t rawbuf[sizeof(util::sha1_t)];
- file_read(m_sha1_offset, rawbuf, sizeof(rawbuf));
- return be_read_sha1(rawbuf);
- }
- catch (std::error_condition const &)
- {
- // on failure, return nullptr
- return util::sha1_t::null;
- }
+ // read the big-endian version
+ uint8_t rawbuf[sizeof(util::sha1_t)];
+ std::error_condition err = file_read(m_sha1_offset, rawbuf, sizeof(rawbuf));
+ if (UNEXPECTED(err))
+ return util::sha1_t::null; // on failure, return null
+ return be_read_sha1(rawbuf);
}
/**
@@ -367,22 +360,24 @@ util::sha1_t chd_file::sha1()
* @return A sha1_t.
*/
-util::sha1_t chd_file::raw_sha1()
+util::sha1_t chd_file::raw_sha1() const noexcept
{
try
{
// determine offset within the file for data-only
- if (!m_rawsha1_offset)
+ if (UNEXPECTED(!m_rawsha1_offset))
throw std::error_condition(error::UNSUPPORTED_VERSION);
// read the big-endian version
uint8_t rawbuf[sizeof(util::sha1_t)];
- file_read(m_rawsha1_offset, rawbuf, sizeof(rawbuf));
+ std::error_condition err = file_read(m_rawsha1_offset, rawbuf, sizeof(rawbuf));
+ if (UNEXPECTED(err))
+ throw err;
return be_read_sha1(rawbuf);
}
catch (std::error_condition const &)
{
- // on failure, return nullptr
+ // on failure, return null
return util::sha1_t::null;
}
}
@@ -400,17 +395,19 @@ util::sha1_t chd_file::raw_sha1()
* @return A sha1_t.
*/
-util::sha1_t chd_file::parent_sha1()
+util::sha1_t chd_file::parent_sha1() const noexcept
{
try
{
// determine offset within the file
- if (!m_parentsha1_offset)
+ if (UNEXPECTED(!m_parentsha1_offset))
throw std::error_condition(error::UNSUPPORTED_VERSION);
// read the big-endian version
uint8_t rawbuf[sizeof(util::sha1_t)];
- file_read(m_parentsha1_offset, rawbuf, sizeof(rawbuf));
+ std::error_condition err = file_read(m_parentsha1_offset, rawbuf, sizeof(rawbuf));
+ if (UNEXPECTED(err))
+ throw err;
return be_read_sha1(rawbuf);
}
catch (std::error_condition const &)
@@ -441,49 +438,51 @@ std::error_condition chd_file::hunk_info(uint32_t hunknum, chd_codec_type &compr
return std::error_condition(error::HUNK_OUT_OF_RANGE);
// get the map pointer
- uint8_t *rawmap;
switch (m_version)
{
- // v3/v4 map entries
- case 3:
- case 4:
- rawmap = &m_rawmap[16 * hunknum];
+ // v3/v4 map entries
+ case 3:
+ case 4:
+ {
+ uint8_t const *const rawmap = &m_rawmap[16 * hunknum];
switch (rawmap[15] & V34_MAP_ENTRY_FLAG_TYPE_MASK)
{
- case V34_MAP_ENTRY_TYPE_COMPRESSED:
- compressor = CHD_CODEC_ZLIB;
- compbytes = get_u16be(&rawmap[12]) + (rawmap[14] << 16);
- break;
+ case V34_MAP_ENTRY_TYPE_COMPRESSED:
+ compressor = CHD_CODEC_ZLIB;
+ compbytes = get_u16be(&rawmap[12]) + (rawmap[14] << 16);
+ break;
- case V34_MAP_ENTRY_TYPE_UNCOMPRESSED:
- compressor = CHD_CODEC_NONE;
- compbytes = m_hunkbytes;
- break;
+ case V34_MAP_ENTRY_TYPE_UNCOMPRESSED:
+ compressor = CHD_CODEC_NONE;
+ compbytes = m_hunkbytes;
+ break;
- case V34_MAP_ENTRY_TYPE_MINI:
- compressor = CHD_CODEC_MINI;
- compbytes = 0;
- break;
+ case V34_MAP_ENTRY_TYPE_MINI:
+ compressor = CHD_CODEC_MINI;
+ compbytes = 0;
+ break;
- case V34_MAP_ENTRY_TYPE_SELF_HUNK:
- compressor = CHD_CODEC_SELF;
- compbytes = 0;
- break;
+ case V34_MAP_ENTRY_TYPE_SELF_HUNK:
+ compressor = CHD_CODEC_SELF;
+ compbytes = 0;
+ break;
- case V34_MAP_ENTRY_TYPE_PARENT_HUNK:
- compressor = CHD_CODEC_PARENT;
- compbytes = 0;
- break;
+ case V34_MAP_ENTRY_TYPE_PARENT_HUNK:
+ compressor = CHD_CODEC_PARENT;
+ compbytes = 0;
+ break;
}
- break;
+ }
+ break;
- // v5 map entries
- case 5:
- rawmap = &m_rawmap[m_mapentrybytes * hunknum];
+ // v5 map entries
+ case 5:
+ {
+ uint8_t const *const rawmap = &m_rawmap[m_mapentrybytes * hunknum];
- // uncompressed case
if (!compressed())
{
+ // uncompressed case
if (get_u32be(&rawmap[0]) == 0)
{
compressor = CHD_CODEC_PARENT;
@@ -494,12 +493,12 @@ std::error_condition chd_file::hunk_info(uint32_t hunknum, chd_codec_type &compr
compressor = CHD_CODEC_NONE;
compbytes = m_hunkbytes;
}
- break;
}
-
- // compressed case
- switch (rawmap[0])
+ else
{
+ // compressed case
+ switch (rawmap[0])
+ {
case COMPRESSION_TYPE_0:
case COMPRESSION_TYPE_1:
case COMPRESSION_TYPE_2:
@@ -525,9 +524,12 @@ std::error_condition chd_file::hunk_info(uint32_t hunknum, chd_codec_type &compr
default:
return error::UNKNOWN_COMPRESSION;
+ }
}
- break;
+ }
+ break;
}
+
return std::error_condition();
}
@@ -541,20 +543,36 @@ std::error_condition chd_file::hunk_info(uint32_t hunknum, chd_codec_type &compr
* @param rawdata The rawdata.
*/
-void chd_file::set_raw_sha1(util::sha1_t rawdata)
+std::error_condition chd_file::set_raw_sha1(util::sha1_t rawdata) noexcept
{
+ uint64_t const offset = (m_rawsha1_offset != 0) ? m_rawsha1_offset : m_sha1_offset;
+ assert(offset != 0);
+
// create a big-endian version
uint8_t rawbuf[sizeof(util::sha1_t)];
be_write_sha1(rawbuf, rawdata);
// write to the header
- uint64_t offset = (m_rawsha1_offset != 0) ? m_rawsha1_offset : m_sha1_offset;
- assert(offset != 0);
- file_write(offset, rawbuf, sizeof(rawbuf));
+ std::error_condition err = file_write(offset, rawbuf, sizeof(rawbuf));
+ if (UNEXPECTED(err))
+ return err;
- // if we have a separate rawsha1_offset, update the full sha1 as well
- if (m_rawsha1_offset != 0)
- metadata_update_hash();
+ try
+ {
+ // if we have a separate rawsha1_offset, update the full sha1 as well
+ if (m_rawsha1_offset != 0)
+ metadata_update_hash();
+ }
+ catch (std::error_condition const &err)
+ {
+ return err;
+ }
+ catch (std::bad_alloc const &)
+ {
+ return std::errc::not_enough_memory;
+ }
+
+ return std::error_condition();
}
/**
@@ -569,23 +587,24 @@ void chd_file::set_raw_sha1(util::sha1_t rawdata)
* @param parent The parent.
*/
-void chd_file::set_parent_sha1(util::sha1_t parent)
+std::error_condition chd_file::set_parent_sha1(util::sha1_t parent) noexcept
{
// if no file, fail
- if (!m_file)
- throw std::error_condition(error::INVALID_FILE);
+ if (UNEXPECTED(!m_file))
+ return std::error_condition(error::INVALID_FILE);
+
+ assert(m_parentsha1_offset != 0);
// create a big-endian version
uint8_t rawbuf[sizeof(util::sha1_t)];
be_write_sha1(rawbuf, parent);
// write to the header
- assert(m_parentsha1_offset != 0);
- file_write(m_parentsha1_offset, rawbuf, sizeof(rawbuf));
+ return file_write(m_parentsha1_offset, rawbuf, sizeof(rawbuf));
}
/**
- * @fn std::error_condition chd_file::create(util::random_read_write::ptr &&file, uint64_t logicalbytes, uint32_t hunkbytes, uint32_t unitbytes, chd_codec_type compression[4])
+ * @fn std::error_condition chd_file::create(util::random_read_write::ptr &&file, uint64_t logicalbytes, uint32_t hunkbytes, uint32_t unitbytes, const chd_codec_type (&compression)[4])
*
* @brief -------------------------------------------------
* create - create a new file with no parent using an existing opened file handle
@@ -605,12 +624,12 @@ std::error_condition chd_file::create(
uint64_t logicalbytes,
uint32_t hunkbytes,
uint32_t unitbytes,
- chd_codec_type compression[4])
+ const chd_codec_type (&compression)[4])
{
// make sure we don't already have a file open
- if (m_file)
+ if (UNEXPECTED(m_file))
return error::ALREADY_OPEN;
- else if (!file)
+ else if (UNEXPECTED(!file))
return std::errc::invalid_argument;
// set the header parameters
@@ -626,7 +645,7 @@ std::error_condition chd_file::create(
}
/**
- * @fn std::error_condition chd_file::create(util::random_read_write::ptr &&file, uint64_t logicalbytes, uint32_t hunkbytes, chd_codec_type compression[4], chd_file &parent)
+ * @fn std::error_condition chd_file::create(util::random_read_write::ptr &&file, uint64_t logicalbytes, uint32_t hunkbytes, const chd_codec_type (&compression)[4], chd_file &parent)
*
* @brief -------------------------------------------------
* create - create a new file with a parent using an existing opened file handle
@@ -645,13 +664,13 @@ std::error_condition chd_file::create(
util::random_read_write::ptr &&file,
uint64_t logicalbytes,
uint32_t hunkbytes,
- chd_codec_type compression[4],
+ const chd_codec_type (&compression)[4],
chd_file &parent)
{
// make sure we don't already have a file open
- if (m_file)
+ if (UNEXPECTED(m_file))
return error::ALREADY_OPEN;
- else if (!file)
+ else if (UNEXPECTED(!file))
return std::errc::invalid_argument;
// set the header parameters
@@ -667,7 +686,7 @@ std::error_condition chd_file::create(
}
/**
- * @fn std::error_condition chd_file::create(std::string_view filename, uint64_t logicalbytes, uint32_t hunkbytes, uint32_t unitbytes, chd_codec_type compression[4])
+ * @fn std::error_condition chd_file::create(std::string_view filename, uint64_t logicalbytes, uint32_t hunkbytes, uint32_t unitbytes, const chd_codec_type (&compression)[4])
*
* @brief -------------------------------------------------
* create - create a new file with no parent using a filename
@@ -687,23 +706,23 @@ std::error_condition chd_file::create(
uint64_t logicalbytes,
uint32_t hunkbytes,
uint32_t unitbytes,
- chd_codec_type compression[4])
+ const chd_codec_type (&compression)[4])
{
// make sure we don't already have a file open
- if (m_file)
+ if (UNEXPECTED(m_file))
return error::ALREADY_OPEN;
// create the new file
util::core_file::ptr file;
std::error_condition filerr = util::core_file::open(filename, OPEN_FLAG_READ | OPEN_FLAG_WRITE | OPEN_FLAG_CREATE, file);
- if (filerr)
+ if (UNEXPECTED(filerr))
return filerr;
// create the file normally, then claim the file
std::error_condition chderr = create(std::move(file), logicalbytes, hunkbytes, unitbytes, compression);
// if an error happened, close and delete the file
- if (chderr)
+ if (UNEXPECTED(chderr))
{
file.reset();
osd_file::remove(std::string(filename)); // FIXME: allow osd_file to use std::string_view
@@ -712,7 +731,7 @@ std::error_condition chd_file::create(
}
/**
- * @fn std::error_condition chd_file::create(std::string_view filename, uint64_t logicalbytes, uint32_t hunkbytes, chd_codec_type compression[4], chd_file &parent)
+ * @fn std::error_condition chd_file::create(std::string_view filename, uint64_t logicalbytes, uint32_t hunkbytes, const chd_codec_type (&compression)[4], chd_file &parent)
*
* @brief -------------------------------------------------
* create - create a new file with a parent using a filename
@@ -731,24 +750,24 @@ std::error_condition chd_file::create(
std::string_view filename,
uint64_t logicalbytes,
uint32_t hunkbytes,
- chd_codec_type compression[4],
+ const chd_codec_type (&compression)[4],
chd_file &parent)
{
// make sure we don't already have a file open
- if (m_file)
+ if (UNEXPECTED(m_file))
return error::ALREADY_OPEN;
// create the new file
util::core_file::ptr file;
std::error_condition filerr = util::core_file::open(filename, OPEN_FLAG_READ | OPEN_FLAG_WRITE | OPEN_FLAG_CREATE, file);
- if (filerr)
+ if (UNEXPECTED(filerr))
return filerr;
// create the file normally, then claim the file
std::error_condition chderr = create(std::move(file), logicalbytes, hunkbytes, compression, parent);
// if an error happened, close and delete the file
- if (chderr)
+ if (UNEXPECTED(chderr))
{
file.reset();
osd_file::remove(std::string(filename)); // FIXME: allow osd_file to use std::string_view
@@ -777,14 +796,14 @@ std::error_condition chd_file::open(
const open_parent_func &open_parent)
{
// make sure we don't already have a file open
- if (m_file)
+ if (UNEXPECTED(m_file))
return error::ALREADY_OPEN;
// open the file
const uint32_t openflags = writeable ? (OPEN_FLAG_READ | OPEN_FLAG_WRITE) : OPEN_FLAG_READ;
util::core_file::ptr file;
std::error_condition filerr = util::core_file::open(filename, openflags, file);
- if (filerr)
+ if (UNEXPECTED(filerr))
return filerr;
// now open the CHD
@@ -812,9 +831,9 @@ std::error_condition chd_file::open(
const open_parent_func &open_parent)
{
// make sure we don't already have a file open
- if (m_file)
+ if (UNEXPECTED(m_file))
return error::ALREADY_OPEN;
- else if (!file)
+ else if (UNEXPECTED(!file))
return std::errc::invalid_argument;
// open the file
@@ -848,7 +867,7 @@ void chd_file::close()
m_hunkcount = 0;
m_unitbytes = 0;
m_unitcount = 0;
- memset(m_compression, 0, sizeof(m_compression));
+ std::fill(std::begin(m_compression), std::end(m_compression), 0);
m_parent.reset();
m_parent_missing = false;
@@ -873,6 +892,105 @@ void chd_file::close()
m_cachehunk = ~0;
}
+std::error_condition chd_file::codec_process_hunk(uint32_t hunknum)
+{
+ // punt if no file
+ if (UNEXPECTED(!m_file))
+ return std::error_condition(error::NOT_OPEN);
+
+ // return an error if out of range
+ if (UNEXPECTED(hunknum >= m_hunkcount))
+ return std::error_condition(error::HUNK_OUT_OF_RANGE);
+
+ // wrap this for clean reporting
+ try
+ {
+ // get a pointer to the map entry
+ switch (m_version)
+ {
+ // v3/v4 map entries
+ case 3:
+ case 4:
+ {
+ uint8_t const *const rawmap = &m_rawmap[16 * hunknum];
+ uint64_t const blockoffs = get_u64be(&rawmap[0]);
+ switch (rawmap[15] & V34_MAP_ENTRY_FLAG_TYPE_MASK)
+ {
+ case V34_MAP_ENTRY_TYPE_COMPRESSED:
+ {
+ uint32_t const blocklen = get_u16be(&rawmap[12]) | (uint32_t(rawmap[14]) << 16);
+ std::error_condition err = file_read(blockoffs, &m_compressed[0], blocklen);
+ if (UNEXPECTED(err))
+ return err;
+ m_decompressor[0]->process(&m_compressed[0], blocklen);
+ return std::error_condition();
+ }
+
+ case V34_MAP_ENTRY_TYPE_UNCOMPRESSED:
+ case V34_MAP_ENTRY_TYPE_MINI:
+ return std::error_condition(error::UNSUPPORTED_FORMAT);
+
+ case V34_MAP_ENTRY_TYPE_SELF_HUNK:
+ return codec_process_hunk(blockoffs);
+
+ case V34_MAP_ENTRY_TYPE_PARENT_HUNK:
+ if (UNEXPECTED(m_parent_missing))
+ return std::error_condition(error::REQUIRES_PARENT);
+ return m_parent->codec_process_hunk(blockoffs);
+ }
+ }
+ break;
+
+ // v5 map entries
+ case 5:
+ {
+ if (UNEXPECTED(!compressed()))
+ return std::error_condition(error::UNSUPPORTED_FORMAT);
+
+ // compressed case
+ uint8_t const *const rawmap = &m_rawmap[m_mapentrybytes * hunknum];
+ uint32_t const blocklen = get_u24be(&rawmap[1]);
+ uint64_t const blockoffs = get_u48be(&rawmap[4]);
+ switch (rawmap[0])
+ {
+ case COMPRESSION_TYPE_0:
+ case COMPRESSION_TYPE_1:
+ case COMPRESSION_TYPE_2:
+ case COMPRESSION_TYPE_3:
+ {
+ std::error_condition err = file_read(blockoffs, &m_compressed[0], blocklen);
+ if (UNEXPECTED(err))
+ return err;
+ auto &decompressor = *m_decompressor[rawmap[0]];
+ decompressor.process(&m_compressed[0], blocklen);
+ return std::error_condition();
+ }
+
+ case COMPRESSION_NONE:
+ return std::error_condition(error::UNSUPPORTED_FORMAT);
+
+ case COMPRESSION_SELF:
+ return codec_process_hunk(blockoffs);
+
+ case COMPRESSION_PARENT:
+ if (UNEXPECTED(m_parent_missing))
+ return std::error_condition(error::REQUIRES_PARENT);
+ return m_parent->codec_process_hunk(blockoffs / (m_parent->hunk_bytes() / m_parent->unit_bytes()));
+ }
+ break;
+ }
+ }
+
+ // if we get here, the map contained an unsupported block type
+ return std::error_condition(error::INVALID_DATA);
+ }
+ catch (std::error_condition const &err)
+ {
+ // just return errors
+ return err;
+ }
+}
+
/**
* @fn std::error_condition chd_file::read_hunk(uint32_t hunknum, void *buffer)
*
@@ -893,126 +1011,148 @@ void chd_file::close()
* @param hunknum The hunknum.
* @param [in,out] buffer If non-null, the buffer.
*
- * @return The hunk.
+ * @return An error condition.
*/
std::error_condition chd_file::read_hunk(uint32_t hunknum, void *buffer)
{
+ // punt if no file
+ if (UNEXPECTED(!m_file))
+ return std::error_condition(error::NOT_OPEN);
+
+ // return an error if out of range
+ if (UNEXPECTED(hunknum >= m_hunkcount))
+ return std::error_condition(error::HUNK_OUT_OF_RANGE);
+
+ auto *const dest = reinterpret_cast<uint8_t *>(buffer);
+
// wrap this for clean reporting
try
{
- // punt if no file
- if (!m_file)
- throw std::error_condition(error::NOT_OPEN);
-
- // return an error if out of range
- if (hunknum >= m_hunkcount)
- throw std::error_condition(error::HUNK_OUT_OF_RANGE);
-
// get a pointer to the map entry
- uint64_t blockoffs;
- uint32_t blocklen;
- util::crc32_t blockcrc;
- uint8_t *rawmap;
- auto *dest = reinterpret_cast<uint8_t *>(buffer);
switch (m_version)
{
- // v3/v4 map entries
- case 3:
- case 4:
- rawmap = &m_rawmap[16 * hunknum];
- blockoffs = get_u64be(&rawmap[0]);
- blockcrc = get_u32be(&rawmap[8]);
+ // v3/v4 map entries
+ case 3:
+ case 4:
+ {
+ uint8_t const *const rawmap = &m_rawmap[16 * hunknum];
+ uint64_t const blockoffs = get_u64be(&rawmap[0]);
+ util::crc32_t const blockcrc = get_u32be(&rawmap[8]);
+ bool const nocrc = rawmap[15] & V34_MAP_ENTRY_FLAG_NO_CRC;
switch (rawmap[15] & V34_MAP_ENTRY_FLAG_TYPE_MASK)
{
- case V34_MAP_ENTRY_TYPE_COMPRESSED:
- blocklen = get_u16be(&rawmap[12]) + (rawmap[14] << 16);
- file_read(blockoffs, &m_compressed[0], blocklen);
+ case V34_MAP_ENTRY_TYPE_COMPRESSED:
+ {
+ uint32_t const blocklen = get_u16be(&rawmap[12]) | (uint32_t(rawmap[14]) << 16);
+ std::error_condition err = file_read(blockoffs, &m_compressed[0], blocklen);
+ if (UNEXPECTED(err))
+ return err;
m_decompressor[0]->decompress(&m_compressed[0], blocklen, dest, m_hunkbytes);
- if (!(rawmap[15] & V34_MAP_ENTRY_FLAG_NO_CRC) && dest != nullptr && util::crc32_creator::simple(dest, m_hunkbytes) != blockcrc)
- throw std::error_condition(error::DECOMPRESSION_ERROR);
+ if (UNEXPECTED(!nocrc && (util::crc32_creator::simple(dest, m_hunkbytes) != blockcrc)))
+ return std::error_condition(error::DECOMPRESSION_ERROR);
return std::error_condition();
+ }
- case V34_MAP_ENTRY_TYPE_UNCOMPRESSED:
- file_read(blockoffs, dest, m_hunkbytes);
- if (!(rawmap[15] & V34_MAP_ENTRY_FLAG_NO_CRC) && util::crc32_creator::simple(dest, m_hunkbytes) != blockcrc)
- throw std::error_condition(error::DECOMPRESSION_ERROR);
+ case V34_MAP_ENTRY_TYPE_UNCOMPRESSED:
+ {
+ std::error_condition err = file_read(blockoffs, dest, m_hunkbytes);
+ if (UNEXPECTED(err))
+ return err;
+ if (UNEXPECTED(!nocrc && (util::crc32_creator::simple(dest, m_hunkbytes) != blockcrc)))
+ return std::error_condition(error::DECOMPRESSION_ERROR);
return std::error_condition();
+ }
- case V34_MAP_ENTRY_TYPE_MINI:
- put_u64be(dest, blockoffs);
- for (uint32_t bytes = 8; bytes < m_hunkbytes; bytes++)
- dest[bytes] = dest[bytes - 8];
- if (!(rawmap[15] & V34_MAP_ENTRY_FLAG_NO_CRC) && util::crc32_creator::simple(dest, m_hunkbytes) != blockcrc)
- throw std::error_condition(error::DECOMPRESSION_ERROR);
- return std::error_condition();
+ case V34_MAP_ENTRY_TYPE_MINI:
+ put_u64be(dest, blockoffs);
+ for (uint32_t bytes = 8; bytes < m_hunkbytes; bytes++)
+ dest[bytes] = dest[bytes - 8];
+ if (UNEXPECTED(!nocrc && (util::crc32_creator::simple(dest, m_hunkbytes) != blockcrc)))
+ return std::error_condition(error::DECOMPRESSION_ERROR);
+ return std::error_condition();
- case V34_MAP_ENTRY_TYPE_SELF_HUNK:
- return read_hunk(blockoffs, dest);
+ case V34_MAP_ENTRY_TYPE_SELF_HUNK:
+ return read_hunk(blockoffs, dest);
- case V34_MAP_ENTRY_TYPE_PARENT_HUNK:
- if (m_parent_missing)
- throw std::error_condition(error::REQUIRES_PARENT);
- return m_parent->read_hunk(blockoffs, dest);
+ case V34_MAP_ENTRY_TYPE_PARENT_HUNK:
+ if (UNEXPECTED(m_parent_missing))
+ return std::error_condition(error::REQUIRES_PARENT);
+ return m_parent->read_hunk(blockoffs, dest);
}
- break;
+ }
+ break;
- // v5 map entries
- case 5:
- rawmap = &m_rawmap[m_mapentrybytes * hunknum];
+ // v5 map entries
+ case 5:
+ {
+ uint8_t const *const rawmap = &m_rawmap[m_mapentrybytes * hunknum];
- // uncompressed case
if (!compressed())
{
- blockoffs = mulu_32x32(get_u32be(rawmap), m_hunkbytes);
+ // uncompressed case
+ uint64_t const blockoffs = mulu_32x32(get_u32be(rawmap), m_hunkbytes);
if (blockoffs != 0)
- file_read(blockoffs, dest, m_hunkbytes);
- else if (m_parent_missing)
- throw std::error_condition(error::REQUIRES_PARENT);
+ return file_read(blockoffs, dest, m_hunkbytes);
+ else if (UNEXPECTED(m_parent_missing))
+ return std::error_condition(error::REQUIRES_PARENT);
else if (m_parent)
- m_parent->read_hunk(hunknum, dest);
+ return m_parent->read_hunk(hunknum, dest);
else
memset(dest, 0, m_hunkbytes);
return std::error_condition();
}
-
- // compressed case
- blocklen = get_u24be(&rawmap[1]);
- blockoffs = get_u48be(&rawmap[4]);
- blockcrc = get_u16be(&rawmap[10]);
- switch (rawmap[0])
+ else
{
+ // compressed case
+ uint32_t const blocklen = get_u24be(&rawmap[1]);
+ uint64_t const blockoffs = get_u48be(&rawmap[4]);
+ util::crc16_t const blockcrc = get_u16be(&rawmap[10]);
+ switch (rawmap[0])
+ {
case COMPRESSION_TYPE_0:
case COMPRESSION_TYPE_1:
case COMPRESSION_TYPE_2:
case COMPRESSION_TYPE_3:
- file_read(blockoffs, &m_compressed[0], blocklen);
- m_decompressor[rawmap[0]]->decompress(&m_compressed[0], blocklen, dest, m_hunkbytes);
- if (!m_decompressor[rawmap[0]]->lossy() && dest != nullptr && util::crc16_creator::simple(dest, m_hunkbytes) != blockcrc)
- throw std::error_condition(error::DECOMPRESSION_ERROR);
- if (m_decompressor[rawmap[0]]->lossy() && util::crc16_creator::simple(&m_compressed[0], blocklen) != blockcrc)
- throw std::error_condition(error::DECOMPRESSION_ERROR);
- return std::error_condition();
+ {
+ std::error_condition err = file_read(blockoffs, &m_compressed[0], blocklen);
+ if (UNEXPECTED(err))
+ return err;
+ auto &decompressor = *m_decompressor[rawmap[0]];
+ decompressor.decompress(&m_compressed[0], blocklen, dest, m_hunkbytes);
+ util::crc16_t const calculated = !decompressor.lossy()
+ ? util::crc16_creator::simple(dest, m_hunkbytes)
+ : util::crc16_creator::simple(&m_compressed[0], blocklen);
+ if (UNEXPECTED(calculated != blockcrc))
+ return std::error_condition(error::DECOMPRESSION_ERROR);
+ return std::error_condition();
+ }
case COMPRESSION_NONE:
- file_read(blockoffs, dest, m_hunkbytes);
- if (util::crc16_creator::simple(dest, m_hunkbytes) != blockcrc)
- throw std::error_condition(error::DECOMPRESSION_ERROR);
- return std::error_condition();
+ {
+ std::error_condition err = file_read(blockoffs, dest, m_hunkbytes);
+ if (UNEXPECTED(err))
+ return err;
+ if (UNEXPECTED(util::crc16_creator::simple(dest, m_hunkbytes) != blockcrc))
+ return std::error_condition(error::DECOMPRESSION_ERROR);
+ return std::error_condition();
+ }
case COMPRESSION_SELF:
return read_hunk(blockoffs, dest);
case COMPRESSION_PARENT:
- if (m_parent_missing)
- throw std::error_condition(error::REQUIRES_PARENT);
- return m_parent->read_bytes(uint64_t(blockoffs) * uint64_t(m_parent->unit_bytes()), dest, m_hunkbytes);
+ if (UNEXPECTED(m_parent_missing))
+ return std::error_condition(error::REQUIRES_PARENT);
+ return m_parent->read_bytes(blockoffs * m_parent->unit_bytes(), dest, m_hunkbytes);
+ }
}
break;
+ }
}
- // if we get here, something was wrong
- throw std::error_condition(std::errc::io_error);
+ // if we get here, the map contained an unsupported block type
+ return std::error_condition(error::INVALID_DATA);
}
catch (std::error_condition const &err)
{
@@ -1042,68 +1182,73 @@ std::error_condition chd_file::read_hunk(uint32_t hunknum, void *buffer)
std::error_condition chd_file::write_hunk(uint32_t hunknum, const void *buffer)
{
- // wrap this for clean reporting
- try
- {
- // punt if no file
- if (!m_file)
- throw std::error_condition(error::NOT_OPEN);
+ // punt if no file
+ if (UNEXPECTED(!m_file))
+ return std::error_condition(error::NOT_OPEN);
- // return an error if out of range
- if (hunknum >= m_hunkcount)
- throw std::error_condition(error::HUNK_OUT_OF_RANGE);
+ // return an error if out of range
+ if (UNEXPECTED(hunknum >= m_hunkcount))
+ return std::error_condition(error::HUNK_OUT_OF_RANGE);
- // if not writeable, fail
- if (!m_allow_writes)
- throw std::error_condition(error::FILE_NOT_WRITEABLE);
+ // if not writeable, fail
+ if (UNEXPECTED(!m_allow_writes))
+ return std::error_condition(error::FILE_NOT_WRITEABLE);
- // uncompressed writes only via this interface
- if (compressed())
- throw std::error_condition(error::FILE_NOT_WRITEABLE);
+ // uncompressed writes only via this interface
+ if (UNEXPECTED(compressed()))
+ return std::error_condition(error::FILE_NOT_WRITEABLE);
- // see if we have allocated the space on disk for this hunk
- uint8_t *rawmap = &m_rawmap[hunknum * 4];
- uint32_t rawentry = get_u32be(rawmap);
+ // see if we have allocated the space on disk for this hunk
+ uint8_t *const rawmap = &m_rawmap[hunknum * 4];
+ uint32_t rawentry = get_u32be(rawmap);
- // if not, allocate one now
- if (rawentry == 0)
+ // if not, allocate one now
+ if (rawentry == 0)
+ {
+ // first make sure we need to allocate it
+ bool all_zeros = true;
+ const auto *scan = reinterpret_cast<const uint32_t *>(buffer);
+ for (uint32_t index = 0; index < m_hunkbytes / 4; index++)
{
- // first make sure we need to allocate it
- bool all_zeros = true;
- const auto *scan = reinterpret_cast<const uint32_t *>(buffer);
- for (uint32_t index = 0; index < m_hunkbytes / 4; index++)
- if (scan[index] != 0)
- {
- all_zeros = false;
- break;
- }
+ if (scan[index] != 0)
+ {
+ all_zeros = false;
+ break;
+ }
+ }
- // if it's all zeros, do nothing more
- if (all_zeros)
- return std::error_condition();
+ // if it's all zeros, do nothing more
+ if (all_zeros)
+ return std::error_condition();
+ // wrap this for clean reporting
+ try
+ {
// append new data to the end of the file, aligning the first chunk
rawentry = file_append(buffer, m_hunkbytes, m_hunkbytes) / m_hunkbytes;
-
- // write the map entry back
- put_u32be(rawmap, rawentry);
- file_write(m_mapoffset + hunknum * 4, rawmap, 4);
-
- // update the cached hunk if we just wrote it
- if (hunknum == m_cachehunk && buffer != &m_cache[0])
- memcpy(&m_cache[0], buffer, m_hunkbytes);
}
- else
+ catch (std::error_condition const &err)
{
- // otherwise, just overwrite
- file_write(uint64_t(rawentry) * uint64_t(m_hunkbytes), buffer, m_hunkbytes);
+ // just return errors
+ return err;
}
+
+ // write the map entry back
+ put_u32be(rawmap, rawentry);
+ std::error_condition err = file_write(m_mapoffset + hunknum * 4, rawmap, 4);
+ if (UNEXPECTED(err))
+ return err;
+
+ // update the cached hunk if we just wrote it
+ if (hunknum == m_cachehunk && buffer != &m_cache[0])
+ memcpy(&m_cache[0], buffer, m_hunkbytes);
+
return std::error_condition();
}
- catch (std::error_condition const &err)
+ else
{
- // just return errors
- return err;
+ // otherwise, just overwrite
+ return file_write(uint64_t(rawentry) * uint64_t(m_hunkbytes), buffer, m_hunkbytes);
}
}
@@ -1163,36 +1308,36 @@ std::error_condition chd_file::write_units(uint64_t unitnum, const void *buffer,
std::error_condition chd_file::read_bytes(uint64_t offset, void *buffer, uint32_t bytes)
{
// iterate over hunks
- uint32_t first_hunk = offset / m_hunkbytes;
- uint32_t last_hunk = (offset + bytes - 1) / m_hunkbytes;
+ uint32_t const first_hunk = offset / m_hunkbytes;
+ uint32_t const last_hunk = (offset + bytes - 1) / m_hunkbytes;
auto *dest = reinterpret_cast<uint8_t *>(buffer);
for (uint32_t curhunk = first_hunk; curhunk <= last_hunk; curhunk++)
{
// determine start/end boundaries
- uint32_t startoffs = (curhunk == first_hunk) ? (offset % m_hunkbytes) : 0;
- uint32_t endoffs = (curhunk == last_hunk) ? ((offset + bytes - 1) % m_hunkbytes) : (m_hunkbytes - 1);
-
- // if it's a full block, just read directly from disk unless it's the cached hunk
- std::error_condition err;
- if (startoffs == 0 && endoffs == m_hunkbytes - 1 && curhunk != m_cachehunk)
- err = read_hunk(curhunk, dest);
+ uint32_t const startoffs = (curhunk == first_hunk) ? (offset % m_hunkbytes) : 0;
+ uint32_t const endoffs = (curhunk == last_hunk) ? ((offset + bytes - 1) % m_hunkbytes) : (m_hunkbytes - 1);
- // otherwise, read from the cache
+ if ((startoffs == 0) && (endoffs == m_hunkbytes - 1) && (curhunk != m_cachehunk))
+ {
+ // if it's a full block, just read directly from disk unless it's the cached hunk
+ std::error_condition err = read_hunk(curhunk, dest);
+ if (UNEXPECTED(err))
+ return err;
+ }
else
{
+ // otherwise, read from the cache
if (curhunk != m_cachehunk)
{
- err = read_hunk(curhunk, &m_cache[0]);
- if (err)
+ std::error_condition err = read_hunk(curhunk, &m_cache[0]);
+ if (UNEXPECTED(err))
return err;
m_cachehunk = curhunk;
}
memcpy(dest, &m_cache[startoffs], endoffs + 1 - startoffs);
}
- // handle errors and advance
- if (err)
- return err;
+ // advance
dest += endoffs + 1 - startoffs;
}
return std::error_condition();
@@ -1216,27 +1361,28 @@ std::error_condition chd_file::read_bytes(uint64_t offset, void *buffer, uint32_
std::error_condition chd_file::write_bytes(uint64_t offset, const void *buffer, uint32_t bytes)
{
// iterate over hunks
- uint32_t first_hunk = offset / m_hunkbytes;
- uint32_t last_hunk = (offset + bytes - 1) / m_hunkbytes;
- const auto *source = reinterpret_cast<const uint8_t *>(buffer);
+ uint32_t const first_hunk = offset / m_hunkbytes;
+ uint32_t const last_hunk = (offset + bytes - 1) / m_hunkbytes;
+ auto const *source = reinterpret_cast<uint8_t const *>(buffer);
for (uint32_t curhunk = first_hunk; curhunk <= last_hunk; curhunk++)
{
// determine start/end boundaries
- uint32_t startoffs = (curhunk == first_hunk) ? (offset % m_hunkbytes) : 0;
- uint32_t endoffs = (curhunk == last_hunk) ? ((offset + bytes - 1) % m_hunkbytes) : (m_hunkbytes - 1);
+ uint32_t const startoffs = (curhunk == first_hunk) ? (offset % m_hunkbytes) : 0;
+ uint32_t const endoffs = (curhunk == last_hunk) ? ((offset + bytes - 1) % m_hunkbytes) : (m_hunkbytes - 1);
- // if it's a full block, just write directly to disk unless it's the cached hunk
std::error_condition err;
- if (startoffs == 0 && endoffs == m_hunkbytes - 1 && curhunk != m_cachehunk)
+ if ((startoffs == 0) && (endoffs == m_hunkbytes - 1) && (curhunk != m_cachehunk))
+ {
+ // if it's a full block, just write directly to disk unless it's the cached hunk
err = write_hunk(curhunk, source);
-
- // otherwise, write from the cache
+ }
else
{
+ // otherwise, write from the cache
if (curhunk != m_cachehunk)
{
err = read_hunk(curhunk, &m_cache[0]);
- if (err)
+ if (UNEXPECTED(err))
return err;
m_cachehunk = curhunk;
}
@@ -1245,7 +1391,7 @@ std::error_condition chd_file::write_bytes(uint64_t offset, const void *buffer,
}
// handle errors and advance
- if (err)
+ if (UNEXPECTED(err))
return err;
source += endoffs + 1 - startoffs;
}
@@ -1266,29 +1412,20 @@ std::error_condition chd_file::write_bytes(uint64_t offset, const void *buffer,
* @param searchindex The searchindex.
* @param [in,out] output The output.
*
- * @return The metadata.
+ * @return An error condition.
*/
std::error_condition chd_file::read_metadata(chd_metadata_tag searchtag, uint32_t searchindex, std::string &output)
{
- // wrap this for clean reporting
- try
- {
- // if we didn't find it, just return
- metadata_entry metaentry;
- if (!metadata_find(searchtag, searchindex, metaentry))
- return std::error_condition(error::METADATA_NOT_FOUND);
-
- // read the metadata
- output.assign(metaentry.length, '\0');
- file_read(metaentry.offset + METADATA_HEADER_SIZE, &output[0], metaentry.length);
- return std::error_condition();
- }
- catch (std::error_condition const &err)
- {
- // just return errors
+ // if we didn't find it, just return
+ metadata_entry metaentry;
+ if (std::error_condition err = metadata_find(searchtag, searchindex, metaentry))
return err;
- }
+
+ // read the metadata
+ try { output.assign(metaentry.length, '\0'); }
+ catch (std::bad_alloc const &) { return std::errc::not_enough_memory; }
+ return file_read(metaentry.offset + METADATA_HEADER_SIZE, &output[0], metaentry.length);
}
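
The lookup above uses a C++17 if-with-initializer so the error stays scoped to the failure branch; a minimal sketch of the idiom, where find_entry is a hypothetical stand-in for metadata_find:

    // declare and test the error in one statement; it goes out of scope after the branch
    if (std::error_condition err = find_entry(searchtag, searchindex, metaentry))
        return err;      // any non-success error propagates to the caller
    // execution continues here only on success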
/**
@@ -1303,29 +1440,20 @@ std::error_condition chd_file::read_metadata(chd_metadata_tag searchtag, uint32_
* @param searchindex The searchindex.
* @param [in,out] output The output.
*
- * @return The metadata.
+ * @return An error condition.
*/
std::error_condition chd_file::read_metadata(chd_metadata_tag searchtag, uint32_t searchindex, std::vector<uint8_t> &output)
{
- // wrap this for clean reporting
- try
- {
- // if we didn't find it, just return
- metadata_entry metaentry;
- if (!metadata_find(searchtag, searchindex, metaentry))
- throw std::error_condition(error::METADATA_NOT_FOUND);
-
- // read the metadata
- output.resize(metaentry.length);
- file_read(metaentry.offset + METADATA_HEADER_SIZE, &output[0], metaentry.length);
- return std::error_condition();
- }
- catch (std::error_condition const &err)
- {
- // just return errors
+ // if we didn't find it, just return
+ metadata_entry metaentry;
+ if (std::error_condition err = metadata_find(searchtag, searchindex, metaentry))
return err;
- }
+
+ // read the metadata
+ try { output.resize(metaentry.length); }
+ catch (std::bad_alloc const &) { return std::errc::not_enough_memory; }
+ return file_read(metaentry.offset + METADATA_HEADER_SIZE, &output[0], metaentry.length);
}
/**
@@ -1342,29 +1470,19 @@ std::error_condition chd_file::read_metadata(chd_metadata_tag searchtag, uint32_
* @param outputlen The outputlen.
* @param [in,out] resultlen The resultlen.
*
- * @return The metadata.
+ * @return An error condition.
*/
std::error_condition chd_file::read_metadata(chd_metadata_tag searchtag, uint32_t searchindex, void *output, uint32_t outputlen, uint32_t &resultlen)
{
- // wrap this for clean reporting
- try
- {
- // if we didn't find it, just return
- metadata_entry metaentry;
- if (!metadata_find(searchtag, searchindex, metaentry))
- throw std::error_condition(error::METADATA_NOT_FOUND);
-
- // read the metadata
- resultlen = metaentry.length;
- file_read(metaentry.offset + METADATA_HEADER_SIZE, output, std::min(outputlen, resultlen));
- return std::error_condition();
- }
- catch (std::error_condition const &err)
- {
- // just return errors
+ // if we didn't find it, just return
+ metadata_entry metaentry;
+ if (std::error_condition err = metadata_find(searchtag, searchindex, metaentry))
return err;
- }
+
+ // read the metadata
+ resultlen = metaentry.length;
+ return file_read(metaentry.offset + METADATA_HEADER_SIZE, output, std::min(outputlen, resultlen));
}
/**
@@ -1381,31 +1499,28 @@ std::error_condition chd_file::read_metadata(chd_metadata_tag searchtag, uint32_
* @param [in,out] resulttag The resulttag.
* @param [in,out] resultflags The resultflags.
*
- * @return The metadata.
+ * @return An error condition.
*/
std::error_condition chd_file::read_metadata(chd_metadata_tag searchtag, uint32_t searchindex, std::vector<uint8_t> &output, chd_metadata_tag &resulttag, uint8_t &resultflags)
{
- // wrap this for clean reporting
- try
- {
- // if we didn't find it, just return
- metadata_entry metaentry;
- if (!metadata_find(searchtag, searchindex, metaentry))
- throw std::error_condition(error::METADATA_NOT_FOUND);
-
- // read the metadata
- output.resize(metaentry.length);
- file_read(metaentry.offset + METADATA_HEADER_SIZE, &output[0], metaentry.length);
- resulttag = metaentry.metatag;
- resultflags = metaentry.flags;
- return std::error_condition();
- }
- catch (std::error_condition const &err)
- {
- // just return errors
+ std::error_condition err;
+
+ // if we didn't find it, just return
+ metadata_entry metaentry;
+ err = metadata_find(searchtag, searchindex, metaentry);
+ if (err)
return err;
- }
+
+ // read the metadata
+ try { output.resize(metaentry.length); }
+ catch (std::bad_alloc const &) { return std::errc::not_enough_memory; }
+ err = file_read(metaentry.offset + METADATA_HEADER_SIZE, &output[0], metaentry.length);
+ if (UNEXPECTED(err))
+ return err;
+ resulttag = metaentry.metatag;
+ resultflags = metaentry.flags;
+ return std::error_condition();
}
/**
@@ -1426,40 +1541,52 @@ std::error_condition chd_file::read_metadata(chd_metadata_tag searchtag, uint32_
std::error_condition chd_file::write_metadata(chd_metadata_tag metatag, uint32_t metaindex, const void *inputbuf, uint32_t inputlen, uint8_t flags)
{
- // wrap this for clean reporting
- try
+ // must write at least 1 byte and no more than 16MB
+ if (UNEXPECTED((inputlen < 1) || (inputlen >= 16 * 1024 * 1024)))
+ return std::error_condition(std::errc::invalid_argument);
+
+ // find the entry if it already exists
+ metadata_entry metaentry;
+ bool finished = false;
+ std::error_condition err = metadata_find(metatag, metaindex, metaentry);
+ if (!err)
{
- // must write at least 1 byte and no more than 16MB
- if (inputlen < 1 || inputlen >= 16 * 1024 * 1024)
- return std::error_condition(std::errc::invalid_argument);
-
- // find the entry if it already exists
- metadata_entry metaentry;
- bool finished = false;
- if (metadata_find(metatag, metaindex, metaentry))
+ if (inputlen <= metaentry.length)
{
// if the new data fits over the old data, just overwrite
- if (inputlen <= metaentry.length)
- {
- file_write(metaentry.offset + METADATA_HEADER_SIZE, inputbuf, inputlen);
-
- // if the lengths don't match, we need to update the length in our header
- if (inputlen != metaentry.length)
- {
- uint8_t length[3];
- put_u24be(length, inputlen);
- file_write(metaentry.offset + 5, length, sizeof(length));
- }
+ err = file_write(metaentry.offset + METADATA_HEADER_SIZE, inputbuf, inputlen);
+ if (UNEXPECTED(err))
+ return err;
- // indicate we did everything
- finished = true;
+ // if the lengths don't match, we need to update the length in our header
+ if (inputlen != metaentry.length)
+ {
+ uint8_t length[3];
+ put_u24be(length, inputlen);
+ err = file_write(metaentry.offset + 5, length, sizeof(length));
+ if (UNEXPECTED(err))
+ return err;
}
+ // indicate we did everything
+ finished = true;
+ }
+ else
+ {
// if it doesn't fit, unlink the current entry
- else
- metadata_set_previous_next(metaentry.prev, metaentry.next);
+ err = metadata_set_previous_next(metaentry.prev, metaentry.next);
+ if (UNEXPECTED(err))
+ return err;
}
+ }
+ else if (UNEXPECTED(err != error::METADATA_NOT_FOUND))
+ {
+ return err;
+ }
+ // wrap this for clean reporting
+ try
+ {
// if not yet done, create a new entry and append
if (!finished)
{
@@ -1475,7 +1602,9 @@ std::error_condition chd_file::write_metadata(chd_metadata_tag metatag, uint32_t
file_append(inputbuf, inputlen);
// set the previous entry to point to us
- metadata_set_previous_next(metaentry.prev, offset);
+ err = metadata_set_previous_next(metaentry.prev, offset);
+ if (UNEXPECTED(err))
+ return err;
}
// update the hash
@@ -1487,6 +1616,10 @@ std::error_condition chd_file::write_metadata(chd_metadata_tag metatag, uint32_t
// return any errors
return err;
}
+ catch (std::bad_alloc const &)
+ {
+ return std::errc::not_enough_memory;
+ }
}
/**
@@ -1507,23 +1640,13 @@ std::error_condition chd_file::write_metadata(chd_metadata_tag metatag, uint32_t
std::error_condition chd_file::delete_metadata(chd_metadata_tag metatag, uint32_t metaindex)
{
- // wrap this for clean reporting
- try
- {
- // find the entry
- metadata_entry metaentry;
- if (!metadata_find(metatag, metaindex, metaentry))
- throw std::error_condition(error::METADATA_NOT_FOUND);
-
- // point the previous to the next, unlinking us
- metadata_set_previous_next(metaentry.prev, metaentry.next);
- return std::error_condition();
- }
- catch (std::error_condition const &err)
- {
- // return any errors
+ // find the entry
+ metadata_entry metaentry;
+ if (std::error_condition err = metadata_find(metatag, metaindex, metaentry))
return err;
- }
+
+ // point the previous to the next, unlinking us
+ return metadata_set_previous_next(metaentry.prev, metaentry.next);
}
/**
@@ -1542,34 +1665,32 @@ std::error_condition chd_file::delete_metadata(chd_metadata_tag metatag, uint32_
std::error_condition chd_file::clone_all_metadata(chd_file &source)
{
- // wrap this for clean reporting
- try
+ // iterate over metadata entries in the source
+ std::vector<uint8_t> filedata;
+ metadata_entry metaentry;
+ metaentry.metatag = 0;
+ metaentry.length = 0;
+ metaentry.next = 0;
+ metaentry.flags = 0;
+ std::error_condition err;
+ for (err = source.metadata_find(CHDMETATAG_WILDCARD, 0, metaentry); !err; err = source.metadata_find(CHDMETATAG_WILDCARD, 0, metaentry, true))
{
- // iterate over metadata entries in the source
- std::vector<uint8_t> filedata;
- metadata_entry metaentry;
- metaentry.metatag = 0;
- metaentry.length = 0;
- metaentry.next = 0;
- metaentry.flags = 0;
- for (bool has_data = source.metadata_find(CHDMETATAG_WILDCARD, 0, metaentry); has_data; has_data = source.metadata_find(CHDMETATAG_WILDCARD, 0, metaentry, true))
- {
- // read the metadata item
- filedata.resize(metaentry.length);
- source.file_read(metaentry.offset + METADATA_HEADER_SIZE, &filedata[0], metaentry.length);
-
- // write it to the destination
- std::error_condition err = write_metadata(metaentry.metatag, (uint32_t)-1, &filedata[0], metaentry.length, metaentry.flags);
- if (err)
- throw err;
- }
- return std::error_condition();
+ // read the metadata item
+ try { filedata.resize(metaentry.length); }
+ catch (std::bad_alloc const &) { return std::errc::not_enough_memory; }
+ err = source.file_read(metaentry.offset + METADATA_HEADER_SIZE, &filedata[0], metaentry.length);
+ if (UNEXPECTED(err))
+ return err;
+
+ // write it to the destination
+ err = write_metadata(metaentry.metatag, (uint32_t)-1, &filedata[0], metaentry.length, metaentry.flags);
+ if (UNEXPECTED(err))
+ return err;
}
- catch (std::error_condition const &err)
- {
- // return any errors
+ if (err == error::METADATA_NOT_FOUND)
+ return std::error_condition();
+ else
return err;
- }
}
/**
@@ -1595,15 +1716,18 @@ util::sha1_t chd_file::compute_overall_sha1(util::sha1_t rawsha1)
std::vector<uint8_t> filedata;
std::vector<metadata_hash> hasharray;
metadata_entry metaentry;
- for (bool has_data = metadata_find(CHDMETATAG_WILDCARD, 0, metaentry); has_data; has_data = metadata_find(CHDMETATAG_WILDCARD, 0, metaentry, true))
+ std::error_condition err;
+ for (err = metadata_find(CHDMETATAG_WILDCARD, 0, metaentry); !err; err = metadata_find(CHDMETATAG_WILDCARD, 0, metaentry, true))
{
// if not checksumming, continue
- if ((metaentry.flags & CHD_MDFLAGS_CHECKSUM) == 0)
+ if (!(metaentry.flags & CHD_MDFLAGS_CHECKSUM))
continue;
// allocate memory and read the data
filedata.resize(metaentry.length);
- file_read(metaentry.offset + METADATA_HEADER_SIZE, &filedata[0], metaentry.length);
+ err = file_read(metaentry.offset + METADATA_HEADER_SIZE, &filedata[0], metaentry.length);
+ if (UNEXPECTED(err))
+ throw err;
// create an entry for this metadata and add it
metadata_hash hashentry;
@@ -1611,6 +1735,8 @@ util::sha1_t chd_file::compute_overall_sha1(util::sha1_t rawsha1)
hashentry.sha1 = util::sha1_creator::simple(&filedata[0], metaentry.length);
hasharray.push_back(hashentry);
}
+ if (err != error::METADATA_NOT_FOUND)
+ throw err;
// sort the array
if (!hasharray.empty())
@@ -1772,7 +1898,7 @@ uint32_t chd_file::guess_unitbytes()
void chd_file::parse_v3_header(uint8_t *rawheader, util::sha1_t &parentsha1)
{
// verify header length
- if (get_u32be(&rawheader[8]) != V3_HEADER_SIZE)
+ if (UNEXPECTED(get_u32be(&rawheader[8]) != V3_HEADER_SIZE))
throw std::error_condition(error::INVALID_FILE);
// extract core info
@@ -1835,7 +1961,7 @@ void chd_file::parse_v3_header(uint8_t *rawheader, util::sha1_t &parentsha1)
void chd_file::parse_v4_header(uint8_t *rawheader, util::sha1_t &parentsha1)
{
// verify header length
- if (get_u32be(&rawheader[8]) != V4_HEADER_SIZE)
+ if (UNEXPECTED(get_u32be(&rawheader[8]) != V4_HEADER_SIZE))
throw std::error_condition(error::INVALID_FILE);
// extract core info
@@ -1895,7 +2021,7 @@ void chd_file::parse_v4_header(uint8_t *rawheader, util::sha1_t &parentsha1)
void chd_file::parse_v5_header(uint8_t *rawheader, util::sha1_t &parentsha1)
{
// verify header length
- if (get_u32be(&rawheader[8]) != V5_HEADER_SIZE)
+ if (UNEXPECTED(get_u32be(&rawheader[8]) != V5_HEADER_SIZE))
throw std::error_condition(error::INVALID_FILE);
// extract core info
@@ -1969,9 +2095,9 @@ std::error_condition chd_file::compress_v5_map()
{
uint8_t curcomp = m_rawmap[hunknum * 12 + 0];
- // promote self block references to more compact forms
if (curcomp == COMPRESSION_SELF)
{
+ // promote self block references to more compact forms
uint32_t refhunk = get_u48be(&m_rawmap[hunknum * 12 + 4]);
if (refhunk == last_self)
curcomp = COMPRESSION_SELF_0;
@@ -1981,10 +2107,9 @@ std::error_condition chd_file::compress_v5_map()
max_self = std::max(max_self, refhunk);
last_self = refhunk;
}
-
- // promote parent block references to more compact forms
else if (curcomp == COMPRESSION_PARENT)
{
+ // promote parent block references to more compact forms
uint32_t refunit = get_u48be(&m_rawmap[hunknum * 12 + 4]);
if (refunit == mulu_32x32(hunknum, m_hunkbytes) / m_unitbytes)
curcomp = COMPRESSION_PARENT_SELF;
@@ -2032,26 +2157,37 @@ std::error_condition chd_file::compress_v5_map()
}
}
- // compute a tree and export it to the buffer
- std::vector<uint8_t> compressed(m_hunkcount * 6);
+ // determine the number of bits we need to hold a length and a hunk index
+ const uint8_t lengthbits = bits_for_value(max_complen);
+ const uint8_t selfbits = bits_for_value(max_self);
+ const uint8_t parentbits = bits_for_value(max_parent);
+
+ // determine the needed size of the output buffer
+ // 16 bytes is required for the header
+ // max len per entry given to huffman encoder at instantiation is 8 bits
+ // this corresponds to worst-case max 12 bits per entry when RLE encoded.
+ // max additional bits per entry after RLE encoded tree is
+ // for COMPRESSION_TYPE_0-3: lengthbits+16
+ // for COMPRESSION_NONE: 16
+ // for COMPRESSION_SELF: selfbits
+ // for COMPRESSION_PARENT: parentbits
+ // the overall size is clamped later with bitbuf.flush()
+ int nbits_needed = (8*16) + (12 + std::max<int>({lengthbits+16, selfbits, parentbits}))*m_hunkcount;
+ std::vector<uint8_t> compressed(nbits_needed / 8 + 1);
bitstream_out bitbuf(&compressed[16], compressed.size() - 16);
+
+ // compute a tree and export it to the buffer
huffman_error err = encoder.compute_tree_from_histo();
- if (err != HUFFERR_NONE)
+ if (UNEXPECTED(err != HUFFERR_NONE))
throw std::error_condition(error::COMPRESSION_ERROR);
err = encoder.export_tree_rle(bitbuf);
- if (err != HUFFERR_NONE)
+ if (UNEXPECTED(err != HUFFERR_NONE))
throw std::error_condition(error::COMPRESSION_ERROR);
// encode the data
for (uint8_t *src = &compression_rle[0]; src < dest; src++)
encoder.encode_one(bitbuf, *src);
- // determine the number of bits we need to hold the a length
- // and a hunk index
- uint8_t lengthbits = bits_for_value(max_complen);
- uint8_t selfbits = bits_for_value(max_self);
- uint8_t parentbits = bits_for_value(max_parent);
-
// for each compression type, output the relevant data
lastcomp = 0;
count = 0;
@@ -2134,13 +2270,16 @@ std::error_condition chd_file::compress_v5_map()
// then write the map offset
uint8_t rawbuf[sizeof(uint64_t)];
put_u64be(rawbuf, m_mapoffset);
- file_write(m_mapoffset_offset, rawbuf, sizeof(rawbuf));
- return std::error_condition();
+ return file_write(m_mapoffset_offset, rawbuf, sizeof(rawbuf));
}
catch (std::error_condition const &err)
{
return err;
}
+ catch (std::bad_alloc const &)
+ {
+ return std::errc::not_enough_memory;
+ }
}
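
To make the sizing comment in the hunk above concrete, a worked example with hypothetical values: for m_hunkcount = 10000, lengthbits = 21, selfbits = 14 and parentbits = 0, the worst case is 8*16 + (12 + (21 + 16)) * 10000 = 490128 bits, so the map buffer is allocated as 490128 / 8 + 1 = 61267 bytes; the final size is still clamped later with bitbuf.flush() as the comment notes.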
/**
@@ -2165,7 +2304,10 @@ void chd_file::decompress_v5_map()
// read the reader
uint8_t rawbuf[16];
- file_read(m_mapoffset, rawbuf, sizeof(rawbuf));
+ std::error_condition ioerr;
+ ioerr = file_read(m_mapoffset, rawbuf, sizeof(rawbuf));
+ if (UNEXPECTED(ioerr))
+ throw ioerr;
uint32_t const mapbytes = get_u32be(&rawbuf[0]);
uint64_t const firstoffs = get_u48be(&rawbuf[4]);
util::crc16_t const mapcrc = get_u16be(&rawbuf[10]);
@@ -2175,13 +2317,15 @@ void chd_file::decompress_v5_map()
// now read the map
std::vector<uint8_t> compressed(mapbytes);
- file_read(m_mapoffset + 16, &compressed[0], mapbytes);
+ ioerr = file_read(m_mapoffset + 16, &compressed[0], mapbytes);
+ if (UNEXPECTED(ioerr))
+ throw ioerr;
bitstream_in bitbuf(&compressed[0], compressed.size());
// first decode the compression types
huffman_decoder<16, 8> decoder;
- huffman_error err = decoder.import_tree_rle(bitbuf);
- if (err != HUFFERR_NONE)
+ huffman_error const huferr = decoder.import_tree_rle(bitbuf);
+ if (UNEXPECTED(huferr != HUFFERR_NONE))
throw std::error_condition(error::DECOMPRESSION_ERROR);
uint8_t lastcomp = 0;
int repcount = 0;
@@ -2265,7 +2409,7 @@ void chd_file::decompress_v5_map()
}
// verify the final CRC
- if (util::crc16_creator::simple(&m_rawmap[0], m_hunkcount * 12) != mapcrc)
+ if (UNEXPECTED(util::crc16_creator::simple(&m_rawmap[0], m_hunkcount * 12) != mapcrc))
throw std::error_condition(error::DECOMPRESSION_ERROR);
}
@@ -2295,13 +2439,13 @@ std::error_condition chd_file::create_common()
m_metaoffset = 0;
// if we have a parent, it must be V3 or later
- if (m_parent && m_parent->version() < 3)
+ if (UNEXPECTED(m_parent && m_parent->version() < 3))
throw std::error_condition(error::UNSUPPORTED_VERSION);
// must be an even number of units per hunk
- if (m_hunkbytes % m_unitbytes != 0)
+ if (UNEXPECTED(m_hunkbytes % m_unitbytes != 0))
throw std::error_condition(std::errc::invalid_argument);
- if (m_parent && m_unitbytes != m_parent->unit_bytes())
+ if (UNEXPECTED(m_parent && m_unitbytes != m_parent->unit_bytes()))
throw std::error_condition(std::errc::invalid_argument);
// verify the compression types
@@ -2336,7 +2480,9 @@ std::error_condition chd_file::create_common()
be_write_sha1(&rawheader[104], m_parent ? m_parent->sha1() : util::sha1_t::null);
// write the resulting header
- file_write(0, rawheader, sizeof(rawheader));
+ std::error_condition err = file_write(0, rawheader, sizeof(rawheader));
+ if (UNEXPECTED(err))
+ throw err;
// parse it back out to set up fields appropriately
util::sha1_t parentsha1;
@@ -2354,8 +2500,10 @@ std::error_condition chd_file::create_common()
uint64_t offset = m_mapoffset;
while (mapsize != 0)
{
- uint32_t bytes_to_write = (std::min<size_t>)(mapsize, sizeof(buffer));
- file_write(offset, buffer, bytes_to_write);
+ uint32_t const bytes_to_write = std::min<size_t>(mapsize, sizeof(buffer));
+ err = file_write(offset, buffer, bytes_to_write);
+ if (UNEXPECTED(err))
+ throw err;
offset += bytes_to_write;
mapsize -= bytes_to_write;
}
@@ -2411,10 +2559,12 @@ std::error_condition chd_file::open_common(bool writeable, const open_parent_fun
// read the raw header
uint8_t rawheader[MAX_HEADER_SIZE];
- file_read(0, rawheader, sizeof(rawheader));
+ std::error_condition err = file_read(0, rawheader, sizeof(rawheader));
+ if (UNEXPECTED(err))
+ throw err;
// verify the signature
- if (memcmp(rawheader, "MComprHD", 8) != 0)
+ if (UNEXPECTED(memcmp(rawheader, "MComprHD", 8) != 0))
throw std::error_condition(error::INVALID_FILE);
m_version = get_u32be(&rawheader[12]);
@@ -2433,7 +2583,7 @@ std::error_condition chd_file::open_common(bool writeable, const open_parent_fun
if (m_version < HEADER_VERSION)
m_allow_writes = false;
- if (writeable && !m_allow_writes)
+ if (UNEXPECTED(writeable && !m_allow_writes))
throw std::error_condition(error::FILE_NOT_WRITEABLE);
// make sure we have a parent if we need one (and don't if we don't)
@@ -2447,7 +2597,7 @@ std::error_condition chd_file::open_common(bool writeable, const open_parent_fun
else if (m_parent->sha1() != parentsha1)
throw std::error_condition(error::INVALID_PARENT);
}
- else if (m_parent)
+ else if (UNEXPECTED(m_parent))
{
throw std::error_condition(std::errc::invalid_argument);
}
@@ -2481,16 +2631,22 @@ void chd_file::create_open_common()
for (int decompnum = 0; decompnum < std::size(m_compression); decompnum++)
{
m_decompressor[decompnum] = chd_codec_list::new_decompressor(m_compression[decompnum], *this);
- if (m_decompressor[decompnum] == nullptr && m_compression[decompnum] != 0)
+ if (UNEXPECTED(!m_decompressor[decompnum] && (m_compression[decompnum] != 0)))
throw std::error_condition(error::UNKNOWN_COMPRESSION);
}
// read the map; v5+ compressed drives need to read and decompress their map
m_rawmap.resize(m_hunkcount * m_mapentrybytes);
if (m_version >= 5 && compressed())
+ {
decompress_v5_map();
+ }
else
- file_read(m_mapoffset, &m_rawmap[0], m_rawmap.size());
+ {
+ std::error_condition err = file_read(m_mapoffset, &m_rawmap[0], m_rawmap.size());
+ if (UNEXPECTED(err))
+ throw err;
+ }
// allocate the temporary compressed buffer and a buffer for caching
m_compressed.resize(m_hunkbytes);
@@ -2505,44 +2661,37 @@ void chd_file::create_open_common()
* for appending to a compressed CHD
* -------------------------------------------------.
*
- * @exception CHDERR_NOT_OPEN Thrown when a chderr not open error condition occurs.
- * @exception CHDERR_HUNK_OUT_OF_RANGE Thrown when a chderr hunk out of range error
- * condition occurs.
- * @exception CHDERR_FILE_NOT_WRITEABLE Thrown when a chderr file not writeable error
- * condition occurs.
- * @exception CHDERR_COMPRESSION_ERROR Thrown when a chderr compression error error
- * condition occurs.
- *
* @param hunknum The hunknum.
*/
-void chd_file::verify_proper_compression_append(uint32_t hunknum)
+std::error_condition chd_file::verify_proper_compression_append(uint32_t hunknum) const noexcept
{
// punt if no file
- if (!m_file)
- throw std::error_condition(error::NOT_OPEN);
+ if (UNEXPECTED(!m_file))
+ return std::error_condition(error::NOT_OPEN);
// return an error if out of range
- if (hunknum >= m_hunkcount)
- throw std::error_condition(error::HUNK_OUT_OF_RANGE);
+ if (UNEXPECTED(hunknum >= m_hunkcount))
+ return std::error_condition(error::HUNK_OUT_OF_RANGE);
// if not writeable, fail
- if (!m_allow_writes)
- throw std::error_condition(error::FILE_NOT_WRITEABLE);
+ if (UNEXPECTED(!m_allow_writes))
+ return std::error_condition(error::FILE_NOT_WRITEABLE);
// compressed writes only via this interface
- if (!compressed())
- throw std::error_condition(error::FILE_NOT_WRITEABLE);
+ if (UNEXPECTED(!compressed()))
+ return std::error_condition(error::FILE_NOT_WRITEABLE);
// only permitted to write new blocks
- uint8_t *rawmap = &m_rawmap[hunknum * 12];
- if (rawmap[0] != 0xff)
- throw std::error_condition(error::COMPRESSION_ERROR);
+ uint8_t const *const rawmap = &m_rawmap[hunknum * 12];
+ if (UNEXPECTED(rawmap[0] != 0xff))
+ return std::error_condition(error::COMPRESSION_ERROR);
+
+ // if this isn't the first block, only permitted to write immediately after the previous one
+ if (UNEXPECTED((hunknum != 0) && (rawmap[-12] == 0xff)))
+ return std::error_condition(error::COMPRESSION_ERROR);
- // if this isn't the first block, only permitted to write immediately
- // after the previous one
- if (hunknum != 0 && rawmap[-12] == 0xff)
- throw std::error_condition(error::COMPRESSION_ERROR);
+ return std::error_condition();
}
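
verify_proper_compression_append() and the hunk_copy_*() helpers below all work directly on 12-byte raw map entries, where the first byte is the compression/type field and 0xff marks a hunk that has not been written yet. Based on the writes visible in this diff (rawmap[0], put_u24be(&rawmap[1], ...), put_u48be(&rawmap[4], ...)), a hypothetical decoder for one entry could look like the sketch below; placing the CRC-16 in the last two bytes is an assumption.

#include <cstdint>

// Illustrative view of one 12-byte raw map entry of a compressed V5 CHD.
struct v5_map_entry
{
	uint8_t  type;    // codec slot, COMPRESSION_PARENT, etc.; 0xff = not written yet
	uint32_t length;  // 24-bit compressed length
	uint64_t offset;  // 48-bit file offset, self hunk number or parent unit
	uint16_t crc16;   // CRC-16 of the uncompressed hunk (assumed position)
};

inline v5_map_entry parse_v5_map_entry(const uint8_t *raw)
{
	v5_map_entry e;
	e.type   = raw[0];
	e.length = (uint32_t(raw[1]) << 16) | (uint32_t(raw[2]) << 8) | raw[3];
	e.offset = (uint64_t(raw[4]) << 40) | (uint64_t(raw[5]) << 32) | (uint64_t(raw[6]) << 24)
	         | (uint64_t(raw[7]) << 16) | (uint64_t(raw[8]) << 8)  | raw[9];
	e.crc16  = (uint16_t(raw[10]) << 8) | raw[11];
	return e;
}
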
/**
@@ -2563,7 +2712,9 @@ void chd_file::verify_proper_compression_append(uint32_t hunknum)
void chd_file::hunk_write_compressed(uint32_t hunknum, int8_t compression, const uint8_t *compressed, uint32_t complength, util::crc16_t crc16)
{
// verify that we are appending properly to a compressed file
- verify_proper_compression_append(hunknum);
+ std::error_condition err = verify_proper_compression_append(hunknum);
+ if (UNEXPECTED(err))
+ throw err;
// write the final result
uint64_t offset = file_append(compressed, complength);
@@ -2593,11 +2744,13 @@ void chd_file::hunk_write_compressed(uint32_t hunknum, int8_t compression, const
void chd_file::hunk_copy_from_self(uint32_t hunknum, uint32_t otherhunk)
{
// verify that we are appending properly to a compressed file
- verify_proper_compression_append(hunknum);
+ std::error_condition err = verify_proper_compression_append(hunknum);
+ if (UNEXPECTED(err))
+ throw err;
// only permitted to reference prior hunks
- if (otherhunk >= hunknum)
- throw std::error_condition(std::errc::invalid_argument);
+ if (UNEXPECTED(otherhunk >= hunknum))
+ throw std::error_condition(error::HUNK_OUT_OF_RANGE);
// update the map entry
uint8_t *rawmap = &m_rawmap[hunknum * 12];
@@ -2621,10 +2774,12 @@ void chd_file::hunk_copy_from_self(uint32_t hunknum, uint32_t otherhunk)
void chd_file::hunk_copy_from_parent(uint32_t hunknum, uint64_t parentunit)
{
// verify that we are appending properly to a compressed file
- verify_proper_compression_append(hunknum);
+ std::error_condition err = verify_proper_compression_append(hunknum);
+ if (UNEXPECTED(err))
+ throw err;
// update the map entry
- uint8_t *rawmap = &m_rawmap[hunknum * 12];
+ uint8_t *const rawmap = &m_rawmap[hunknum * 12];
rawmap[0] = COMPRESSION_PARENT;
put_u24be(&rawmap[1], 0);
put_u48be(&rawmap[4], parentunit);
@@ -2632,7 +2787,7 @@ void chd_file::hunk_copy_from_parent(uint32_t hunknum, uint64_t parentunit)
}
/**
- * @fn bool chd_file::metadata_find(chd_metadata_tag metatag, int32_t metaindex, metadata_entry &metaentry, bool resume)
+ * @fn std::error_condition chd_file::metadata_find(chd_metadata_tag metatag, int32_t metaindex, metadata_entry &metaentry, bool resume)
*
* @brief -------------------------------------------------
* metadata_find - find a metadata entry
@@ -2643,10 +2798,10 @@ void chd_file::hunk_copy_from_parent(uint32_t hunknum, uint64_t parentunit)
* @param [in,out] metaentry The metaentry.
* @param resume true to resume.
*
- * @return true if it succeeds, false if it fails.
+ * @return A std::error_condition (error::METADATA_NOT_FOUND if the search fails).
*/
-bool chd_file::metadata_find(chd_metadata_tag metatag, int32_t metaindex, metadata_entry &metaentry, bool resume) const
+std::error_condition chd_file::metadata_find(chd_metadata_tag metatag, int32_t metaindex, metadata_entry &metaentry, bool resume) const noexcept
{
// start at the beginning unless we're resuming a previous search
if (!resume)
@@ -2665,7 +2820,9 @@ bool chd_file::metadata_find(chd_metadata_tag metatag, int32_t metaindex, metada
{
// read the raw header
uint8_t raw_meta_header[METADATA_HEADER_SIZE];
- file_read(metaentry.offset, raw_meta_header, sizeof(raw_meta_header));
+ std::error_condition err = file_read(metaentry.offset, raw_meta_header, sizeof(raw_meta_header));
+ if (UNEXPECTED(err))
+ return err;
// extract the data
metaentry.metatag = get_u32be(&raw_meta_header[0]);
@@ -2676,7 +2833,7 @@ bool chd_file::metadata_find(chd_metadata_tag metatag, int32_t metaindex, metada
// if we got a match, proceed
if (metatag == CHDMETATAG_WILDCARD || metaentry.metatag == metatag)
if (metaindex-- == 0)
- return true;
+ return std::error_condition();
// no match, fetch the next link
metaentry.prev = metaentry.offset;
@@ -2684,7 +2841,7 @@ bool chd_file::metadata_find(chd_metadata_tag metatag, int32_t metaindex, metada
}
// if we get here, we didn't find it
- return false;
+ return error::METADATA_NOT_FOUND;
}
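
Because metadata_find() now reports error::METADATA_NOT_FOUND instead of returning false, a caller can tell a clean miss from an I/O failure. As a hedged sketch, a hypothetical member function (not part of the class) could use the resume flag to walk every entry carrying a given tag:

// Hypothetical member function: count how many metadata entries carry a given
// tag. error::METADATA_NOT_FOUND simply terminates the walk; anything else is
// propagated to the caller as a real failure.
std::error_condition chd_file::count_metadata(chd_metadata_tag metatag, uint32_t &count) const noexcept
{
	count = 0;
	metadata_entry metaentry;
	std::error_condition err = metadata_find(metatag, 0, metaentry);
	while (!err)
	{
		++count;
		err = metadata_find(metatag, 0, metaentry, true); // resume from the previous hit
	}
	return (err == error::METADATA_NOT_FOUND) ? std::error_condition() : err;
}
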
/**
@@ -2698,27 +2855,28 @@ bool chd_file::metadata_find(chd_metadata_tag metatag, int32_t metaindex, metada
* @param nextoffset The nextoffset.
*/
-void chd_file::metadata_set_previous_next(uint64_t prevoffset, uint64_t nextoffset)
+std::error_condition chd_file::metadata_set_previous_next(uint64_t prevoffset, uint64_t nextoffset) noexcept
{
uint64_t offset = 0;
- // if we were the first entry, make the next entry the first
if (prevoffset == 0)
{
+ // if we were the first entry, make the next entry the first
offset = m_metaoffset_offset;
m_metaoffset = nextoffset;
}
-
- // otherwise, update the link in the previous header
else
+ {
+ // otherwise, update the link in the previous header
offset = prevoffset + 8;
+ }
// create a big-endian version
uint8_t rawbuf[sizeof(uint64_t)];
put_u64be(rawbuf, nextoffset);
// write to the header and update our local copy
- file_write(offset, rawbuf, sizeof(rawbuf));
+ return file_write(offset, rawbuf, sizeof(rawbuf));
}
/**
@@ -2732,7 +2890,7 @@ void chd_file::metadata_set_previous_next(uint64_t prevoffset, uint64_t nextoffs
void chd_file::metadata_update_hash()
{
// only works for V4 and above, and only for compressed CHDs
- if (m_version < 4 || !compressed())
+ if ((m_version < 4) || !compressed())
return;
// compute the new overall hash
@@ -2743,7 +2901,9 @@ void chd_file::metadata_update_hash()
be_write_sha1(&rawbuf[0], fullsha1);
// write to the header
- file_write(m_sha1_offset, rawbuf, sizeof(rawbuf));
+ std::error_condition err = file_write(m_sha1_offset, rawbuf, sizeof(rawbuf));
+ if (UNEXPECTED(err))
+ throw err;
}
/**
@@ -2778,19 +2938,18 @@ int CLIB_DECL chd_file::metadata_hash_compare(const void *elem1, const void *ele
* -------------------------------------------------.
*/
-chd_file_compressor::chd_file_compressor()
- : m_walking_parent(false),
- m_total_in(0),
- m_total_out(0),
- m_read_queue(nullptr),
- m_read_queue_offset(0),
- m_read_done_offset(0),
- m_read_error(false),
- m_work_queue(nullptr),
- m_write_hunk(0)
+chd_file_compressor::chd_file_compressor() :
+ m_walking_parent(false),
+ m_total_in(0),
+ m_total_out(0),
+ m_read_queue(nullptr),
+ m_read_queue_offset(0),
+ m_read_done_offset(0),
+ m_work_queue(nullptr),
+ m_write_hunk(0)
{
// zap arrays
- memset(m_codecs, 0, sizeof(m_codecs));
+ std::fill(std::begin(m_codecs), std::end(m_codecs), nullptr);
// allocate work queues
m_read_queue = osd_work_queue_alloc(WORK_QUEUE_FLAG_IO);
@@ -2839,7 +2998,7 @@ void chd_file_compressor::compress_begin()
// reset read state
m_read_queue_offset = 0;
m_read_done_offset = 0;
- m_read_error = false;
+ m_read_error.clear();
// reset work item state
m_work_buffer.resize(hunk_bytes() * (WORK_BUFFER_HUNKS + 1));
@@ -2880,20 +3039,19 @@ void chd_file_compressor::compress_begin()
std::error_condition chd_file_compressor::compress_continue(double &progress, double &ratio)
{
- // if we got an error, return an error
- if (m_read_error)
- return std::errc::io_error;
+ // if we got an error, return the error
+ if (UNEXPECTED(m_read_error))
+ return m_read_error;
// if done reading, queue some more
while (m_read_queue_offset < m_logicalbytes && osd_work_queue_items(m_read_queue) < 2)
{
// see if we have enough free work items to read the next half of a buffer
- uint32_t startitem = m_read_queue_offset / hunk_bytes();
- uint32_t enditem = startitem + WORK_BUFFER_HUNKS / 2;
- uint32_t curitem;
- for (curitem = startitem; curitem < enditem; curitem++)
- if (m_work_item[curitem % WORK_BUFFER_HUNKS].m_status != WS_READY)
- break;
+ uint32_t const startitem = m_read_queue_offset / hunk_bytes();
+ uint32_t const enditem = startitem + WORK_BUFFER_HUNKS / 2;
+ uint32_t curitem = startitem;
+ while ((curitem < enditem) && (m_work_item[curitem % WORK_BUFFER_HUNKS].m_status == WS_READY))
+ ++curitem;
// if it's not all clear, defer
if (curitem != enditem)
@@ -2917,64 +3075,60 @@ std::error_condition chd_file_compressor::compress_continue(double &progress, do
work_item &item = m_work_item[m_write_hunk % WORK_BUFFER_HUNKS];
// free any OSD work item
- if (item.m_osd != nullptr)
+ if (item.m_osd)
+ {
osd_work_item_release(item.m_osd);
- item.m_osd = nullptr;
+ item.m_osd = nullptr;
+ }
- // for parent walking, just add to the hashmap
if (m_walking_parent)
{
- uint32_t uph = hunk_bytes() / unit_bytes();
+ // for parent walking, just add to the hashmap
+ uint32_t const uph = hunk_bytes() / unit_bytes();
uint32_t units = uph;
if (item.m_hunknum == hunk_count() - 1 || !compressed())
units = 1;
for (uint32_t unit = 0; unit < units; unit++)
+ {
if (m_parent_map.find(item.m_hash[unit].m_crc16, item.m_hash[unit].m_sha1) == hashmap::NOT_FOUND)
m_parent_map.add(item.m_hunknum * uph + unit, item.m_hash[unit].m_crc16, item.m_hash[unit].m_sha1);
+ }
}
-
- // if we're uncompressed, use regular writes
else if (!compressed())
{
+ // if we're uncompressed, use regular writes
std::error_condition err = write_hunk(item.m_hunknum, item.m_data);
- if (err)
+ if (UNEXPECTED(err))
return err;
// writes of all-0 data don't actually take space, so see if we count this
chd_codec_type codec = CHD_CODEC_NONE;
uint32_t complen;
- hunk_info(item.m_hunknum, codec, complen);
- if (codec == CHD_CODEC_NONE)
+ err = hunk_info(item.m_hunknum, codec, complen);
+ if (!err && codec == CHD_CODEC_NONE) // TODO: report error?
m_total_out += m_hunkbytes;
}
-
- // for compressing, process the result
- else do
+ else if (uint64_t const selfhunk = m_current_map.find(item.m_hash[0].m_crc16, item.m_hash[0].m_sha1); selfhunk != hashmap::NOT_FOUND)
+ {
+ // the hunk is in the self map
+ hunk_copy_from_self(item.m_hunknum, selfhunk);
+ }
+ else
{
- // first see if the hunk is in the parent or self maps
- uint64_t selfhunk = m_current_map.find(item.m_hash[0].m_crc16, item.m_hash[0].m_sha1);
- if (selfhunk != hashmap::NOT_FOUND)
+ // if not, see if it's in the parent map
+ uint64_t const parentunit = m_parent ? m_parent_map.find(item.m_hash[0].m_crc16, item.m_hash[0].m_sha1) : hashmap::NOT_FOUND;
+ if (parentunit != hashmap::NOT_FOUND)
{
- hunk_copy_from_self(item.m_hunknum, selfhunk);
- break;
+ hunk_copy_from_parent(item.m_hunknum, parentunit);
}
-
- // if not, see if it's in the parent map
- if (m_parent)
+ else
{
- uint64_t parentunit = m_parent_map.find(item.m_hash[0].m_crc16, item.m_hash[0].m_sha1);
- if (parentunit != hashmap::NOT_FOUND)
- {
- hunk_copy_from_parent(item.m_hunknum, parentunit);
- break;
- }
+ // otherwise, append it compressed and add to the self map
+ hunk_write_compressed(item.m_hunknum, item.m_compression, item.m_compressed, item.m_complen, item.m_hash[0].m_crc16);
+ m_total_out += item.m_complen;
+ m_current_map.add(item.m_hunknum, item.m_hash[0].m_crc16, item.m_hash[0].m_sha1);
}
-
- // otherwise, append it compressed and add to the self map
- hunk_write_compressed(item.m_hunknum, item.m_compression, item.m_compressed, item.m_complen, item.m_hash[0].m_crc16);
- m_total_out += item.m_complen;
- m_current_map.add(item.m_hunknum, item.m_hash[0].m_crc16, item.m_hash[0].m_sha1);
- } while (false);
+ }
// reset the item and advance
item.m_status = WS_READY;
@@ -2983,23 +3137,24 @@ std::error_condition chd_file_compressor::compress_continue(double &progress, do
// if we hit the end, finalize
if (m_write_hunk == m_hunkcount)
{
- // if this is just walking the parent, reset and get ready for compression
if (m_walking_parent)
{
+ // if this is just walking the parent, reset and get ready for compression
m_walking_parent = false;
m_read_queue_offset = m_read_done_offset = 0;
m_write_hunk = 0;
- for (auto & elem : m_work_item)
+ for (auto &elem : m_work_item)
elem.m_status = WS_READY;
}
-
- // wait for all reads to finish and if we're compressed, write the final SHA1 and map
else
{
+ // wait for all reads to finish and if we're compressed, write the final SHA1 and map
osd_work_queue_wait(m_read_queue, 30 * osd_ticks_per_second());
if (!compressed())
return std::error_condition();
- set_raw_sha1(m_compsha1.finish());
+ std::error_condition err = set_raw_sha1(m_compsha1.finish());
+ if (UNEXPECTED(err))
+ return err;
return compress_v5_map();
}
}
@@ -3014,9 +3169,9 @@ std::error_condition chd_file_compressor::compress_continue(double &progress, do
// if we're waiting for work, wait
// sometimes code can get here with .m_status == WS_READY and .m_osd != nullptr, TODO find out why this happens
- while (m_work_item[m_write_hunk % WORK_BUFFER_HUNKS].m_status != WS_READY &&
- m_work_item[m_write_hunk % WORK_BUFFER_HUNKS].m_status != WS_COMPLETE &&
- m_work_item[m_write_hunk % WORK_BUFFER_HUNKS].m_osd != nullptr)
+ while ((m_work_item[m_write_hunk % WORK_BUFFER_HUNKS].m_status != WS_READY) &&
+ (m_work_item[m_write_hunk % WORK_BUFFER_HUNKS].m_status != WS_COMPLETE) &&
+ m_work_item[m_write_hunk % WORK_BUFFER_HUNKS].m_osd)
osd_work_item_wait(m_work_item[m_write_hunk % WORK_BUFFER_HUNKS].m_osd, osd_ticks_per_second());
return m_walking_parent ? error::WALKING_PARENT : error::COMPRESSING;
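
compress_continue() is an incremental interface: it keeps returning error::WALKING_PARENT or error::COMPRESSING while there is more work to do, and an empty std::error_condition once the output has been finalised. A minimal driver loop, assuming a hypothetical chd_file_compressor subclass that supplies read_data(), might look like:

// Sketch of driving the incremental compression API; "my_compressor" is a
// hypothetical subclass of chd_file_compressor that implements read_data().
std::error_condition run_compression(my_compressor &comp)
{
	comp.compress_begin();

	double progress = 0.0, ratio = 0.0;
	std::error_condition err;
	do
	{
		err = comp.compress_continue(progress, ratio);
	}
	while ((err == chd_file::error::WALKING_PARENT) || (err == chd_file::error::COMPRESSING));

	return err; // empty on success, otherwise the first hard failure
}
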
@@ -3037,7 +3192,7 @@ std::error_condition chd_file_compressor::compress_continue(double &progress, do
void *chd_file_compressor::async_walk_parent_static(void *param, int threadid)
{
- auto *item = reinterpret_cast<work_item *>(param);
+ auto *const item = reinterpret_cast<work_item *>(param);
item->m_compressor->async_walk_parent(*item);
return nullptr;
}
@@ -3079,7 +3234,7 @@ void chd_file_compressor::async_walk_parent(work_item &item)
void *chd_file_compressor::async_compress_hunk_static(void *param, int threadid)
{
- auto *item = reinterpret_cast<work_item *>(param);
+ auto *const item = reinterpret_cast<work_item *>(param);
item->m_compressor->async_compress_hunk(*item, threadid);
return nullptr;
}
@@ -3106,8 +3261,8 @@ void chd_file_compressor::async_compress_hunk(work_item &item, int threadid)
// find the best compression scheme, unless we already have a self or parent match
// (note we may miss a self match from blocks not yet added, but this just results in extra work)
// TODO: data race
- if (m_current_map.find(item.m_hash[0].m_crc16, item.m_hash[0].m_sha1) == hashmap::NOT_FOUND &&
- m_parent_map.find(item.m_hash[0].m_crc16, item.m_hash[0].m_sha1) == hashmap::NOT_FOUND)
+ if ((m_current_map.find(item.m_hash[0].m_crc16, item.m_hash[0].m_sha1) == hashmap::NOT_FOUND) &&
+ (m_parent_map.find(item.m_hash[0].m_crc16, item.m_hash[0].m_sha1) == hashmap::NOT_FOUND))
item.m_compression = item.m_codecs->find_best_compressor(item.m_data, item.m_compressed, item.m_complen);
// mark us complete
@@ -3142,37 +3297,45 @@ void *chd_file_compressor::async_read_static(void *param, int threadid)
void chd_file_compressor::async_read()
{
// if in the error or complete state, stop
- if (m_read_error)
+ if (UNEXPECTED(m_read_error))
return;
// determine parameters for the read
- uint32_t work_buffer_bytes = WORK_BUFFER_HUNKS * hunk_bytes();
+ uint32_t const work_buffer_bytes = WORK_BUFFER_HUNKS * hunk_bytes();
uint32_t numbytes = work_buffer_bytes / 2;
- if (m_read_done_offset + numbytes > logical_bytes())
+ if ((m_read_done_offset + numbytes) > logical_bytes())
numbytes = logical_bytes() - m_read_done_offset;
+ uint8_t *const dest = &m_work_buffer[0] + (m_read_done_offset % work_buffer_bytes);
+ assert((&m_work_buffer[0] == dest) || (&m_work_buffer[work_buffer_bytes / 2] == dest));
+ assert(!(m_read_done_offset % hunk_bytes()));
+ uint64_t const end_offset = m_read_done_offset + numbytes;
+
// catch any exceptions coming out of here
try
{
// do the read
- uint8_t *dest = &m_work_buffer[0] + (m_read_done_offset % work_buffer_bytes);
- assert(dest == &m_work_buffer[0] || dest == &m_work_buffer[work_buffer_bytes/2]);
- uint64_t end_offset = m_read_done_offset + numbytes;
-
- // if walking the parent, read in hunks from the parent CHD
if (m_walking_parent)
{
+ // if walking the parent, read in hunks from the parent CHD
+ uint64_t curoffs = m_read_done_offset;
uint8_t *curdest = dest;
- for (uint64_t curoffs = m_read_done_offset; curoffs < end_offset + 1; curoffs += hunk_bytes())
+ uint32_t curhunk = m_read_done_offset / hunk_bytes();
+ while (curoffs < end_offset + 1)
{
- m_parent->read_hunk(curoffs / hunk_bytes(), curdest);
+ std::error_condition err = m_parent->read_hunk(curhunk, curdest);
+ if (err && (error::HUNK_OUT_OF_RANGE != err)) // FIXME: fix the code so it doesn't depend on trying to read past the end of the parent CHD
+ throw err;
+ curoffs += hunk_bytes();
curdest += hunk_bytes();
+ ++curhunk;
}
}
-
- // otherwise, call the virtual function
else
+ {
+ // otherwise, call the virtual function
read_data(dest, m_read_done_offset, numbytes);
+ }
// spawn off work for each hunk
for (uint64_t curoffs = m_read_done_offset; curoffs < end_offset; curoffs += hunk_bytes())
@@ -3199,12 +3362,12 @@ void chd_file_compressor::async_read()
catch (std::error_condition const &err)
{
fprintf(stderr, "CHD error occurred: %s\n", err.message().c_str());
- m_read_error = true;
+ m_read_error = err;
}
catch (std::exception const &ex)
{
fprintf(stderr, "exception occurred: %s\n", ex.what());
- m_read_error = true;
+ m_read_error = std::errc::io_error; // TODO: revisit this error code
}
}
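
Turning m_read_error from a bool into a std::error_condition keeps the old "did anything fail" test working while preserving the cause: a default-constructed or cleared condition converts to false, and any stored error converts to true and can be returned directly from compress_continue(). This is standard library behaviour, shown here in isolation:

#include <cassert>
#include <system_error>

int main()
{
	std::error_condition status;            // default-constructed: no error
	assert(!status);                        // converts to false

	status = std::errc::io_error;           // remember a failure
	assert(status);                         // now converts to true
	assert(status == std::errc::io_error);  // and the cause is still available

	status.clear();                         // back to "no error", as compress_begin() does
	assert(!status);
	return 0;
}
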
@@ -3222,8 +3385,8 @@ void chd_file_compressor::async_read()
* -------------------------------------------------.
*/
-chd_file_compressor::hashmap::hashmap()
- : m_block_list(new entry_block(nullptr))
+chd_file_compressor::hashmap::hashmap() :
+ m_block_list(new entry_block(nullptr))
{
// initialize the map to empty
memset(m_map, 0, sizeof(m_map));
@@ -3279,10 +3442,10 @@ void chd_file_compressor::hashmap::reset()
* @return An uint64_t.
*/
-uint64_t chd_file_compressor::hashmap::find(util::crc16_t crc16, util::sha1_t sha1)
+uint64_t chd_file_compressor::hashmap::find(util::crc16_t crc16, util::sha1_t sha1) const noexcept
{
// look up the entry in the map
- for (entry_t *entry = m_map[crc16]; entry != nullptr; entry = entry->m_next)
+ for (entry_t *entry = m_map[crc16]; entry; entry = entry->m_next)
if (entry->m_sha1 == sha1)
return entry->m_itemnum;
return NOT_FOUND;
@@ -3312,36 +3475,40 @@ void chd_file_compressor::hashmap::add(uint64_t itemnum, util::crc16_t crc16, ut
m_map[crc16] = entry;
}
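
The compressor's hashmap buckets hunks by their cheap CRC-16 and only confirms candidates with the full SHA-1, so a lookup touches a single short chain out of 65536 buckets. A stripped-down illustration of the same structure (simplified types; the real class uses its own entry_block arena rather than a standard container, and would live on the heap):

#include <array>
#include <cstdint>
#include <forward_list>

// Simplified illustration of the compressor's hash map: 65536 buckets indexed
// by CRC-16, each holding (sha1, item number) pairs that share that CRC.
struct toy_hashmap
{
	static constexpr uint64_t NOT_FOUND = ~uint64_t(0);

	struct entry { std::array<uint8_t, 20> sha1; uint64_t itemnum; };
	std::array<std::forward_list<entry>, 65536> buckets;

	uint64_t find(uint16_t crc16, const std::array<uint8_t, 20> &sha1) const
	{
		for (const entry &e : buckets[crc16])
			if (e.sha1 == sha1)       // a CRC collision is resolved by the full hash
				return e.itemnum;
		return NOT_FOUND;
	}

	void add(uint64_t itemnum, uint16_t crc16, const std::array<uint8_t, 20> &sha1)
	{
		buckets[crc16].push_front(entry{ sha1, itemnum });
	}
};
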
-bool chd_file::is_hd() const
+std::error_condition chd_file::check_is_hd() const noexcept
{
metadata_entry metaentry;
return metadata_find(HARD_DISK_METADATA_TAG, 0, metaentry);
}
-bool chd_file::is_cd() const
+std::error_condition chd_file::check_is_cd() const noexcept
{
metadata_entry metaentry;
- return metadata_find(CDROM_OLD_METADATA_TAG, 0, metaentry)
- || metadata_find(CDROM_TRACK_METADATA_TAG, 0, metaentry)
- || metadata_find(CDROM_TRACK_METADATA2_TAG, 0, metaentry);
+ std::error_condition err = metadata_find(CDROM_OLD_METADATA_TAG, 0, metaentry);
+ if (err == error::METADATA_NOT_FOUND)
+ err = metadata_find(CDROM_TRACK_METADATA_TAG, 0, metaentry);
+ if (err == error::METADATA_NOT_FOUND)
+ err = metadata_find(CDROM_TRACK_METADATA2_TAG, 0, metaentry);
+ return err;
}
-bool chd_file::is_gd() const
+std::error_condition chd_file::check_is_gd() const noexcept
{
metadata_entry metaentry;
- return metadata_find(GDROM_OLD_METADATA_TAG, 0, metaentry)
- || metadata_find(GDROM_TRACK_METADATA_TAG, 0, metaentry);
+ std::error_condition err = metadata_find(GDROM_OLD_METADATA_TAG, 0, metaentry);
+ if (err == error::METADATA_NOT_FOUND)
+ err = metadata_find(GDROM_TRACK_METADATA_TAG, 0, metaentry);
+ return err;
}
-bool chd_file::is_dvd() const
+std::error_condition chd_file::check_is_dvd() const noexcept
{
metadata_entry metaentry;
return metadata_find(DVD_METADATA_TAG, 0, metaentry);
}
-bool chd_file::is_av() const
+std::error_condition chd_file::check_is_av() const noexcept
{
metadata_entry metaentry;
return metadata_find(AV_METADATA_TAG, 0, metaentry);
}
-
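
With the is_cd()/is_gd()/is_dvd()/is_av() predicates becoming check_is_*() functions returning std::error_condition, callers can tell "this CHD is simply not a CD" (error::METADATA_NOT_FOUND) apart from "the metadata could not be read at all". A hedged usage sketch, assuming these checks remain callable like the predicates they replace:

// Illustrative caller: classify an open CHD, treating METADATA_NOT_FOUND as a
// clean "no" and any other error as a failure worth reporting.
std::error_condition classify(chd_file &chd, bool &is_cd)
{
	std::error_condition err = chd.check_is_cd();
	if (!err)
	{
		is_cd = true;                          // CD-ROM metadata was found
		return std::error_condition();
	}
	is_cd = false;
	if (err == chd_file::error::METADATA_NOT_FOUND)
		return std::error_condition();         // simply not a CD image
	return err;                                // real failure (I/O, truncation, ...)
}
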