AK: Remove DuplexMemoryStream

This commit is contained in:
Tim Schumacher 2023-01-13 13:59:24 +01:00 committed by Linus Groh
parent 5896f8cf2b
commit d7eead4f4c
3 changed files with 0 additions and 220 deletions

View file

@@ -42,7 +42,6 @@ class Utf8CodePointIterator;
class Utf8View;
class InputStream;
class InputMemoryStream;
class DuplexMemoryStream;
class OutputStream;
class InputBitStream;
class OutputBitStream;
@@ -160,7 +159,6 @@ using AK::CircularQueue;
using AK::DeprecatedFlyString;
using AK::DeprecatedString;
using AK::DoublyLinkedList;
using AK::DuplexMemoryStream;
using AK::Error;
using AK::ErrorOr;
using AK::FixedArray;

View file

@@ -138,142 +138,9 @@ private:
Bytes m_bytes;
};
// An in-memory FIFO stream that supports both writing (appends at the tail)
// and reading (consumes from the head). Storage is a list of fixed-size
// chunks so the buffer can grow without relocating already-written data.
//
// Offset bookkeeping:
//   m_write_offset - total bytes ever written
//   m_read_offset  - total bytes ever consumed
//   m_base_offset  - total bytes dropped via try_discard_chunks(); always a
//                    multiple of chunk_size, so (offset - m_base_offset)
//                    indexes into m_chunks.
class DuplexMemoryStream final : public DuplexStream {
public:
// Each backing ByteBuffer holds exactly 4 KiB.
static constexpr size_t chunk_size = 4 * 1024;
bool unreliable_eof() const override { return eof(); }
// EOF means every written byte has been consumed.
bool eof() const { return m_write_offset == m_read_offset; }
// Skips `count` buffered bytes without copying them out. Sets a
// recoverable error and consumes nothing if fewer bytes are buffered.
bool discard_or_error(size_t count) override
{
if (m_write_offset - m_read_offset < count) {
set_recoverable_error();
return false;
}
m_read_offset += count;
try_discard_chunks();
return true;
}
// Searches the currently buffered (unread) region for `value` and returns
// its byte offset relative to the read position, if found.
Optional<size_t> offset_of(ReadonlyBytes value) const
{
// We can't directly pass m_chunks to memmem since we have a limited read/write range we want to search in.
Vector<ReadonlyBytes> spans;
auto chunk_index = (m_read_offset - m_base_offset) / chunk_size;
auto chunk_read_offset = (m_read_offset - m_base_offset) % chunk_size;
auto bytes_to_search = m_write_offset - m_read_offset;
for (; bytes_to_search > 0;) {
ReadonlyBytes span = m_chunks[chunk_index];
// Only the first chunk can start mid-chunk; later ones start at 0.
if (chunk_read_offset) {
span = span.slice(chunk_read_offset);
chunk_read_offset = 0;
}
// Clamp the final chunk so we never search past the write offset.
if (bytes_to_search < span.size()) {
spans.append(span.slice(0, bytes_to_search));
break;
}
bytes_to_search -= span.size();
spans.append(move(span));
++chunk_index;
}
return memmem(spans.begin(), spans.end(), value);
}
// Copies up to bytes.size() buffered bytes into `bytes` WITHOUT advancing
// the read offset. Returns the number of bytes copied.
size_t read_without_consuming(Bytes bytes) const
{
size_t nread = 0;
while (bytes.size() - nread > 0 && m_write_offset - m_read_offset - nread > 0) {
auto const chunk_index = (m_read_offset - m_base_offset + nread) / chunk_size;
// NOTE: m_base_offset is a multiple of chunk_size, so omitting it
// from the modulo below yields the same in-chunk offset.
auto const chunk_bytes = m_chunks[chunk_index].bytes().slice((m_read_offset + nread) % chunk_size).trim(m_write_offset - m_read_offset - nread);
nread += chunk_bytes.copy_trimmed_to(bytes.slice(nread));
}
return nread;
}
// Consuming read: copies buffered bytes out, advances the read offset and
// releases any fully-consumed leading chunks. Reads nothing if the stream
// already has an error flag set.
size_t read(Bytes bytes) override
{
if (has_any_error())
return 0;
auto const nread = read_without_consuming(bytes);
m_read_offset += nread;
try_discard_chunks();
return nread;
}
// All-or-nothing read: fails with a recoverable error (consuming nothing)
// unless bytes.size() bytes are buffered.
bool read_or_error(Bytes bytes) override
{
if (m_write_offset - m_read_offset < bytes.size()) {
set_recoverable_error();
return false;
}
return read(bytes) == bytes.size();
}
// Appends all of `bytes`, allocating a fresh chunk whenever the write
// offset lands on a chunk boundary. Always writes everything.
size_t write(ReadonlyBytes bytes) override
{
// FIXME: This doesn't write around chunk borders correctly?
size_t nwritten = 0;
while (bytes.size() - nwritten > 0) {
if ((m_write_offset + nwritten) % chunk_size == 0)
m_chunks.append(ByteBuffer::create_uninitialized(chunk_size).release_value_but_fixme_should_propagate_errors()); // FIXME: Handle possible OOM situation.
nwritten += bytes.slice(nwritten).copy_trimmed_to(m_chunks.last().bytes().slice((m_write_offset + nwritten) % chunk_size));
}
m_write_offset += nwritten;
return nwritten;
}
// Writes never fail here (the buffer grows on demand), so this always
// reports success.
bool write_or_error(ReadonlyBytes bytes) override
{
write(bytes);
return true;
}
// Returns a copy of the entire buffered region as one contiguous buffer,
// without consuming it.
ByteBuffer copy_into_contiguous_buffer() const
{
// FIXME: Handle possible OOM situation.
auto buffer = ByteBuffer::create_uninitialized(size()).release_value_but_fixme_should_propagate_errors();
auto const nread = read_without_consuming(buffer);
VERIFY(nread == buffer.size());
return buffer;
}
// Absolute read/write offsets (monotonically increasing; never reset).
size_t roffset() const { return m_read_offset; }
size_t woffset() const { return m_write_offset; }
// Number of buffered (written but not yet read) bytes.
size_t size() const { return m_write_offset - m_read_offset; }
private:
// Frees leading chunks that have been fully consumed, advancing
// m_base_offset by chunk_size for each one dropped.
void try_discard_chunks()
{
while (m_read_offset - m_base_offset >= chunk_size) {
m_chunks.take_first();
m_base_offset += chunk_size;
}
}
Vector<ByteBuffer> m_chunks;
size_t m_write_offset { 0 };
size_t m_read_offset { 0 };
size_t m_base_offset { 0 };
};
}
#if USING_AK_GLOBALLY
using AK::DuplexMemoryStream;
using AK::InputMemoryStream;
using AK::InputStream;
using AK::OutputMemoryStream;

View file

@@ -100,44 +100,6 @@ TEST_CASE(seeking_slicing_offset)
EXPECT_EQ(expected2, actual2);
}
TEST_CASE(duplex_simple)
{
    // A fresh stream starts out at EOF; writing one int clears that,
    // and reading it back returns the stream to EOF.
    DuplexMemoryStream stream;
    EXPECT(stream.eof());

    stream << 42;
    EXPECT(!stream.eof());

    int roundtripped;
    stream >> roundtripped;
    EXPECT_EQ(roundtripped, 42);
    EXPECT(stream.eof());
}
TEST_CASE(duplex_large_buffer)
{
    // Write 256 KiB in 1 KiB slices, then drain it in two 128 KiB halves,
    // checking the buffered size after each phase.
    DuplexMemoryStream stream;
    Array<u8, 1024> one_kibibyte;

    EXPECT_EQ(stream.size(), 0ul);

    for (size_t i = 0; i < 256; ++i)
        stream << one_kibibyte;
    EXPECT_EQ(stream.size(), 256 * 1024ul);

    for (size_t i = 0; i < 128; ++i)
        stream >> one_kibibyte;
    EXPECT_EQ(stream.size(), 128 * 1024ul);

    for (size_t i = 0; i < 128; ++i)
        stream >> one_kibibyte;
    EXPECT(stream.eof());
}
TEST_CASE(read_endian_values)
{
Array<u8, 8> const input { 0, 1, 2, 3, 4, 5, 6, 7 };
@@ -151,17 +113,6 @@ TEST_CASE(read_endian_values)
EXPECT_EQ(value2, 0x04050607u);
}
TEST_CASE(write_endian_values)
{
    // Writing 0x01020304 little-endian then big-endian should produce the
    // two byte orders back to back.
    Array<u8, 8> const expected_bytes { 4, 3, 2, 1, 1, 2, 3, 4 };

    DuplexMemoryStream stream;
    stream << LittleEndian<u32> { 0x01020304 } << BigEndian<u32> { 0x01020304 };

    EXPECT_EQ(stream.size(), 8u);
    EXPECT(expected_bytes.span() == stream.copy_into_contiguous_buffer().span());
}
TEST_CASE(new_output_memory_stream)
{
Array<u8, 16> buffer;
@@ -184,39 +135,3 @@ TEST_CASE(new_output_memory_stream)
EXPECT_EQ(stream.bytes().data(), buffer.data());
EXPECT_EQ(stream.bytes().size(), 2u);
}
TEST_CASE(offset_of_out_of_bounds)
{
    // Searching a chunk full of zero bytes for a pattern of 0xff bytes
    // must not find anything (nor read past the written region).
    Array<u8, 4> needle { 0xff, 0xff, 0xff, 0xff };

    Array<u8, DuplexMemoryStream::chunk_size> zeroed_chunk;
    zeroed_chunk.span().fill(0);

    DuplexMemoryStream stream;
    stream << zeroed_chunk;

    EXPECT(!stream.offset_of(needle).has_value());
}
TEST_CASE(unsigned_integer_underflow_regression)
{
    // Regression test: writing one byte more than a single chunk used to
    // trigger an unsigned underflow. Merely completing the write is the test.
    Array<u8, DuplexMemoryStream::chunk_size + 1> payload;
    DuplexMemoryStream stream;
    stream << payload;
}
TEST_CASE(offset_calculation_error_regression)
{
    // Regression test: after discarding the leading int, a read spanning a
    // chunk boundary must still pick up the bytes from the correct offsets.
    Array<u8, DuplexMemoryStream::chunk_size> written, read_back;
    written.span().fill(0xff);

    DuplexMemoryStream stream;
    stream << 0x00000000 << written << 0x00000000;

    stream.discard_or_error(sizeof(int));
    stream.read(read_back);

    EXPECT_EQ(written, read_back);
}