fix overflow of block_index in hash_picker for v2 torrents

This commit is contained in:
arvidn
2021-01-20 12:43:10 +01:00
committed by Arvid Norberg
parent ac1b534643
commit 58a489332c
5 changed files with 27 additions and 12 deletions

View File

@@ -1,3 +1,4 @@
* fix integer overflow in hash_picker and properly restrict max file sizes in torrents
* strengthen SSRF mitigation for web seeds
* 2.0.2 released

View File

@@ -52,6 +52,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/flags.hpp"
#include "libtorrent/error_code.hpp"
#include "libtorrent/units.hpp"
#include "libtorrent/disk_interface.hpp" // for default_block_size
#include "libtorrent/fwd.hpp"
namespace libtorrent {
@@ -229,7 +230,12 @@ namespace aux {
file_storage& operator=(file_storage&&) &;
// internal limitations restrict file sizes to not be larger than this
static constexpr std::int64_t max_file_size = (std::int64_t(1) << 48) - 1;
// We use int to index into file merkle trees, so a file may not contain more
// than INT_MAX entries. That means INT_MAX / 2 blocks (leafs) in each
// tree.
static constexpr std::int64_t max_file_size = std::min(
(std::int64_t(1) << 48) - 1
, std::int64_t(std::numeric_limits<int>::max() / 2) * default_block_size);
static constexpr std::int64_t max_file_offset = (std::int64_t(1) << 48) - 1;
// returns true if the piece length has been initialized

View File

@@ -651,7 +651,7 @@ namespace aux {
return;
}
if (max_file_size - m_total_size < file_size)
if (max_file_offset - m_total_size < file_size)
{
ec = make_error_code(errors::torrent_invalid_length);
return;

View File

@@ -35,6 +35,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "libtorrent/hash_picker.hpp"
#include "libtorrent/aux_/merkle.hpp"
#include "libtorrent/peer_connection.hpp"
#include "libtorrent/aux_/numeric_cast.hpp"
namespace libtorrent
{
@@ -340,9 +341,9 @@ bool validate_hash_request(hash_request const& hr, file_storage const& fs)
auto const f = m_files.file_index_at_piece(piece);
auto& merkle_tree = m_merkle_trees[f];
piece_index_t const file_first_piece = m_files.piece_index_at_file(f);
int const block_offset = static_cast<int>(static_cast<int>(piece) * std::int64_t(m_files.piece_length())
+ offset - m_files.file_offset(f));
int const block_index = block_offset / default_block_size;
std::int64_t const block_offset = static_cast<int>(piece) * std::int64_t(m_files.piece_length())
+ offset - m_files.file_offset(f);
int const block_index = aux::numeric_cast<int>(block_offset / default_block_size);
int const first_block_index = m_files.file_first_block_node(f);
int const block_tree_index = first_block_index + block_index;

View File

@@ -763,31 +763,38 @@ TORRENT_TEST(files_equal_symlink)
TEST_CHECK(!lt::aux::files_equal(fs1, fs2));
}
std::int64_t const int_max = std::numeric_limits<int>::max();
TORRENT_TEST(large_files)
{
file_storage fs1;
fs1.set_piece_length(0x4000);
TEST_THROW(fs1.add_file("test/0", std::int64_t(1) << 48));
TEST_THROW(fs1.add_file("test/0", int_max / 2 * lt::default_block_size + 1));
error_code ec;
fs1.add_file(ec, "test/0", (std::int64_t(1) << 48));
fs1.add_file(ec, "test/0", int_max * lt::default_block_size + 1);
TEST_EQUAL(ec, make_error_code(boost::system::errc::file_too_large));
// should not throw
TEST_NOTHROW(fs1.add_file("test/1", (std::int64_t(1) << 48) - 1));
TEST_NOTHROW(fs1.add_file("test/0", int_max / 2 * lt::default_block_size));
}
TORRENT_TEST(large_offset)
{
file_storage fs1;
fs1.set_piece_length(0x4000);
fs1.add_file("test/0", (std::int64_t(1) << 48) - 10);
// 11 bytes + (2^48 - 10) exceeds the limit
TEST_THROW(fs1.add_file("test/1", 11));
for (int i = 0; i < 16; ++i)
fs1.add_file(("test/" + std::to_string(i)).c_str(), int_max / 2 * lt::default_block_size);
// this exceeds the 2^48-1 limit
TEST_THROW(fs1.add_file("test/16", 262144));
error_code ec;
fs1.add_file(ec, "test/1", 11);
fs1.add_file(ec, "test/8", 262144);
TEST_EQUAL(ec, make_error_code(errors::torrent_invalid_length));
// this should be OK, but just
fs1.add_file("test/8", 262143);
}
TORRENT_TEST(large_filename)