in order to support SSL over uTP, the utp_socket manager either needs to be able to receive packets on multiple ports, or we need to peek into the first few bytes of the payload stream of a socket to determine whether or not it's an SSL connection. (The former is simpler but won't do as well with NATs)
@@ -80,7 +80,7 @@ do as well with NATs)
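The peek approach could look like the sketch below (a hypothetical helper, not part of this patch): a TLS handshake record always starts with content type 0x16 followed by a 0x03 version-major byte, which a plain BitTorrent handshake (length prefix 19, "BitTorrent protocol") never does.

// illustrative sketch, not libtorrent code: decide from the first peeked
// bytes of the payload stream whether the connection looks like SSL/TLS
#include <cstddef>

bool looks_like_tls(unsigned char const* buf, std::size_t len)
{
	// a TLS handshake record starts with content-type 22 (0x16) followed by
	// the protocol version, whose major byte is 3 for SSL3 and all TLS versions
	if (len < 2) return false; // not enough data peeked yet
	return buf[0] == 0x16 && buf[1] == 0x03;
}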
{
int deepest_bucket = 0;
int deepest_size = 0;
for (table_t::const_iterator i = m_buckets.begin()
@@ -155,7 +208,7 @@ int routing_table::depth() const
for (table_t::const_iterator i = m_buckets.begin()
, end(m_buckets.end()); i != end; ++i)
{
- if (i->live_nodes.size() < m_bucket_size)
+ if (i->live_nodes.size() < m_bucket_size / 2)
break;
// this bucket is full
++deepest_bucket;
@@ -182,7 +235,7 @@ void routing_table::print_state(std::ostream& os) const
{
for (table_t::const_iterator i = m_buckets.begin(), end(m_buckets.end());
i != end; ++i)
-
create a mapping of file-index to redirection URLs. Use that to form URLs instead. Support reconnecting to a new server without destructing this peer_connection
../src/web_peer_connection.cpp:546
t->remove_web_seed(this);
+ disconnect(errors::missing_location, 2);
+#ifdef TORRENT_DEBUG
+ TORRENT_ASSERT(m_statistics.last_payload_downloaded()
+ + m_statistics.last_protocol_downloaded()
+ == dl_target);
+#endif
+ return;
+ }
+
+ bool single_file_request = false;
+ if (!m_path.empty() && m_path[m_path.size() - 1] != '/')
+ single_file_request = true;
+
+ // add the redirected url and remove the current one
+ if (!single_file_request)
+ {
+ TORRENT_ASSERT(!m_file_requests.empty());
+ int file_index = m_file_requests.front();
+
+
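A structure along these lines could hold the per-file redirects the note asks for (hypothetical names, plain standard C++; not the member this patch actually adds):

// illustrative sketch, not libtorrent code: remember one redirect target per
// file index and prefer it when composing the next request URL for that file
#include <map>
#include <string>

struct web_seed_redirects
{
	void set(int file_index, std::string const& url)
	{ m_redirects[file_index] = url; }

	// the redirected URL if we have one, otherwise the original web seed URL
	std::string const& url_for(int file_index, std::string const& original) const
	{
		std::map<int, std::string>::const_iterator i = m_redirects.find(file_index);
		return i == m_redirects.end() ? original : i->second;
	}

private:
	std::map<int, std::string> m_redirects;
};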
in chunked encoding mode, this assert won't hold. the chunk headers should be subtracted from the receive_buffer_size
../src/http_seed_connection.cpp:117
boost::optional<piece_block_progress>
http_seed_connection::downloading_piece_progress() const
{
@@ -462,7 +566,7 @@ the chunk headers should be subtracted from the receive_buffer_size
../s
else
{
int receive_buffer_size = receive_buffer().left() - m_parser.body_start();
-
ret.bytes_downloaded = t->block_size() - receive_buffer_size;
}
// this is used to make sure that the block_index stays within
@@ -493,8 +597,8 @@ the chunk headers should be subtracted from the receive_buffer_size
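The correction the note asks for could look roughly like this sketch (chunk_header_bytes is a hypothetical input tracking the framing overhead; this is not the actual fix):

// illustrative sketch, not libtorrent code: in chunked encoding the receive
// buffer also holds the chunk-size headers, so exclude them before computing
// how much of the block has been downloaded
int block_bytes_downloaded(int block_size, int receive_buffer_size
	, int chunk_header_bytes)
{
	// payload actually buffered, with the chunked-encoding framing removed
	int const payload = receive_buffer_size - chunk_header_bytes;
	return block_size - payload;
}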
report the proper address of the router as the source IP of this understanding of our external address, instead of the empty address
../src/session_impl.cpp:5720
void session_impl::on_port_mapping(int mapping, address const& ip, int port
, error_code const& ec, int map_transport)
{
TORRENT_ASSERT(is_network_thread());
@@ -597,7 +701,7 @@ this understanding of our external address, instead of the empty address
we only need to do this if our global IPv4 address has changed, since the DHT (currently) only supports IPv4. Since restarting the DHT is kind of expensive, it would be nice to not do it unnecessarily
../src/session_impl.cpp:6401
void session_impl::set_external_address(address const& ip
, int source_type, address const& source)
{
#if defined TORRENT_VERBOSE_LOGGING
@@ -701,7 +805,7 @@ is kind of expensive, it would be nice to not do it unnecessarily
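The check could be as simple as the sketch below (a hypothetical helper; only a change of the IPv4 external address would trigger a DHT restart):

// illustrative sketch, not libtorrent code
#include <boost/asio/ip/address.hpp>

bool ipv4_external_address_changed(boost::asio::ip::address const& old_addr
	, boost::asio::ip::address const& new_addr)
{
	// the DHT only supports IPv4, so an IPv6-only change is irrelevant
	if (!new_addr.is_v4()) return false;
	return !old_addr.is_v4() || old_addr.to_v4() != new_addr.to_v4();
}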
make this depend on the error and on the filesystem the files are being downloaded to. If the error is no_space_left_on_device and the filesystem doesn't support sparse files, only zero the priorities of the pieces that are at the tails of all files, leaving everything up to the highest written piece in each file
@@ -756,8 +860,8 @@ up to the highest written piece in each file
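What "only zero the priorities of the pieces at the tails of all files" could look like, as a standalone sketch (hypothetical types; the real code would use file_storage):

// illustrative sketch, not libtorrent code: clear the priority of the last
// piece of each file instead of wiping all piece priorities
#include <cstddef>
#include <utility>
#include <vector>

void zero_tail_piece_priorities(
	std::vector<std::pair<long long, long long> > const& files // (offset, size)
	, int const piece_length
	, std::vector<int>& piece_priorities)
{
	for (std::size_t i = 0; i < files.size(); ++i)
	{
		if (files[i].second == 0) continue; // empty files have no tail piece
		long long const last_byte = files[i].first + files[i].second - 1;
		std::size_t const tail_piece = std::size_t(last_byte / piece_length);
		if (tail_piece < piece_priorities.size())
			piece_priorities[tail_piece] = 0;
	}
}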
once the filename renaming is removed from here, this check can be removed as well
../src/torrent_info.cpp:418
if (!extract_single_file(*list.list_at(i), e, root_dir
, &file_hash, &fee, &mtime))
return false;
@@ -1014,7 +1118,7 @@ this check can be removed as well
instead, have one instance per possible subnet: global IPv4, global IPv6, loopback, 192.168.x.x, 10.x.x.x, etc.
../include/libtorrent/ip_voter.hpp:100
bloom_filter<32> m_external_address_voters;
std::vector<external_ip_t> m_external_addresses;
address m_external_address;
};
@@ -1092,7 +1196,7 @@ this check can be removed as well
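One way to key those instances, as a sketch (a hypothetical classifier, not an existing libtorrent function):

// illustrative sketch, not libtorrent code: classify an address into the
// buckets mentioned above, so one ip_voter instance can be kept per class
#include <boost/asio/ip/address.hpp>

enum address_class_t
{ global_v4, global_v6, loopback_addr, private_192_168, private_10 };

address_class_t classify_address(boost::asio::ip::address const& a)
{
	if (a.is_v6())
		return a.to_v6().is_loopback() ? loopback_addr : global_v6;
	boost::asio::ip::address_v4 const v4 = a.to_v4();
	if (v4.is_loopback()) return loopback_addr;
	boost::asio::ip::address_v4::bytes_type const b = v4.to_bytes();
	if (b[0] == 10) return private_10;
	if (b[0] == 192 && b[1] == 168) return private_192_168;
	return global_v4;
}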
implement blocking write. Low priority since it's not used (yet)
../include/libtorrent/utp_stream.hpp:376
for (typename Mutable_Buffers::const_iterator i = buffers.begin()
, end(buffers.end()); i != end; ++i)
{
using asio::buffer_cast;
@@ -1143,7 +1247,7 @@ this check can be removed as well
if (m_encrypted && m_rc4_encrypted)
{
fun = encrypt;
userdata = m_enc_handler.get();
@@ -1247,7 +1351,7 @@ use allocate_disk_receive_buffer and release_disk_receive_buffer
move the erasing into the loop above; remove all payload ranges that have been sent
../src/bt_peer_connection.cpp:3323
for (std::vector<range>::iterator i = m_payloads.begin();
i != m_payloads.end(); ++i)
{
@@ -1350,7 +1454,7 @@ remove all payload ranges that has been sent
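Moving the erase into the loop would use the usual erase-while-iterating idiom, sketched here with a stand-in range type:

// illustrative sketch, not libtorrent code
#include <vector>

struct byte_range { int start; int length; };

void remove_sent_ranges(std::vector<byte_range>& payloads, int const bytes_sent)
{
	for (std::vector<byte_range>::iterator i = payloads.begin();
		i != payloads.end();)
	{
		// a range that ends at or before the send cursor has been sent in full
		if (i->start + i->length <= bytes_sent)
			i = payloads.erase(i);
		else
			++i;
	}
}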
support authentication (i.e. user name and password) in the URL
../src/http_tracker_connection.cpp:99
, aux::session_impl const& ses
, proxy_settings const& ps
, std::string const& auth
#if TORRENT_USE_I2P
@@ -1431,8 +1535,7 @@ remove all payload ranges that has been sent
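Supporting credentials embedded in the URL mostly means splitting the userinfo part out before connecting; a plain-string sketch (not an existing libtorrent helper):

// illustrative sketch, not libtorrent code: returns the "user:password" part
// of a URL, if any, and strips it from the URL in place
#include <string>

std::string split_url_auth(std::string& url)
{
	std::string::size_type const scheme = url.find("://");
	if (scheme == std::string::npos) return std::string();
	std::string::size_type const host_start = scheme + 3;
	std::string::size_type const host_end = url.find('/', host_start);
	std::string::size_type const at = url.find('@', host_start);
	// no userinfo, or the '@' belongs to the path rather than the authority
	if (at == std::string::npos
		|| (host_end != std::string::npos && at > host_end))
		return std::string();
	std::string const auth = url.substr(host_start, at - host_start);
	url.erase(host_start, at - host_start + 1);
	return auth;
}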
../src/bt_peer_connection.c
std::size_t pos = url.find("announce");
if (pos == std::string::npos)
{
- m_ios.post(boost::bind(&http_tracker_connection::fail_disp, self()
- , error_code(errors::scrape_not_available)));
+ tracker_connection::fail(error_code(errors::scrape_not_available));
return;
}
url.replace(pos, 8, "scrape");
@@ -1452,7 +1555,8 @@ remove all payload ranges that has been sent
while (new_size < size)
new_size <<= 1;
void** new_storage = (void**)malloc(sizeof(void*) * new_size);
@@ -1554,9 +1658,9 @@ remove all payload ranges that has been sent
// this piece index later
m_allowed_fast.push_back(index);
// if the peer has the piece and we want
@@ -1658,8 +1762,8 @@ we can construct a full bitfield
peers should really be corked/uncorked outside of all completed disk operations
../src/peer_connection.cpp:4577
// this means we're in seed mode and we haven't yet
// verified this piece (r.piece)
t->filesystem().async_read_and_hash(r, boost::bind(&peer_connection::on_disk_read_complete
, self(), _1, _2, r), cache.second);
@@ -1710,7 +1814,58 @@ all completed disk operations
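A scoped helper is the natural shape for corking around a whole batch of completed disk operations; here as a generic sketch (cork_socket()/uncork_socket() are assumed method names, not a documented API):

// illustrative sketch, not libtorrent code: cork on entry, uncork on exit
template <class Peer>
struct scoped_cork
{
	explicit scoped_cork(Peer& p) : m_peer(p) { m_peer.cork_socket(); }
	~scoped_cork() { m_peer.uncork_socket(); }
private:
	scoped_cork(scoped_cork const&);            // non-copyable
	scoped_cork& operator=(scoped_cork const&);
	Peer& m_peer;
};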
recalculate all connect candidates for all torrents
../src/session_impl.cpp:1943
m_upload_rate.close();
// #error closing the udp socket here means that
// the uTP connections cannot be closed gracefully
@@ -1864,7 +2019,7 @@ override at a time
have a separate list for these connections, instead of having to loop through all of them
../src/session_impl.cpp:3394
// --------------------------------------------------------------
if (!m_paused) m_auto_manage_time_scaler--;
if (m_auto_manage_time_scaler < 0)
{
@@ -1915,7 +2070,7 @@ override at a time
make this more generic to not just work if files have been renamed, but also if they have been merged into a single file, for instance. Maybe use the same format as .torrent files and reuse some code from torrent_info
../src/storage.cpp:629
for (;;)
{
@@ -2309,7 +2464,7 @@ maybe use the same format as .torrent files and reuse some code from torrent_inf
for (int i = 0; i < file_sizes_ent->list_size(); ++i)
{
-
what if file_base is used to merge several virtual files into a single physical file? We should probably disable this if file_base is used. This is not a widely used feature though
../src/storage.cpp:1246
int bytes_transferred = 0;
// if the file is opened in no_buffer mode, and the
@@ -2362,7 +2517,7 @@ if file_base is used. This is not a widely used feature though
../src/st
// makes unaligned requests (and the disk cache is disabled or fully utilized
// for write cache).
-
is verify_peer_cert called once per certificate in the chain, and this function just tells us which depth we're at right now? If so, the comment makes sense. Any certificate that isn't the leaf (i.e. the one presented by the peer) should be accepted automatically, given preverified is true. The leaf certificate needs to be verified to make sure its DN matches the info-hash
@@ -2418,12 +2573,12 @@ need to be verified to make sure its DN matches the info-hash
../src/tor
{
#if defined(TORRENT_VERBOSE_LOGGING) || defined(TORRENT_LOGGING)
match = true;
-
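If the question in the note above is answered yes, the callback would follow the usual asio verify-callback shape; a sketch (the DN/info-hash comparison is passed in as a flag here and not implemented):

// illustrative sketch, not libtorrent code
#include <boost/asio/ssl.hpp>
#include <openssl/x509_vfy.h>

bool verify_peer_cert_sketch(bool const preverified
	, boost::asio::ssl::verify_context& ctx
	, bool const leaf_dn_matches_info_hash)
{
	// depth 0 is the leaf, i.e. the certificate presented by the peer itself
	int const depth = X509_STORE_CTX_get_error_depth(ctx.native_handle());
	if (depth > 0) return preverified; // intermediates/root: chain check is enough
	// the leaf additionally has to carry a DN matching the torrent's info-hash
	return preverified && leaf_dn_matches_info_hash;
}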
make this more generic to not just work if files have been renamed, but also if they have been merged into a single file, for instance. Maybe use the same format as .torrent files and reuse some code from torrent_info. The mapped_files needs to be read both in the network thread and in the disk thread, since they both have their own mapped files structures which are kept in sync
../src/torrent.cpp:5171
if (m_seed_mode) m_verified.resize(m_torrent_file->num_pieces(), false);
super_seeding(rd.dict_find_int_value("super_seeding", 0));
m_last_scrape = rd.dict_find_int_value("last_scrape", 0);
@@ -2474,12 +2629,12 @@ which are kept in sync
if this is a merkle torrent and we can't restore the tree, we need to wipe all the bits in the have array, but not necessarily; we might want to do a full check to see if we have all the pieces. This is low priority since almost no one uses merkle torrents
../src/torrent.cpp:5307
add_web_seed(url, web_seed_entry::http_seed);
}
}
@@ -2530,9 +2685,9 @@ no one uses merkle torrents
make this more generic to not just work if files have been renamed, but also if they have been merged into a single file, for instance, using file_base
go through the pieces we have and count the total number of downloaders we have. Only count peers that are interested in us, since some peers might not send have messages for pieces we have. If num_interested == 0, we need to pick a new piece
../src/torrent.cpp:8037
}
rarest_pieces.clear();
rarest_rarity = pp.peer_count;
@@ -2637,9 +2792,9 @@ it num_interested == 0, we need to pick a new piece
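The counting described in the note above, as a self-contained sketch (the peer view struct is hypothetical, standing in for peer_connection state):

// illustrative sketch, not libtorrent code
#include <cstddef>
#include <vector>

struct peer_view
{
	bool interested_in_us;
	std::vector<bool> has_piece; // which pieces this peer already has
};

// number of peers that are interested in us and already have 'piece'
int downloaders_of(std::vector<peer_view> const& peers, int const piece)
{
	int num = 0;
	for (std::size_t i = 0; i < peers.size(); ++i)
	{
		if (!peers[i].interested_in_us) continue;
		if (std::size_t(piece) < peers[i].has_piece.size()
			&& peers[i].has_piece[piece]) ++num;
	}
	return num;
}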
it would be more efficient to not use a string here. However, the problem is that some trackers will respond with actual strings. For example, i2p trackers
../src/udp_tracker_connection.cpp:550
}
boost::shared_ptr<request_callback> cb = requester();
#if defined TORRENT_VERBOSE_LOGGING || defined TORRENT_LOGGING
@@ -2690,7 +2845,7 @@ with actual strings. For example i2p trackers
../src/udp_tracker_connect
{
restart_read_timeout();
int action = detail::read_int32(buf);
-
include the number of peers received from this tracker, at last announce
../include/libtorrent/torrent_info.hpp:123
// if this tracker failed the last time it was contacted
// this error code specifies what error occurred
error_code last_error;
@@ -2947,7 +3102,7 @@ m_sock.bind(endpoint, ec);
../include/libtorrent/proxy_base.hpp:166
// flags for the source bitmask, each indicating where
// we heard about this tracker
enum tracker_source
-
support using the windows API for UPnP operations as well
../include/libtorrent/upnp.hpp:121
{
virtual const char* name() const BOOST_SYSTEM_NOEXCEPT;
virtual std::string message(int ev) const BOOST_SYSTEM_NOEXCEPT;
virtual boost::system::error_condition default_error_condition(int ev) const BOOST_SYSTEM_NOEXCEPT
diff --git a/docs/udp_tracker_protocol.html b/docs/udp_tracker_protocol.html
index 580821bab..c6de5e5c0 100644
--- a/docs/udp_tracker_protocol.html
+++ b/docs/udp_tracker_protocol.html
@@ -3,7 +3,7 @@
-
+
Bittorrent udp-tracker protocol extension
diff --git a/docs/utp.html b/docs/utp.html
index 5e38ab9d3..02c7697f0 100644
--- a/docs/utp.html
+++ b/docs/utp.html
@@ -3,7 +3,7 @@
-
+
libtorrent manual
diff --git a/include/libtorrent/file_pool.hpp b/include/libtorrent/file_pool.hpp
index ae86df780..fb6a5c95e 100644
--- a/include/libtorrent/file_pool.hpp
+++ b/include/libtorrent/file_pool.hpp
@@ -51,17 +51,35 @@ POSSIBILITY OF SUCH DAMAGE.
namespace libtorrent
{
+ // this is an internal cache of open file handles. It's primarily used by
+ // storage_interface implementations. It provides semi weak guarantees of
+ // not opening more file handles than specified. Given multiple threads,
+ // each with the ability to lock a file handle (via smart pointer), there
+ // may be windows where more file handles are open.
struct TORRENT_EXPORT file_pool : boost::noncopyable
{
+ // ``size`` specifies the number of allowed file handles
+ // to hold open at any given time.
file_pool(int size = 40);
~file_pool();
boost::intrusive_ptr<file> open_file(void* st, std::string const& p
, int file_index, file_storage const& fs, int m, error_code& ec);
+
+ // release all files belonging to the specified storage_interface (``st``)
+ // the overload that takes ``file_index`` releases only the file with
+ // that index in storage ``st``.
void release(void* st);
void release(void* st, int file_index);
+
+ // update the allowed number of open file handles to ``size``.
void resize(int size);
+
+ // returns the current limit of number of allowed open file handles held
+ // by the file_pool.
int size_limit() const { return m_size; }
+
+ // internal
void set_low_prio_io(bool b) { m_low_prio_io = b; }
private:
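Based only on the interface documented in the comments above, typical use would look roughly like this (the storage pointer and the open_file() arguments are elided, since they need a file_storage):

// usage sketch, not part of the patch
#include "libtorrent/file_pool.hpp"

void file_pool_usage_sketch()
{
	// hold at most 40 file handles open at any given time
	libtorrent::file_pool pool(40);

	// the limit can be adjusted later
	pool.resize(100);
	int const limit = pool.size_limit(); // now 100
	(void)limit;

	// handles are released per storage object, optionally per file:
	//   pool.release(storage_ptr);             // all files of that storage
	//   pool.release(storage_ptr, file_index); // one file of that storage
}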
diff --git a/include/libtorrent/lazy_entry.hpp b/include/libtorrent/lazy_entry.hpp
index d11a7c381..347805124 100644
--- a/include/libtorrent/lazy_entry.hpp
+++ b/include/libtorrent/lazy_entry.hpp
@@ -368,6 +368,7 @@ namespace libtorrent
TORRENT_EXTRA_EXPORT std::string print_entry(lazy_entry const& e
, bool single_line = false, int indent = 0);
+ // get the ``error_category`` for bdecode errors
TORRENT_EXPORT boost::system::error_category& get_bdecode_category();
namespace bdecode_errors
diff --git a/include/libtorrent/storage.hpp b/include/libtorrent/storage.hpp
index 95b6d274c..23854e058 100644
--- a/include/libtorrent/storage.hpp
+++ b/include/libtorrent/storage.hpp
@@ -419,6 +419,10 @@ namespace libtorrent
bool verify_resume_data(lazy_entry const& rd, error_code& error);
bool write_resume_data(entry& rd) const;
+ file_storage const& files() const { return m_mapped_files?*m_mapped_files:m_files; }
+
+ private:
+
// this identifies a read or write operation
// so that default_storage::readwritev() knows what to
// do when it's actually touching the file
@@ -442,8 +446,6 @@ namespace libtorrent
size_type write_unaligned(boost::intrusive_ptr<file> const& file_handle
, size_type file_offset, file::iovec_t const* bufs, int num_bufs, error_code& ec);
- file_storage const& files() const { return m_mapped_files?*m_mapped_files:m_files; }
-
boost::scoped_ptr<file_storage> m_mapped_files;
file_storage const& m_files;
diff --git a/include/libtorrent/torrent_info.hpp b/include/libtorrent/torrent_info.hpp
index 0f51f8ab9..22d9ed56f 100644
--- a/include/libtorrent/torrent_info.hpp
+++ b/include/libtorrent/torrent_info.hpp
@@ -655,9 +655,15 @@ namespace libtorrent
boost::shared_array<char> metadata() const
{ return m_info_section; }
+ // internal
bool add_merkle_nodes(std::map<int, sha1_hash> const& subtree
, int piece);
std::map<int, sha1_hash> build_merkle_list(int piece) const;
+
+ // returns whether or not this is a merkle torrent.
+ // see BEP30__.
+ //
+ // __ http://bittorrent.org/beps/bep_0030.html
bool is_merkle_torrent() const { return !m_merkle_tree.empty(); }
// if we're logging member offsets, we need access to them