Compare commits

...

287 Commits

Author SHA1 Message Date
337605dc0f Release 0.4.15 2019-10-10 16:48:10 +01:00
14bdfa6b2e throttle even further - 500/s 2019-10-09 17:34:54 +01:00
ed3f9da773 throttle loading even further, to 1000/sec 2019-10-09 16:46:17 +01:00
251080d08f throttle loading of files to 500/s 2019-10-09 16:34:09 +01:00
f530ab999d operations on multiple selection in shared files table 2019-10-09 03:38:08 +01:00
4133384e48 ability to share multiple files and directories 2019-10-08 21:30:34 +01:00
600fc98868 update TODO 2019-10-07 12:38:26 +01:00
129eeb3b88 JDK needed, not JRE 2019-10-07 12:38:09 +01:00
20b51b78a0 reduce priority of file persister thread 2019-10-07 11:59:51 +01:00
33fe755b60 implement multiple-selection on downloads table 2019-10-07 04:26:35 +01:00
8b0668a134 Rewrite utils into Java, cache the persistable data of shared files to reduce object churn 2019-10-05 22:50:32 +01:00
730d2202fd bundles for linux available now 2019-10-05 18:53:43 +01:00
69906a986d set i2p.dir.base to prevent router creating files in PWD 2019-10-05 15:03:59 +01:00
5bc8fa8633 Preserve selection on refresh #18 2019-10-05 05:13:49 +01:00
7de7c9d8f3 Add 'Clear Hits' button to content control panel #18 2019-10-05 05:03:25 +01:00
e943f6019d disable all GUI unit tests, enable host-cache unit tests. The 'build' target now succeeds 2019-10-05 04:31:11 +01:00
2eec7bec5b fix most core tests 2019-10-05 04:20:14 +01:00
c36110cf76 update readme 2019-10-04 16:41:07 +01:00
abe28517bc Release 0.4.14 2019-10-04 13:00:57 +01:00
15bc4c064d center the button 2019-10-03 21:32:32 +01:00
91d771944b add option for sequential download 2019-10-03 20:45:22 +01:00
e09c456a13 make the download retry interval in seconds, default still 1 minute 2019-10-03 19:31:15 +01:00
d9c1067226 Add Neutral button to search tab, issue #17 2019-10-02 06:02:06 +01:00
eda3e7ad3a Add option to not search extra hop, only considered if connecting only to trusted peers, issue #6 2019-10-02 05:45:46 +01:00
e9798c7eaa remember last rejection and back off from hosts that reject us. Fix return value of retry and hopelessness predicates 2019-10-01 08:34:43 +01:00
66bb4eef5b close outbound establishments on a separate thread 2019-10-01 07:50:29 +01:00
55f260b3f4 update version 2019-09-29 19:21:06 +01:00
32d4c3965e Release 0.4.13 2019-09-29 19:00:20 +01:00
de1534d837 reduce the default host retry interval 2019-09-29 18:45:09 +01:00
7b58e8a88a separate setting for the interval after which a host is considered hopeless 2019-09-29 18:43:39 +01:00
8a03b89985 clean up the filtering logic; allow serialization of hosts that can be retried 2019-09-29 16:49:02 +01:00
1d97374857 track last successful attempt. Only re-attempt hosts if they have ever been successful. Do not serialize hosts considered hopeless 2019-09-29 16:19:19 +01:00
549e8c2d98 Release 0.4.12 2019-09-22 16:55:04 +01:00
b54d24db0d new update server destination 2019-09-22 16:47:35 +01:00
fa12e84345 stronger sig type 2019-09-22 16:23:01 +01:00
6430ff2691 bump i2p libs version 2019-09-22 16:13:12 +01:00
591313c81c point to the pkg project 2019-09-20 21:09:53 +01:00
ce7b6a0c65 change to gasp AA font table, try metal lnf if the others fail 2019-09-16 15:06:45 +01:00
5c4d4c4580 embedded router will not work without reseed certificates, so remove it 2019-09-16 15:04:34 +01:00
4cb864ff9f update version 2019-09-16 15:03:20 +01:00
417675ad07 update dark_trion's hostcache address 2019-07-22 21:48:29 +01:00
9513e5ba3c update todo 2019-07-20 13:15:44 +01:00
85610cf169 add new host-cache 2019-07-15 22:05:09 +01:00
e8322384b8 Release 0.4.11 2019-07-15 14:28:21 +01:00
179279ed30 Merge branch 'master' of https://github.com/zlatinb/muwire 2019-07-14 06:19:18 +01:00
ae79f0fded Clear Done button, thanks to Aegon 2019-07-14 06:19:05 +01:00
ed878b3762 Merge pull request #11 from zetok/readme
Add info about the default I2CP port to README.md
2019-07-12 09:17:24 +01:00
623cca0ef2 Add info about the default I2CP port to README.md
Also:
 - improved formatting a bit
 - removed trailing whitespaces
2019-07-12 07:28:12 +01:00
eaa883c3ba count duplicate files towards total in Uploads panel 2019-07-11 23:28:12 +01:00
7ae8076865 disable webui for now 2019-07-11 22:29:47 +01:00
b1aa92661c do not pack200 some jars because of duplicate entries 2019-07-11 20:42:24 +01:00
9ed94c8376 do not include tomcat runtime 2019-07-11 20:41:57 +01:00
fa6aea1abe attempt to produce an I2P plugin 2019-07-11 19:49:04 +01:00
0de84e704b hello webui 2019-07-11 18:34:27 +01:00
a767dda044 add empty grails project for a web ui 2019-07-11 17:56:42 +01:00
56e9235d7b avoid FS call to get file length 2019-07-11 15:28:25 +01:00
2fba9a74ce persist files.json every minute 2019-07-11 14:32:57 +01:00
2bb6826906 canonicalize all files before they enter FileManager and do not look for absolute path on persistence 2019-07-11 14:32:12 +01:00
9f339629a9 remove unnecessary canonicalization 2019-07-11 11:58:20 +01:00
58d4207f94 Release 0.4.10 2019-07-11 05:09:05 +01:00
32577a28dc some download stats 2019-07-11 05:00:25 +01:00
f7b43304d4 use split pane in downloads tab as well 2019-07-11 03:57:49 +01:00
dcbe09886d split pane instead of gridlayout 2019-07-11 03:48:05 +01:00
5a54b2dcda shift focus to search pane on search 2019-07-10 22:33:21 +01:00
581293b24f column sizes 2019-07-10 22:27:07 +01:00
cd072b9f76 enable/disable download button correctly 2019-07-10 22:23:20 +01:00
6b74fc5956 fix trust/distrust buttons 2019-07-10 22:17:32 +01:00
3de2f872bb show results per sender 2019-07-10 22:08:18 +01:00
fcde917d08 fix context menu and double-click 2019-07-10 21:26:13 +01:00
4ded065010 move buttons onto search result tab 2019-07-10 21:23:00 +01:00
18a1c7091a move downloads to their own pane 2019-07-10 20:54:45 +01:00
46aee19f80 disable the button of the currently open pane 2019-07-10 20:37:09 +01:00
92dd7064c6 Release 0.4.9 2019-07-10 12:02:36 +01:00
b2e4dda677 rearrange tables 2019-07-10 11:55:06 +01:00
e77a2c8961 clear hits table on refresh 2019-07-09 21:42:52 +01:00
ee2fd2ef68 single hit per search uuid 2019-07-09 21:22:31 +01:00
3f95d2bf1d trust and distrust buttons 2019-07-09 21:15:08 +01:00
1390983732 populate hits table 2019-07-09 21:05:49 +01:00
ce660cefe9 deleting of rules 2019-07-09 20:50:07 +01:00
72b81eb886 fix matching 2019-07-09 20:27:28 +01:00
57d593a68a persist watched keywords and regexes 2019-07-09 20:11:29 +01:00
39a81a3376 hook up rule creation 2019-07-09 19:53:40 +01:00
fd0bf17c24 add ability to unregister event listeners 2019-07-09 19:53:08 +01:00
ac12bff69b wip on content control panel ui 2019-07-09 19:20:06 +01:00
feef773bac hook up content control panel to rest of UI 2019-07-09 17:55:36 +01:00
239d8f12a7 wip on core side of content management 2019-07-09 17:13:09 +01:00
8bbc61a7cb add settings for watched keywords and regexes 2019-07-09 16:50:51 +01:00
7f31c4477f matchers for keywords 2019-07-09 11:47:55 +01:00
6bad67c1bf Release 0.4.8 2019-07-08 18:30:19 +01:00
c76e6dc99f Merge pull request #9 from zetok/backticks
Replace deprecated backticks with $() for command substitution
2019-07-08 08:24:37 +01:00
acf9db0db3 Replace deprecated backticks with $() for command substitution
Although it's a Bash FAQ, the point also applies to POSIX-compatible
shells: https://mywiki.wooledge.org/BashFAQ/082
2019-07-08 06:29:33 +01:00
69b4f0b547 Add trust/distrust action from monitor window. Thanks Aegon 2019-07-07 15:31:21 +01:00
80e165b505 fix download size in renderer, thanks Aegon 2019-07-07 11:17:56 +01:00
bcce55b873 fix integer overflow 2019-07-07 10:58:39 +01:00
d5c92560db fix integer overflow 2019-07-07 10:56:14 +01:00
f827c1c9bf Home directories for different OSes 2019-07-07 09:14:13 +01:00
88c5f1a02d Add GPG key link 2019-07-07 09:04:52 +01:00
d8e44f5f39 kill other workers if download is finished 2019-07-06 22:21:13 +01:00
72ff47ffe5 use custom renderer and comparator for download progress 2019-07-06 12:53:49 +01:00
066ee2c96d wrong list 2019-07-06 11:28:04 +01:00
0a8016dea7 enable stealing of pieces from other download workers 2019-07-06 11:26:18 +01:00
db36367b11 avoid AIOOBE 2019-07-06 11:00:31 +01:00
b6c9ccb7f6 return up to 9 X-Alts 2019-07-06 09:03:27 +01:00
a9dc636bce write pieces every time a downloader finishes 2019-07-06 00:52:49 +01:00
3cc0574d11 working partial pieces 2019-07-06 00:47:45 +01:00
20fab9b16d work on partial piece persistence 2019-07-06 00:17:46 +01:00
4015818323 center buttons 2019-07-05 17:15:50 +01:00
f569d45c8c reallign tables 2019-07-05 17:07:14 +01:00
3773647869 remove diff rejects 2019-07-05 16:24:57 +01:00
29cdbf018c remove trailing spaces 2019-07-05 16:24:19 +01:00
94bb7022eb tabs -> spaces 2019-07-05 16:22:34 +01:00
39808302df Show which file is hashing, thanks to Aegon 2019-07-05 16:20:03 +01:00
2d22f9c39e override router log manager 2019-07-05 12:32:23 +01:00
ee8f80bab6 up i2p to 0.9.41 2019-07-05 12:26:48 +01:00
3e6242e583 break when matching search is found 2019-07-04 18:12:22 +01:00
41181616ee compact display of incoming searches, thanks Aegon 2019-07-04 17:59:53 +01:00
eb2530ca32 fix sorting of download/upload tables thanks Aegon 2019-07-04 17:58:06 +01:00
b5233780ef Release 0.4.7 2019-07-03 20:36:54 +01:00
78753d7538 shut down cache client on shutdown 2019-07-03 19:50:00 +01:00
4740e8b4f5 log hostcache stats 2019-07-03 19:46:24 +01:00
ad5b00fc90 prettier progress status thanks to Aegon 2019-07-03 12:50:24 +01:00
d6c6880848 update readme 2019-07-03 07:27:48 +01:00
4f948c1b9e Release 0.4.6 2019-07-03 07:11:59 +01:00
2b68c24f9c use switch 2019-07-03 07:01:27 +01:00
bcdf0422db update for embedded router 2019-07-03 07:00:04 +01:00
f6434b478d remove FAQ 2019-07-03 06:56:20 +01:00
e979fdd26f update list view tables 2019-07-03 06:51:21 +01:00
e6bfcaaab9 size columns, center integers 2019-07-03 06:11:02 +01:00
9780108e8a disable trust buttons on action 2019-07-03 06:00:09 +01:00
697c7d2d6d enable/disable trust panel buttons 2019-07-03 05:41:17 +01:00
887d10c8bf move buttons around 2019-07-03 05:30:39 +01:00
ef6b8fe458 add a state for failed updates 2019-07-03 05:12:00 +01:00
20ab55d763 update todo 2019-07-03 00:23:21 +01:00
eda58c9e0d Merge branch 'trust-lists' 2019-07-03 00:04:50 +01:00
fb42fc0e35 add trust panel in options 2019-07-03 00:04:08 +01:00
35cabc47ad hook up trust and distrust buttons 2019-07-02 23:44:43 +01:00
5be97d0404 show something when review button is pressed 2019-07-02 22:51:04 +01:00
82b0fa253c enable update and unsubscribe buttons 2019-07-02 22:26:29 +01:00
011a4d5766 prevent duplicate updates and zero timestamps 2019-07-02 22:02:15 +01:00
5cd1ca88c1 do actual updating on in a threadpool 2019-07-02 21:34:29 +01:00
44c880d911 store subscriber list upon subscription 2019-07-02 20:53:29 +01:00
14857cb5ad swallow headers in trust list response 2019-07-02 20:35:50 +01:00
7daf981f1a fix NPE 2019-07-02 20:24:51 +01:00
b99bc0ea32 fix 2019-07-02 20:12:22 +01:00
1ccf6fbdfa participating bandwidth grid cell 2019-07-02 15:35:42 +01:00
5711979272 Release 0.4.5 2019-07-02 15:01:51 +01:00
9a5e2b1fa3 speed smoothing patch courtesy of Aegon 2019-07-02 14:46:40 +01:00
cafc5f582e subscribe button 2019-07-02 14:35:52 +01:00
a89b423dfc simpler speed calculation 2019-07-02 13:05:06 +01:00
79e8438941 always assume interval is at least 1 second 2019-07-02 12:49:00 +01:00
19c2c46491 prevent NPE on startup 2019-07-02 12:27:15 +01:00
78f1d54b69 add new host cache 2019-07-02 10:04:24 +01:00
9461649ed4 change sig type 2019-07-02 09:49:13 +01:00
8573ab2850 work on trust list UI 2019-07-02 09:35:21 +01:00
8b3d752727 add status to the trust list object 2019-07-02 08:59:30 +01:00
7c54bd8966 start work on sharing of trust lists 2019-07-01 23:33:39 +01:00
5d0fcb7027 start work on sharing of trust lists 2019-07-01 23:15:13 +01:00
3ec9654d3c start work on sharing of trust lists 2019-07-01 22:05:43 +01:00
7c8d64b462 start work on sharing of trust lists 2019-07-01 21:40:07 +01:00
31e30e3d31 excludePeerCaps 2019-07-01 18:31:58 +01:00
8caf6e99b0 show floodfill status 2019-07-01 13:18:31 +01:00
624155debd update todo 2019-07-01 06:17:46 +01:00
4468a262ae actually add timestamps to the list 2019-06-30 21:40:18 +01:00
1780901cb0 throttle connections to 10 searches per second 2019-06-30 21:22:49 +01:00
d830d9261f canonicalize before checking if file is already shared 2019-06-30 17:12:25 +01:00
f5e1833a48 Release 0.4.4 2019-06-30 15:55:23 +01:00
9feb2a3c8f fix NPE on update search 2019-06-30 15:11:13 +01:00
b27665f5dd Merge pull request #5 from 0rC0/patch-1
code markdown for commands and paths in README.md
2019-06-30 13:45:36 +01:00
4465aa4134 code markdown for commands and paths in README.md
... instead of quotes
2019-06-30 14:27:33 +02:00
ad766ac748 try to unmap files when done 2019-06-30 13:20:26 +01:00
d9e7d67d86 javadoc 2019-06-30 12:51:34 +01:00
3fefbc94b3 utility to decode personas 2019-06-30 10:41:42 +01:00
21034209a5 add ? to split pattern 2019-06-30 06:29:46 +01:00
7c04c0f83c unshare individual file 2019-06-30 05:44:08 +01:00
f5293d65dd update todo 2019-06-29 16:00:49 +01:00
8191bf6066 Release 0.4.3 2019-06-29 10:44:15 +01:00
29b6bfd463 support different update types 2019-06-29 10:31:27 +01:00
2f3d23bc34 fixes 2019-06-29 10:12:50 +01:00
98dd80c4b8 fix 2019-06-29 10:03:58 +01:00
d9edb2e128 ability to download updates automatically 2019-06-29 09:23:27 +01:00
de04b40b86 Release 0.4.2 2019-06-29 07:17:45 +01:00
7206a3d926 more i2p metrics 2019-06-29 07:07:48 +01:00
98b98d8938 I2P status panel 2019-06-29 06:33:53 +01:00
294b8fcc2f MW status window 2019-06-29 05:58:46 +01:00
32f601a1b1 add ability to change i2p port 2019-06-28 23:53:22 +01:00
8e3a398080 Release 0.4.1 2019-06-28 16:42:37 +01:00
720b9688b4 Add unsharing of directories 2019-06-28 16:08:04 +01:00
e3066161c5 do not perform filesystem operations in the UI thread 2019-06-27 23:29:48 +01:00
a9aa3a524f disable i2cp interface on embedded router 2019-06-27 09:56:18 +01:00
92848e818a on empty properties source from java props 2019-06-27 03:47:56 +01:00
a7aa3008c0 bandwidth settings 2019-06-27 00:42:27 +01:00
485325e824 embedded router except for logs 2019-06-26 23:25:22 +01:00
0df2a0e039 start work on embedded router 2019-06-26 22:39:25 +01:00
fb7b4466c2 update readme 2019-06-26 22:05:04 +01:00
53105245f4 Release 0.4.0 2019-06-26 21:59:28 +01:00
b68eab91e0 Release 0.3.10 2019-06-25 22:39:43 +01:00
f72cf91462 wait for files to be loaded before sharing watched directories 2019-06-25 22:24:32 +01:00
a655c4ef50 add toString 2019-06-25 22:24:15 +01:00
5d46e9b796 switch 4_ to INFO 2019-06-25 21:50:15 +01:00
642e6e67b3 wait for all files loaded before watching dirs 2019-06-25 21:43:07 +01:00
2b6b86f903 show how many pieces the remote side already has 2019-06-25 17:44:05 +01:00
f2706a4426 clarify upload column 2019-06-25 17:24:42 +01:00
1af75413aa update for brackets 2019-06-25 16:27:02 +01:00
adc4077b1a filter asterix 2019-06-25 15:54:30 +01:00
01f4e2453b limit search length to 128 characters 2019-06-25 15:53:53 +01:00
61267374dd move button around 2019-06-25 08:10:20 +01:00
970f814685 make mesh expiration configurable 2019-06-25 08:04:57 +01:00
4fd9fc1991 add option to change download location 2019-06-25 07:59:30 +01:00
26207ffd1b add constructor 2019-06-25 07:53:24 +01:00
2614cfbe5f make host clear interval configurable 2019-06-25 07:41:20 +01:00
f11d461ec0 make download sequential ratio a property 2019-06-25 07:34:26 +01:00
b2eb2d2755 show hidden files in file choosers 2019-06-24 23:09:20 +01:00
ea46a54f19 enable AA by default 2019-06-24 22:55:26 +01:00
627add45ad remove griffon icons 2019-06-24 22:51:43 +01:00
d364855459 logo 2019-06-24 22:13:03 +01:00
14ee35e77a Release 0.3.9 2019-06-24 18:39:59 +01:00
8773eb4ee0 fix piece size calculation 2019-06-24 18:29:00 +01:00
51425bbfd9 Release 0.3.8 2019-06-24 07:38:39 +01:00
6a4879bc0b always save pieces 2019-06-24 07:29:49 +01:00
e7fe56439b persist X-Have, fix flickering bug 2019-06-24 07:20:53 +01:00
2886feab4a do not modify the set of available pieces 2019-06-23 17:08:07 +01:00
fb91194026 even noisier log 2019-06-23 16:39:38 +01:00
4527478b0d even noisier 4_ 2019-06-23 12:42:44 +01:00
b0062f146e log roots of download exceptions 2019-06-23 12:10:19 +01:00
bf16561170 Release 0.3.7 2019-06-23 11:25:19 +01:00
3b23dc29c4 if all sources are expired forget mesh 2019-06-23 11:21:39 +01:00
c0645b670e no split on list 2019-06-23 10:50:19 +01:00
30613fe530 update todo 2019-06-23 09:56:51 +01:00
e7822f6edc expire sources, fix compilation 2019-06-23 09:43:56 +01:00
7e5c9ba115 actually save 2019-06-23 09:41:20 +01:00
647fa3a481 persist download mesh 2019-06-23 09:38:42 +01:00
538eca9297 Release 0.3.6 2019-06-23 08:54:28 +01:00
e73a23d4a4 fix space not showing 2019-06-23 08:44:51 +01:00
76e41a0383 fix restoring paused downloads 2019-06-23 08:42:45 +01:00
7045927666 hide monitor options from gui 2019-06-23 08:02:28 +01:00
5fb3086b42 update faq 2019-06-23 07:52:01 +01:00
2de18227c1 persist pause state 2019-06-23 07:48:49 +01:00
bd12a1de3d pause/resume downloads 2019-06-23 06:59:52 +01:00
a3a91050c8 update todo 2019-06-23 01:50:30 +01:00
6c1cc28e49 shutdown if connection to I2P router is lost 2019-06-22 17:32:12 +01:00
b6e5b54f05 do not show monitor by default 2019-06-22 14:51:26 +01:00
a6e559ec67 change some defaults 2019-06-22 06:54:49 +01:00
f11badb824 update todo 2019-06-21 22:43:46 +01:00
44da44ff6f Release 0.3.5 2019-06-21 22:35:54 +01:00
aae3fc29ca add logging.properties with various degree of noisiness 2019-06-21 22:28:57 +01:00
c30aa19d8b Merge branch 'download-mesh' 2019-06-21 22:26:17 +01:00
c79e8712d0 correctly determine if uploader has requested piece 2019-06-21 20:36:33 +01:00
ed12d78a48 clear pieces on cancel 2019-06-21 17:22:55 +01:00
d27872cc8b investigate StringIndexOutOfBounds 2019-06-21 16:29:52 +01:00
f794c39760 personas not destinations 2019-06-21 16:15:35 +01:00
2be9c425f7 compute which pieces are requested 2019-06-21 16:09:57 +01:00
ab5fea9216 416 if piece not downloaded 2019-06-21 16:03:20 +01:00
d1c8328080 do not send alts if there aren't any 2019-06-21 15:39:00 +01:00
89e761f53b write personas on the wire part1 2019-06-21 15:26:18 +01:00
40410eba63 fix constructor 2019-06-21 14:57:53 +01:00
85466a8e80 fix npe 2019-06-21 14:45:14 +01:00
c210af7870 source partial uploads from incompletes file 2019-06-21 14:39:20 +01:00
38ff49d28f downloaders get pieces from mesh manager 2019-06-21 14:17:10 +01:00
710f9f52a8 send X-Have and X-Alts from uploader 2019-06-21 13:58:21 +01:00
1b6eda5a40 skeleton of mesh manager 2019-06-21 13:34:00 +01:00
1ee9ccf098 parse X-Have on uploader side 2019-06-21 12:55:25 +01:00
0f07562de3 pass new sources to active downloaders 2019-06-21 12:39:16 +01:00
6eb1aa07f5 key downloaders by infohash 2019-06-21 12:29:32 +01:00
05b02834af parse X-Alt 2019-06-21 12:25:04 +01:00
56125f6df8 refactor X-Have decoding logic 2019-06-21 09:32:10 +01:00
8f9996848b send X-Have from downloader too 2019-06-21 09:25:28 +01:00
dd655ed60f test for re-requesting available pieces 2019-06-21 09:12:42 +01:00
8923c6ff7d exclude local results by default 2019-06-21 08:15:20 +01:00
807ab22f8e test parsing of X-Have 2019-06-21 06:43:48 +01:00
a26ad229ee more tests 2019-06-21 05:56:42 +01:00
5504dd2251 tighten conditions 2019-06-21 05:45:11 +01:00
f9777d29f4 get existing tests to pass 2019-06-21 05:41:49 +01:00
b23226e8c6 wip on parsing X-Have from uploader 2019-06-21 05:30:56 +01:00
1249ad29e0 claim pieces from list of available pieces 2019-06-21 04:42:02 +01:00
7bb5e5b632 Release 0.3.4 2019-06-20 21:07:50 +01:00
b2e43f9765 update split pattern and add unit test 2019-06-20 21:06:39 +01:00
2aa73c203a Release 0.3.3 2019-06-20 18:08:02 +01:00
18d2b56563 fix indexing 2019-06-20 17:57:36 +01:00
a455b4ad6e redirect exceptions in result sender to log 2019-06-20 17:22:59 +01:00
761b683a81 Release 0.3.2 2019-06-20 16:04:46 +01:00
1d41bcd825 prevent empty tokens in search index 2019-06-20 16:02:48 +01:00
f1ac038b55 update split pattern 2019-06-20 15:47:00 +01:00
396c636e42 prevent empty search terms 2019-06-20 15:29:27 +01:00
e32c858e90 update README with quick FAQ 2019-06-20 14:18:37 +01:00
821555f3f1 Release 0.3.1 2019-06-20 14:02:22 +01:00
089ab4f0d9 do not retry downloads if core is shut(ting) down 2019-06-20 13:40:04 +01:00
948b6292fe add shutdown hook to shutdown core on SIGTERM 2019-06-20 13:29:15 +01:00
278 changed files with 34430 additions and 4969 deletions

View File

@ -4,14 +4,14 @@ MuWire is an easy to use file-sharing program which offers anonymity using [I2P
It is inspired by the LimeWire Gnutella client and developed by a former LimeWire developer.
The current stable release - 0.2.5 is available for download at http://muwire.com. You can find technical documentation in the "doc" folder.
The current stable release - 0.4.14 is available for download at https://muwire.com. You can find technical documentation in the "doc" folder.
### Building
You need JRE 8 or newer. After installing that and setting up the appropriate paths, just type
You need JDK 8 or newer. After installing that and setting up the appropriate paths, just type
```
./gradlew clean assemble
./gradlew clean assemble
```
If you want to run the unit tests, type
@ -19,15 +19,23 @@ If you want to run the unit tests, type
./gradlew clean build
```
Some of the UI tests will fail because they haven't been written yet :-/
If you want to build binary bundles that do not depend on Java or I2P, see the https://github.com/zlatinb/muwire-pkg project
### Running
You need to have an I2P router up and running on the same machine. After you build the application, look inside "gui/build/distributions". Untar/unzip one of the "shadow" files and then run the jar contained inside by typing "java -jar MuWire-x.y.z.jar" in a terminal or command prompt. If you use a custom I2CP host and port, create a file $HOME/.MuWire/i2p.properties and put "i2cp.tcp.host=<host>" and "i2cp.tcp.port=<port>" in there.
After you build the application, look inside `gui/build/distributions`. Untar/unzip one of the `shadow` files and then run the jar contained inside by typing `java -jar gui-x.y.z.jar` in a terminal or command prompt.
The first time you run MuWire it will ask you to select a nickname. This nickname will be displayed with search results, so that others can verify the file was shared by you. It is best to leave MuWire running all the time, just like I2P.
If you have an I2P router running on the same machine, that is all you need to do. If you use a custom I2CP host and port, create a file `i2p.properties` and put `i2cp.tcp.host=<host>` and `i2cp.tcp.port=<port>` in there. On Windows that file should go into `%HOME%\AppData\Roaming\MuWire`, on Mac into `$HOME/Library/Application Support/MuWire`, and on Linux into `$HOME/.MuWire`.
[Default I2CP port]\: `7654`
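For illustration, a minimal `i2p.properties` that overrides the I2CP host and port could contain just these two keys; the values below are the defaults MuWire falls back to (see the `Core` changes further down), so substitute your router's actual I2CP settings:
```
i2cp.tcp.host=127.0.0.1
i2cp.tcp.port=7654
```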
### GPG Fingerprint
```
471B 9FD4 5517 A5ED 101F C57D A728 3207 2D52 5E41
```
You can find the full key at https://keybase.io/zlatinb
### Known bugs and limitations
* Many UI features you would expect are not there yet
[Default I2CP port]: https://geti2p.net/en/docs/ports

21
TODO.md
View File

@ -4,10 +4,6 @@ Not in any particular order yet
### Big Items
##### Alternate Locations
This helps peers discover new sources for a file while the download is in progress. Also makes sharing of partial files possible.
##### Bloom Filters
This reduces query traffic by not sending last hop queries to peers that definitely do not have the file
@ -16,18 +12,6 @@ This reduces query traffic by not sending last hop queries to peers that definit
This helps with scalability
##### Trust List Sharing
For helping users make better decisions whom to trust
##### Content Control Panel
To allow every user to not route queries for content they do not like. This is mostly GUI work, the backend part is simple
##### Packaging With JRE, Embedded Router
For ease of deployment for new users, and so that users do not need to run a separate I2P router
##### Web UI, REST Interface, etc.
Basically any non-gui non-cli user interface
@ -38,8 +22,5 @@ To enable parsing of metadata from known file types and the user editing it or a
### Small Items
* Detect if router is dead and show warning or exit
* Wrapper of some kind for in-place upgrades
* Download file sequentially
* Unsharing of files
* Multiple-selection download, Ctrl-A
* Automatic adjustment of number of I2P tunnels

View File

@ -2,7 +2,7 @@ subprojects {
apply plugin: 'groovy'
dependencies {
compile 'net.i2p:i2p:0.9.40'
compile 'net.i2p:i2p:0.9.42'
compile 'org.codehaus.groovy:groovy-all:2.4.15'
}

View File

@ -16,66 +16,66 @@ import com.muwire.core.upload.UploadEvent
import com.muwire.core.upload.UploadFinishedEvent
class Cli {
public static void main(String[] args) {
def home = System.getProperty("user.home") + File.separator + ".MuWire"
home = new File(home)
if (!home.exists())
home.mkdirs()
def propsFile = new File(home,"MuWire.properties")
if (!propsFile.exists()) {
println "create props file ${propsFile.getAbsoluteFile()} before launching MuWire"
System.exit(1)
}
def props = new Properties()
propsFile.withInputStream { props.load(it) }
props = new MuWireSettings(props)
Core core
Core core
try {
core = new Core(props, home, "0.3.0")
core = new Core(props, home, "0.4.15")
} catch (Exception bad) {
bad.printStackTrace(System.out)
println "Failed to initialize core, exiting"
System.exit(1)
}
def filesList
if (args.length == 0) {
println "Enter a file containing list of files to share"
def reader = new BufferedReader(new InputStreamReader(System.in))
filesList = reader.readLine()
} else
} else
filesList = args[0]
Thread.sleep(1000)
println "loading shared files from $filesList"
// listener for shared files
def sharedListener = new SharedListener()
core.eventBus.register(FileHashedEvent.class, sharedListener)
core.eventBus.register(FileLoadedEvent.class, sharedListener)
// for connections
def connectionsListener = new ConnectionListener()
core.eventBus.register(ConnectionEvent.class, connectionsListener)
core.eventBus.register(DisconnectionEvent.class, connectionsListener)
// for uploads
def uploadsListener = new UploadsListener()
core.eventBus.register(UploadEvent.class, uploadsListener)
core.eventBus.register(UploadFinishedEvent.class, uploadsListener)
Timer timer = new Timer("status-printer", true)
timer.schedule({
println String.valueOf(new Date()) + " Connections $connectionsListener.connections Uploads $uploadsListener.uploads Shared $sharedListener.shared"
} as TimerTask, 60000, 60000)
def latch = new CountDownLatch(1)
def fileLoader = new Object() {
public void onAllFilesLoadedEvent(AllFilesLoadedEvent e) {
@ -85,14 +85,14 @@ class Cli {
core.eventBus.register(AllFilesLoadedEvent.class, fileLoader)
core.startServices()
core.eventBus.publish(new UILoadedEvent())
core.eventBus.publish(new UILoadedEvent())
println "waiting for files to load"
latch.await()
// now we begin
println "MuWire is ready"
filesList = new File(filesList)
filesList.withReader {
filesList.withReader {
def toShare = it.readLine()
core.eventBus.publish(new FileSharedEvent(file : new File(toShare)))
}
@ -103,7 +103,7 @@ class Cli {
})
Thread.sleep(Integer.MAX_VALUE)
}
static class ConnectionListener {
volatile int connections
public void onConnectionEvent(ConnectionEvent e) {
@ -114,7 +114,7 @@ class Cli {
connections--
}
}
static class UploadsListener {
volatile int uploads
public void onUploadEvent(UploadEvent e) {
@ -126,7 +126,7 @@ class Cli {
println String.valueOf(new Date()) + " Finished upload of ${e.uploader.file.getName()} to ${e.uploader.request.downloader.getHumanReadableName()}"
}
}
static class SharedListener {
volatile int shared
void onFileHashedEvent(FileHashedEvent e) {

View File

@ -17,31 +17,31 @@ import com.muwire.core.search.UIResultEvent
import net.i2p.data.Base64
class CliDownloader {
private static final List<Downloader> downloaders = Collections.synchronizedList(new ArrayList<>())
private static final Map<UUID,ResultsHolder> resultsListeners = new ConcurrentHashMap<>()
public static void main(String []args) {
def home = System.getProperty("user.home") + File.separator + ".MuWire"
home = new File(home)
if (!home.exists())
home.mkdirs()
def propsFile = new File(home,"MuWire.properties")
if (!propsFile.exists()) {
println "create props file ${propsFile.getAbsoluteFile()} before launching MuWire"
System.exit(1)
}
def props = new Properties()
propsFile.withInputStream { props.load(it) }
props = new MuWireSettings(props)
def filesList
int connections
int resultWait
if (args.length != 3) {
println "Enter a file containing list of hashes of files to download, " +
println "Enter a file containing list of hashes of files to download, " +
"how many connections you want before searching" +
"and how long to wait for results to arrive"
System.exit(1)
@ -53,24 +53,24 @@ class CliDownloader {
Core core
try {
core = new Core(props, home, "0.3.0")
core = new Core(props, home, "0.4.15")
} catch (Exception bad) {
bad.printStackTrace(System.out)
println "Failed to initialize core, exiting"
System.exit(1)
}
def latch = new CountDownLatch(connections)
def connectionListener = new ConnectionWaiter(latch : latch)
core.eventBus.register(ConnectionEvent.class, connectionListener)
core.startServices()
println "starting to wait until there are $connections connections"
latch.await()
println "connected, searching for files"
def file = new File(filesList)
file.eachLine {
String[] split = it.split(",")
@ -79,22 +79,22 @@ class CliDownloader {
def hash = Base64.decode(split[0])
def searchEvent = new SearchEvent(searchHash : hash, uuid : uuid)
core.eventBus.publish(new QueryEvent(searchEvent : searchEvent, firstHop:true,
replyTo: core.me.destination, receivedOn : core.me.destination, originator: core.me))
replyTo: core.me.destination, receivedOn : core.me.destination, originator: core.me))
}
println "waiting for results to arrive"
Thread.sleep(resultWait * 1000)
core.eventBus.register(DownloadStartedEvent.class, new DownloadListener())
resultsListeners.each { uuid, resultsListener ->
println "starting download of $resultsListener.fileName from ${resultsListener.getResults().size()} hosts"
File target = new File(resultsListener.fileName)
core.eventBus.publish(new UIDownloadEvent(target : target, result : resultsListener.getResults()))
}
Thread.sleep(1000)
Timer timer = new Timer("stats-printer")
timer.schedule({
println "==== STATUS UPDATE ==="
@ -109,7 +109,7 @@ class CliDownloader {
}
println "==== END ==="
} as TimerTask, 60000, 60000)
println "waiting for downloads to finish"
while(true) {
boolean allFinished = true
@ -120,10 +120,10 @@ class CliDownloader {
break
Thread.sleep(1000)
}
println "all downloads finished"
}
static class ResultsHolder {
final List<UIResultEvent> results = Collections.synchronizedList(new ArrayList<>())
String fileName
@ -134,7 +134,7 @@ class CliDownloader {
results
}
}
static class ResultsListener {
UUID uuid
String fileName
@ -148,7 +148,7 @@ class CliDownloader {
listener.add(e)
}
}
static class ConnectionWaiter {
CountDownLatch latch
public void onConnectionEvent(ConnectionEvent e) {
@ -156,7 +156,7 @@ class CliDownloader {
latch.countDown()
}
}
static class DownloadListener {
public void onDownloadStartedEvent(DownloadStartedEvent e) {

View File

@ -11,10 +11,10 @@ class FileList {
println "pass files.json as argument"
System.exit(1)
}
def slurper = new JsonSlurper()
File filesJson = new File(args[0])
filesJson.eachLine {
filesJson.eachLine {
def json = slurper.parseText(it)
String name = DataUtil.readi18nString(Base64.decode(json.file))
println "$name,$json.length,$json.pieceSize,$json.infoHash"

View File

@ -2,8 +2,9 @@ apply plugin : 'application'
mainClassName = 'com.muwire.core.Core'
applicationDefaultJvmArgs = ['-Djava.util.logging.config.file=logging.properties']
dependencies {
compile 'net.i2p.client:mstreaming:0.9.40'
compile 'net.i2p.client:streaming:0.9.40'
compile 'net.i2p:router:0.9.42'
compile 'net.i2p.client:mstreaming:0.9.42'
compile 'net.i2p.client:streaming:0.9.42'
testCompile 'org.junit.jupiter:junit-jupiter-api:5.4.2'
testCompile 'junit:junit:4.12'

View File

@ -1,15 +0,0 @@
package com.muwire.core
import net.i2p.crypto.SigType
class Constants {
public static final byte PERSONA_VERSION = (byte)1
public static final SigType SIG_TYPE = SigType.EdDSA_SHA512_Ed25519
public static final int MAX_HEADER_SIZE = 0x1 << 14
public static final int MAX_HEADERS = 16
public static final float DOWNLOAD_SEQUENTIAL_RATIO = 0.8f
public static final String SPLIT_PATTERN = "[\\.,_-]"
}
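As an aside, `SPLIT_PATTERN` is what breaks shared file names into search tokens (compare the "update split pattern" and "prevent empty tokens in search index" commits above). A toy Groovy sketch of that idea, not MuWire's actual indexing code:
```
// Illustration only: tokenize a file name on the separators matched by a
// pattern like Constants.SPLIT_PATTERN, dropping the empty tokens that
// consecutive separators produce.
def splitPattern = "[\\.,_-]"
def tokens = "My__Album-Track..flac".split(splitPattern).findAll { !it.isEmpty() }
assert tokens == ["My", "Album", "Track", "flac"]
```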

View File

@ -1,6 +1,7 @@
package com.muwire.core
import java.nio.charset.StandardCharsets
import java.util.concurrent.atomic.AtomicBoolean
import com.muwire.core.connection.ConnectionAcceptor
import com.muwire.core.connection.ConnectionEstablisher
@ -12,10 +13,14 @@ import com.muwire.core.connection.I2PConnector
import com.muwire.core.connection.LeafConnectionManager
import com.muwire.core.connection.UltrapeerConnectionManager
import com.muwire.core.download.DownloadManager
import com.muwire.core.download.SourceDiscoveredEvent
import com.muwire.core.download.UIDownloadCancelledEvent
import com.muwire.core.download.UIDownloadEvent
import com.muwire.core.download.UIDownloadPausedEvent
import com.muwire.core.download.UIDownloadResumedEvent
import com.muwire.core.files.FileDownloadedEvent
import com.muwire.core.files.FileHashedEvent
import com.muwire.core.files.FileHashingEvent
import com.muwire.core.files.FileHasher
import com.muwire.core.files.FileLoadedEvent
import com.muwire.core.files.FileManager
@ -23,20 +28,28 @@ import com.muwire.core.files.FileSharedEvent
import com.muwire.core.files.FileUnsharedEvent
import com.muwire.core.files.HasherService
import com.muwire.core.files.PersisterService
import com.muwire.core.files.AllFilesLoadedEvent
import com.muwire.core.files.DirectoryUnsharedEvent
import com.muwire.core.files.DirectoryWatcher
import com.muwire.core.hostcache.CacheClient
import com.muwire.core.hostcache.HostCache
import com.muwire.core.hostcache.HostDiscoveredEvent
import com.muwire.core.mesh.MeshManager
import com.muwire.core.search.QueryEvent
import com.muwire.core.search.ResultsEvent
import com.muwire.core.search.ResultsSender
import com.muwire.core.search.SearchEvent
import com.muwire.core.search.SearchManager
import com.muwire.core.search.UIResultBatchEvent
import com.muwire.core.trust.TrustEvent
import com.muwire.core.trust.TrustService
import com.muwire.core.trust.TrustSubscriber
import com.muwire.core.trust.TrustSubscriptionEvent
import com.muwire.core.update.UpdateClient
import com.muwire.core.upload.UploadManager
import com.muwire.core.util.MuWireLogManager
import com.muwire.core.content.ContentControlEvent
import com.muwire.core.content.ContentManager
import groovy.util.logging.Log
import net.i2p.I2PAppContext
@ -45,6 +58,7 @@ import net.i2p.client.I2PSession
import net.i2p.client.streaming.I2PSocketManager
import net.i2p.client.streaming.I2PSocketManagerFactory
import net.i2p.client.streaming.I2PSocketOptions
import net.i2p.client.streaming.I2PSocketManager.DisconnectListener
import net.i2p.crypto.DSAEngine
import net.i2p.crypto.SigType
import net.i2p.data.Destination
@ -52,16 +66,20 @@ import net.i2p.data.PrivateKey
import net.i2p.data.Signature
import net.i2p.data.SigningPrivateKey
import net.i2p.router.Router
import net.i2p.router.RouterContext
@Log
public class Core {
final EventBus eventBus
final Persona me
final File home
final Properties i2pOptions
final MuWireSettings muOptions
private final TrustService trustService
private final TrustSubscriber trustSubscriber
private final PersisterService persisterService
private final HostCache hostCache
private final ConnectionManager connectionManager
@ -73,24 +91,17 @@ public class Core {
private final DownloadManager downloadManager
private final DirectoryWatcher directoryWatcher
final FileManager fileManager
final UploadManager uploadManager
final ContentManager contentManager
private final Router router
final AtomicBoolean shutdown = new AtomicBoolean()
public Core(MuWireSettings props, File home, String myVersion) {
this.home = home
this.home = home
this.muOptions = props
log.info "Initializing I2P context"
I2PAppContext.getGlobalContext().logManager()
I2PAppContext.getGlobalContext()._logManager = new MuWireLogManager()
log.info("initializing I2P socket manager")
def i2pClient = new I2PClientFactory().createClient()
File keyDat = new File(home, "key.dat")
if (!keyDat.exists()) {
log.info("Creating new key.dat")
keyDat.withOutputStream {
i2pClient.createDestination(it, Constants.SIG_TYPE)
}
}
i2pOptions = new Properties()
def i2pOptionsFile = new File(home,"i2p.properties")
if (i2pOptionsFile.exists()) {
@ -98,8 +109,8 @@ public class Core {
if (!i2pOptions.containsKey("inbound.nickname"))
i2pOptions["inbound.nickname"] = "MuWire"
if (!i2pOptions.containsKey("outbound.nickname"))
i2pOptions["outbound.nickname"] = "MuWire"
if (!i2pOptions.containsKey("outbound.nickname"))
i2pOptions["outbound.nickname"] = "MuWire"
} else {
i2pOptions["inbound.nickname"] = "MuWire"
i2pOptions["outbound.nickname"] = "MuWire"
@ -109,18 +120,59 @@ public class Core {
i2pOptions["outbound.quantity"] = "4"
i2pOptions["i2cp.tcp.host"] = "127.0.0.1"
i2pOptions["i2cp.tcp.port"] = "7654"
Random r = new Random()
int port = r.nextInt(60000) + 4000
i2pOptions["i2np.ntcp.port"] = String.valueOf(port)
i2pOptions["i2np.udp.port"] = String.valueOf(port)
i2pOptionsFile.withOutputStream { i2pOptions.store(it, "") }
}
if (!props.embeddedRouter) {
log.info "Initializing I2P context"
I2PAppContext.getGlobalContext().logManager()
I2PAppContext.getGlobalContext()._logManager = new MuWireLogManager()
router = null
} else {
log.info("launching embedded router")
Properties routerProps = new Properties()
routerProps.setProperty("i2p.dir.base", home.getAbsolutePath())
routerProps.setProperty("i2p.dir.config", home.getAbsolutePath())
routerProps.setProperty("router.excludePeerCaps", "KLM")
routerProps.setProperty("i2np.inboundKBytesPerSecond", String.valueOf(props.inBw))
routerProps.setProperty("i2np.outboundKBytesPerSecond", String.valueOf(props.outBw))
routerProps.setProperty("i2cp.disableInterface", "true")
routerProps.setProperty("i2np.ntcp.port", i2pOptions["i2np.ntcp.port"])
routerProps.setProperty("i2np.udp.port", i2pOptions["i2np.udp.port"])
routerProps.setProperty("i2np.udp.internalPort", i2pOptions["i2np.udp.port"])
router = new Router(routerProps)
router.getContext().setLogManager(new MuWireLogManager())
router.runRouter()
while(!router.isRunning())
Thread.sleep(100)
}
log.info("initializing I2P socket manager")
def i2pClient = new I2PClientFactory().createClient()
File keyDat = new File(home, "key.dat")
if (!keyDat.exists()) {
log.info("Creating new key.dat")
keyDat.withOutputStream {
i2pClient.createDestination(it, Constants.SIG_TYPE)
}
}
// options like tunnel length and quantity
I2PSession i2pSession
I2PSocketManager socketManager
keyDat.withInputStream {
socketManager = new I2PSocketManagerFactory().createManager(it, i2pOptions["i2cp.tcp.host"], i2pOptions["i2cp.tcp.port"].toInteger(), i2pOptions)
}
socketManager.getDefaultOptions().setReadTimeout(60000)
socketManager.getDefaultOptions().setConnectTimeout(30000)
i2pSession = socketManager.getSession()
I2PSession i2pSession
I2PSocketManager socketManager
keyDat.withInputStream {
socketManager = new I2PSocketManagerFactory().createManager(it, i2pOptions["i2cp.tcp.host"], i2pOptions["i2cp.tcp.port"].toInteger(), i2pOptions)
}
socketManager.getDefaultOptions().setReadTimeout(60000)
socketManager.getDefaultOptions().setConnectTimeout(30000)
socketManager.addDisconnectListener({eventBus.publish(new RouterDisconnectedEvent())} as DisconnectListener)
i2pSession = socketManager.getSession()
def destination = new Destination()
def spk = new SigningPrivateKey(Constants.SIG_TYPE)
keyDat.withInputStream {
@ -128,8 +180,8 @@ public class Core {
def privateKey = new PrivateKey()
privateKey.readBytes(it)
spk.readBytes(it)
}
}
def baos = new ByteArrayOutputStream()
def daos = new DataOutputStream(baos)
daos.write(Constants.PERSONA_VERSION)
@ -146,89 +198,110 @@ public class Core {
me = new Persona(new ByteArrayInputStream(baos.toByteArray()))
log.info("Loaded myself as "+me.getHumanReadableName())
eventBus = new EventBus()
log.info("initializing trust service")
File goodTrust = new File(home, "trusted")
File badTrust = new File(home, "distrusted")
trustService = new TrustService(goodTrust, badTrust, 5000)
eventBus.register(TrustEvent.class, trustService)
log.info "initializing file manager"
fileManager = new FileManager(eventBus, props)
eventBus.register(FileHashedEvent.class, fileManager)
eventBus.register(FileLoadedEvent.class, fileManager)
eventBus.register(FileDownloadedEvent.class, fileManager)
eventBus.register(FileUnsharedEvent.class, fileManager)
eventBus.register(SearchEvent.class, fileManager)
log.info "initializing persistence service"
persisterService = new PersisterService(new File(home, "files.json"), eventBus, 15000, fileManager)
eventBus = new EventBus()
log.info("initializing trust service")
File goodTrust = new File(home, "trusted")
File badTrust = new File(home, "distrusted")
trustService = new TrustService(goodTrust, badTrust, 5000)
eventBus.register(TrustEvent.class, trustService)
log.info "initializing file manager"
fileManager = new FileManager(eventBus, props)
eventBus.register(FileHashedEvent.class, fileManager)
eventBus.register(FileLoadedEvent.class, fileManager)
eventBus.register(FileDownloadedEvent.class, fileManager)
eventBus.register(FileUnsharedEvent.class, fileManager)
eventBus.register(SearchEvent.class, fileManager)
eventBus.register(DirectoryUnsharedEvent.class, fileManager)
log.info("initializing mesh manager")
MeshManager meshManager = new MeshManager(fileManager, home, props)
eventBus.register(SourceDiscoveredEvent.class, meshManager)
log.info "initializing persistence service"
persisterService = new PersisterService(new File(home, "files.json"), eventBus, 60000, fileManager)
eventBus.register(UILoadedEvent.class, persisterService)
log.info("initializing host cache")
File hostStorage = new File(home, "hosts.json")
log.info("initializing host cache")
File hostStorage = new File(home, "hosts.json")
hostCache = new HostCache(trustService,hostStorage, 30000, props, i2pSession.getMyDestination())
eventBus.register(HostDiscoveredEvent.class, hostCache)
eventBus.register(ConnectionEvent.class, hostCache)
log.info("initializing connection manager")
connectionManager = props.isLeaf() ?
new LeafConnectionManager(eventBus, me, 3, hostCache, props) :
eventBus.register(HostDiscoveredEvent.class, hostCache)
eventBus.register(ConnectionEvent.class, hostCache)
log.info("initializing connection manager")
connectionManager = props.isLeaf() ?
new LeafConnectionManager(eventBus, me, 3, hostCache, props) :
new UltrapeerConnectionManager(eventBus, me, 512, 512, hostCache, trustService, props)
eventBus.register(TrustEvent.class, connectionManager)
eventBus.register(ConnectionEvent.class, connectionManager)
eventBus.register(DisconnectionEvent.class, connectionManager)
eventBus.register(TrustEvent.class, connectionManager)
eventBus.register(ConnectionEvent.class, connectionManager)
eventBus.register(DisconnectionEvent.class, connectionManager)
eventBus.register(QueryEvent.class, connectionManager)
log.info("initializing cache client")
cacheClient = new CacheClient(eventBus,hostCache, connectionManager, i2pSession, props, 10000)
log.info("initializing cache client")
cacheClient = new CacheClient(eventBus,hostCache, connectionManager, i2pSession, props, 10000)
log.info("initializing update client")
updateClient = new UpdateClient(eventBus, i2pSession, myVersion, props)
log.info("initializing connector")
I2PConnector i2pConnector = new I2PConnector(socketManager)
log.info "initializing results sender"
ResultsSender resultsSender = new ResultsSender(eventBus, i2pConnector, me)
log.info "initializing search manager"
SearchManager searchManager = new SearchManager(eventBus, me, resultsSender)
eventBus.register(QueryEvent.class, searchManager)
eventBus.register(ResultsEvent.class, searchManager)
updateClient = new UpdateClient(eventBus, i2pSession, myVersion, props, fileManager, me)
eventBus.register(FileDownloadedEvent.class, updateClient)
eventBus.register(UIResultBatchEvent.class, updateClient)
log.info("initializing connector")
I2PConnector i2pConnector = new I2PConnector(socketManager)
log.info "initializing results sender"
ResultsSender resultsSender = new ResultsSender(eventBus, i2pConnector, me)
log.info "initializing search manager"
SearchManager searchManager = new SearchManager(eventBus, me, resultsSender)
eventBus.register(QueryEvent.class, searchManager)
eventBus.register(ResultsEvent.class, searchManager)
log.info("initializing download manager")
downloadManager = new DownloadManager(eventBus, i2pConnector, home, me)
downloadManager = new DownloadManager(eventBus, trustService, meshManager, props, i2pConnector, home, me)
eventBus.register(UIDownloadEvent.class, downloadManager)
eventBus.register(UILoadedEvent.class, downloadManager)
eventBus.register(FileDownloadedEvent.class, downloadManager)
eventBus.register(UIDownloadCancelledEvent.class, downloadManager)
eventBus.register(SourceDiscoveredEvent.class, downloadManager)
eventBus.register(UIDownloadPausedEvent.class, downloadManager)
eventBus.register(UIDownloadResumedEvent.class, downloadManager)
log.info("initializing upload manager")
UploadManager uploadManager = new UploadManager(eventBus, fileManager)
uploadManager = new UploadManager(eventBus, fileManager, meshManager, downloadManager)
log.info("initializing connection establisher")
connectionEstablisher = new ConnectionEstablisher(eventBus, i2pConnector, props, connectionManager, hostCache)
log.info("initializing acceptor")
I2PAcceptor i2pAcceptor = new I2PAcceptor(socketManager)
connectionAcceptor = new ConnectionAcceptor(eventBus, connectionManager, props,
log.info("initializing acceptor")
I2PAcceptor i2pAcceptor = new I2PAcceptor(socketManager)
connectionAcceptor = new ConnectionAcceptor(eventBus, connectionManager, props,
i2pAcceptor, hostCache, trustService, searchManager, uploadManager, connectionEstablisher)
log.info("initializing directory watcher")
directoryWatcher = new DirectoryWatcher(eventBus, fileManager)
eventBus.register(FileSharedEvent.class, directoryWatcher)
eventBus.register(AllFilesLoadedEvent.class, directoryWatcher)
eventBus.register(DirectoryUnsharedEvent.class, directoryWatcher)
log.info("initializing hasher service")
hasherService = new HasherService(new FileHasher(), eventBus, fileManager)
eventBus.register(FileSharedEvent.class, hasherService)
}
log.info("initializing trust subscriber")
trustSubscriber = new TrustSubscriber(eventBus, i2pConnector, props)
eventBus.register(UILoadedEvent.class, trustSubscriber)
eventBus.register(TrustSubscriptionEvent.class, trustSubscriber)
log.info("initializing content manager")
contentManager = new ContentManager()
eventBus.register(ContentControlEvent.class, contentManager)
eventBus.register(QueryEvent.class, contentManager)
}
public void startServices() {
hasherService.start()
directoryWatcher.start()
trustService.start()
trustService.waitForLoad()
hostCache.start()
@ -239,8 +312,14 @@ public class Core {
hostCache.waitForLoad()
updateClient.start()
}
public void shutdown() {
if (!shutdown.compareAndSet(false, true)) {
log.info("already shutting down")
return
}
log.info("shutting down trust subscriber")
trustSubscriber.stop()
log.info("shutting down download manageer")
downloadManager.shutdown()
log.info("shutting down connection acceeptor")
@ -249,8 +328,14 @@ public class Core {
connectionEstablisher.stop()
log.info("shutting down directory watcher")
directoryWatcher.stop()
log.info("shutting down cache client")
cacheClient.stop()
log.info("shutting down connection manager")
connectionManager.shutdown()
if (router != null) {
log.info("shutting down embedded router")
router.shutdown(0)
}
}
static main(args) {
@ -260,7 +345,7 @@ public class Core {
log.info("creating home dir")
home.mkdir()
}
def props = new Properties()
def propsFile = new File(home, "MuWire.properties")
if (propsFile.exists()) {
@ -276,10 +361,10 @@ public class Core {
props.write(it)
}
}
Core core = new Core(props, home, "0.3.0")
Core core = new Core(props, home, "0.4.15")
core.startServices()
// ... at the end, sleep or execute script
if (args.length == 0) {
log.info("initialized everything, sleeping")

View File

@ -4,17 +4,17 @@ import java.util.concurrent.atomic.AtomicLong
class Event {
private static final AtomicLong SEQ_NO = new AtomicLong();
final long seqNo
final long timestamp
Event() {
seqNo = SEQ_NO.getAndIncrement()
timestamp = System.currentTimeMillis()
}
@Override
public String toString() {
"seqNo $seqNo timestamp $timestamp"
}
private static final AtomicLong SEQ_NO = new AtomicLong();
final long seqNo
final long timestamp
Event() {
seqNo = SEQ_NO.getAndIncrement()
timestamp = System.currentTimeMillis()
}
@Override
public String toString() {
"seqNo $seqNo timestamp $timestamp"
}
}

View File

@ -10,42 +10,47 @@ import com.muwire.core.files.FileSharedEvent
import groovy.util.logging.Log
@Log
class EventBus {
private Map handlers = new HashMap()
private final Executor executor = Executors.newSingleThreadExecutor {r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("event-bus")
rv
}
void publish(Event e) {
executor.execute({publishInternal(e)} as Runnable)
}
private void publishInternal(Event e) {
log.fine "publishing event $e of type ${e.getClass().getSimpleName()} event $e"
def currentHandlers
final def clazz = e.getClass()
synchronized(this) {
currentHandlers = handlers.getOrDefault(clazz, [])
}
currentHandlers.each {
private Map handlers = new HashMap()
private final Executor executor = Executors.newSingleThreadExecutor {r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("event-bus")
rv
}
void publish(Event e) {
executor.execute({publishInternal(e)} as Runnable)
}
private void publishInternal(Event e) {
log.fine "publishing event $e of type ${e.getClass().getSimpleName()} event $e"
def currentHandlers
final def clazz = e.getClass()
synchronized(this) {
currentHandlers = handlers.getOrDefault(clazz, [])
}
currentHandlers.each {
try {
it."on${clazz.getSimpleName()}"(e)
} catch (Exception bad) {
log.log(Level.SEVERE, "exception dispatching event",bad)
}
}
}
synchronized void register(Class<? extends Event> eventType, def handler) {
log.info "Registering $handler for type $eventType"
def currentHandlers = handlers.get(eventType)
if (currentHandlers == null) {
currentHandlers = new CopyOnWriteArrayList()
handlers.put(eventType, currentHandlers)
}
currentHandlers.add handler
}
}
}
synchronized void register(Class<? extends Event> eventType, def handler) {
log.info "Registering $handler for type $eventType"
def currentHandlers = handlers.get(eventType)
if (currentHandlers == null) {
currentHandlers = new CopyOnWriteArrayList()
handlers.put(eventType, currentHandlers)
}
currentHandlers.add handler
}
synchronized void unregister(Class<? extends Event> eventType, def handler) {
log.info("Unregistering $handler for type $eventType")
handlers[eventType]?.remove(handler)
}
}
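A minimal usage sketch of the event bus above (the `MyEvent` and `MyListener` names are invented for illustration): `publish` dispatches asynchronously on the `event-bus` thread and calls a handler method named `on` plus the event class's simple name, and the newly added `unregister` removes a listener again.
```
// Hypothetical example assuming the Event and EventBus classes shown above.
class MyEvent extends Event {
    String payload
}

class MyListener {
    void onMyEvent(MyEvent e) {          // name must be "on" + event class simple name
        println "handled ${e.payload} (seqNo ${e.seqNo})"
    }
}

def bus = new EventBus()
def listener = new MyListener()
bus.register(MyEvent.class, listener)
bus.publish(new MyEvent(payload: "hello"))   // dispatched on the event-bus thread
Thread.sleep(100)                            // give the async dispatch a moment
bus.unregister(MyEvent.class, listener)      // added in this change set
```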

View File

@ -13,5 +13,5 @@ class InvalidSignatureException extends Exception {
public InvalidSignatureException(Throwable cause) {
super(cause);
}
}

View File

@ -8,81 +8,148 @@ import com.muwire.core.util.DataUtil
import net.i2p.data.Base64
class MuWireSettings {
final boolean isLeaf
boolean allowUntrusted
boolean searchExtraHop
boolean allowTrustLists
int trustListInterval
Set<Persona> trustSubscriptions
int downloadRetryInterval
int updateCheckInterval
boolean autoDownloadUpdate
String updateType
String nickname
File downloadLocation
CrawlerResponse crawlerResponse
boolean shareDownloadedFiles
Set<String> watchedDirectories
MuWireSettings() {
float downloadSequentialRatio
int hostClearInterval, hostHopelessInterval, hostRejectInterval
int meshExpiration
boolean embeddedRouter
int inBw, outBw
Set<String> watchedKeywords
Set<String> watchedRegexes
MuWireSettings() {
this(new Properties())
}
MuWireSettings(Properties props) {
isLeaf = Boolean.valueOf(props.get("leaf","false"))
allowUntrusted = Boolean.valueOf(props.get("allowUntrusted","true"))
crawlerResponse = CrawlerResponse.valueOf(props.get("crawlerResponse","REGISTERED"))
MuWireSettings(Properties props) {
isLeaf = Boolean.valueOf(props.get("leaf","false"))
allowUntrusted = Boolean.valueOf(props.getProperty("allowUntrusted","true"))
searchExtraHop = Boolean.valueOf(props.getProperty("searchExtraHop","false"))
allowTrustLists = Boolean.valueOf(props.getProperty("allowTrustLists","true"))
trustListInterval = Integer.valueOf(props.getProperty("trustListInterval","1"))
crawlerResponse = CrawlerResponse.valueOf(props.get("crawlerResponse","REGISTERED"))
nickname = props.getProperty("nickname","MuWireUser")
downloadLocation = new File((String)props.getProperty("downloadLocation",
downloadLocation = new File((String)props.getProperty("downloadLocation",
System.getProperty("user.home")))
downloadRetryInterval = Integer.parseInt(props.getProperty("downloadRetryInterval","5"))
updateCheckInterval = Integer.parseInt(props.getProperty("updateCheckInterval","36"))
downloadRetryInterval = Integer.parseInt(props.getProperty("downloadRetryInterval","60"))
updateCheckInterval = Integer.parseInt(props.getProperty("updateCheckInterval","24"))
autoDownloadUpdate = Boolean.parseBoolean(props.getProperty("autoDownloadUpdate","true"))
updateType = props.getProperty("updateType","jar")
shareDownloadedFiles = Boolean.parseBoolean(props.getProperty("shareDownloadedFiles","true"))
watchedDirectories = new HashSet<>()
if (props.containsKey("watchedDirectories")) {
String[] encoded = props.getProperty("watchedDirectories").split(",")
encoded.each { watchedDirectories << DataUtil.readi18nString(Base64.decode(it)) }
downloadSequentialRatio = Float.valueOf(props.getProperty("downloadSequentialRatio","0.8"))
hostClearInterval = Integer.valueOf(props.getProperty("hostClearInterval","15"))
hostHopelessInterval = Integer.valueOf(props.getProperty("hostHopelessInterval", "1440"))
hostRejectInterval = Integer.valueOf(props.getProperty("hostRejectInterval", "1"))
meshExpiration = Integer.valueOf(props.getProperty("meshExpiration","60"))
embeddedRouter = Boolean.valueOf(props.getProperty("embeddedRouter","false"))
inBw = Integer.valueOf(props.getProperty("inBw","256"))
outBw = Integer.valueOf(props.getProperty("outBw","128"))
watchedDirectories = readEncodedSet(props, "watchedDirectories")
watchedKeywords = readEncodedSet(props, "watchedKeywords")
watchedRegexes = readEncodedSet(props, "watchedRegexes")
trustSubscriptions = new HashSet<>()
if (props.containsKey("trustSubscriptions")) {
props.getProperty("trustSubscriptions").split(",").each {
trustSubscriptions.add(new Persona(new ByteArrayInputStream(Base64.decode(it))))
}
}
}
}
void write(OutputStream out) throws IOException {
Properties props = new Properties()
props.setProperty("leaf", isLeaf.toString())
props.setProperty("allowUntrusted", allowUntrusted.toString())
props.setProperty("searchExtraHop", String.valueOf(searchExtraHop))
props.setProperty("allowTrustLists", String.valueOf(allowTrustLists))
props.setProperty("trustListInterval", String.valueOf(trustListInterval))
props.setProperty("crawlerResponse", crawlerResponse.toString())
props.setProperty("nickname", nickname)
props.setProperty("downloadLocation", downloadLocation.getAbsolutePath())
props.setProperty("downloadRetryInterval", String.valueOf(downloadRetryInterval))
props.setProperty("updateCheckInterval", String.valueOf(updateCheckInterval))
props.setProperty("autoDownloadUpdate", String.valueOf(autoDownloadUpdate))
props.setProperty("updateType",String.valueOf(updateType))
props.setProperty("shareDownloadedFiles", String.valueOf(shareDownloadedFiles))
if (!watchedDirectories.isEmpty()) {
String encoded = watchedDirectories.stream().
map({Base64.encode(DataUtil.encodei18nString(it))}).
props.setProperty("downloadSequentialRatio", String.valueOf(downloadSequentialRatio))
props.setProperty("hostClearInterval", String.valueOf(hostClearInterval))
props.setProperty("hostHopelessInterval", String.valueOf(hostHopelessInterval))
props.setProperty("hostRejectInterval", String.valueOf(hostRejectInterval))
props.setProperty("meshExpiration", String.valueOf(meshExpiration))
props.setProperty("embeddedRouter", String.valueOf(embeddedRouter))
props.setProperty("inBw", String.valueOf(inBw))
props.setProperty("outBw", String.valueOf(outBw))
writeEncodedSet(watchedDirectories, "watchedDirectories", props)
writeEncodedSet(watchedKeywords, "watchedKeywords", props)
writeEncodedSet(watchedRegexes, "watchedRegexes", props)
if (!trustSubscriptions.isEmpty()) {
String encoded = trustSubscriptions.stream().
map({it.toBase64()}).
collect(Collectors.joining(","))
props.setProperty("watchedDirectories", encoded)
props.setProperty("trustSubscriptions", encoded)
}
props.store(out, "")
}
boolean isLeaf() {
isLeaf
}
boolean allowUntrusted() {
allowUntrusted
}
void setAllowUntrusted(boolean allowUntrusted) {
this.allowUntrusted = allowUntrusted
}
CrawlerResponse getCrawlerResponse() {
crawlerResponse
}
void setCrawlerResponse(CrawlerResponse crawlerResponse) {
this.crawlerResponse = crawlerResponse
}
private static Set<String> readEncodedSet(Properties props, String property) {
Set<String> rv = new HashSet<>()
if (props.containsKey(property)) {
String[] encoded = props.getProperty(property).split(",")
encoded.each { rv << DataUtil.readi18nString(Base64.decode(it)) }
}
rv
}
private static void writeEncodedSet(Set<String> set, String property, Properties props) {
if (set.isEmpty())
return
String encoded = set.stream().
map({Base64.encode(DataUtil.encodei18nString(it))}).
collect(Collectors.joining(","))
props.setProperty(property, encoded)
}
boolean isLeaf() {
isLeaf
}
boolean allowUntrusted() {
allowUntrusted
}
void setAllowUntrusted(boolean allowUntrusted) {
this.allowUntrusted = allowUntrusted
}
CrawlerResponse getCrawlerResponse() {
crawlerResponse
}
void setCrawlerResponse(CrawlerResponse crawlerResponse) {
this.crawlerResponse = crawlerResponse
}
String getNickname() {
nickname
}
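The readEncodedSet()/writeEncodedSet() helpers introduced above persist a set of strings under a single property: each entry is encoded (the real code uses DataUtil.encodei18nString), Base64'd, and the tokens are joined with commas. A minimal round-trip sketch of the same idea, using plain UTF-8 in place of the project's i18n encoding:

    import net.i2p.data.Base64

    def props = new Properties()
    Set<String> dirs = ["/home/user/music", "/home/user/books"] as Set

    // write: Base64-encode each entry and join with commas under one key
    props.setProperty("watchedDirectories",
            dirs.collect { Base64.encode(it.getBytes("UTF-8")) }.join(","))

    // read: split on commas and decode each token back into the set
    Set<String> decoded = new HashSet<>()
    props.getProperty("watchedDirectories").split(",").each {
        decoded << new String(Base64.decode(it), "UTF-8")
    }
    assert decoded == dirs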

View File

@ -7,11 +7,11 @@ import java.nio.charset.StandardCharsets
*/
public class Name {
final String name
Name(String name) {
this.name = name
}
Name(InputStream nameStream) throws IOException {
DataInputStream dis = new DataInputStream(nameStream)
int length = dis.readUnsignedShort()
@ -19,22 +19,22 @@ public class Name {
dis.readFully(nameBytes)
this.name = new String(nameBytes, StandardCharsets.UTF_8)
}
public void write(OutputStream out) throws IOException {
DataOutputStream dos = new DataOutputStream(out)
dos.writeShort(name.length())
dos.write(name.getBytes(StandardCharsets.UTF_8))
}
public getName() {
name
}
@Override
public int hashCode() {
name.hashCode()
}
@Override
public boolean equals(Object o) {
if (!(o instanceof Name))

View File

@ -9,7 +9,7 @@ import net.i2p.data.SigningPublicKey
public class Persona {
private static final int SIG_LEN = Constants.SIG_TYPE.getSigLen()
private final byte version
private final Name name
private final Destination destination
@ -17,12 +17,12 @@ public class Persona {
private volatile String humanReadableName
private volatile String base64
private volatile byte[] payload
public Persona(InputStream personaStream) throws IOException, InvalidSignatureException {
version = (byte) (personaStream.read() & 0xFF)
if (version != Constants.PERSONA_VERSION)
throw new IOException("Unknown version "+version)
name = new Name(personaStream)
destination = Destination.create(personaStream)
sig = new byte[SIG_LEN]
@ -31,7 +31,7 @@ public class Persona {
if (!verify(version, name, destination, sig))
throw new InvalidSignatureException(getHumanReadableName() + " didn't verify")
}
private static boolean verify(byte version, Name name, Destination destination, byte [] sig) {
ByteArrayOutputStream baos = new ByteArrayOutputStream()
baos.write(version)
@ -42,7 +42,7 @@ public class Persona {
Signature signature = new Signature(Constants.SIG_TYPE, sig)
DSAEngine.getInstance().verifySignature(signature, payload, spk)
}
public void write(OutputStream out) throws IOException {
if (payload == null) {
ByteArrayOutputStream baos = new ByteArrayOutputStream()
@ -54,13 +54,13 @@ public class Persona {
}
out.write(payload)
}
public String getHumanReadableName() {
if (humanReadableName == null)
if (humanReadableName == null)
humanReadableName = name.getName() + "@" + destination.toBase32().substring(0,32)
humanReadableName
}
public String toBase64() {
if (base64 == null) {
def baos = new ByteArrayOutputStream()
@ -69,12 +69,12 @@ public class Persona {
}
base64
}
@Override
public int hashCode() {
name.hashCode() ^ destination.hashCode()
}
@Override
public boolean equals(Object o) {
if (!(o instanceof Persona))
@ -82,4 +82,13 @@ public class Persona {
Persona other = (Persona)o
name.equals(other.name) && destination.equals(other.destination)
}
public static void main(String []args) {
if (args.length != 1) {
println "This utility decodes a bas64-encoded persona"
System.exit(1)
}
Persona p = new Persona(new ByteArrayInputStream(Base64.decode(args[0])))
println p.getHumanReadableName()
}
}
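Putting the constructor above together, a serialized Persona appears to consist of the fields sketched below (descriptive only; the signature read is partly outside the visible hunks). The personaB64 variable is hypothetical:

    import net.i2p.data.Base64

    // [ version : 1 byte ] [ name length : 2 bytes ] [ name : UTF-8 bytes ]
    // [ destination : I2P Destination ] [ signature : Constants.SIG_TYPE.getSigLen() bytes ]
    // The signature covers version + name + destination and is checked in verify().
    Persona p = new Persona(new ByteArrayInputStream(Base64.decode(personaB64)))
    println p.getHumanReadableName()   // e.g. "alice@" + first 32 chars of the b32 destination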

View File

@ -0,0 +1,4 @@
package com.muwire.core
class RouterDisconnectedEvent extends Event {
}

View File

@ -2,12 +2,12 @@ package com.muwire.core
abstract class Service {
volatile boolean loaded
abstract void load()
void waitForLoad() {
while (!loaded)
Thread.sleep(10)
}
volatile boolean loaded
abstract void load()
void waitForLoad() {
while (!loaded)
Thread.sleep(10)
}
}

View File

@ -0,0 +1,7 @@
package com.muwire.core
class SplitPattern {
public static final String SPLIT_PATTERN = "[\\*\\+\\-,\\.:;\\(\\)=_/\\\\\\!\\\"\\\'\\\$%\\|\\[\\]\\{\\}\\?]";
}
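SPLIT_PATTERN is a character class of punctuation used to break names and queries into search terms. A quick sketch of its effect on a made-up file name:

    import com.muwire.core.SplitPattern

    def terms = "muwire-0.4.15.jar".split(SplitPattern.SPLIT_PATTERN) as List
    assert terms == ["muwire", "0", "4", "15", "jar"]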

View File

@ -22,104 +22,108 @@ import net.i2p.data.Destination
@Log
abstract class Connection implements Closeable {
final EventBus eventBus
final Endpoint endpoint
final boolean incoming
final HostCache hostCache
private static final int SEARCHES = 10
private static final long INTERVAL = 1000
final EventBus eventBus
final Endpoint endpoint
final boolean incoming
final HostCache hostCache
final TrustService trustService
final MuWireSettings settings
private final AtomicBoolean running = new AtomicBoolean()
private final BlockingQueue messages = new LinkedBlockingQueue()
private final Thread reader, writer
protected final String name
long lastPingSentTime, lastPongReceivedTime
Connection(EventBus eventBus, Endpoint endpoint, boolean incoming,
private final AtomicBoolean running = new AtomicBoolean()
private final BlockingQueue messages = new LinkedBlockingQueue()
private final Thread reader, writer
private final LinkedList<Long> searchTimestamps = new LinkedList<>()
protected final String name
long lastPingSentTime, lastPongReceivedTime
Connection(EventBus eventBus, Endpoint endpoint, boolean incoming,
HostCache hostCache, TrustService trustService, MuWireSettings settings) {
this.eventBus = eventBus
this.incoming = incoming
this.endpoint = endpoint
this.hostCache = hostCache
this.eventBus = eventBus
this.incoming = incoming
this.endpoint = endpoint
this.hostCache = hostCache
this.trustService = trustService
this.settings = settings
this.name = endpoint.destination.toBase32().substring(0,8)
this.reader = new Thread({readLoop()} as Runnable)
this.reader.setName("reader-$name")
this.reader.setDaemon(true)
this.writer = new Thread({writeLoop()} as Runnable)
this.writer.setName("writer-$name")
this.writer.setDaemon(true)
}
/**
* starts the connection threads
*/
void start() {
if (!running.compareAndSet(false, true)) {
log.log(Level.WARNING,"$name already running", new Exception())
return
}
reader.start()
writer.start()
}
@Override
public void close() {
if (!running.compareAndSet(true, false)) {
log.log(Level.WARNING, "$name already closed", new Exception() )
return
}
this.name = endpoint.destination.toBase32().substring(0,8)
this.reader = new Thread({readLoop()} as Runnable)
this.reader.setName("reader-$name")
this.reader.setDaemon(true)
this.writer = new Thread({writeLoop()} as Runnable)
this.writer.setName("writer-$name")
this.writer.setDaemon(true)
}
/**
* starts the connection threads
*/
void start() {
if (!running.compareAndSet(false, true)) {
log.log(Level.WARNING,"$name already running", new Exception())
return
}
reader.start()
writer.start()
}
@Override
public void close() {
if (!running.compareAndSet(true, false)) {
log.log(Level.WARNING, "$name already closed", new Exception() )
return
}
log.info("closing $name")
reader.interrupt()
writer.interrupt()
endpoint.close()
eventBus.publish(new DisconnectionEvent(destination: endpoint.destination))
}
protected void readLoop() {
try {
while(running.get()) {
read()
}
} catch (SocketTimeoutException e) {
reader.interrupt()
writer.interrupt()
endpoint.close()
eventBus.publish(new DisconnectionEvent(destination: endpoint.destination))
}
protected void readLoop() {
try {
while(running.get()) {
read()
}
} catch (SocketTimeoutException e) {
} catch (Exception e) {
log.log(Level.WARNING,"unhandled exception in reader",e)
} finally {
close()
}
}
protected abstract void read()
protected void writeLoop() {
try {
while(running.get()) {
def message = messages.take()
write(message)
}
} catch (Exception e) {
}
protected abstract void read()
protected void writeLoop() {
try {
while(running.get()) {
def message = messages.take()
write(message)
}
} catch (Exception e) {
log.log(Level.WARNING, "unhandled exception in writer",e)
} finally {
close()
}
}
protected abstract void write(def message);
void sendPing() {
def ping = [:]
ping.type = "Ping"
ping.version = 1
messages.put(ping)
lastPingSentTime = System.currentTimeMillis()
}
}
protected abstract void write(def message);
void sendPing() {
def ping = [:]
ping.type = "Ping"
ping.version = 1
messages.put(ping)
lastPingSentTime = System.currentTimeMillis()
}
void sendQuery(QueryEvent e) {
def query = [:]
query.type = "Search"
@ -135,35 +139,53 @@ abstract class Connection implements Closeable {
query.originator = e.originator.toBase64()
messages.put(query)
}
protected void handlePing() {
log.fine("$name received ping")
def pong = [:]
pong.type = "Pong"
pong.version = 1
pong.pongs = hostCache.getGoodHosts(10).collect { d -> d.toBase64() }
messages.put(pong)
}
protected void handlePong(def pong) {
log.fine("$name received pong")
lastPongReceivedTime = System.currentTimeMillis()
if (pong.pongs == null)
throw new Exception("Pong doesn't have pongs")
pong.pongs.each {
def dest = new Destination(it)
eventBus.publish(new HostDiscoveredEvent(destination: dest))
}
}
protected void handlePing() {
log.fine("$name received ping")
def pong = [:]
pong.type = "Pong"
pong.version = 1
pong.pongs = hostCache.getGoodHosts(10).collect { d -> d.toBase64() }
messages.put(pong)
}
protected void handlePong(def pong) {
log.fine("$name received pong")
lastPongReceivedTime = System.currentTimeMillis()
if (pong.pongs == null)
throw new Exception("Pong doesn't have pongs")
pong.pongs.each {
def dest = new Destination(it)
eventBus.publish(new HostDiscoveredEvent(destination: dest))
}
}
private boolean throttleSearch() {
final long now = System.currentTimeMillis()
if (searchTimestamps.size() < SEARCHES) {
searchTimestamps.addLast(now)
return false
}
Long oldest = searchTimestamps.getFirst()
if (now - oldest.longValue() < INTERVAL)
return true
searchTimestamps.addLast(now)
searchTimestamps.removeFirst()
false
}
protected void handleSearch(def search) {
if (throttleSearch()) {
log.info("dropping excessive search")
return
}
UUID uuid = UUID.fromString(search.uuid)
byte [] infohash = null
if (search.infohash != null) {
search.keywords = null
infohash = Base64.decode(search.infohash)
}
Destination replyTo = new Destination(search.replyTo)
TrustLevel trustLevel = trustService.getLevel(replyTo)
if (trustLevel == TrustLevel.DISTRUSTED) {
@ -174,7 +196,7 @@ abstract class Connection implements Closeable {
log.info("dropping search from neutral peer")
return
}
Persona originator = null
if (search.originator != null) {
originator = new Persona(new ByteArrayInputStream(Base64.decode(search.originator)))
@ -183,11 +205,11 @@ abstract class Connection implements Closeable {
return
}
}
boolean oob = false
if (search.oobInfohash != null)
oob = search.oobInfohash
SearchEvent searchEvent = new SearchEvent(searchTerms : search.keywords,
searchHash : infohash,
uuid : uuid,
@ -198,6 +220,6 @@ abstract class Connection implements Closeable {
receivedOn : endpoint.destination,
firstHop : search.firstHop )
eventBus.publish(event)
}
}
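The throttleSearch() method added above keeps the timestamps of the last SEARCHES accepted queries and drops a new query when ten have already arrived within INTERVAL milliseconds. The same sliding-window idea in isolation (class and parameter names below are illustrative, not part of the codebase):

    // Sliding-window rate limiter: allow at most maxEvents per windowMillis.
    class SlidingWindowThrottle {
        final int maxEvents
        final long windowMillis
        private final LinkedList<Long> timestamps = new LinkedList<>()

        SlidingWindowThrottle(int maxEvents, long windowMillis) {
            this.maxEvents = maxEvents
            this.windowMillis = windowMillis
        }

        /** @return true if the event should be dropped */
        synchronized boolean shouldThrottle(long now) {
            if (timestamps.size() < maxEvents) {
                timestamps.addLast(now)
                return false
            }
            if (now - timestamps.getFirst() < windowMillis)
                return true              // window still full, drop
            timestamps.addLast(now)      // slide the window forward
            timestamps.removeFirst()
            false
        }
    }

    def throttle = new SlidingWindowThrottle(10, 1000)
    assert !throttle.shouldThrottle(System.currentTimeMillis())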

View File

@ -14,6 +14,7 @@ import com.muwire.core.hostcache.HostCache
import com.muwire.core.trust.TrustLevel
import com.muwire.core.trust.TrustService
import com.muwire.core.upload.UploadManager
import com.muwire.core.util.DataUtil
import com.muwire.core.search.InvalidSearchResultException
import com.muwire.core.search.ResultsParser
import com.muwire.core.search.SearchManager
@ -28,173 +29,176 @@ import groovy.util.logging.Log
@Log
class ConnectionAcceptor {
final EventBus eventBus
final UltrapeerConnectionManager manager
final MuWireSettings settings
final I2PAcceptor acceptor
final HostCache hostCache
final TrustService trustService
final SearchManager searchManager
final EventBus eventBus
final UltrapeerConnectionManager manager
final MuWireSettings settings
final I2PAcceptor acceptor
final HostCache hostCache
final TrustService trustService
final SearchManager searchManager
final UploadManager uploadManager
final ConnectionEstablisher establisher
final ExecutorService acceptorThread
final ExecutorService handshakerThreads
final ExecutorService acceptorThread
final ExecutorService handshakerThreads
private volatile shutdown
ConnectionAcceptor(EventBus eventBus, UltrapeerConnectionManager manager,
MuWireSettings settings, I2PAcceptor acceptor, HostCache hostCache,
TrustService trustService, SearchManager searchManager, UploadManager uploadManager,
ConnectionAcceptor(EventBus eventBus, UltrapeerConnectionManager manager,
MuWireSettings settings, I2PAcceptor acceptor, HostCache hostCache,
TrustService trustService, SearchManager searchManager, UploadManager uploadManager,
ConnectionEstablisher establisher) {
this.eventBus = eventBus
this.manager = manager
this.settings = settings
this.acceptor = acceptor
this.hostCache = hostCache
this.trustService = trustService
this.eventBus = eventBus
this.manager = manager
this.settings = settings
this.acceptor = acceptor
this.hostCache = hostCache
this.trustService = trustService
this.searchManager = searchManager
this.uploadManager = uploadManager
this.establisher = establisher
acceptorThread = Executors.newSingleThreadExecutor { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("acceptor")
rv
}
handshakerThreads = Executors.newCachedThreadPool { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("acceptor-processor-${System.currentTimeMillis()}")
rv
}
}
void start() {
acceptorThread.execute({acceptLoop()} as Runnable)
}
void stop() {
this.establisher = establisher
acceptorThread = Executors.newSingleThreadExecutor { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("acceptor")
rv
}
handshakerThreads = Executors.newCachedThreadPool { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("acceptor-processor-${System.currentTimeMillis()}")
rv
}
}
void start() {
acceptorThread.execute({acceptLoop()} as Runnable)
}
void stop() {
shutdown = true
acceptorThread.shutdownNow()
handshakerThreads.shutdownNow()
}
private void acceptLoop() {
acceptorThread.shutdownNow()
handshakerThreads.shutdownNow()
}
private void acceptLoop() {
try {
while(true) {
def incoming = acceptor.accept()
log.info("accepted connection from ${incoming.destination.toBase32()}")
switch(trustService.getLevel(incoming.destination)) {
case TrustLevel.TRUSTED : break
case TrustLevel.NEUTRAL :
if (settings.allowUntrusted())
break
case TrustLevel.DISTRUSTED :
log.info("Disallowing distrusted connection")
incoming.close()
continue
}
handshakerThreads.execute({processIncoming(incoming)} as Runnable)
}
while(true) {
def incoming = acceptor.accept()
log.info("accepted connection from ${incoming.destination.toBase32()}")
switch(trustService.getLevel(incoming.destination)) {
case TrustLevel.TRUSTED : break
case TrustLevel.NEUTRAL :
if (settings.allowUntrusted())
break
case TrustLevel.DISTRUSTED :
log.info("Disallowing distrusted connection")
incoming.close()
continue
}
handshakerThreads.execute({processIncoming(incoming)} as Runnable)
}
} catch (Exception e) {
log.log(Level.WARNING, "exception in accept loop",e)
if (!shutdown)
throw e
}
}
private void processIncoming(Endpoint e) {
InputStream is = e.inputStream
try {
int read = is.read()
switch(read) {
case (byte)'M':
}
private void processIncoming(Endpoint e) {
InputStream is = e.inputStream
try {
int read = is.read()
switch(read) {
case (byte)'M':
if (settings.isLeaf())
throw new IOException("Incoming connection as leaf")
processMuWire(e)
break
case (byte)'G':
processGET(e)
break
processMuWire(e)
break
case (byte)'G':
processGET(e)
break
case (byte)'H':
processHashList(e)
break
case (byte)'P':
processPOST(e)
break
default:
throw new Exception("Invalid read $read")
}
} catch (Exception ex) {
log.log(Level.WARNING, "incoming connection failed",ex)
e.close()
eventBus.publish new ConnectionEvent(endpoint: e, incoming: true, leaf: null, status: ConnectionAttemptStatus.FAILED)
}
}
private void processMuWire(Endpoint e) {
byte[] uWire = "uWire ".bytes
for (int i = 0; i < uWire.length; i++) {
int read = e.inputStream.read()
if (read != uWire[i]) {
throw new IOException("unexpected value $read at position $i")
}
}
byte[] type = new byte[4]
DataInputStream dis = new DataInputStream(e.inputStream)
dis.readFully(type)
case (byte)'T':
processTRUST(e)
break
default:
throw new Exception("Invalid read $read")
}
} catch (Exception ex) {
log.log(Level.WARNING, "incoming connection failed",ex)
e.close()
eventBus.publish new ConnectionEvent(endpoint: e, incoming: true, leaf: null, status: ConnectionAttemptStatus.FAILED)
}
}
private void processMuWire(Endpoint e) {
byte[] uWire = "uWire ".bytes
for (int i = 0; i < uWire.length; i++) {
int read = e.inputStream.read()
if (read != uWire[i]) {
throw new IOException("unexpected value $read at position $i")
}
}
byte[] type = new byte[4]
DataInputStream dis = new DataInputStream(e.inputStream)
dis.readFully(type)
if (type == "leaf".bytes)
handleIncoming(e, true)
else if (type == "peer".bytes)
handleIncoming(e, false)
else
else
throw new IOException("unknown connection type $type")
}
private void handleIncoming(Endpoint e, boolean leaf) {
boolean accept = !manager.isConnected(e.destination) &&
private void handleIncoming(Endpoint e, boolean leaf) {
boolean accept = !manager.isConnected(e.destination) &&
!establisher.isInProgress(e.destination) &&
(leaf ? manager.hasLeafSlots() : manager.hasPeerSlots())
if (accept) {
log.info("accepting connection, leaf:$leaf")
e.outputStream.write("OK".bytes)
e.outputStream.flush()
def wrapped = new Endpoint(e.destination, new InflaterInputStream(e.inputStream), new DeflaterOutputStream(e.outputStream, true), e.toClose)
eventBus.publish(new ConnectionEvent(endpoint: wrapped, incoming: true, leaf: leaf, status: ConnectionAttemptStatus.SUCCESSFUL))
} else {
log.info("rejecting connection, leaf:$leaf")
e.outputStream.write("REJECT".bytes)
def hosts = hostCache.getGoodHosts(10)
if (!hosts.isEmpty()) {
def json = [:]
json.tryHosts = hosts.collect { d -> d.toBase64() }
json = JsonOutput.toJson(json)
def os = new DataOutputStream(e.outputStream)
os.writeShort(json.bytes.length)
os.write(json.bytes)
}
e.outputStream.flush()
e.close()
eventBus.publish(new ConnectionEvent(endpoint: e, incoming: true, leaf: leaf, status: ConnectionAttemptStatus.REJECTED))
}
}
private void processGET(Endpoint e) {
if (accept) {
log.info("accepting connection, leaf:$leaf")
e.outputStream.write("OK".bytes)
e.outputStream.flush()
def wrapped = new Endpoint(e.destination, new InflaterInputStream(e.inputStream), new DeflaterOutputStream(e.outputStream, true), e.toClose)
eventBus.publish(new ConnectionEvent(endpoint: wrapped, incoming: true, leaf: leaf, status: ConnectionAttemptStatus.SUCCESSFUL))
} else {
log.info("rejecting connection, leaf:$leaf")
e.outputStream.write("REJECT".bytes)
def hosts = hostCache.getGoodHosts(10)
if (!hosts.isEmpty()) {
def json = [:]
json.tryHosts = hosts.collect { d -> d.toBase64() }
json = JsonOutput.toJson(json)
def os = new DataOutputStream(e.outputStream)
os.writeShort(json.bytes.length)
os.write(json.bytes)
}
e.outputStream.flush()
e.close()
eventBus.publish(new ConnectionEvent(endpoint: e, incoming: true, leaf: leaf, status: ConnectionAttemptStatus.REJECTED))
}
}
private void processGET(Endpoint e) {
byte[] et = new byte[3]
final DataInputStream dis = new DataInputStream(e.getInputStream())
dis.readFully(et)
if (et != "ET ".getBytes(StandardCharsets.US_ASCII))
throw new IOException("Invalid GET connection")
uploadManager.processGET(e)
}
}
private void processHashList(Endpoint e) {
byte[] ashList = new byte[8]
final DataInputStream dis = new DataInputStream(e.getInputStream())
@ -203,7 +207,7 @@ class ConnectionAcceptor {
throw new IOException("Invalid HASHLIST connection")
uploadManager.processHashList(e)
}
private void processPOST(final Endpoint e) throws IOException {
byte [] ost = new byte[4]
final DataInputStream dis = new DataInputStream(e.getInputStream())
@ -242,5 +246,44 @@ class ConnectionAcceptor {
e.close()
}
}
private void processTRUST(Endpoint e) {
byte[] RUST = new byte[6]
DataInputStream dis = new DataInputStream(e.getInputStream())
dis.readFully(RUST)
if (RUST != "RUST\r\n".getBytes(StandardCharsets.US_ASCII))
throw new IOException("Invalid TRUST connection")
String header
while ((header = DataUtil.readTillRN(dis)) != ""); // ignore headers for now
OutputStream os = e.getOutputStream()
if (!settings.allowTrustLists) {
os.write("403 Not Allowed\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
os.flush()
e.close()
return
}
os.write("200 OK\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
List<Persona> good = new ArrayList<>(trustService.good.values())
int size = Math.min(Short.MAX_VALUE * 2, good.size())
good = good.subList(0, size)
DataOutputStream dos = new DataOutputStream(os)
dos.writeShort(size)
good.each {
it.write(dos)
}
List<Persona> bad = new ArrayList<>(trustService.bad.values())
size = Math.min(Short.MAX_VALUE * 2, bad.size())
bad = bad.subList(0, size)
dos.writeShort(size)
bad.each {
it.write(dos)
}
dos.flush()
e.close()
}
}
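processIncoming() above dispatches on the first byte of an inbound stream, and each handler consumes the rest of its token. Going only by the buffer sizes and comparisons visible in these hunks, the openings appear to be: 'M' + "uWire " + a 4-byte "leaf"/"peer" type, 'G' + "ET ", 'H' + 8 more bytes (hash-list requests), 'P' + 4 more bytes, and 'T' + "RUST\r\n" followed by headers up to an empty line. A hypothetical client side of the TRUST exchange, mirroring processTRUST() (endpoint stands for an already-connected Endpoint):

    import com.muwire.core.Persona
    import com.muwire.core.util.DataUtil

    def os = endpoint.outputStream
    os.write("TRUST\r\n\r\n".getBytes("US-ASCII"))       // token plus an empty header block
    os.flush()

    def dis = new DataInputStream(endpoint.inputStream)
    String status = DataUtil.readTillRN(dis)              // "200 OK" or "403 Not Allowed"
    DataUtil.readTillRN(dis)                              // blank line after the status
    if (status.startsWith("200")) {
        int good = dis.readUnsignedShort()
        def trusted = (0..<good).collect { new Persona(dis) }
        int bad = dis.readUnsignedShort()
        def distrusted = (0..<bad).collect { new Persona(dis) }
    }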

View File

@ -21,164 +21,169 @@ import net.i2p.util.ConcurrentHashSet
@Log
class ConnectionEstablisher {
private static final int CONCURRENT = 4
final EventBus eventBus
final I2PConnector i2pConnector
final MuWireSettings settings
final ConnectionManager connectionManager
final HostCache hostCache
final Timer timer
final ExecutorService executor
final Set inProgress = new ConcurrentHashSet()
private static final int CONCURRENT = 4
final EventBus eventBus
final I2PConnector i2pConnector
final MuWireSettings settings
final ConnectionManager connectionManager
final HostCache hostCache
final Timer timer
final ExecutorService executor, closer
final Set inProgress = new ConcurrentHashSet()
ConnectionEstablisher(){}
ConnectionEstablisher(EventBus eventBus, I2PConnector i2pConnector, MuWireSettings settings,
ConnectionManager connectionManager, HostCache hostCache) {
this.eventBus = eventBus
this.i2pConnector = i2pConnector
this.settings = settings
this.connectionManager = connectionManager
this.hostCache = hostCache
timer = new Timer("connection-timer",true)
executor = Executors.newFixedThreadPool(CONCURRENT, { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("connector-${System.currentTimeMillis()}")
rv
} as ThreadFactory)
}
void start() {
timer.schedule({connectIfNeeded()} as TimerTask, 100, 1000)
}
void stop() {
timer.cancel()
executor.shutdownNow()
}
private void connectIfNeeded() {
if (!connectionManager.needsConnections())
return
if (inProgress.size() >= CONCURRENT)
return
def toTry = null
for (int i = 0; i < 5; i++) {
toTry = hostCache.getHosts(1)
if (toTry.isEmpty())
return
toTry = toTry[0]
if (!connectionManager.isConnected(toTry) &&
!inProgress.contains(toTry)) {
break
}
}
if (toTry == null)
return
if (!connectionManager.isConnected(toTry) && inProgress.add(toTry))
executor.execute({connect(toTry)} as Runnable)
}
private void connect(Destination toTry) {
log.info("starting connect to ${toTry.toBase32()}")
try {
def endpoint = i2pConnector.connect(toTry)
log.info("successful transport connect to ${toTry.toBase32()}")
// outgoing handshake
endpoint.outputStream.write("MuWire ".bytes)
def type = settings.isLeaf() ? "leaf" : "peer"
endpoint.outputStream.write(type.bytes)
endpoint.outputStream.flush()
InputStream is = endpoint.inputStream
int read = is.read()
if (read == -1) {
fail endpoint
return
}
switch(read) {
case (byte)'O': readK(endpoint); break
case (byte)'R': readEJECT(endpoint); break
default :
log.warning("unknown response $read")
fail endpoint
}
} catch (Exception e) {
log.log(Level.WARNING, "Couldn't connect to ${toTry.toBase32()}", e)
def endpoint = new Endpoint(toTry, null, null, null)
fail(endpoint)
} finally {
inProgress.remove(toTry)
}
}
private void fail(Endpoint endpoint) {
endpoint.close()
eventBus.publish(new ConnectionEvent(endpoint: endpoint, incoming: false, leaf: false, status: ConnectionAttemptStatus.FAILED))
}
private void readK(Endpoint e) {
int read = e.inputStream.read()
if (read != 'K') {
log.warning("unknown response after O: $read")
fail e
return
}
log.info("connection to ${e.destination.toBase32()} established")
// wrap into deflater / inflater streams and publish
def wrapped = new Endpoint(e.destination, new InflaterInputStream(e.inputStream), new DeflaterOutputStream(e.outputStream, true), e.toClose)
eventBus.publish(new ConnectionEvent(endpoint: wrapped, incoming: false, leaf: false, status: ConnectionAttemptStatus.SUCCESSFUL))
}
private void readEJECT(Endpoint e) {
byte[] eject = "EJECT".bytes
for (int i = 0; i < eject.length; i++) {
int read = e.inputStream.read()
if (read != eject[i]) {
log.warning("Unknown response after R at position $i")
fail e
return
}
}
log.info("connection to ${e.destination.toBase32()} rejected")
eventBus.publish(new ConnectionEvent(endpoint: e, incoming: false, leaf: false, status: ConnectionAttemptStatus.REJECTED))
try {
DataInputStream dais = new DataInputStream(e.inputStream)
int payloadSize = dais.readUnsignedShort()
byte[] payload = new byte[payloadSize]
dais.readFully(payload)
ConnectionEstablisher(EventBus eventBus, I2PConnector i2pConnector, MuWireSettings settings,
ConnectionManager connectionManager, HostCache hostCache) {
this.eventBus = eventBus
this.i2pConnector = i2pConnector
this.settings = settings
this.connectionManager = connectionManager
this.hostCache = hostCache
timer = new Timer("connection-timer",true)
executor = Executors.newFixedThreadPool(CONCURRENT, { r ->
def rv = new Thread(r)
rv.setDaemon(true)
rv.setName("connector-${System.currentTimeMillis()}")
rv
} as ThreadFactory)
closer = Executors.newSingleThreadExecutor()
}
def json = new JsonSlurper()
json = json.parse(payload)
void start() {
timer.schedule({connectIfNeeded()} as TimerTask, 100, 1000)
}
if (json.tryHosts == null) {
log.warning("post-rejection json didn't contain hosts to try")
return
}
void stop() {
timer.cancel()
executor.shutdownNow()
closer.shutdown()
}
private void connectIfNeeded() {
if (!connectionManager.needsConnections())
return
if (inProgress.size() >= CONCURRENT)
return
def toTry = null
for (int i = 0; i < 5; i++) {
toTry = hostCache.getHosts(1)
if (toTry.isEmpty())
return
toTry = toTry[0]
if (!connectionManager.isConnected(toTry) &&
!inProgress.contains(toTry)) {
break
}
}
if (toTry == null)
return
if (!connectionManager.isConnected(toTry) && inProgress.add(toTry))
executor.execute({connect(toTry)} as Runnable)
}
private void connect(Destination toTry) {
log.info("starting connect to ${toTry.toBase32()}")
try {
def endpoint = i2pConnector.connect(toTry)
log.info("successful transport connect to ${toTry.toBase32()}")
// outgoing handshake
endpoint.outputStream.write("MuWire ".bytes)
def type = settings.isLeaf() ? "leaf" : "peer"
endpoint.outputStream.write(type.bytes)
endpoint.outputStream.flush()
InputStream is = endpoint.inputStream
int read = is.read()
if (read == -1) {
fail endpoint
return
}
switch(read) {
case (byte)'O': readK(endpoint); break
case (byte)'R': readEJECT(endpoint); break
default :
log.warning("unknown response $read")
fail endpoint
}
} catch (Exception e) {
log.log(Level.WARNING, "Couldn't connect to ${toTry.toBase32()}", e)
def endpoint = new Endpoint(toTry, null, null, null)
fail(endpoint)
} finally {
inProgress.remove(toTry)
}
}
private void fail(Endpoint endpoint) {
closer.execute {
endpoint.close()
eventBus.publish(new ConnectionEvent(endpoint: endpoint, incoming: false, leaf: false, status: ConnectionAttemptStatus.FAILED))
} as Runnable
}
private void readK(Endpoint e) {
int read = e.inputStream.read()
if (read != 'K') {
log.warning("unknown response after O: $read")
fail e
return
}
log.info("connection to ${e.destination.toBase32()} established")
// wrap into deflater / inflater streams and publish
def wrapped = new Endpoint(e.destination, new InflaterInputStream(e.inputStream), new DeflaterOutputStream(e.outputStream, true), e.toClose)
eventBus.publish(new ConnectionEvent(endpoint: wrapped, incoming: false, leaf: false, status: ConnectionAttemptStatus.SUCCESSFUL))
}
private void readEJECT(Endpoint e) {
byte[] eject = "EJECT".bytes
for (int i = 0; i < eject.length; i++) {
int read = e.inputStream.read()
if (read != eject[i]) {
log.warning("Unknown response after R at position $i")
fail e
return
}
}
log.info("connection to ${e.destination.toBase32()} rejected")
eventBus.publish(new ConnectionEvent(endpoint: e, incoming: false, leaf: false, status: ConnectionAttemptStatus.REJECTED))
try {
DataInputStream dais = new DataInputStream(e.inputStream)
int payloadSize = dais.readUnsignedShort()
byte[] payload = new byte[payloadSize]
dais.readFully(payload)
def json = new JsonSlurper()
json = json.parse(payload)
if (json.tryHosts == null) {
log.warning("post-rejection json didn't contain hosts to try")
return
}
json.tryHosts.asList().each {
Destination suggested = new Destination(it)
eventBus.publish(new HostDiscoveredEvent(destination: suggested))
}
} catch (Exception ignore) {
log.log(Level.WARNING,"Problem parsing post-rejection payload",ignore)
} finally {
// the end
closer.execute({e.close()} as Runnable)
}
}
json.tryHosts.asList().each {
Destination suggested = new Destination(it)
eventBus.publish(new HostDiscoveredEvent(destination: suggested))
}
} catch (Exception ignore) {
log.log(Level.WARNING,"Problem parsing post-rejection payload",ignore)
} finally {
// the end
e.close()
}
}
public boolean isInProgress(Destination d) {
inProgress.contains(d)
}
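Seen from the initiating side (connect(), readK() and readEJECT() above), the outgoing handshake is: write "MuWire " plus "leaf" or "peer", then read either "OK" or "REJECT", where a rejection may be followed by an unsigned-short length and a JSON document listing hosts to try. A condensed sketch (endpoint stands for an already-connected Endpoint; the real code guards the optional payload with try/catch):

    endpoint.outputStream.write("MuWire peer".bytes)
    endpoint.outputStream.flush()

    def dis = new DataInputStream(endpoint.inputStream)
    byte[] lead = new byte[2]
    dis.readFully(lead)
    if (new String(lead, "US-ASCII") == "OK") {
        // success: wrap the streams in Inflater/Deflater streams, as readK() does
    } else {                                     // "RE" + "JECT"
        byte[] rest = new byte[4]
        dis.readFully(rest)
        int len = dis.readUnsignedShort()        // optional suggested-hosts payload
        byte[] payload = new byte[len]
        dis.readFully(payload)
        def tryHosts = new groovy.json.JsonSlurper().parse(payload).tryHosts   // base64 destinations
    }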

View File

@ -6,14 +6,14 @@ import net.i2p.data.Destination
class ConnectionEvent extends Event {
Endpoint endpoint
boolean incoming
Boolean leaf // can be null if unknown
ConnectionAttemptStatus status
@Override
public String toString() {
"ConnectionEvent ${super.toString()} endpoint: $endpoint incoming: $incoming leaf : $leaf status : $status"
}
Endpoint endpoint
boolean incoming
Boolean leaf // can be null if unknown
ConnectionAttemptStatus status
@Override
public String toString() {
"ConnectionEvent ${super.toString()} endpoint: $endpoint incoming: $incoming leaf : $leaf status : $status"
}
}

View File

@ -11,64 +11,64 @@ import com.muwire.core.trust.TrustLevel
import net.i2p.data.Destination
abstract class ConnectionManager {
private static final int PING_TIME = 20000
final EventBus eventBus
private final Timer timer
protected final HostCache hostCache
private static final int PING_TIME = 20000
final EventBus eventBus
private final Timer timer
protected final HostCache hostCache
protected final Persona me
protected final MuWireSettings settings
ConnectionManager() {}
ConnectionManager(EventBus eventBus, Persona me, HostCache hostCache, MuWireSettings settings) {
this.eventBus = eventBus
ConnectionManager() {}
ConnectionManager(EventBus eventBus, Persona me, HostCache hostCache, MuWireSettings settings) {
this.eventBus = eventBus
this.me = me
this.hostCache = hostCache
this.hostCache = hostCache
this.settings = settings
this.timer = new Timer("connections-pinger",true)
}
void start() {
timer.schedule({sendPings()} as TimerTask, 1000,1000)
}
void stop() {
timer.cancel()
getConnections().each { it.close() }
}
void onTrustEvent(TrustEvent e) {
if (e.level == TrustLevel.DISTRUSTED)
drop(e.persona.destination)
}
abstract void drop(Destination d)
abstract Collection<Connection> getConnections()
protected abstract int getDesiredConnections()
boolean needsConnections() {
return getConnections().size() < getDesiredConnections()
}
abstract boolean isConnected(Destination d)
abstract void onConnectionEvent(ConnectionEvent e)
abstract void onDisconnectionEvent(DisconnectionEvent e)
this.timer = new Timer("connections-pinger",true)
}
void start() {
timer.schedule({sendPings()} as TimerTask, 1000,1000)
}
void stop() {
timer.cancel()
getConnections().each { it.close() }
}
void onTrustEvent(TrustEvent e) {
if (e.level == TrustLevel.DISTRUSTED)
drop(e.persona.destination)
}
abstract void drop(Destination d)
abstract Collection<Connection> getConnections()
protected abstract int getDesiredConnections()
boolean needsConnections() {
return getConnections().size() < getDesiredConnections()
}
abstract boolean isConnected(Destination d)
abstract void onConnectionEvent(ConnectionEvent e)
abstract void onDisconnectionEvent(DisconnectionEvent e)
abstract void shutdown()
protected void sendPings() {
final long now = System.currentTimeMillis()
getConnections().each {
if (now - it.lastPingSentTime > PING_TIME)
it.sendPing()
}
}
protected void sendPings() {
final long now = System.currentTimeMillis()
getConnections().each {
if (now - it.lastPingSentTime > PING_TIME)
it.sendPing()
}
}
}

View File

@ -5,11 +5,11 @@ import com.muwire.core.Event
import net.i2p.data.Destination
class DisconnectionEvent extends Event {
Destination destination
@Override
public String toString() {
"DisconnectionEvent ${super.toString()} destination:${destination.toBase32()}"
}
Destination destination
@Override
public String toString() {
"DisconnectionEvent ${super.toString()} destination:${destination.toBase32()}"
}
}

View File

@ -8,39 +8,39 @@ import net.i2p.data.Destination
@Log
class Endpoint implements Closeable {
final Destination destination
final InputStream inputStream
final OutputStream outputStream
final Destination destination
final InputStream inputStream
final OutputStream outputStream
final def toClose
private final AtomicBoolean closed = new AtomicBoolean()
Endpoint(Destination destination, InputStream inputStream, OutputStream outputStream, def toClose) {
this.destination = destination
this.inputStream = inputStream
this.outputStream = outputStream
private final AtomicBoolean closed = new AtomicBoolean()
Endpoint(Destination destination, InputStream inputStream, OutputStream outputStream, def toClose) {
this.destination = destination
this.inputStream = inputStream
this.outputStream = outputStream
this.toClose = toClose
}
@Override
public void close() {
if (!closed.compareAndSet(false, true)) {
log.log(Level.WARNING,"Close loop detected for ${destination.toBase32()}", new Exception())
return
}
if (inputStream != null) {
try {inputStream.close()} catch (Exception ignore) {}
}
if (outputStream != null) {
try {outputStream.close()} catch (Exception ignore) {}
}
}
@Override
public void close() {
if (!closed.compareAndSet(false, true)) {
log.log(Level.WARNING,"Close loop detected for ${destination.toBase32()}", new Exception())
return
}
if (inputStream != null) {
try {inputStream.close()} catch (Exception ignore) {}
}
if (outputStream != null) {
try {outputStream.close()} catch (Exception ignore) {}
}
if (toClose != null) {
try {toClose.reset()} catch (Exception ignore) {}
}
}
@Override
public String toString() {
"destination: ${destination.toBase32()}"
}
}
}
@Override
public String toString() {
"destination: ${destination.toBase32()}"
}
}

View File

@ -5,18 +5,18 @@ import net.i2p.client.streaming.I2PSocketManager
class I2PAcceptor {
final I2PSocketManager socketManager
final I2PServerSocket serverSocket
I2PAcceptor() {}
I2PAcceptor(I2PSocketManager socketManager) {
this.socketManager = socketManager
this.serverSocket = socketManager.getServerSocket()
}
Endpoint accept() {
def socket = serverSocket.accept()
new Endpoint(socket.getPeerDestination(), socket.getInputStream(), socket.getOutputStream(), socket)
}
final I2PSocketManager socketManager
final I2PServerSocket serverSocket
I2PAcceptor() {}
I2PAcceptor(I2PSocketManager socketManager) {
this.socketManager = socketManager
this.serverSocket = socketManager.getServerSocket()
}
Endpoint accept() {
def socket = serverSocket.accept()
new Endpoint(socket.getPeerDestination(), socket.getInputStream(), socket.getOutputStream(), socket)
}
}

View File

@ -4,18 +4,18 @@ import net.i2p.client.streaming.I2PSocketManager
import net.i2p.data.Destination
class I2PConnector {
final I2PSocketManager socketManager
I2PConnector() {}
I2PConnector(I2PSocketManager socketManager) {
this.socketManager = socketManager
}
Endpoint connect(Destination dest) {
def socket = socketManager.connect(dest)
new Endpoint(dest, socket.getInputStream(), socket.getOutputStream(), socket)
}
final I2PSocketManager socketManager
I2PConnector() {}
I2PConnector(I2PSocketManager socketManager) {
this.socketManager = socketManager
}
Endpoint connect(Destination dest) {
def socket = socketManager.connect(dest)
new Endpoint(dest, socket.getInputStream(), socket.getOutputStream(), socket)
}
}

View File

@ -11,27 +11,27 @@ import com.muwire.core.trust.TrustService
import net.i2p.data.Destination
/**
* Connection where the other side is a leaf.
* Connection where the other side is a leaf.
* Such connections can only be incoming.
* @author zab
*/
class LeafConnection extends Connection {
public LeafConnection(EventBus eventBus, Endpoint endpoint, HostCache hostCache,
public LeafConnection(EventBus eventBus, Endpoint endpoint, HostCache hostCache,
TrustService trustService, MuWireSettings settings) {
super(eventBus, endpoint, true, hostCache, trustService, settings);
}
super(eventBus, endpoint, true, hostCache, trustService, settings);
}
@Override
protected void read() {
// TODO Auto-generated method stub
}
@Override
protected void read() {
// TODO Auto-generated method stub
@Override
protected void write(Object message) {
// TODO Auto-generated method stub
}
}
@Override
protected void write(Object message) {
// TODO Auto-generated method stub
}
}

View File

@ -13,68 +13,68 @@ import net.i2p.data.Destination
@Log
class LeafConnectionManager extends ConnectionManager {
final int maxConnections
final Map<Destination, UltrapeerConnection> connections = new ConcurrentHashMap()
public LeafConnectionManager(EventBus eventBus, Persona me, int maxConnections,
final int maxConnections
final Map<Destination, UltrapeerConnection> connections = new ConcurrentHashMap()
public LeafConnectionManager(EventBus eventBus, Persona me, int maxConnections,
HostCache hostCache, MuWireSettings settings) {
super(eventBus, me, hostCache, settings)
this.maxConnections = maxConnections
}
@Override
public void drop(Destination d) {
// TODO Auto-generated method stub
}
super(eventBus, me, hostCache, settings)
this.maxConnections = maxConnections
}
@Override
public void drop(Destination d) {
// TODO Auto-generated method stub
}
void onQueryEvent(QueryEvent e) {
if (me.destination == e.receivedOn) {
connections.values().each { it.sendQuery(e) }
}
}
@Override
public Collection<Connection> getConnections() {
connections.values()
}
@Override
public Collection<Connection> getConnections() {
connections.values()
}
@Override
protected int getDesiredConnections() {
return maxConnections;
}
@Override
protected int getDesiredConnections() {
return maxConnections;
}
@Override
public boolean isConnected(Destination d) {
connections.containsKey(d)
}
@Override
public boolean isConnected(Destination d) {
connections.containsKey(d)
}
@Override
public void onConnectionEvent(ConnectionEvent e) {
if (e.incoming || e.leaf) {
log.severe("Got inconsistent event as a leaf! $e")
return
}
if (e.status != ConnectionAttemptStatus.SUCCESSFUL)
return
Connection c = new UltrapeerConnection(eventBus, e.endpoint)
connections.put(e.endpoint.destination, c)
c.start()
}
@Override
public void onDisconnectionEvent(DisconnectionEvent e) {
def removed = connections.remove(e.destination)
if (removed == null)
log.severe("removed destination not present in connection manager ${e.destination.toBase32()}")
}
@Override
public void onConnectionEvent(ConnectionEvent e) {
if (e.incoming || e.leaf) {
log.severe("Got inconsistent event as a leaf! $e")
return
}
if (e.status != ConnectionAttemptStatus.SUCCESSFUL)
return
Connection c = new UltrapeerConnection(eventBus, e.endpoint)
connections.put(e.endpoint.destination, c)
c.start()
}
@Override
public void onDisconnectionEvent(DisconnectionEvent e) {
def removed = connections.remove(e.destination)
if (removed == null)
log.severe("removed destination not present in connection manager ${e.destination.toBase32()}")
}
@Override
void shutdown() {
}
}

View File

@ -20,63 +20,63 @@ import net.i2p.data.Destination
*/
@Log
class PeerConnection extends Connection {
private final DataInputStream dis
private final DataOutputStream dos
private final byte[] readHeader = new byte[3]
private final byte[] writeHeader = new byte[3]
private final JsonSlurper slurper = new JsonSlurper()
public PeerConnection(EventBus eventBus, Endpoint endpoint,
boolean incoming, HostCache hostCache, TrustService trustService,
private final DataInputStream dis
private final DataOutputStream dos
private final byte[] readHeader = new byte[3]
private final byte[] writeHeader = new byte[3]
private final JsonSlurper slurper = new JsonSlurper()
public PeerConnection(EventBus eventBus, Endpoint endpoint,
boolean incoming, HostCache hostCache, TrustService trustService,
MuWireSettings settings) {
super(eventBus, endpoint, incoming, hostCache, trustService, settings)
this.dis = new DataInputStream(endpoint.inputStream)
this.dos = new DataOutputStream(endpoint.outputStream)
}
super(eventBus, endpoint, incoming, hostCache, trustService, settings)
this.dis = new DataInputStream(endpoint.inputStream)
this.dos = new DataOutputStream(endpoint.outputStream)
}
@Override
protected void read() {
dis.readFully(readHeader)
int length = DataUtil.readLength(readHeader)
log.fine("$name read length $length")
byte[] payload = new byte[length]
dis.readFully(payload)
if ((readHeader[0] & (byte)0x80) == 0x80) {
// TODO process binary
} else {
def json = slurper.parse(payload)
if (json.type == null)
throw new Exception("missing json type")
switch(json.type) {
case "Ping" : handlePing(); break;
case "Pong" : handlePong(json); break;
@Override
protected void read() {
dis.readFully(readHeader)
int length = DataUtil.readLength(readHeader)
log.fine("$name read length $length")
byte[] payload = new byte[length]
dis.readFully(payload)
if ((readHeader[0] & (byte)0x80) == 0x80) {
// TODO process binary
} else {
def json = slurper.parse(payload)
if (json.type == null)
throw new Exception("missing json type")
switch(json.type) {
case "Ping" : handlePing(); break;
case "Pong" : handlePong(json); break;
case "Search": handleSearch(json); break
default :
throw new Exception("unknown json type ${json.type}")
}
}
}
default :
throw new Exception("unknown json type ${json.type}")
}
}
}
@Override
protected void write(Object message) {
byte[] payload
if (message instanceof Map) {
payload = JsonOutput.toJson(message).bytes
DataUtil.packHeader(payload.length, writeHeader)
log.fine "$name writing message type ${message.type} length $payload.length"
writeHeader[0] &= (byte)0x7F
} else {
// TODO: write binary
}
dos.write(writeHeader)
dos.write(payload)
dos.flush()
}
@Override
protected void write(Object message) {
byte[] payload
if (message instanceof Map) {
payload = JsonOutput.toJson(message).bytes
DataUtil.packHeader(payload.length, writeHeader)
log.fine "$name writing message type ${message.type} length $payload.length"
writeHeader[0] &= (byte)0x7F
} else {
// TODO: write binary
}
dos.write(writeHeader)
dos.write(payload)
dos.flush()
}
}
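PeerConnection frames every message with a 3-byte header: the top bit of the first byte marks a binary payload (cleared for JSON), and DataUtil.readLength()/packHeader() convert the remaining bits to and from a payload length. The DataUtil implementation is not part of this diff; assuming the length sits big-endian in the low 23 bits, an equivalent sketch would be:

    // Assumed framing: [ flag | length bits 22-16 ] [ length bits 15-8 ] [ length bits 7-0 ]
    byte[] packHeader(int length, boolean binary) {
        byte[] header = new byte[3]
        header[0] = (byte) ((length >> 16) & 0x7F)
        if (binary)
            header[0] = (byte) (header[0] | 0x80)   // top bit set -> binary payload
        header[1] = (byte) ((length >> 8) & 0xFF)
        header[2] = (byte) (length & 0xFF)
        header
    }

    int readLength(byte[] header) {
        ((header[0] & 0x7F) << 16) | ((header[1] & 0xFF) << 8) | (header[2] & 0xFF)
    }

    assert readLength(packHeader(1234, false)) == 1234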

View File

@ -17,30 +17,30 @@ import net.i2p.data.Destination
*/
class UltrapeerConnection extends Connection {
public UltrapeerConnection(EventBus eventBus, Endpoint endpoint, HostCache hostCache, TrustService trustService) {
super(eventBus, endpoint, false, hostCache, trustService)
}
public UltrapeerConnection(EventBus eventBus, Endpoint endpoint, HostCache hostCache, TrustService trustService) {
super(eventBus, endpoint, false, hostCache, trustService)
}
@Override
protected void read() {
// TODO Auto-generated method stub
}
@Override
protected void read() {
// TODO Auto-generated method stub
@Override
protected void write(Object message) {
if (message instanceof Map) {
writeJsonMessage(message)
} else {
writeBinaryMessage(message)
}
}
}
private void writeJsonMessage(def message) {
}
private void writeBinaryMessage(def message) {
}
@Override
protected void write(Object message) {
if (message instanceof Map) {
writeJsonMessage(message)
} else {
writeBinaryMessage(message)
}
}
private void writeJsonMessage(def message) {
}
private void writeBinaryMessage(def message) {
}
}

View File

@ -15,28 +15,28 @@ import net.i2p.data.Destination
@Log
class UltrapeerConnectionManager extends ConnectionManager {
final int maxPeers, maxLeafs
final TrustService trustService
final Map<Destination, PeerConnection> peerConnections = new ConcurrentHashMap()
final Map<Destination, LeafConnection> leafConnections = new ConcurrentHashMap()
UltrapeerConnectionManager() {}
public UltrapeerConnectionManager(EventBus eventBus, Persona me, int maxPeers, int maxLeafs,
final int maxPeers, maxLeafs
final TrustService trustService
final Map<Destination, PeerConnection> peerConnections = new ConcurrentHashMap()
final Map<Destination, LeafConnection> leafConnections = new ConcurrentHashMap()
UltrapeerConnectionManager() {}
public UltrapeerConnectionManager(EventBus eventBus, Persona me, int maxPeers, int maxLeafs,
HostCache hostCache, TrustService trustService, MuWireSettings settings) {
super(eventBus, me, hostCache, settings)
this.maxPeers = maxPeers
this.maxLeafs = maxLeafs
super(eventBus, me, hostCache, settings)
this.maxPeers = maxPeers
this.maxLeafs = maxLeafs
this.trustService = trustService
}
@Override
public void drop(Destination d) {
peerConnections.get(d)?.close()
}
@Override
public void drop(Destination d) {
peerConnections.get(d)?.close()
leafConnections.get(d)?.close()
}
}
void onQueryEvent(QueryEvent e) {
forwardQueryToLeafs(e)
if (!e.firstHop)
@ -50,58 +50,58 @@ class UltrapeerConnectionManager extends ConnectionManager {
}
}
@Override
public Collection<Connection> getConnections() {
def rv = new ArrayList(peerConnections.size() + leafConnections.size())
rv.addAll(peerConnections.values())
rv.addAll(leafConnections.values())
rv
}
boolean hasLeafSlots() {
leafConnections.size() < maxLeafs
}
boolean hasPeerSlots() {
peerConnections.size() < maxPeers
}
@Override
protected int getDesiredConnections() {
return maxPeers / 2;
}
@Override
public boolean isConnected(Destination d) {
peerConnections.containsKey(d) || leafConnections.containsKey(d)
}
@Override
public Collection<Connection> getConnections() {
def rv = new ArrayList(peerConnections.size() + leafConnections.size())
rv.addAll(peerConnections.values())
rv.addAll(leafConnections.values())
rv
}
boolean hasLeafSlots() {
leafConnections.size() < maxLeafs
}
boolean hasPeerSlots() {
peerConnections.size() < maxPeers
}
@Override
protected int getDesiredConnections() {
return maxPeers / 2;
}
@Override
public boolean isConnected(Destination d) {
peerConnections.containsKey(d) || leafConnections.containsKey(d)
}
@Override
public void onConnectionEvent(ConnectionEvent e) {
if (!e.incoming && e.leaf) {
log.severe("Inconsistent event $e")
return
}
if (e.status != ConnectionAttemptStatus.SUCCESSFUL)
return
Connection c = e.leaf ?
new LeafConnection(eventBus, e.endpoint, hostCache, trustService, settings) :
new PeerConnection(eventBus, e.endpoint, e.incoming, hostCache, trustService, settings)
def map = e.leaf ? leafConnections : peerConnections
map.put(e.endpoint.destination, c)
c.start()
}
@Override
public void onDisconnectionEvent(DisconnectionEvent e) {
def removed = peerConnections.remove(e.destination)
if (removed == null)
removed = leafConnections.remove(e.destination)
if (removed == null)
log.severe("Removed connection not present in either leaf or peer map ${e.destination.toBase32()}")
}
@Override
public void onConnectionEvent(ConnectionEvent e) {
if (!e.incoming && e.leaf) {
log.severe("Inconsistent event $e")
return
}
if (e.status != ConnectionAttemptStatus.SUCCESSFUL)
return
Connection c = e.leaf ?
new LeafConnection(eventBus, e.endpoint, hostCache, trustService, settings) :
new PeerConnection(eventBus, e.endpoint, e.incoming, hostCache, trustService, settings)
def map = e.leaf ? leafConnections : peerConnections
map.put(e.endpoint.destination, c)
c.start()
}
@Override
public void onDisconnectionEvent(DisconnectionEvent e) {
def removed = peerConnections.remove(e.destination)
if (removed == null)
removed = leafConnections.remove(e.destination)
if (removed == null)
log.severe("Removed connection not present in either leaf or peer map ${e.destination.toBase32()}")
}
@Override
void shutdown() {
peerConnections.values().stream().parallel().forEach({v -> v.close()})
@ -109,8 +109,8 @@ class UltrapeerConnectionManager extends ConnectionManager {
peerConnections.clear()
leafConnections.clear()
}
void forwardQueryToLeafs(QueryEvent e) {
}
void forwardQueryToLeafs(QueryEvent e) {
}
}

View File

@ -0,0 +1,9 @@
package com.muwire.core.content
import com.muwire.core.Event
class ContentControlEvent extends Event {
String term
boolean regex
boolean add
}

View File

@ -0,0 +1,30 @@
package com.muwire.core.content
import java.util.concurrent.ConcurrentHashMap
import com.muwire.core.search.QueryEvent
import net.i2p.util.ConcurrentHashSet
class ContentManager {
Set<Matcher> matchers = new ConcurrentHashSet()
void onContentControlEvent(ContentControlEvent e) {
Matcher m
if (e.regex)
m = new RegexMatcher(e.term)
else
m = new KeywordMatcher(e.term)
if (e.add)
matchers.add(m)
else
matchers.remove(m)
}
void onQueryEvent(QueryEvent e) {
if (e.searchEvent.searchTerms == null)
return
matchers.each { it.process(e) }
}
}
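ContentManager adds or removes matchers in response to ContentControlEvent and then feeds every query it sees through them. A minimal usage sketch, calling the handlers directly rather than through the event-bus registration (which is outside this diff):

    import com.muwire.core.content.ContentControlEvent
    import com.muwire.core.content.ContentManager

    def manager = new ContentManager()

    // register a keyword rule and a regex rule
    manager.onContentControlEvent(new ContentControlEvent(term: "ebook", regex: false, add: true))
    manager.onContentControlEvent(new ContentControlEvent(term: "(?i)linux.*iso", regex: true, add: true))

    // every QueryEvent with search terms is then run through the matchers:
    // manager.onQueryEvent(queryEvent)    // hits accumulate in each matcher's 'matches' list

    // the same event with add: false removes a rule
    manager.onContentControlEvent(new ContentControlEvent(term: "ebook", regex: false, add: false))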

View File

@ -0,0 +1,36 @@
package com.muwire.core.content
class KeywordMatcher extends Matcher {
private final String keyword
KeywordMatcher(String keyword) {
this.keyword = keyword
}
@Override
protected boolean match(List<String> searchTerms) {
boolean found = false
searchTerms.each {
if (keyword == it)
found = true
}
found
}
@Override
public String getTerm() {
keyword
}
@Override
public int hashCode() {
keyword.hashCode()
}
@Override
public boolean equals(Object o) {
if (!(o instanceof KeywordMatcher))
return false
KeywordMatcher other = (KeywordMatcher) o
keyword.equals(other.keyword)
}
}

View File

@ -0,0 +1,9 @@
package com.muwire.core.content
import com.muwire.core.Persona
class Match {
Persona persona
String [] keywords
long timestamp
}

View File

@ -0,0 +1,20 @@
package com.muwire.core.content
import com.muwire.core.search.QueryEvent
abstract class Matcher {
final List<Match> matches = Collections.synchronizedList(new ArrayList<>())
final Set<UUID> uuids = new HashSet<>()
protected abstract boolean match(List<String> searchTerms);
public abstract String getTerm();
public void process(QueryEvent qe) {
def terms = qe.searchEvent.searchTerms
if (match(terms) && uuids.add(qe.searchEvent.uuid)) {
long now = System.currentTimeMillis()
matches << new Match(persona : qe.originator, keywords : terms, timestamp : now)
}
}
}

View File

@ -0,0 +1,35 @@
package com.muwire.core.content
import java.util.regex.Pattern
import java.util.stream.Collectors
class RegexMatcher extends Matcher {
private final Pattern pattern
RegexMatcher(String pattern) {
this.pattern = Pattern.compile(pattern)
}
@Override
protected boolean match(List<String> keywords) {
String combined = keywords.join(" ")
return pattern.matcher(combined).find()
}
@Override
public String getTerm() {
pattern.pattern()
}
@Override
public int hashCode() {
pattern.pattern().hashCode()
}
@Override
public boolean equals(Object o) {
if (!(o instanceof RegexMatcher))
return false
RegexMatcher other = (RegexMatcher) o
pattern.pattern() == other.pattern.pattern()
}
}
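The two matcher types treat a query differently: KeywordMatcher requires its term to equal one of the search terms, while RegexMatcher joins the terms with spaces and searches for the pattern anywhere in that string. The checks below mirror those match() implementations on an illustrative query:

    import java.util.regex.Pattern

    def terms = ["linux", "mint", "iso"]

    // KeywordMatcher semantics: exact membership of the configured keyword
    assert terms.contains("linux")
    assert !terms.contains("linu")

    // RegexMatcher semantics: the pattern is searched in "linux mint iso"
    assert Pattern.compile("linux.*iso").matcher(terms.join(" ")).find()
    assert !Pattern.compile("^mint.*iso").matcher(terms.join(" ")).find()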

View File

@ -21,5 +21,5 @@ class BadHashException extends Exception {
public BadHashException(Throwable cause) {
super(cause);
}
}

View File

@ -3,6 +3,10 @@ package com.muwire.core.download
import com.muwire.core.connection.I2PConnector
import com.muwire.core.files.FileDownloadedEvent
import com.muwire.core.files.FileHasher
import com.muwire.core.mesh.Mesh
import com.muwire.core.mesh.MeshManager
import com.muwire.core.trust.TrustLevel
import com.muwire.core.trust.TrustService
import com.muwire.core.util.DataUtil
import groovy.json.JsonBuilder
@ -14,31 +18,40 @@ import net.i2p.util.ConcurrentHashSet
import com.muwire.core.EventBus
import com.muwire.core.InfoHash
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.UILoadedEvent
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.Executor
import java.util.concurrent.Executors
public class DownloadManager {
private final EventBus eventBus
private final TrustService trustService
private final MeshManager meshManager
private final MuWireSettings muSettings
private final I2PConnector connector
private final Executor executor
private final File incompletes, home
private final Persona me
private final Set<Downloader> downloaders = new ConcurrentHashSet<>()
public DownloadManager(EventBus eventBus, I2PConnector connector, File home, Persona me) {
private final Map<InfoHash, Downloader> downloaders = new ConcurrentHashMap<>()
public DownloadManager(EventBus eventBus, TrustService trustService, MeshManager meshManager, MuWireSettings muSettings,
I2PConnector connector, File home, Persona me) {
this.eventBus = eventBus
this.trustService = trustService
this.meshManager = meshManager
this.muSettings = muSettings
this.connector = connector
this.incompletes = new File(home,"incompletes")
this.home = home
this.me = me
incompletes.mkdir()
this.executor = Executors.newCachedThreadPool({ r ->
Thread rv = new Thread(r)
rv.setName("download-worker")
@ -46,39 +59,49 @@ public class DownloadManager {
rv
})
}
public void onUIDownloadEvent(UIDownloadEvent e) {
def size = e.result[0].size
def infohash = e.result[0].infohash
def pieceSize = e.result[0].pieceSize
Set<Destination> destinations = new HashSet<>()
e.result.each {
e.result.each {
destinations.add(it.sender.destination)
}
destinations.addAll(e.sources)
destinations.remove(me.destination)
Pieces pieces = getPieces(infohash, size, pieceSize, e.sequential)
def downloader = new Downloader(eventBus, this, me, e.target, size,
infohash, pieceSize, connector, destinations,
incompletes)
downloaders.add(downloader)
incompletes, pieces)
downloaders.put(infohash, downloader)
persistDownloaders()
executor.execute({downloader.download()} as Runnable)
eventBus.publish(new DownloadStartedEvent(downloader : downloader))
}
public void onUIDownloadCancelledEvent(UIDownloadCancelledEvent e) {
downloaders.remove(e.downloader)
downloaders.remove(e.downloader.infoHash)
persistDownloaders()
}
public void onUIDownloadPausedEvent(UIDownloadPausedEvent e) {
persistDownloaders()
}
public void onUIDownloadResumedEvent(UIDownloadResumedEvent e) {
persistDownloaders()
}
void resume(Downloader downloader) {
executor.execute({downloader.download() as Runnable})
}
void onUILoadedEvent(UILoadedEvent e) {
File downloadsFile = new File(home, "downloads.json")
if (!downloadsFile.exists())
@ -88,7 +111,7 @@ public class DownloadManager {
def json = slurper.parseText(it)
File file = new File(DataUtil.readi18nString(Base64.decode(json.file)))
def destinations = new HashSet<>()
json.destinations.each { destination ->
json.destinations.each { destination ->
destinations.add new Destination(destination)
}
InfoHash infoHash
@ -99,23 +122,58 @@ public class DownloadManager {
byte [] root = Base64.decode(json.hashRoot)
infoHash = new InfoHash(root)
}
boolean sequential = false
if (json.sequential != null)
sequential = json.sequential
Pieces pieces = getPieces(infoHash, (long)json.length, json.pieceSizePow2, sequential)
def downloader = new Downloader(eventBus, this, me, file, (long)json.length,
infoHash, json.pieceSizePow2, connector, destinations, incompletes)
downloaders.add(downloader)
downloader.download()
infoHash, json.pieceSizePow2, connector, destinations, incompletes, pieces)
if (json.paused != null)
downloader.paused = json.paused
downloaders.put(infoHash, downloader)
downloader.readPieces()
if (!downloader.paused)
downloader.download()
eventBus.publish(new DownloadStartedEvent(downloader : downloader))
}
}
private Pieces getPieces(InfoHash infoHash, long length, int pieceSizePow2, boolean sequential) {
int pieceSize = 0x1 << pieceSizePow2
int nPieces = (int)(length / pieceSize)
if (length % pieceSize != 0)
nPieces++
Mesh mesh = meshManager.getOrCreate(infoHash, nPieces, sequential)
mesh.pieces
}
void onSourceDiscoveredEvent(SourceDiscoveredEvent e) {
Downloader downloader = downloaders.get(e.infoHash)
if (downloader == null)
return
boolean ok = false
switch(trustService.getLevel(e.source.destination)) {
case TrustLevel.TRUSTED: ok = true; break
case TrustLevel.NEUTRAL: ok = muSettings.allowUntrusted; break
case TrustLevel.DISTRUSTED: ok = false; break
}
if (ok)
downloader.addSource(e.source.destination)
}
void onFileDownloadedEvent(FileDownloadedEvent e) {
downloaders.remove(e.downloader)
downloaders.remove(e.downloader.infoHash)
persistDownloaders()
}
private void persistDownloaders() {
File downloadsFile = new File(home,"downloads.json")
downloadsFile.withPrintWriter { writer ->
downloaders.each { downloader ->
downloadsFile.withPrintWriter { writer ->
downloaders.values().each { downloader ->
if (!downloader.cancelled) {
def json = [:]
json.file = Base64.encode(DataUtil.encodei18nString(downloader.file.getAbsolutePath()))
@ -126,20 +184,25 @@ public class DownloadManager {
destinations << it.toBase64()
}
json.destinations = destinations
InfoHash infoHash = downloader.getInfoHash()
if (infoHash.hashList != null)
json.hashList = Base64.encode(infoHash.hashList)
else
json.hashRoot = Base64.encode(infoHash.getRoot())
json.paused = downloader.paused
json.sequential = downloader.pieces.ratio == 0f
writer.println(JsonOutput.toJson(json))
}
}
}
}
public void shutdown() {
downloaders.each { it.stop() }
downloaders.values().each { it.stop() }
Downloader.executorService.shutdownNow()
}
}
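
A brief illustration of the piece-count arithmetic used by getPieces() above: the piece size is 2^pieceSizePow2 bytes and the count is the ceiling of length divided by pieceSize. This is a minimal, standalone Groovy sketch with hypothetical numbers, not code from the repository.

// illustrative only: mirrors the ceiling division in getPieces()
int pieceSizePow2 = 17                       // 128 KB pieces
long length = 1000000000L                    // hypothetical 1 GB file
int pieceSize = 0x1 << pieceSizePow2         // 131072
int nPieces = (int) (length / pieceSize)
if (length % pieceSize != 0)
    nPieces++
assert nPieces == 7630                       // ceil(1000000000 / 131072)
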

View File

@ -3,13 +3,18 @@ package com.muwire.core.download;
import net.i2p.data.Base64
import com.muwire.core.Constants
import com.muwire.core.EventBus
import com.muwire.core.InfoHash
import com.muwire.core.Persona
import com.muwire.core.connection.Endpoint
import com.muwire.core.util.DataUtil
import static com.muwire.core.util.DataUtil.readTillRN
import groovy.util.logging.Log
import java.nio.ByteBuffer
import java.nio.MappedByteBuffer
import java.nio.channels.FileChannel
import java.nio.charset.StandardCharsets
import java.nio.file.Files
@ -20,9 +25,8 @@ import java.util.logging.Level
@Log
class DownloadSession {
private static int SAMPLES = 10
private final EventBus eventBus
private final String meB64
private final Pieces pieces
private final InfoHash infoHash
@ -30,15 +34,17 @@ class DownloadSession {
private final File file
private final int pieceSize
private final long fileLength
private final Set<Integer> available
private final MessageDigest digest
private final LinkedList<Long> timestamps = new LinkedList<>()
private final LinkedList<Integer> reads = new LinkedList<>()
private ByteBuffer mapped
DownloadSession(String meB64, Pieces pieces, InfoHash infoHash, Endpoint endpoint, File file,
int pieceSize, long fileLength) {
private long lastSpeedRead = System.currentTimeMillis()
private long dataSinceLastRead
private MappedByteBuffer mapped
DownloadSession(EventBus eventBus, String meB64, Pieces pieces, InfoHash infoHash, Endpoint endpoint, File file,
int pieceSize, long fileLength, Set<Integer> available) {
this.eventBus = eventBus
this.meB64 = meB64
this.pieces = pieces
this.endpoint = endpoint
@ -46,6 +52,7 @@ class DownloadSession {
this.file = file
this.pieceSize = pieceSize
this.fileLength = fileLength
this.available = available
try {
digest = MessageDigest.getInstance("SHA-256")
} catch (NoSuchAlgorithmException impossible) {
@ -53,7 +60,7 @@ class DownloadSession {
System.exit(1)
}
}
/**
* @return whether the request will proceed. The only time it will not
* proceed is if all the pieces have been claimed by other sessions.
@ -62,74 +69,115 @@ class DownloadSession {
public boolean request() throws IOException {
OutputStream os = endpoint.getOutputStream()
InputStream is = endpoint.getInputStream()
int piece = pieces.claim()
if (piece == -1)
int[] pieceAndPosition
if (available.isEmpty())
pieceAndPosition = pieces.claim()
else
pieceAndPosition = pieces.claim(new HashSet<>(available))
if (pieceAndPosition == null)
return false
int piece = pieceAndPosition[0]
int position = pieceAndPosition[1]
boolean steal = pieceAndPosition[2] == 1
boolean unclaim = true
log.info("will download piece $piece")
long start = piece * pieceSize
long end = Math.min(fileLength, start + pieceSize) - 1
long length = end - start + 1
log.info("will download piece $piece from position $position steal $steal")
long pieceStart = piece * ((long)pieceSize)
long end = Math.min(fileLength, pieceStart + pieceSize) - 1
long start = pieceStart + position
String root = Base64.encode(infoHash.getRoot())
try {
os.write("GET $root\r\n".getBytes(StandardCharsets.US_ASCII))
os.write("Range: $start-$end\r\n".getBytes(StandardCharsets.US_ASCII))
os.write("X-Persona: $meB64\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
os.write("X-Persona: $meB64\r\n".getBytes(StandardCharsets.US_ASCII))
String xHave = DataUtil.encodeXHave(pieces.getDownloaded(), pieces.nPieces)
os.write("X-Have: $xHave\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
os.flush()
String code = readTillRN(is)
if (code.startsWith("404 ")) {
String codeString = readTillRN(is)
int space = codeString.indexOf(' ')
if (space > 0)
codeString = codeString.substring(0, space)
int code = Integer.parseInt(codeString.trim())
if (code == 404) {
log.warning("file not found")
endpoint.close()
return false
}
if (code.startsWith("416 ")) {
log.warning("range $start-$end cannot be satisfied")
return // leave endpoint open
}
if (!code.startsWith("200 ")) {
if (!(code == 200 || code == 416)) {
log.warning("unknown code $code")
endpoint.close()
return false
}
// parse all headers
Set<String> headers = new HashSet<>()
Map<String,String> headers = new HashMap<>()
String header
while((header = readTillRN(is)) != "" && headers.size() < Constants.MAX_HEADERS)
headers.add(header)
long receivedStart = -1
long receivedEnd = -1
for (String receivedHeader : headers) {
def group = (receivedHeader =~ /^Content-Range: (\d+)-(\d+)$/)
if (group.size() != 1) {
log.info("ignoring header $receivedHeader")
continue
}
receivedStart = Long.parseLong(group[0][1])
receivedEnd = Long.parseLong(group[0][2])
while((header = readTillRN(is)) != "" && headers.size() < Constants.MAX_HEADERS) {
int colon = header.indexOf(':')
if (colon == -1 || colon == header.length() - 1)
throw new IOException("invalid header $header")
String key = header.substring(0, colon)
String value = header.substring(colon + 1)
headers[key] = value.trim()
}
// parse X-Alt if present
if (headers.containsKey("X-Alt")) {
headers["X-Alt"].split(",").each {
if (it.length() > 0) {
byte [] raw = Base64.decode(it)
Persona source = new Persona(new ByteArrayInputStream(raw))
eventBus.publish(new SourceDiscoveredEvent(infoHash : infoHash, source : source))
}
}
}
// parse X-Have if present
if (headers.containsKey("X-Have")) {
DataUtil.decodeXHave(headers["X-Have"]).each {
available.add(it)
}
if (!available.contains(piece))
return true // try again next time
} else {
if (code != 200)
throw new IOException("Code $code but no X-Have")
available.clear()
}
if (code != 200)
return true
String range = headers["Content-Range"]
if (range == null)
throw new IOException("Code 200 but no Content-Range")
def group = (range =~ /^(\d+)-(\d+)$/)
if (group.size() != 1)
throw new IOException("invalid Content-Range header $range")
long receivedStart = Long.parseLong(group[0][1])
long receivedEnd = Long.parseLong(group[0][2])
if (receivedStart != start || receivedEnd != end) {
log.warning("We don't support mismatching ranges yet")
endpoint.close()
return false
}
// start the download
FileChannel channel
try {
channel = Files.newByteChannel(file.toPath(), EnumSet.of(StandardOpenOption.READ, StandardOpenOption.WRITE,
StandardOpenOption.SPARSE, StandardOpenOption.CREATE)) // TODO: double-check, maybe CREATE_NEW
mapped = channel.map(FileChannel.MapMode.READ_WRITE, start, end - start + 1)
StandardOpenOption.SPARSE, StandardOpenOption.CREATE))
mapped = channel.map(FileChannel.MapMode.READ_WRITE, pieceStart, end - pieceStart + 1)
mapped.position(position)
byte[] tmp = new byte[0x1 << 13]
while(mapped.hasRemaining()) {
@ -140,13 +188,8 @@ class DownloadSession {
throw new IOException()
synchronized(this) {
mapped.put(tmp, 0, read)
if (timestamps.size() == SAMPLES) {
timestamps.removeFirst()
reads.removeFirst()
}
timestamps.addLast(System.currentTimeMillis())
reads.addLast(read)
dataSinceLastRead += read
pieces.markPartial(piece, mapped.position())
}
}
@ -155,45 +198,35 @@ class DownloadSession {
byte [] hash = digest.digest()
byte [] expected = new byte[32]
System.arraycopy(infoHash.getHashList(), piece * 32, expected, 0, 32)
if (hash != expected)
throw new BadHashException()
if (hash != expected) {
pieces.markPartial(piece, 0)
throw new BadHashException("bad hash on piece $piece")
}
} finally {
try { channel?.close() } catch (IOException ignore) {}
DataUtil.tryUnmap(mapped)
}
pieces.markDownloaded(piece)
unclaim = false
} finally {
if (unclaim)
if (unclaim && !steal)
pieces.unclaim(piece)
}
return true
}
synchronized int positionInPiece() {
if (mapped == null)
return 0
mapped.position()
}
synchronized int speed() {
if (timestamps.size() < SAMPLES)
return 0
int totalRead = 0
int idx = 0
final long now = System.currentTimeMillis()
while(idx < SAMPLES && timestamps.get(idx) < now - 1000)
idx++
if (idx == SAMPLES)
return 0
if (idx == SAMPLES - 1)
return reads[idx]
long interval = timestamps.last - timestamps[idx]
if (interval == 0)
interval = 1
for (int i = idx; i < SAMPLES; i++)
totalRead += reads[idx]
(int)(totalRead * 1000.0 / interval)
long interval = Math.max(1000, now - lastSpeedRead)
lastSpeedRead = now;
int rv = (int) (dataSinceLastRead * 1000.0 / interval)
dataSinceLastRead = 0
rv
}
}
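
A small worked example of the range arithmetic in request() above, resuming a partially downloaded piece. The values are hypothetical; only the formulas mirror the code.

// hypothetical values, mirroring the Range computation in request()
int piece = 3
int position = 4096                                     // bytes of this piece already on disk
int pieceSize = 0x1 << 17                               // 131072
long fileLength = 500000L
long pieceStart = piece * ((long)pieceSize)             // 393216
long end = Math.min(fileLength, pieceStart + pieceSize) - 1
long start = pieceStart + position
assert start == 397312L && end == 499999L
// the session sends "Range: $start-$end", maps the whole piece into memory,
// positions the buffer at 'position', then appends the received bytes
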

View File

@ -7,6 +7,7 @@ import com.muwire.core.connection.Endpoint
import java.nio.file.AtomicMoveNotSupportedException
import java.nio.file.Files
import java.nio.file.StandardCopyOption
import java.time.Instant
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
@ -18,6 +19,7 @@ import com.muwire.core.DownloadedFile
import com.muwire.core.EventBus
import com.muwire.core.connection.I2PConnector
import com.muwire.core.files.FileDownloadedEvent
import com.muwire.core.util.DataUtil
import groovy.util.logging.Log
import net.i2p.data.Destination
@ -25,9 +27,9 @@ import net.i2p.util.ConcurrentHashSet
@Log
public class Downloader {
public enum DownloadState { CONNECTING, HASHLIST, DOWNLOADING, FAILED, CANCELLED, FINISHED }
public enum DownloadState { CONNECTING, HASHLIST, DOWNLOADING, FAILED, CANCELLED, PAUSED, FINISHED }
private enum WorkerState { CONNECTING, HASHLIST, DOWNLOADING, FINISHED}
private static final ExecutorService executorService = Executors.newCachedThreadPool({r ->
Thread rv = new Thread(r)
rv.setName("download worker")
@ -36,8 +38,8 @@ public class Downloader {
})
private final EventBus eventBus
private final DownloadManager downloadManager
private final Persona me
private final DownloadManager downloadManager
private final Persona me
private final File file
private final Pieces pieces
private final long length
@ -51,16 +53,21 @@ public class Downloader {
final int pieceSizePow2
private final Map<Destination, DownloadWorker> activeWorkers = new ConcurrentHashMap<>()
private final Set<Destination> successfulDestinations = new ConcurrentHashSet<>()
private volatile boolean cancelled
private volatile boolean cancelled, paused
private final AtomicBoolean eventFired = new AtomicBoolean()
private boolean piecesFileClosed
public Downloader(EventBus eventBus, DownloadManager downloadManager,
Persona me, File file, long length, InfoHash infoHash,
private ArrayList speedArr = new ArrayList<Integer>()
private int speedPos = 0
private int speedAvg = 0
private long timestamp = Instant.now().toEpochMilli()
public Downloader(EventBus eventBus, DownloadManager downloadManager,
Persona me, File file, long length, InfoHash infoHash,
int pieceSizePow2, I2PConnector connector, Set<Destination> destinations,
File incompletes) {
File incompletes, Pieces pieces) {
this.eventBus = eventBus
this.me = me
this.downloadManager = downloadManager
@ -73,25 +80,22 @@ public class Downloader {
this.incompleteFile = new File(incompletes, file.getName()+".part")
this.pieceSizePow2 = pieceSizePow2
this.pieceSize = 1 << pieceSizePow2
int nPieces
if (length % pieceSize == 0)
nPieces = length / pieceSize
else
nPieces = length / pieceSize + 1
this.nPieces = nPieces
pieces = new Pieces(nPieces, Constants.DOWNLOAD_SEQUENTIAL_RATIO)
this.pieces = pieces
this.nPieces = pieces.nPieces
// default size gives a 5-second rolling average: 5 elements, one per 1-second interval
// easily adjustable by changing the size of speedArr
this.speedArr = [ 0, 0, 0, 0, 0 ]
}
public synchronized InfoHash getInfoHash() {
infoHash
}
private synchronized void setInfoHash(InfoHash infoHash) {
this.infoHash = infoHash
}
void download() {
readPieces()
destinations.each {
@ -102,49 +106,77 @@ public class Downloader {
}
}
}
void readPieces() {
if (!piecesFile.exists())
return
piecesFile.eachLine {
int piece = Integer.parseInt(it)
pieces.markDownloaded(piece)
piecesFile.eachLine {
String [] split = it.split(",")
int piece = Integer.parseInt(split[0])
if (split.length == 1)
pieces.markDownloaded(piece)
else {
int position = Integer.parseInt(split[1])
pieces.markPartial(piece, position)
}
}
}
void writePieces() {
synchronized(piecesFile) {
if (piecesFileClosed)
return
piecesFile.withPrintWriter { writer ->
pieces.getDownloaded().each { piece ->
writer.println(piece)
}
pieces.write(writer)
}
}
}
public long donePieces() {
pieces.donePieces()
}
public int speed() {
int total = 0
int currSpeed = 0
if (getCurrentState() == DownloadState.DOWNLOADING) {
activeWorkers.values().each {
if (it.currentState == WorkerState.DOWNLOADING)
total += it.speed()
currSpeed += it.speed()
}
}
total
// normalize to speedArr.size
currSpeed /= speedArr.size()
// compute new speedAvg and update speedArr
if ( speedArr[speedPos] > speedAvg ) {
speedAvg = 0
} else {
speedAvg -= speedArr[speedPos]
}
speedAvg += currSpeed
speedArr[speedPos] = currSpeed
// this might be necessary due to rounding errors
if (speedAvg < 0)
speedAvg = 0
// rolling index over the speedArr
speedPos++
if (speedPos >= speedArr.size())
speedPos=0
speedAvg
}
public DownloadState getCurrentState() {
if (cancelled)
return DownloadState.CANCELLED
if (paused)
return DownloadState.PAUSED
boolean allFinished = true
activeWorkers.values().each {
activeWorkers.values().each {
allFinished &= it.currentState == WorkerState.FINISHED
}
if (allFinished) {
@ -152,22 +184,22 @@ public class Downloader {
return DownloadState.FINISHED
return DownloadState.FAILED
}
// if at least one is downloading...
boolean oneDownloading = false
activeWorkers.values().each {
activeWorkers.values().each {
if (it.currentState == WorkerState.DOWNLOADING) {
oneDownloading = true
return
return
}
}
if (oneDownloading)
return DownloadState.DOWNLOADING
// at least one is requesting hashlist
boolean oneHashlist = false
activeWorkers.values().each {
activeWorkers.values().each {
if (it.currentState == WorkerState.HASHLIST) {
oneHashlist = true
return
@ -175,10 +207,10 @@ public class Downloader {
}
if (oneHashlist)
return DownloadState.HASHLIST
return DownloadState.CONNECTING
}
public void cancel() {
cancelled = true
stop()
@ -187,24 +219,32 @@ public class Downloader {
piecesFile.delete()
}
incompleteFile.delete()
pieces.clearAll()
}
public void pause() {
paused = true
stop()
}
void stop() {
activeWorkers.values().each {
activeWorkers.values().each {
it.cancel()
}
}
public int activeWorkers() {
int active = 0
activeWorkers.values().each {
activeWorkers.values().each {
if (it.currentState != WorkerState.FINISHED)
active++
}
active
}
public void resume() {
paused = false
readPieces()
destinations.each { destination ->
def worker = activeWorkers.get(destination)
if (worker != null) {
@ -220,18 +260,27 @@ public class Downloader {
}
}
}
void addSource(Destination d) {
if (activeWorkers.containsKey(d))
return
DownloadWorker newWorker = new DownloadWorker(d)
activeWorkers.put(d, newWorker)
executorService.submit(newWorker)
}
class DownloadWorker implements Runnable {
private final Destination destination
private volatile WorkerState currentState
private volatile Thread downloadThread
private Endpoint endpoint
private volatile DownloadSession currentSession
private final Set<Integer> available = new HashSet<>()
DownloadWorker(Destination destination) {
this.destination = destination
}
public void run() {
downloadThread = Thread.currentThread()
currentState = WorkerState.CONNECTING
@ -247,7 +296,8 @@ public class Downloader {
currentState = WorkerState.DOWNLOADING
boolean requestPerformed
while(!pieces.isComplete()) {
currentSession = new DownloadSession(me.toBase64(), pieces, getInfoHash(), endpoint, incompleteFile, pieceSize, length)
currentSession = new DownloadSession(eventBus, me.toBase64(), pieces, getInfoHash(),
endpoint, incompleteFile, pieceSize, length, available)
requestPerformed = currentSession.request()
if (!requestPerformed)
break
@ -255,14 +305,19 @@ public class Downloader {
writePieces()
}
} catch (Exception bad) {
log.log(Level.WARNING,"Exception while downloading",bad)
log.log(Level.WARNING,"Exception while downloading",DataUtil.findRoot(bad))
} finally {
writePieces()
currentState = WorkerState.FINISHED
if (pieces.isComplete() && eventFired.compareAndSet(false, true)) {
synchronized(piecesFile) {
piecesFileClosed = true
piecesFile.delete()
}
activeWorkers.values().each {
if (it.destination != destination)
it.cancel()
}
try {
Files.move(incompleteFile.toPath(), file.toPath(), StandardCopyOption.ATOMIC_MOVE)
} catch (AtomicMoveNotSupportedException e) {
@ -271,20 +326,20 @@ public class Downloader {
}
eventBus.publish(
new FileDownloadedEvent(
downloadedFile : new DownloadedFile(file, getInfoHash(), pieceSizePow2, successfulDestinations),
downloadedFile : new DownloadedFile(file.getCanonicalFile(), getInfoHash(), pieceSizePow2, successfulDestinations),
downloader : Downloader.this))
}
}
endpoint?.close()
}
}
int speed() {
if (currentSession == null)
return 0
currentSession.speed()
}
void cancel() {
downloadThread?.interrupt()
}
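
The speed() method above maintains a five-slot rolling window: each call subtracts the oldest sample from speedAvg, adds the newest, and advances a wrap-around index. A stripped-down sketch of that bookkeeping, with hypothetical per-call samples:

// minimal sketch of the rolling-average bookkeeping in Downloader.speed()
def speedArr = [0, 0, 0, 0, 0]
int speedPos = 0
int speedAvg = 0
[100, 120, 90, 0, 80].each { currSpeed ->               // hypothetical samples
    speedAvg -= speedArr[speedPos]                      // drop the oldest sample
    if (speedAvg < 0) speedAvg = 0                      // guard against drift
    speedAvg += currSpeed                               // add the newest sample
    speedArr[speedPos] = currSpeed
    speedPos = (speedPos + 1) % speedArr.size()
}
assert speedAvg == 100 + 120 + 90 + 0 + 80              // sum of the last five samples

In the class itself each sample is first divided by speedArr.size(), so speedAvg approximates a per-second average over the window.
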

View File

@ -20,32 +20,32 @@ class HashListSession {
private final String meB64
private final InfoHash infoHash
private final Endpoint endpoint
HashListSession(String meB64, InfoHash infoHash, Endpoint endpoint) {
this.meB64 = meB64
this.infoHash = infoHash
this.endpoint = endpoint
}
InfoHash request() throws IOException {
InputStream is = endpoint.getInputStream()
OutputStream os = endpoint.getOutputStream()
String root = Base64.encode(infoHash.getRoot())
os.write("HASHLIST $root\r\n".getBytes(StandardCharsets.US_ASCII))
os.write("X-Persona: $meB64\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
os.flush()
String code = readTillRN(is)
if (!code.startsWith("200"))
throw new IOException("unknown code $code")
// parse all headers
Set<String> headers = new HashSet<>()
String header
while((header = readTillRN(is)) != "" && headers.size() < Constants.MAX_HEADERS)
headers.add(header)
long receivedStart = -1
long receivedEnd = -1
for (String receivedHeader : headers) {
@ -58,10 +58,10 @@ class HashListSession {
receivedStart = Long.parseLong(group[0][1])
receivedEnd = Long.parseLong(group[0][2])
}
if (receivedStart != 0)
throw new IOException("hashlist started at $receivedStart")
byte[] hashList = new byte[receivedEnd]
ByteBuffer hashListBuf = ByteBuffer.wrap(hashList)
byte[] tmp = new byte[0x1 << 13]
@ -73,7 +73,7 @@ class HashListSession {
throw new IOException()
hashListBuf.put(tmp, 0, read)
}
InfoHash received = InfoHash.fromHashList(hashList)
if (received.getRoot() != infoHash.getRoot())
throw new IOException("fetched list doesn't match root")

View File

@ -5,11 +5,12 @@ class Pieces {
private final int nPieces
private final float ratio
private final Random random = new Random()
private final Map<Integer,Integer> partials = new HashMap<>()
Pieces(int nPieces) {
this(nPieces, 1.0f)
}
Pieces(int nPieces, float ratio) {
this.nPieces = nPieces
this.ratio = ratio
@ -17,27 +18,54 @@ class Pieces {
claimed = new BitSet(nPieces)
}
synchronized int claim() {
synchronized int[] claim() {
int claimedCardinality = claimed.cardinality()
if (claimedCardinality == nPieces)
return -1
if (claimedCardinality == nPieces) {
// steal
int downloadedCardinality = done.cardinality()
if (downloadedCardinality == nPieces)
return null
int rv = done.nextClearBit(0)
return [rv, partials.getOrDefault(rv, 0), 1]
}
// if the claimed fraction meets or exceeds the ratio, claim the next piece sequentially
if ( (1.0f * claimedCardinality) / nPieces > ratio) {
if ( (1.0f * claimedCardinality) / nPieces >= ratio) {
int rv = claimed.nextClearBit(0)
claimed.set(rv)
return rv
return [rv, partials.getOrDefault(rv, 0), 0]
}
while(true) {
int start = random.nextInt(nPieces)
if (claimed.get(start))
continue
claimed.set(start)
return start
return [start, partials.getOrDefault(start,0), 0]
}
}
synchronized int[] claim(Set<Integer> available) {
for (int i = done.nextSetBit(0); i >= 0; i = done.nextSetBit(i+1))
available.remove(i)
if (available.isEmpty())
return null
Set<Integer> availableCopy = new HashSet<>(available)
for (int i = claimed.nextSetBit(0); i >= 0; i = claimed.nextSetBit(i+1))
availableCopy.remove(i)
if (availableCopy.isEmpty()) {
// steal
int rv = available.first()
return [rv, partials.getOrDefault(rv, 0), 1]
}
List<Integer> toList = availableCopy.toList()
if (ratio > 0f)
Collections.shuffle(toList)
int rv = toList[0]
claimed.set(rv)
[rv, partials.getOrDefault(rv, 0), 0]
}
synchronized def getDownloaded() {
def rv = []
for (int i = done.nextSetBit(0); i >= 0; i = done.nextSetBit(i+1)) {
@ -45,21 +73,45 @@ class Pieces {
}
rv
}
synchronized void markDownloaded(int piece) {
done.set(piece)
claimed.set(piece)
partials.remove(piece)
}
synchronized void markPartial(int piece, int position) {
partials.put(piece, position)
}
synchronized void unclaim(int piece) {
claimed.clear(piece)
}
synchronized boolean isComplete() {
done.cardinality() == nPieces
}
synchronized int donePieces() {
done.cardinality()
}
synchronized boolean isDownloaded(int piece) {
done.get(piece)
}
synchronized void clearAll() {
done.clear()
claimed.clear()
partials.clear()
}
synchronized void write(PrintWriter writer) {
for (int i = done.nextSetBit(0); i >= 0; i = done.nextSetBit(i+1)) {
writer.println(i)
}
partials.each { piece, position ->
writer.println("$piece,$position")
}
}
}
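
A short usage sketch of the Pieces API above. claim() returns a triple [piece, startPosition, stealFlag]; a ratio of 0 selects pieces sequentially. This assumes the Pieces class above is on the classpath; the numbers are illustrative.

// illustrative use of the Pieces class above (assumed on the classpath)
def pieces = new Pieces(4, 0f)            // 4 pieces, ratio 0 = sequential selection
int[] c = pieces.claim()                  // -> [0, 0, 0]: piece 0, from byte 0, not stolen
assert c[0] == 0 && c[1] == 0 && c[2] == 0
pieces.markPartial(0, 8192)               // while incomplete, write() records this as "0,8192"
pieces.markDownloaded(0)                  // piece finished; the partial entry is dropped
assert pieces.donePieces() == 1
assert !pieces.isComplete()
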

View File

@ -0,0 +1,10 @@
package com.muwire.core.download
import com.muwire.core.Event
import com.muwire.core.InfoHash
import com.muwire.core.Persona
class SourceDiscoveredEvent extends Event {
InfoHash infoHash
Persona source
}

View File

@ -6,8 +6,9 @@ import com.muwire.core.search.UIResultEvent
import net.i2p.data.Destination
class UIDownloadEvent extends Event {
UIResultEvent[] result
Set<Destination> sources
File target
boolean sequential
}

View File

@ -0,0 +1,6 @@
package com.muwire.core.download
import com.muwire.core.Event
class UIDownloadPausedEvent extends Event {
}

View File

@ -0,0 +1,6 @@
package com.muwire.core.download
import com.muwire.core.Event
class UIDownloadResumedEvent extends Event {
}

View File

@ -0,0 +1,7 @@
package com.muwire.core.files
import com.muwire.core.Event
class DirectoryUnsharedEvent extends Event {
File directory
}

View File

@ -20,9 +20,9 @@ import net.i2p.util.SystemVersion
@Log
class DirectoryWatcher {
private static final long WAIT_TIME = 1000
private static final WatchEvent.Kind[] kinds
static {
if (SystemVersion.isMac())
@ -30,14 +30,15 @@ class DirectoryWatcher {
else
kinds = [ENTRY_CREATE, ENTRY_MODIFY, ENTRY_DELETE]
}
private final EventBus eventBus
private final FileManager fileManager
private final Thread watcherThread, publisherThread
private final Map<File, Long> waitingFiles = new ConcurrentHashMap<>()
private final Map<File, WatchKey> watchedDirectories = new ConcurrentHashMap<>()
private WatchService watchService
private volatile boolean shutdown
DirectoryWatcher(EventBus eventBus, FileManager fileManager) {
this.eventBus = eventBus
this.fileManager = fileManager
@ -46,26 +47,32 @@ class DirectoryWatcher {
this.publisherThread = new Thread({publish()} as Runnable, "watched-files-publisher")
publisherThread.setDaemon(true)
}
void start() {
void onAllFilesLoadedEvent(AllFilesLoadedEvent e) {
watchService = FileSystems.getDefault().newWatchService()
watcherThread.start()
publisherThread.start()
}
void stop() {
shutdown = true
watcherThread.interrupt()
publisherThread.interrupt()
watchService.close()
watcherThread?.interrupt()
publisherThread?.interrupt()
watchService?.close()
}
void onFileSharedEvent(FileSharedEvent e) {
if (!e.file.isDirectory())
return
Path path = e.file.getCanonicalFile().toPath()
path.register(watchService, kinds)
WatchKey wk = path.register(watchService, kinds)
watchedDirectories.put(e.file, wk)
}
void onDirectoryUnsharedEvent(DirectoryUnsharedEvent e) {
WatchKey wk = watchedDirectories.remove(e.directory)
wk?.cancel()
}
private void watch() {
@ -86,7 +93,7 @@ class DirectoryWatcher {
throw e
}
}
private void processCreated(Path parent, Path path) {
File f= join(parent, path)
@ -96,13 +103,13 @@ class DirectoryWatcher {
else
waitingFiles.put(f, System.currentTimeMillis())
}
private void processModified(Path parent, Path path) {
File f = join(parent, path)
log.fine("modified entry $f")
waitingFiles.put(f, System.currentTimeMillis())
}
private void processDeleted(Path parent, Path path) {
File f = join(parent, path)
log.fine("deleted entry $f")
@ -110,12 +117,12 @@ class DirectoryWatcher {
if (sf != null)
eventBus.publish(new FileUnsharedEvent(unsharedFile : sf))
}
private static File join(Path parent, Path path) {
File parentFile = parent.toFile().getCanonicalFile()
new File(parentFile, path.toFile().getName())
new File(parentFile, path.toFile().getName()).getCanonicalFile()
}
private void publish() {
try {
while(!shutdown) {

View File

@ -8,5 +8,5 @@ import net.i2p.data.Destination
class FileDownloadedEvent extends Event {
Downloader downloader
DownloadedFile downloadedFile
DownloadedFile downloadedFile
}

View File

@ -5,6 +5,12 @@ import com.muwire.core.SharedFile
class FileHashedEvent extends Event {
SharedFile sharedFile
String error
SharedFile sharedFile
String error
@Override
public String toString() {
super.toString() + " sharedFile " + sharedFile?.file.getAbsolutePath() + " error: $error"
}
}

View File

@ -1,6 +1,7 @@
package com.muwire.core.files
import com.muwire.core.InfoHash
import com.muwire.core.util.DataUtil
import net.i2p.data.Base64
@ -12,72 +13,75 @@ import java.security.NoSuchAlgorithmException
class FileHasher {
/** max size of shared file is 128 GB */
public static final long MAX_SIZE = 0x1L << 37
/**
* @param size of the file to be shared
* @return the size of each piece in power of 2
*/
static int getPieceSize(long size) {
if (size <= 0x1 << 30)
return 17
for (int i = 31; i <= 37; i++) {
if (size <= 0x1L << i) {
return i-13
}
}
throw new IllegalArgumentException("File too large $size")
}
final MessageDigest digest
FileHasher() {
try {
digest = MessageDigest.getInstance("SHA-256")
} catch (NoSuchAlgorithmException impossible) {
digest = null
System.exit(1)
}
}
InfoHash hashFile(File file) {
final long length = file.length()
final int size = 0x1 << getPieceSize(length)
int numPieces = (int) (length / size)
if (numPieces * size < length)
numPieces++
def output = new ByteArrayOutputStream()
RandomAccessFile raf = new RandomAccessFile(file, "r")
try {
MappedByteBuffer buf
for (int i = 0; i < numPieces - 1; i++) {
buf = raf.getChannel().map(MapMode.READ_ONLY, ((long)size) * i, size)
digest.update buf
output.write(digest.digest(), 0, 32)
}
def lastPieceLength = length - (numPieces - 1) * ((long)size)
buf = raf.getChannel().map(MapMode.READ_ONLY, length - lastPieceLength, lastPieceLength)
digest.update buf
output.write(digest.digest(), 0, 32)
} finally {
raf.close()
}
byte [] hashList = output.toByteArray()
InfoHash.fromHashList(hashList)
}
/** max size of shared file is 128 GB */
public static final long MAX_SIZE = 0x1L << 37
/**
* @param size of the file to be shared
* @return the size of each piece in power of 2
* piece size is minimum 128 KBytes and maximum 16 MBytes, in power-of-2 steps (2^17 - 2^24)
* a file can have at most 8192 pieces
*/
static int getPieceSize(long size) {
if (size <= 0x1 << 30)
return 17
for (int i = 31; i <= 37; i++) {
if (size <= 0x1L << i) {
return i-13
}
}
throw new IllegalArgumentException("File too large $size")
}
final MessageDigest digest
FileHasher() {
try {
digest = MessageDigest.getInstance("SHA-256")
} catch (NoSuchAlgorithmException impossible) {
digest = null
System.exit(1)
}
}
InfoHash hashFile(File file) {
final long length = file.length()
final int size = 0x1 << getPieceSize(length)
int numPieces = (int) (length / size)
if (numPieces * size < length)
numPieces++
def output = new ByteArrayOutputStream()
RandomAccessFile raf = new RandomAccessFile(file, "r")
try {
MappedByteBuffer buf
for (int i = 0; i < numPieces - 1; i++) {
buf = raf.getChannel().map(MapMode.READ_ONLY, ((long)size) * i, size)
digest.update buf
DataUtil.tryUnmap(buf)
output.write(digest.digest(), 0, 32)
}
def lastPieceLength = length - (numPieces - 1) * ((long)size)
buf = raf.getChannel().map(MapMode.READ_ONLY, length - lastPieceLength, lastPieceLength)
digest.update buf
output.write(digest.digest(), 0, 32)
} finally {
raf.close()
}
byte [] hashList = output.toByteArray()
InfoHash.fromHashList(hashList)
}
public static void main(String[] args) {
if (args.length != 1) {
println "This utility computes an infohash of a file"
println "Pass absolute path to a file as an argument"
System.exit(1)
}
def file = new File(args[0])
file = file.getAbsoluteFile()
def hasher = new FileHasher()
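
A few worked values for getPieceSize() above: the exponent grows with file size so that no file needs more than 8192 pieces. The asserts assume com.muwire.core.files.FileHasher is on the classpath.

import com.muwire.core.files.FileHasher

// illustrative inputs for getPieceSize() above
assert FileHasher.getPieceSize(1L << 30) == 17   // 1 GB  -> 128 KB pieces (8192 of them)
assert FileHasher.getPieceSize(1L << 33) == 20   // 8 GB  -> 1 MB pieces
assert FileHasher.getPieceSize(1L << 37) == 24   // 128 GB (MAX_SIZE) -> 16 MB pieces
// (1L << 37) / (1L << 24) == 8192, the per-file ceiling mentioned in the javadoc
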

View File

@ -0,0 +1,15 @@
package com.muwire.core.files
import com.muwire.core.Event
import com.muwire.core.SharedFile
class FileHashingEvent extends Event {
File hashingFile
@Override
public String toString() {
super.toString() + " hashingFile " + hashingFile.getAbsolutePath()
}
}

View File

@ -5,5 +5,5 @@ import com.muwire.core.SharedFile
class FileLoadedEvent extends Event {
SharedFile loadedFile
SharedFile loadedFile
}

View File

@ -15,124 +15,136 @@ import groovy.util.logging.Log
class FileManager {
final EventBus eventBus
final EventBus eventBus
final MuWireSettings settings
final Map<InfoHash, Set<SharedFile>> rootToFiles = Collections.synchronizedMap(new HashMap<>())
final Map<File, SharedFile> fileToSharedFile = Collections.synchronizedMap(new HashMap<>())
final Map<String, Set<File>> nameToFiles = new HashMap<>()
final SearchIndex index = new SearchIndex()
FileManager(EventBus eventBus, MuWireSettings settings) {
final Map<InfoHash, Set<SharedFile>> rootToFiles = Collections.synchronizedMap(new HashMap<>())
final Map<File, SharedFile> fileToSharedFile = Collections.synchronizedMap(new HashMap<>())
final Map<String, Set<File>> nameToFiles = new HashMap<>()
final SearchIndex index = new SearchIndex()
FileManager(EventBus eventBus, MuWireSettings settings) {
this.settings = settings
this.eventBus = eventBus
}
this.eventBus = eventBus
}
void onFileHashedEvent(FileHashedEvent e) {
if (e.sharedFile != null)
addToIndex(e.sharedFile)
}
void onFileLoadedEvent(FileLoadedEvent e) {
addToIndex(e.loadedFile)
}
void onFileLoadedEvent(FileLoadedEvent e) {
addToIndex(e.loadedFile)
}
void onFileDownloadedEvent(FileDownloadedEvent e) {
if (settings.shareDownloadedFiles) {
addToIndex(e.downloadedFile)
}
}
private void addToIndex(SharedFile sf) {
private void addToIndex(SharedFile sf) {
log.info("Adding shared file " + sf.getFile())
InfoHash infoHash = sf.getInfoHash()
Set<SharedFile> existing = rootToFiles.get(infoHash)
if (existing == null) {
InfoHash infoHash = sf.getInfoHash()
Set<SharedFile> existing = rootToFiles.get(infoHash)
if (existing == null) {
log.info("adding new root")
existing = new HashSet<>()
rootToFiles.put(infoHash, existing);
}
existing.add(sf)
fileToSharedFile.put(sf.file, sf)
String name = sf.getFile().getName()
Set<File> existingFiles = nameToFiles.get(name)
if (existingFiles == null) {
existingFiles = new HashSet<>()
nameToFiles.put(name, existingFiles)
}
existingFiles.add(sf.getFile())
index.add(name)
}
void onFileUnsharedEvent(FileUnsharedEvent e) {
SharedFile sf = e.unsharedFile
InfoHash infoHash = sf.getInfoHash()
Set<SharedFile> existing = rootToFiles.get(infoHash)
if (existing != null) {
existing.remove(sf)
if (existing.isEmpty()) {
rootToFiles.remove(infoHash)
}
}
fileToSharedFile.remove(sf.file)
String name = sf.getFile().getName()
Set<File> existingFiles = nameToFiles.get(name)
if (existingFiles != null) {
existingFiles.remove(sf.file)
if (existingFiles.isEmpty()) {
nameToFiles.remove(name)
}
}
index.remove(name)
}
Map<File, SharedFile> getSharedFiles() {
existing = new HashSet<>()
rootToFiles.put(infoHash, existing);
}
existing.add(sf)
fileToSharedFile.put(sf.file, sf)
String name = sf.getFile().getName()
Set<File> existingFiles = nameToFiles.get(name)
if (existingFiles == null) {
existingFiles = new HashSet<>()
nameToFiles.put(name, existingFiles)
}
existingFiles.add(sf.getFile())
index.add(name)
}
void onFileUnsharedEvent(FileUnsharedEvent e) {
SharedFile sf = e.unsharedFile
InfoHash infoHash = sf.getInfoHash()
Set<SharedFile> existing = rootToFiles.get(infoHash)
if (existing != null) {
existing.remove(sf)
if (existing.isEmpty()) {
rootToFiles.remove(infoHash)
}
}
fileToSharedFile.remove(sf.file)
String name = sf.getFile().getName()
Set<File> existingFiles = nameToFiles.get(name)
if (existingFiles != null) {
existingFiles.remove(sf.file)
if (existingFiles.isEmpty()) {
nameToFiles.remove(name)
}
}
index.remove(name)
}
Map<File, SharedFile> getSharedFiles() {
synchronized(fileToSharedFile) {
return new HashMap<>(fileToSharedFile)
}
}
}
Set<SharedFile> getSharedFiles(byte []root) {
return rootToFiles.get(new InfoHash(root))
}
void onSearchEvent(SearchEvent e) {
// hash takes precedence
ResultsEvent re = null
if (e.searchHash != null) {
void onSearchEvent(SearchEvent e) {
// hash takes precedence
ResultsEvent re = null
if (e.searchHash != null) {
Set<SharedFile> found
found = rootToFiles.get new InfoHash(e.searchHash)
found = filter(found, e.oobInfohash)
if (found != null && !found.isEmpty())
re = new ResultsEvent(results: found.asList(), uuid: e.uuid, searchEvent: e)
} else {
def names = index.search e.searchTerms
Set<File> files = new HashSet<>()
names.each { files.addAll nameToFiles.getOrDefault(it, []) }
Set<SharedFile> sharedFiles = new HashSet<>()
files.each { sharedFiles.add fileToSharedFile[it] }
if (found != null && !found.isEmpty())
re = new ResultsEvent(results: found.asList(), uuid: e.uuid, searchEvent: e)
} else {
def names = index.search e.searchTerms
Set<File> files = new HashSet<>()
names.each { files.addAll nameToFiles.getOrDefault(it, []) }
Set<SharedFile> sharedFiles = new HashSet<>()
files.each { sharedFiles.add fileToSharedFile[it] }
files = filter(sharedFiles, e.oobInfohash)
if (!sharedFiles.isEmpty())
re = new ResultsEvent(results: sharedFiles.asList(), uuid: e.uuid, searchEvent: e)
}
if (re != null)
eventBus.publish(re)
}
if (!sharedFiles.isEmpty())
re = new ResultsEvent(results: sharedFiles.asList(), uuid: e.uuid, searchEvent: e)
}
if (re != null)
eventBus.publish(re)
}
private static Set<SharedFile> filter(Set<SharedFile> files, boolean oob) {
if (!oob)
return files
Set<SharedFile> rv = new HashSet<>()
files.each {
files.each {
if (it.getPieceSize() != 0)
rv.add(it)
}
rv
}
void onDirectoryUnsharedEvent(DirectoryUnsharedEvent e) {
e.directory.listFiles().each {
if (it.isDirectory())
eventBus.publish(new DirectoryUnsharedEvent(directory : it))
else {
SharedFile sf = fileToSharedFile.get(it)
if (sf != null)
eventBus.publish(new FileUnsharedEvent(unsharedFile : sf))
}
}
}
}
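
onDirectoryUnsharedEvent() above unwinds a shared directory recursively by re-publishing events rather than walking the tree itself. A hedged sketch of the resulting cascade for a hypothetical tree; eventBus stands in for the real wiring.

// hypothetical tree: /shared/music/{a.mp3, live/b.mp3}
// publishing the event below cascades roughly as:
//   FileUnsharedEvent(unsharedFile: <SharedFile for /shared/music/a.mp3>)
//   DirectoryUnsharedEvent(directory: /shared/music/live)
//     FileUnsharedEvent(unsharedFile: <SharedFile for /shared/music/live/b.mp3>)
eventBus.publish(new DirectoryUnsharedEvent(directory: new File("/shared/music")))
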

View File

@ -4,5 +4,10 @@ import com.muwire.core.Event
class FileSharedEvent extends Event {
File file
File file
@Override
public String toString() {
return super.toString() + " file: "+file.getAbsolutePath()
}
}

View File

@ -4,5 +4,5 @@ import com.muwire.core.Event
import com.muwire.core.SharedFile
class FileUnsharedEvent extends Event {
SharedFile unsharedFile
SharedFile unsharedFile
}

View File

@ -8,40 +8,41 @@ import com.muwire.core.SharedFile
class HasherService {
final FileHasher hasher
final EventBus eventBus
final FileHasher hasher
final EventBus eventBus
final FileManager fileManager
Executor executor
HasherService(FileHasher hasher, EventBus eventBus, FileManager fileManager) {
this.hasher = hasher
this.eventBus = eventBus
Executor executor
HasherService(FileHasher hasher, EventBus eventBus, FileManager fileManager) {
this.hasher = hasher
this.eventBus = eventBus
this.fileManager = fileManager
}
void start() {
executor = Executors.newSingleThreadExecutor()
}
void onFileSharedEvent(FileSharedEvent evt) {
if (fileManager.fileToSharedFile.containsKey(evt.file))
}
void start() {
executor = Executors.newSingleThreadExecutor()
}
void onFileSharedEvent(FileSharedEvent evt) {
if (fileManager.fileToSharedFile.containsKey(evt.file.getCanonicalFile()))
return
executor.execute( { -> process(evt.file) } as Runnable)
}
private void process(File f) {
f = f.getCanonicalFile()
if (f.isDirectory()) {
f.listFiles().each {eventBus.publish new FileSharedEvent(file: it) }
} else {
if (f.length() == 0) {
eventBus.publish new FileHashedEvent(error: "Not sharing empty file $f")
} else if (f.length() > FileHasher.MAX_SIZE) {
eventBus.publish new FileHashedEvent(error: "$f is too large to be shared ${f.length()}")
} else {
def hash = hasher.hashFile f
eventBus.publish new FileHashedEvent(sharedFile: new SharedFile(f, hash, FileHasher.getPieceSize(f.length())))
}
}
}
executor.execute( { -> process(evt.file) } as Runnable)
}
private void process(File f) {
f = f.getCanonicalFile()
if (f.isDirectory()) {
f.listFiles().each {eventBus.publish new FileSharedEvent(file: it) }
} else {
if (f.length() == 0) {
eventBus.publish new FileHashedEvent(error: "Not sharing empty file $f")
} else if (f.length() > FileHasher.MAX_SIZE) {
eventBus.publish new FileHashedEvent(error: "$f is too large to be shared ${f.length()}")
} else {
eventBus.publish new FileHashingEvent(hashingFile: f)
def hash = hasher.hashFile f
eventBus.publish new FileHashedEvent(sharedFile: new SharedFile(f, hash, FileHasher.getPieceSize(f.length())))
}
}
}
}
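
A hedged sketch of the event flow when a single file is shared, based on onFileSharedEvent() and process() above; the path is hypothetical and eventBus stands for the application's bus.

// hypothetical share of one file
eventBus.publish(new FileSharedEvent(file: new File("/shared/track.mp3")))
// HasherService then emits, in order:
//   FileHashingEvent(hashingFile: /shared/track.mp3)                       // hashing started
//   FileHashedEvent(sharedFile: new SharedFile(file, hash, pieceSizePow2)) // hashing done
// empty or oversize files instead short-circuit to FileHashedEvent(error: "...")
// directories are expanded into one FileSharedEvent per child
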

View File

@ -23,135 +23,136 @@ import net.i2p.data.Destination
@Log
class PersisterService extends Service {
final File location
final EventBus listener
final int interval
final Timer timer
final FileManager fileManager
PersisterService(File location, EventBus listener, int interval, FileManager fileManager) {
this.location = location
this.listener = listener
this.interval = interval
this.fileManager = fileManager
timer = new Timer("file persister", true)
}
void stop() {
timer.cancel()
}
final File location
final EventBus listener
final int interval
final Timer timer
final FileManager fileManager
PersisterService(File location, EventBus listener, int interval, FileManager fileManager) {
this.location = location
this.listener = listener
this.interval = interval
this.fileManager = fileManager
timer = new Timer("file persister", true)
}
void stop() {
timer.cancel()
}
void onUILoadedEvent(UILoadedEvent e) {
timer.schedule({load()} as TimerTask, 1)
}
void load() {
if (location.exists() && location.isFile()) {
def slurper = new JsonSlurper()
try {
location.eachLine {
if (it.trim().length() > 0) {
def parsed = slurper.parseText it
def event = fromJson parsed
if (event != null) {
void load() {
Thread.currentThread().setPriority(Thread.MIN_PRIORITY)
if (location.exists() && location.isFile()) {
int loaded = 0
def slurper = new JsonSlurper()
try {
location.eachLine {
if (it.trim().length() > 0) {
def parsed = slurper.parseText it
def event = fromJson parsed
if (event != null) {
log.fine("loaded file $event.loadedFile.file")
listener.publish event
}
}
}
listener.publish event
loaded++
if (loaded % 10 == 0)
Thread.sleep(20)
}
}
}
listener.publish(new AllFilesLoadedEvent())
} catch (IllegalArgumentException|NumberFormatException e) {
} catch (IllegalArgumentException|NumberFormatException e) {
log.log(Level.WARNING, "couldn't load files",e)
}
} else {
}
} else {
listener.publish(new AllFilesLoadedEvent())
}
timer.schedule({persistFiles()} as TimerTask, 0, interval)
loaded = true
}
private static FileLoadedEvent fromJson(def json) {
if (json.file == null || json.length == null || json.infoHash == null || json.hashList == null)
throw new IllegalArgumentException()
if (!(json.hashList instanceof List))
throw new IllegalArgumentException()
def file = new File(DataUtil.readi18nString(Base64.decode(json.file)))
file = file.getCanonicalFile()
if (!file.exists() || file.isDirectory())
return null
long length = Long.valueOf(json.length)
if (length != file.length())
return null
List hashList = (List) json.hashList
ByteArrayOutputStream baos = new ByteArrayOutputStream()
hashList.each {
byte [] hash = Base64.decode it.toString()
if (hash == null)
throw new IllegalArgumentException()
baos.write hash
}
byte[] hashListBytes = baos.toByteArray()
InfoHash ih = InfoHash.fromHashList(hashListBytes)
byte [] root = Base64.decode(json.infoHash.toString())
if (root == null)
throw new IllegalArgumentException()
if (!Arrays.equals(root, ih.getRoot()))
return null
timer.schedule({persistFiles()} as TimerTask, 0, interval)
loaded = true
}
private static FileLoadedEvent fromJson(def json) {
if (json.file == null || json.length == null || json.infoHash == null || json.hashList == null)
throw new IllegalArgumentException()
if (!(json.hashList instanceof List))
throw new IllegalArgumentException()
def file = new File(DataUtil.readi18nString(Base64.decode(json.file)))
file = file.getCanonicalFile()
if (!file.exists() || file.isDirectory())
return null
long length = Long.valueOf(json.length)
if (length != file.length())
return null
List hashList = (List) json.hashList
ByteArrayOutputStream baos = new ByteArrayOutputStream()
hashList.each {
byte [] hash = Base64.decode it.toString()
if (hash == null)
throw new IllegalArgumentException()
baos.write hash
}
byte[] hashListBytes = baos.toByteArray()
InfoHash ih = InfoHash.fromHashList(hashListBytes)
byte [] root = Base64.decode(json.infoHash.toString())
if (root == null)
throw new IllegalArgumentException()
if (!Arrays.equals(root, ih.getRoot()))
return null
int pieceSize = 0
if (json.pieceSize != null)
pieceSize = json.pieceSize
if (json.sources != null) {
List sources = (List)json.sources
Set<Destination> sourceSet = sources.stream().map({d -> new Destination(d.toString())}).collect Collectors.toSet()
DownloadedFile df = new DownloadedFile(file, ih, pieceSize, sourceSet)
return new FileLoadedEvent(loadedFile : df)
}
SharedFile sf = new SharedFile(file, ih, pieceSize)
return new FileLoadedEvent(loadedFile: sf)
}
private void persistFiles() {
def sharedFiles = fileManager.getSharedFiles()
List sources = (List)json.sources
Set<Destination> sourceSet = sources.stream().map({d -> new Destination(d.toString())}).collect Collectors.toSet()
DownloadedFile df = new DownloadedFile(file, ih, pieceSize, sourceSet)
return new FileLoadedEvent(loadedFile : df)
}
SharedFile sf = new SharedFile(file, ih, pieceSize)
return new FileLoadedEvent(loadedFile: sf)
}
private void persistFiles() {
def sharedFiles = fileManager.getSharedFiles()
File tmp = File.createTempFile("muwire-files", "tmp")
tmp.deleteOnExit()
tmp.withPrintWriter { writer ->
sharedFiles.each { k, v ->
def json = toJson(k,v)
json = JsonOutput.toJson(json)
writer.println json
}
}
tmp.withPrintWriter { writer ->
sharedFiles.each { k, v ->
def json = toJson(k,v)
json = JsonOutput.toJson(json)
writer.println json
}
}
Files.copy(tmp.toPath(), location.toPath(), StandardCopyOption.REPLACE_EXISTING)
tmp.delete()
}
private def toJson(File f, SharedFile sf) {
def json = [:]
json.file = Base64.encode DataUtil.encodei18nString(f.getCanonicalFile().toString())
json.length = f.length()
InfoHash ih = sf.getInfoHash()
json.infoHash = Base64.encode ih.getRoot()
}
private def toJson(File f, SharedFile sf) {
def json = [:]
json.file = sf.getB64EncodedFileName()
json.length = sf.getCachedLength()
InfoHash ih = sf.getInfoHash()
json.infoHash = sf.getB64EncodedHashRoot()
json.pieceSize = sf.getPieceSize()
byte [] tmp = new byte [32]
json.hashList = []
for (int i = 0;i < ih.getHashList().length / 32; i++) {
System.arraycopy(ih.getHashList(), i * 32, tmp, 0, 32)
json.hashList.add Base64.encode(tmp)
}
if (sf instanceof DownloadedFile) {
json.sources = sf.sources.stream().map( {d -> d.toBase64()}).collect(Collectors.toList())
}
json
}
json.hashList = sf.getB64EncodedHashList()
if (sf instanceof DownloadedFile) {
json.sources = sf.sources.stream().map( {d -> d.toBase64()}).collect(Collectors.toList())
}
json
}
}
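
persistFiles() above writes one JSON object per shared file. A minimal sketch of the map built by toJson(), with placeholder values where the real ones are Base64-encoded data:

import groovy.json.JsonOutput

// placeholder values; the real ones come from the SharedFile accessors named in comments
def json = [:]
json.file      = "<base64 i18n path>"       // sf.getB64EncodedFileName()
json.length    = 1048576                    // sf.getCachedLength()
json.infoHash  = "<base64 32-byte root>"    // sf.getB64EncodedHashRoot()
json.pieceSize = 17                         // sf.getPieceSize()
json.hashList  = ["<base64 piece hash>"]    // sf.getB64EncodedHashList()
// DownloadedFile instances additionally carry json.sources = [destination base64 strings]
println JsonOutput.toJson(json)             // one line per file in the persisted location
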

View File

@ -17,177 +17,177 @@ import net.i2p.data.Destination
@Log
class CacheClient {
private static final int CRAWLER_RETURN = 10
final EventBus eventBus
final HostCache cache
final ConnectionManager manager
final I2PSession session
final long interval
final MuWireSettings settings
final Timer timer
public CacheClient(EventBus eventBus, HostCache cache,
ConnectionManager manager, I2PSession session,
MuWireSettings settings, long interval) {
this.eventBus = eventBus
this.cache = cache
this.manager = manager
this.session = session
this.settings = settings
this.interval = interval
this.timer = new Timer("hostcache-client",true)
}
void start() {
session.addMuxedSessionListener(new Listener(), I2PSession.PROTO_DATAGRAM, 0)
timer.schedule({queryIfNeeded()} as TimerTask, 1, interval)
}
void stop() {
timer.cancel()
}
private void queryIfNeeded() {
if (!manager.getConnections().isEmpty())
return
if (!cache.getHosts(1).isEmpty())
return
log.info "Will query hostcaches"
def ping = [type: "Ping", version: 1, leaf: settings.isLeaf()]
ping = JsonOutput.toJson(ping)
def maker = new I2PDatagramMaker(session)
ping = maker.makeI2PDatagram(ping.bytes)
def options = new SendMessageOptions()
options.setSendLeaseSet(true)
CacheServers.getCacheServers().each {
log.info "Querying hostcache ${it.toBase32()}"
session.sendMessage(it, ping, 0, ping.length, I2PSession.PROTO_DATAGRAM, 1, 0, options)
}
}
class Listener implements I2PSessionMuxedListener {
private final JsonSlurper slurper = new JsonSlurper()
private static final int CRAWLER_RETURN = 10
@Override
public void messageAvailable(I2PSession session, int msgId, long size) {
}
final EventBus eventBus
final HostCache cache
final ConnectionManager manager
final I2PSession session
final long interval
final MuWireSettings settings
final Timer timer
@Override
public void messageAvailable(I2PSession session, int msgId, long size, int proto, int fromport, int toport) {
if (proto != I2PSession.PROTO_DATAGRAM) {
log.warning "Received unexpected protocol $proto"
return
}
def payload = session.receiveMessage(msgId)
def dissector = new I2PDatagramDissector()
try {
dissector.loadI2PDatagram(payload)
def sender = dissector.getSender()
log.info("Received something from ${sender.toBase32()}")
payload = dissector.getPayload()
payload = slurper.parse(payload)
if (payload.type == null) {
log.warning("type missing")
return
}
switch(payload.type) {
case "Pong" : handlePong(sender, payload); break
case "CrawlerPing": handleCrawlerPing(session, sender, payload); break
default : log.warning("unknown type ${payload.type}")
}
} catch (Exception e) {
log.warning("Invalid datagram $e")
}
}
public CacheClient(EventBus eventBus, HostCache cache,
ConnectionManager manager, I2PSession session,
MuWireSettings settings, long interval) {
this.eventBus = eventBus
this.cache = cache
this.manager = manager
this.session = session
this.settings = settings
this.interval = interval
this.timer = new Timer("hostcache-client",true)
}
@Override
public void reportAbuse(I2PSession session, int severity) {
}
void start() {
session.addMuxedSessionListener(new Listener(), I2PSession.PROTO_DATAGRAM, 0)
timer.schedule({queryIfNeeded()} as TimerTask, 1, interval)
}
@Override
public void disconnected(I2PSession session) {
log.severe "I2P session disconnected"
}
void stop() {
timer.cancel()
}
@Override
public void errorOccurred(I2PSession session, String message, Throwable error) {
log.severe "I2P error occurred $message $error"
}
}
private void handlePong(Destination from, def pong) {
if (!CacheServers.isRegistered(from)) {
log.warning("received pong from non-registered destination")
return
}
if (pong.pongs == null) {
log.warning("malformed pong - no pongs")
return
}
pong.pongs.asList().each {
Destination dest = new Destination(it)
if (!session.getMyDestination().equals(dest))
eventBus.publish(new HostDiscoveredEvent(destination: dest, fromHostcache : true))
}
}
private void handleCrawlerPing(I2PSession session, Destination from, def ping) {
if (settings.isLeaf()) {
log.warning("Received crawler ping but I'm a leaf")
return
}
switch(settings.getCrawlerResponse()) {
case CrawlerResponse.NONE:
log.info("Responding to crawlers is disabled by user")
break
case CrawlerResponse.ALL:
respondToCrawler(session, from, ping)
break;
case CrawlerResponse.REGISTERED:
if (CacheServers.isRegistered(from))
respondToCrawler(session, from, ping)
else
log.warning("Ignoring crawler ping from non-registered crawler")
break
}
}
private void respondToCrawler(I2PSession session, Destination from, def ping) {
log.info "responding to crawler ping"
def neighbors = manager.getConnections().collect { c -> c.endpoint.destination.toBase64() }
Collections.shuffle(neighbors)
if (neighbors.size() > CRAWLER_RETURN)
neighbors = neighbors[0..CRAWLER_RETURN - 1]
def upManager = (UltrapeerConnectionManager) manager;
def pong = [:]
pong.peers = neighbors
pong.uuid = ping.uuid
pong.type = "CrawlerPong"
pong.version = 1
pong.leafSlots = upManager.hasLeafSlots()
pong.peerSlots = upManager.hasPeerSlots()
pong = JsonOutput.toJson(pong)
def maker = new I2PDatagramMaker(session)
pong = maker.makeI2PDatagram(pong.bytes)
session.sendMessage(from, pong, I2PSession.PROTO_DATAGRAM, 0, 0)
}
private void queryIfNeeded() {
if (!manager.getConnections().isEmpty())
return
if (!cache.getHosts(1).isEmpty())
return
log.info "Will query hostcaches"
def ping = [type: "Ping", version: 1, leaf: settings.isLeaf()]
ping = JsonOutput.toJson(ping)
def maker = new I2PDatagramMaker(session)
ping = maker.makeI2PDatagram(ping.bytes)
def options = new SendMessageOptions()
options.setSendLeaseSet(true)
CacheServers.getCacheServers().each {
log.info "Querying hostcache ${it.toBase32()}"
session.sendMessage(it, ping, 0, ping.length, I2PSession.PROTO_DATAGRAM, 1, 0, options)
}
}
class Listener implements I2PSessionMuxedListener {
private final JsonSlurper slurper = new JsonSlurper()
@Override
public void messageAvailable(I2PSession session, int msgId, long size) {
}
@Override
public void messageAvailable(I2PSession session, int msgId, long size, int proto, int fromport, int toport) {
if (proto != I2PSession.PROTO_DATAGRAM) {
log.warning "Received unexpected protocol $proto"
return
}
def payload = session.receiveMessage(msgId)
def dissector = new I2PDatagramDissector()
try {
dissector.loadI2PDatagram(payload)
def sender = dissector.getSender()
log.info("Received something from ${sender.toBase32()}")
payload = dissector.getPayload()
payload = slurper.parse(payload)
if (payload.type == null) {
log.warning("type missing")
return
}
switch(payload.type) {
case "Pong" : handlePong(sender, payload); break
case "CrawlerPing": handleCrawlerPing(session, sender, payload); break
default : log.warning("unknown type ${payload.type}")
}
} catch (Exception e) {
log.warning("Invalid datagram $e")
}
}
@Override
public void reportAbuse(I2PSession session, int severity) {
}
@Override
public void disconnected(I2PSession session) {
log.severe "I2P session disconnected"
}
@Override
public void errorOccurred(I2PSession session, String message, Throwable error) {
log.severe "I2P error occurred $message $error"
}
}
private void handlePong(Destination from, def pong) {
if (!CacheServers.isRegistered(from)) {
log.warning("received pong from non-registered destination")
return
}
if (pong.pongs == null) {
log.warning("malformed pong - no pongs")
return
}
pong.pongs.asList().each {
Destination dest = new Destination(it)
if (!session.getMyDestination().equals(dest))
eventBus.publish(new HostDiscoveredEvent(destination: dest, fromHostcache : true))
}
}
private void handleCrawlerPing(I2PSession session, Destination from, def ping) {
if (settings.isLeaf()) {
log.warning("Received crawler ping but I'm a leaf")
return
}
switch(settings.getCrawlerResponse()) {
case CrawlerResponse.NONE:
log.info("Responding to crawlers is disabled by user")
break
case CrawlerResponse.ALL:
respondToCrawler(session, from, ping)
break;
case CrawlerResponse.REGISTERED:
if (CacheServers.isRegistered(from))
respondToCrawler(session, from, ping)
else
log.warning("Ignoring crawler ping from non-registered crawler")
break
}
}
private void respondToCrawler(I2PSession session, Destination from, def ping) {
log.info "responding to crawler ping"
def neighbors = manager.getConnections().collect { c -> c.endpoint.destination.toBase64() }
Collections.shuffle(neighbors)
if (neighbors.size() > CRAWLER_RETURN)
neighbors = neighbors[0..CRAWLER_RETURN - 1]
def upManager = (UltrapeerConnectionManager) manager;
def pong = [:]
pong.peers = neighbors
pong.uuid = ping.uuid
pong.type = "CrawlerPong"
pong.version = 1
pong.leafSlots = upManager.hasLeafSlots()
pong.peerSlots = upManager.hasPeerSlots()
pong = JsonOutput.toJson(pong)
def maker = new I2PDatagramMaker(session)
pong = maker.makeI2PDatagram(pong.bytes)
session.sendMessage(from, pong, I2PSession.PROTO_DATAGRAM, 0, 0)
}
}
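
A hedged sketch of the datagram payloads exchanged above, using the field names that appear in the code; the placeholder strings are hypothetical. Each map is serialized with JsonOutput.toJson and wrapped in an I2P datagram before sending.

// field names taken from the code above; values are placeholders
def ping = [type: "Ping", version: 1, leaf: false]                 // sent by queryIfNeeded()
def pong = [type: "Pong", pongs: ["<destination base64>"]]         // hostcache reply
def crawlerPong = [type: "CrawlerPong", version: 1,                // respondToCrawler() reply
                   uuid: "<uuid from the CrawlerPing>",
                   peers: ["<destination base64>"],                // at most CRAWLER_RETURN entries
                   leafSlots: true, peerSlots: true]
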

View File

@ -4,20 +4,25 @@ import net.i2p.data.Destination
class CacheServers {
private static final int TO_GIVE = 3
private static Set<Destination> CACHES = [
new Destination("Wddh2E6FyyXBF7SvUYHKdN-vjf3~N6uqQWNeBDTM0P33YjiQCOsyedrjmDZmWFrXUJfJLWnCb5bnKezfk4uDaMyj~uvDG~yvLVcFgcPWSUd7BfGgym-zqcG1q1DcM8vfun-US7YamBlmtC6MZ2j-~Igqzmgshita8aLPCfNAA6S6e2UMjjtG7QIXlxpMec75dkHdJlVWbzrk9z8Qgru3YIk0UztYgEwDNBbm9wInsbHhr3HtAfa02QcgRVqRN2PnQXuqUJs7R7~09FZPEviiIcUpkY3FeyLlX1sgQFBeGeA96blaPvZNGd6KnNdgfLgMebx5SSxC-N4KZMSMBz5cgonQF3~m2HHFRSI85zqZNG5X9bJN85t80ltiv1W1es8ZnQW4es11r7MrvJNXz5bmSH641yJIvS6qI8OJJNpFVBIQSXLD-96TayrLQPaYw~uNZ-eXaE6G5dYhiuN8xHsFI1QkdaUaVZnvDGfsRbpS5GtpUbBDbyLkdPurG0i7dN1wAAAA")
]
private static final int TO_GIVE = 3
private static Set<Destination> CACHES = [
// zlatinb
new Destination("Wddh2E6FyyXBF7SvUYHKdN-vjf3~N6uqQWNeBDTM0P33YjiQCOsyedrjmDZmWFrXUJfJLWnCb5bnKezfk4uDaMyj~uvDG~yvLVcFgcPWSUd7BfGgym-zqcG1q1DcM8vfun-US7YamBlmtC6MZ2j-~Igqzmgshita8aLPCfNAA6S6e2UMjjtG7QIXlxpMec75dkHdJlVWbzrk9z8Qgru3YIk0UztYgEwDNBbm9wInsbHhr3HtAfa02QcgRVqRN2PnQXuqUJs7R7~09FZPEviiIcUpkY3FeyLlX1sgQFBeGeA96blaPvZNGd6KnNdgfLgMebx5SSxC-N4KZMSMBz5cgonQF3~m2HHFRSI85zqZNG5X9bJN85t80ltiv1W1es8ZnQW4es11r7MrvJNXz5bmSH641yJIvS6qI8OJJNpFVBIQSXLD-96TayrLQPaYw~uNZ-eXaE6G5dYhiuN8xHsFI1QkdaUaVZnvDGfsRbpS5GtpUbBDbyLkdPurG0i7dN1wAAAA"),
// sNL
new Destination("JC63wJNOqSJmymkj4~UJWywBTvDGikKMoYP0HX2Wz9c5l3otXSkwnxWAFL4cKr~Ygh3BNNi2t93vuLIiI1W8AsE42kR~PwRx~Y-WvIHXR6KUejRmOp-n8WidtjKg9k4aDy428uSOedqXDxys5mpoeQXwDsv1CoPTTwnmb1GWFy~oTGIsCguCl~aJWGnqiKarPO3GJQ~ev-NbvAQzUfC3HeP1e6pdI5CGGjExahTCID5UjpJw8GaDXWlGmYWWH303Xu4x-vAHQy1dJLsOBCn8dZravsn5BKJk~j0POUon45CCx-~NYtaPe0Itt9cMdD2ciC76Rep1D0X0sm1SjlSs8sZ52KmF3oaLZ6OzgI9QLMIyBUrfi41sK5I0qTuUVBAkvW1xr~L-20dYJ9TrbOaOb2-vDIfKaxVi6xQOuhgQDiSBhd3qv2m0xGu-BM9DQYfNA0FdMjnZmqjmji9RMavzQSsVFIbQGLbrLepiEFlb7TseCK5UtRp8TxnG7L4gbYevBQAEAAcAAA=="),
// dark_trion
new Destination("Gec9L29FVcQvYDgpcYuEYdltJn06PPoOWAcAM8Af-gDm~ehlrJcwlLXXs0hidq~yP2A0X7QcDi6i6shAfuEofTchxGJl8LRNqj9lio7WnB7cIixXWL~uCkD7Np5LMX0~akNX34oOb9RcBYVT2U5rFGJmJ7OtBv~IBkGeLhsMrqaCjahd0jdBO~QJ-t82ZKZhh044d24~JEfF9zSJxdBoCdAcXzryGNy7sYtFVDFsPKJudAxSW-UsSQiGw2~k-TxyF0r-iAt1IdzfNu8Lu0WPqLdhDYJWcPldx2PR5uJorI~zo~z3I5RX3NwzarlbD4nEP5s65ahPSfVCEkzmaJUBgP8DvBqlFaX89K4nGRYc7jkEjJ8cX4L6YPXUpTPWcfKkW259WdQY3YFh6x7rzijrGZewpczOLCrt-bZRYgDrUibmZxKZmNhy~lQu4gYVVjkz1i4tL~DWlhIc4y0x2vItwkYLArPPi~ejTnt-~Lhb7oPMXRcWa3UrwGKpFvGZY4NXBQAEAAcAAA==")
]
static List<Destination> getCacheServers() {
List<Destination> allCaches = new ArrayList<>(CACHES)
Collections.shuffle(allCaches)
if (allCaches.size() <= TO_GIVE)
return allCaches
allCaches[0..TO_GIVE-1]
}
static boolean isRegistered(Destination d) {
return CACHES.contains(d)
}
static List<Destination> getCacheServers() {
List<Destination> allCaches = new ArrayList<>(CACHES)
Collections.shuffle(allCaches)
if (allCaches.size() <= TO_GIVE)
return allCaches
allCaches[0..TO_GIVE-1]
}
static boolean isRegistered(Destination d) {
return CACHES.contains(d)
}
}

View File

@ -4,42 +4,67 @@ import net.i2p.data.Destination
class Host {
private static final int MAX_FAILURES = 3
private static final int CLEAR_INTERVAL = 60 * 60 * 1000
final Destination destination
int failures,successes
long lastAttempt
public Host(Destination destination) {
this.destination = destination
}
private static final int MAX_FAILURES = 3
synchronized void onConnect() {
failures = 0
successes++
lastAttempt = System.currentTimeMillis()
}
synchronized void onFailure() {
failures++
successes = 0
lastAttempt = System.currentTimeMillis()
}
synchronized boolean isFailed() {
failures >= MAX_FAILURES
}
synchronized boolean hasSucceeded() {
successes > 0
}
final Destination destination
private final int clearInterval, hopelessInterval, rejectionInterval
int failures,successes
long lastAttempt
long lastSuccessfulAttempt
long lastRejection
public Host(Destination destination, int clearInterval, int hopelessInterval, int rejectionInterval) {
this.destination = destination
this.clearInterval = clearInterval
this.hopelessInterval = hopelessInterval
this.rejectionInterval = rejectionInterval
}
private void connectSuccessful() {
failures = 0
successes++
lastAttempt = System.currentTimeMillis()
}
synchronized void onConnect() {
connectSuccessful()
lastSuccessfulAttempt = lastAttempt
}
synchronized void onReject() {
connectSuccessful()
lastRejection = lastAttempt;
}
synchronized void onFailure() {
failures++
successes = 0
lastAttempt = System.currentTimeMillis()
}
synchronized boolean isFailed() {
failures >= MAX_FAILURES
}
synchronized boolean hasSucceeded() {
successes > 0
}
synchronized void clearFailures() {
failures = 0
}
synchronized boolean canTryAgain() {
lastSuccessfulAttempt > 0 &&
System.currentTimeMillis() - lastAttempt > (clearInterval * 60 * 1000)
}
synchronized void canTryAgain() {
System.currentTimeMillis() - lastAttempt > CLEAR_INTERVAL
synchronized boolean isHopeless() {
isFailed() &&
System.currentTimeMillis() - lastSuccessfulAttempt > (hopelessInterval * 60 * 1000)
}
synchronized boolean isRecentlyRejected() {
System.currentTimeMillis() - lastRejection < (rejectionInterval * 60 * 1000)
}
}
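The three new constructor arguments are intervals expressed in minutes (hence the `* 60 * 1000` conversions); HostCache wires them from MuWireSettings. A standalone sketch of the resulting predicates, with made-up interval values rather than the real defaults:

// Standalone sketch of the new Host predicates; all values below are invented for illustration.
long now = System.currentTimeMillis()
int clearInterval = 60          // minutes before a failed host may be retried
int hopelessInterval = 1440     // minutes without a success before a failed host is hopeless
int rejectionInterval = 10      // minutes to back off after a rejection
long lastAttempt           = now - 2 * 60 * 60 * 1000    // 2 hours ago
long lastSuccessfulAttempt = now - 3 * 60 * 60 * 1000    // 3 hours ago
long lastRejection         = now - 5 * 60 * 1000         // 5 minutes ago
boolean failed = true                                     // i.e. failures >= MAX_FAILURES

boolean canTryAgain = lastSuccessfulAttempt > 0 &&
        now - lastAttempt > (clearInterval * 60 * 1000)               // true: 2h > 1h
boolean hopeless = failed &&
        now - lastSuccessfulAttempt > (hopelessInterval * 60 * 1000)  // false: succeeded 3h ago
boolean recentlyRejected = now - lastRejection < (rejectionInterval * 60 * 1000)   // true
println "canTryAgain=$canTryAgain hopeless=$hopeless recentlyRejected=$recentlyRejected"

Note that a host is only ever retried if it has succeeded at least once, since canTryAgain() requires lastSuccessfulAttempt > 0.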

View File

@ -15,141 +15,151 @@ import net.i2p.data.Destination
class HostCache extends Service {
final TrustService trustService
final File storage
final int interval
final Timer timer
final MuWireSettings settings
final Destination myself
final Map<Destination, Host> hosts = new ConcurrentHashMap<>()
HostCache(){}
public HostCache(TrustService trustService, File storage, int interval,
MuWireSettings settings, Destination myself) {
this.trustService = trustService
this.storage = storage
this.interval = interval
this.settings = settings
this.myself = myself
this.timer = new Timer("host-persister",true)
}
final TrustService trustService
final File storage
final int interval
final Timer timer
final MuWireSettings settings
final Destination myself
final Map<Destination, Host> hosts = new ConcurrentHashMap<>()
void start() {
timer.schedule({load()} as TimerTask, 1)
}
void stop() {
timer.cancel()
}
void onHostDiscoveredEvent(HostDiscoveredEvent e) {
if (myself == e.destination)
return
if (hosts.containsKey(e.destination)) {
HostCache(){}
public HostCache(TrustService trustService, File storage, int interval,
MuWireSettings settings, Destination myself) {
this.trustService = trustService
this.storage = storage
this.interval = interval
this.settings = settings
this.myself = myself
this.timer = new Timer("host-persister",true)
}
void start() {
timer.schedule({load()} as TimerTask, 1)
}
void stop() {
timer.cancel()
}
void onHostDiscoveredEvent(HostDiscoveredEvent e) {
if (myself == e.destination)
return
if (hosts.containsKey(e.destination)) {
if (!e.fromHostcache)
return
hosts.get(e.destination).clearFailures()
return
}
Host host = new Host(e.destination)
if (allowHost(host)) {
hosts.put(e.destination, host)
}
}
void onConnectionEvent(ConnectionEvent e) {
if (e.leaf)
return
Destination dest = e.endpoint.destination
Host host = hosts.get(dest)
if (host == null) {
host = new Host(dest)
hosts.put(dest, host)
}
}
Host host = new Host(e.destination, settings.hostClearInterval, settings.hostHopelessInterval, settings.hostRejectInterval)
if (allowHost(host)) {
hosts.put(e.destination, host)
}
}
switch(e.status) {
case ConnectionAttemptStatus.SUCCESSFUL:
case ConnectionAttemptStatus.REJECTED:
host.onConnect()
break
case ConnectionAttemptStatus.FAILED:
host.onFailure()
break
}
}
List<Destination> getHosts(int n) {
List<Destination> rv = new ArrayList<>(hosts.keySet())
rv.retainAll {allowHost(hosts[it])}
if (rv.size() <= n)
return rv
Collections.shuffle(rv)
rv[0..n-1]
}
List<Destination> getGoodHosts(int n) {
List<Destination> rv = new ArrayList<>(hosts.keySet())
rv.retainAll {
Host host = hosts[it]
allowHost(host) && host.hasSucceeded()
}
if (rv.size() <= n)
return rv
Collections.shuffle(rv)
rv[0..n-1]
}
void load() {
if (storage.exists()) {
JsonSlurper slurper = new JsonSlurper()
storage.eachLine {
def entry = slurper.parseText(it)
Destination dest = new Destination(entry.destination)
Host host = new Host(dest)
host.failures = Integer.valueOf(String.valueOf(entry.failures))
host.successes = Integer.valueOf(String.valueOf(entry.successes))
void onConnectionEvent(ConnectionEvent e) {
if (e.leaf)
return
Destination dest = e.endpoint.destination
Host host = hosts.get(dest)
if (host == null) {
host = new Host(dest, settings.hostClearInterval, settings.hostHopelessInterval, settings.hostRejectInterval)
hosts.put(dest, host)
}
switch(e.status) {
case ConnectionAttemptStatus.SUCCESSFUL:
host.onConnect()
break
case ConnectionAttemptStatus.REJECTED:
host.onReject()
break
case ConnectionAttemptStatus.FAILED:
host.onFailure()
break
}
}
List<Destination> getHosts(int n) {
List<Destination> rv = new ArrayList<>(hosts.keySet())
rv.retainAll {allowHost(hosts[it])}
rv.removeAll {
def h = hosts[it];
(h.isFailed() && !h.canTryAgain()) || h.isRecentlyRejected()
}
if (rv.size() <= n)
return rv
Collections.shuffle(rv)
rv[0..n-1]
}
List<Destination> getGoodHosts(int n) {
List<Destination> rv = new ArrayList<>(hosts.keySet())
rv.retainAll {
Host host = hosts[it]
allowHost(host) && host.hasSucceeded()
}
if (rv.size() <= n)
return rv
Collections.shuffle(rv)
rv[0..n-1]
}
void load() {
if (storage.exists()) {
JsonSlurper slurper = new JsonSlurper()
storage.eachLine {
def entry = slurper.parseText(it)
Destination dest = new Destination(entry.destination)
Host host = new Host(dest, settings.hostClearInterval, settings.hostHopelessInterval, settings.hostRejectInterval)
host.failures = Integer.valueOf(String.valueOf(entry.failures))
host.successes = Integer.valueOf(String.valueOf(entry.successes))
if (entry.lastAttempt != null)
host.lastAttempt = entry.lastAttempt
if (allowHost(host))
hosts.put(dest, host)
}
}
timer.schedule({save()} as TimerTask, interval, interval)
loaded = true
}
private boolean allowHost(Host host) {
if (host.isFailed() && !host.canTryAgain())
return false
if (host.destination == myself)
return false
TrustLevel trust = trustService.getLevel(host.destination)
switch(trust) {
case TrustLevel.DISTRUSTED :
return false
case TrustLevel.TRUSTED :
return true
case TrustLevel.NEUTRAL :
return settings.allowUntrusted()
}
false
}
private void save() {
storage.delete()
storage.withPrintWriter { writer ->
hosts.each { dest, host ->
if (allowHost(host)) {
def map = [:]
map.destination = dest.toBase64()
map.failures = host.failures
map.successes = host.successes
if (entry.lastSuccessfulAttempt != null)
host.lastSuccessfulAttempt = entry.lastSuccessfulAttempt
if (entry.lastRejection != null)
host.lastRejection = entry.lastRejection
if (allowHost(host))
hosts.put(dest, host)
}
}
timer.schedule({save()} as TimerTask, interval, interval)
loaded = true
}
private boolean allowHost(Host host) {
if (host.destination == myself)
return false
TrustLevel trust = trustService.getLevel(host.destination)
switch(trust) {
case TrustLevel.DISTRUSTED :
return false
case TrustLevel.TRUSTED :
return true
case TrustLevel.NEUTRAL :
return settings.allowUntrusted()
}
false
}
private void save() {
storage.delete()
storage.withPrintWriter { writer ->
hosts.each { dest, host ->
if (allowHost(host) && !host.isHopeless()) {
def map = [:]
map.destination = dest.toBase64()
map.failures = host.failures
map.successes = host.successes
map.lastAttempt = host.lastAttempt
def json = JsonOutput.toJson(map)
writer.println json
}
}
}
}
map.lastSuccessfulAttempt = host.lastSuccessfulAttempt
map.lastRejection = host.lastRejection
def json = JsonOutput.toJson(map)
writer.println json
}
}
}
}
}
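With the new fields, each host is persisted as one JSON object per line, and hopeless hosts are skipped entirely on save. A sketch of a single persisted line as save() would produce it (all values invented):

import groovy.json.JsonOutput
// One line of the hosts file as written by save(); every value here is invented.
def map = [:]
map.destination           = '<base64 destination>'
map.failures              = 0
map.successes             = 3
map.lastAttempt           = 1570000000000L
map.lastSuccessfulAttempt = 1570000000000L
map.lastRejection         = 0L
println JsonOutput.toJson(map)

On load the extra fields are restored when present, and getHosts() now also filters out hosts that are failed and not yet retryable, or that rejected us recently.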

View File

@ -6,11 +6,11 @@ import net.i2p.data.Destination
class HostDiscoveredEvent extends Event {
Destination destination
Destination destination
boolean fromHostcache
@Override
public String toString() {
"HostDiscoveredEvent ${super.toString()} destination:${destination.toBase32()} from hostcache $fromHostcache"
}
@Override
public String toString() {
"HostDiscoveredEvent ${super.toString()} destination:${destination.toBase32()} from hostcache $fromHostcache"
}
}

View File

@ -0,0 +1,28 @@
package com.muwire.core.mesh
import com.muwire.core.InfoHash
import com.muwire.core.Persona
import com.muwire.core.download.Pieces
import net.i2p.data.Destination
import net.i2p.util.ConcurrentHashSet
class Mesh {
private final InfoHash infoHash
private final Set<Persona> sources = new ConcurrentHashSet<>()
private final Pieces pieces
Mesh(InfoHash infoHash, Pieces pieces) {
this.infoHash = infoHash
this.pieces = pieces
}
Set<Persona> getRandom(int n, Persona exclude) {
List<Persona> tmp = new ArrayList<>(sources)
tmp.remove(exclude)
Collections.shuffle(tmp)
if (tmp.size() < n)
return tmp
tmp[0..n-1]
}
}
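getRandom() returns up to n sources, excluding the requesting persona. A minimal standalone sketch of that exclude-shuffle-take behaviour, with strings in place of Personas:

// Standalone sketch of Mesh.getRandom(): exclude the requester, shuffle, take at most n.
def sources = ['alice', 'bob', 'carol', 'dave'] as Set
def getRandom = { int n, exclude ->
    List tmp = new ArrayList<>(sources)
    tmp.remove(exclude)
    Collections.shuffle(tmp)
    tmp.size() < n ? tmp : tmp[0..n - 1]
}
assert !getRandom(9, 'alice').contains('alice')   // the requester never gets itself back
assert getRandom(2, 'alice').size() == 2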

View File

@ -0,0 +1,103 @@
package com.muwire.core.mesh
import java.util.stream.Collectors
import com.muwire.core.Constants
import com.muwire.core.InfoHash
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.download.Pieces
import com.muwire.core.download.SourceDiscoveredEvent
import com.muwire.core.files.FileManager
import com.muwire.core.util.DataUtil
import groovy.json.JsonOutput
import groovy.json.JsonSlurper
import net.i2p.data.Base64
class MeshManager {
private final Map<InfoHash, Mesh> meshes = Collections.synchronizedMap(new HashMap<>())
private final FileManager fileManager
private final File home
private final MuWireSettings settings
MeshManager(FileManager fileManager, File home, MuWireSettings settings) {
this.fileManager = fileManager
this.home = home
this.settings = settings
load()
}
Mesh get(InfoHash infoHash) {
meshes.get(infoHash)
}
Mesh getOrCreate(InfoHash infoHash, int nPieces, boolean sequential) {
synchronized(meshes) {
if (meshes.containsKey(infoHash))
return meshes.get(infoHash)
float ratio = sequential ? 0f : settings.downloadSequentialRatio
Pieces pieces = new Pieces(nPieces, ratio)
if (fileManager.rootToFiles.containsKey(infoHash)) {
for (int i = 0; i < nPieces; i++)
pieces.markDownloaded(i)
}
Mesh rv = new Mesh(infoHash, pieces)
meshes.put(infoHash, rv)
return rv
}
}
void onSourceDiscoveredEvent(SourceDiscoveredEvent e) {
Mesh mesh = meshes.get(e.infoHash)
if (mesh == null)
return
mesh.sources.add(e.source)
save()
}
private void save() {
File meshFile = new File(home, "mesh.json")
synchronized(meshes) {
meshFile.withPrintWriter { writer ->
meshes.values().each { mesh ->
def json = [:]
json.timestamp = System.currentTimeMillis()
json.infoHash = Base64.encode(mesh.infoHash.getRoot())
json.sources = mesh.sources.stream().map({it.toBase64()}).collect(Collectors.toList())
json.nPieces = mesh.pieces.nPieces
json.xHave = DataUtil.encodeXHave(mesh.pieces.downloaded, mesh.pieces.nPieces)
writer.println(JsonOutput.toJson(json))
}
}
}
}
private void load() {
File meshFile = new File(home, "mesh.json")
if (!meshFile.exists())
return
long now = System.currentTimeMillis()
JsonSlurper slurper = new JsonSlurper()
meshFile.eachLine {
def json = slurper.parseText(it)
if (now - json.timestamp > settings.meshExpiration * 60 * 1000)
return
InfoHash infoHash = new InfoHash(Base64.decode(json.infoHash))
Pieces pieces = new Pieces(json.nPieces, settings.downloadSequentialRatio)
Mesh mesh = new Mesh(infoHash, pieces)
json.sources.each { source ->
Persona persona = new Persona(new ByteArrayInputStream(Base64.decode(source)))
mesh.sources.add(persona)
}
if (json.xHave != null)
DataUtil.decodeXHave(json.xHave).each { pieces.markDownloaded(it) }
if (!mesh.sources.isEmpty())
meshes.put(infoHash, mesh)
}
}
}
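Meshes are persisted to mesh.json in the MuWire home directory, one JSON object per line; on load, entries older than settings.meshExpiration minutes are skipped and meshes with no sources are dropped. A sketch of one such line as save() builds it (all values invented):

import groovy.json.JsonOutput
// One line of mesh.json as written by save(); values are invented for illustration.
def json = [:]
json.timestamp = System.currentTimeMillis()
json.infoHash  = '<base64 of the infohash root>'
json.sources   = ['<base64 persona>', '<base64 persona>']
json.nPieces   = 42
json.xHave     = '<base64 X-Have bitfield from DataUtil.encodeXHave>'
println JsonOutput.toJson(json)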

View File

@ -6,11 +6,11 @@ import net.i2p.data.Base32
import net.i2p.data.Destination
class DeleteEvent extends Event {
byte [] infoHash
Destination leaf
@Override
public String toString() {
"DeleteEvent ${super.toString()} infoHash:${Base32.encode(infoHash)} leaf:${leaf.toBase32()}"
}
byte [] infoHash
Destination leaf
@Override
public String toString() {
"DeleteEvent ${super.toString()} infoHash:${Base32.encode(infoHash)} leaf:${leaf.toBase32()}"
}
}

View File

@ -21,5 +21,5 @@ class InvalidSearchResultException extends Exception {
super(cause);
// TODO Auto-generated constructor stub
}
}

View File

@ -6,33 +6,33 @@ import com.muwire.core.connection.UltrapeerConnectionManager
import net.i2p.data.Destination
class LeafSearcher {
final UltrapeerConnectionManager connectionManager
final SearchIndex searchIndex = new SearchIndex()
final Map<String, Set<byte[]>> fileNameToHashes = new HashMap<>()
final Map<byte[], Set<Destination>> hashToLeafs = new HashMap<>()
final Map<Destination, Map<byte[], Set<String>>> leafToFiles = new HashMap<>()
LeafSearcher(UltrapeerConnectionManager connectionManager) {
this.connectionManager = connectionManager
}
void onUpsertEvent(UpsertEvent e) {
// TODO: implement
}
void onDeleteEvent(DeleteEvent e) {
// TODO: implement
}
void onDisconnectionEvent(DisconnectionEvent e) {
// TODO: implement
}
void onQueryEvent(QueryEvent e) {
// TODO: implement
}
final UltrapeerConnectionManager connectionManager
final SearchIndex searchIndex = new SearchIndex()
final Map<String, Set<byte[]>> fileNameToHashes = new HashMap<>()
final Map<byte[], Set<Destination>> hashToLeafs = new HashMap<>()
final Map<Destination, Map<byte[], Set<String>>> leafToFiles = new HashMap<>()
LeafSearcher(UltrapeerConnectionManager connectionManager) {
this.connectionManager = connectionManager
}
void onUpsertEvent(UpsertEvent e) {
// TODO: implement
}
void onDeleteEvent(DeleteEvent e) {
// TODO: implement
}
void onDisconnectionEvent(DisconnectionEvent e) {
// TODO: implement
}
void onQueryEvent(QueryEvent e) {
// TODO: implement
}
}

View File

@ -6,12 +6,12 @@ import com.muwire.core.Persona
import net.i2p.data.Destination
class QueryEvent extends Event {
SearchEvent searchEvent
boolean firstHop
Destination replyTo
boolean firstHop
Destination replyTo
Persona originator
Destination receivedOn
Destination receivedOn
String toString() {
"searchEvent: $searchEvent firstHop:$firstHop, replyTo:${replyTo.toBase32()}" +

View File

@ -6,6 +6,6 @@ import com.muwire.core.SharedFile
class ResultsEvent extends Event {
SearchEvent searchEvent
SharedFile[] results
UUID uuid
SharedFile[] results
UUID uuid
}

View File

@ -26,7 +26,7 @@ class ResultsParser {
}
}
private static parseV1(Persona p, UUID uuid, def json) {
if (json.name == null)
throw new InvalidSearchResultException("name missing")
@ -55,7 +55,7 @@ class ResultsParser {
InfoHash parsedIH = InfoHash.fromHashList(hashList)
if (parsedIH.getRoot() != infoHash)
throw new InvalidSearchControlsException("infohash root doesn't match")
return new UIResultEvent( sender : p,
name : name,
size : size,
@ -67,7 +67,7 @@ class ResultsParser {
throw new InvalidSearchResultException("parsing search result failed",e)
}
}
private static UIResultEvent parseV2(Persona p, UUID uuid, def json) {
if (json.name == null)
throw new InvalidSearchResultException("name missing")
@ -86,11 +86,11 @@ class ResultsParser {
if (infoHash.length != InfoHash.SIZE)
throw new InvalidSearchResultException("invalid infohash size $infoHash.length")
int pieceSize = json.pieceSize
Set<Destination> sources = Collections.emptySet()
if (json.sources != null)
if (json.sources != null)
sources = json.sources.stream().map({new Destination(it)}).collect(Collectors.toSet())
return new UIResultEvent( sender : p,
name : name,
size : size,

View File

@ -11,6 +11,7 @@ import java.util.concurrent.Executor
import java.util.concurrent.Executors
import java.util.concurrent.ThreadFactory
import java.util.concurrent.atomic.AtomicInteger
import java.util.logging.Level
import java.util.stream.Collectors
import com.muwire.core.DownloadedFile
@ -24,9 +25,9 @@ import net.i2p.data.Destination
@Log
class ResultsSender {
private static final AtomicInteger THREAD_NO = new AtomicInteger()
private final Executor executor = Executors.newCachedThreadPool(
new ThreadFactory() {
@Override
@ -37,27 +38,27 @@ class ResultsSender {
rv
}
})
private final I2PConnector connector
private final Persona me
private final EventBus eventBus
ResultsSender(EventBus eventBus, I2PConnector connector, Persona me) {
this.connector = connector;
this.eventBus = eventBus
this.me = me
}
void sendResults(UUID uuid, SharedFile[] results, Destination target, boolean oobInfohash) {
log.info("Sending $results.length results for uuid $uuid to ${target.toBase32()} oobInfohash : $oobInfohash")
if (target.equals(me.destination)) {
results.each {
results.each {
long length = it.getFile().length()
int pieceSize = it.getPieceSize()
if (pieceSize == 0)
pieceSize = FileHasher.getPieceSize(length)
Set<Destination> suggested = Collections.emptySet()
if (it instanceof DownloadedFile)
if (it instanceof DownloadedFile)
suggested = it.sources
def uiResultEvent = new UIResultEvent( sender : me,
name : it.getFile().getName(),
@ -70,63 +71,67 @@ class ResultsSender {
eventBus.publish(uiResultEvent)
}
} else {
executor.execute(new ResultSendJob(uuid : uuid, results : results,
executor.execute(new ResultSendJob(uuid : uuid, results : results,
target: target, oobInfohash : oobInfohash))
}
}
private class ResultSendJob implements Runnable {
UUID uuid
SharedFile [] results
Destination target
boolean oobInfohash
@Override
public void run() {
byte [] tmp = new byte[InfoHash.SIZE]
JsonOutput jsonOutput = new JsonOutput()
Endpoint endpoint = null;
try {
endpoint = connector.connect(target)
DataOutputStream os = new DataOutputStream(endpoint.getOutputStream())
os.write("POST $uuid\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
me.write(os)
os.writeShort((short)results.length)
results.each {
byte [] name = it.getFile().getName().getBytes(StandardCharsets.UTF_8)
def baos = new ByteArrayOutputStream()
def daos = new DataOutputStream(baos)
daos.writeShort((short) name.length)
daos.write(name)
daos.flush()
String encodedName = Base64.encode(baos.toByteArray())
def obj = [:]
obj.type = "Result"
obj.version = oobInfohash ? 2 : 1
obj.name = encodedName
obj.infohash = Base64.encode(it.getInfoHash().getRoot())
obj.size = it.getFile().length()
obj.pieceSize = it.getPieceSize()
if (!oobInfohash) {
byte [] hashList = it.getInfoHash().getHashList()
def hashListB64 = []
for (int i = 0; i < hashList.length / InfoHash.SIZE; i++) {
System.arraycopy(hashList, InfoHash.SIZE * i, tmp, 0, InfoHash.SIZE)
hashListB64 << Base64.encode(tmp)
byte [] tmp = new byte[InfoHash.SIZE]
JsonOutput jsonOutput = new JsonOutput()
Endpoint endpoint = null;
try {
endpoint = connector.connect(target)
DataOutputStream os = new DataOutputStream(endpoint.getOutputStream())
os.write("POST $uuid\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
me.write(os)
os.writeShort((short)results.length)
results.each {
byte [] name = it.getFile().getName().getBytes(StandardCharsets.UTF_8)
def baos = new ByteArrayOutputStream()
def daos = new DataOutputStream(baos)
daos.writeShort((short) name.length)
daos.write(name)
daos.flush()
String encodedName = Base64.encode(baos.toByteArray())
def obj = [:]
obj.type = "Result"
obj.version = oobInfohash ? 2 : 1
obj.name = encodedName
obj.infohash = Base64.encode(it.getInfoHash().getRoot())
obj.size = it.getFile().length()
obj.pieceSize = it.getPieceSize()
if (!oobInfohash) {
byte [] hashList = it.getInfoHash().getHashList()
def hashListB64 = []
for (int i = 0; i < hashList.length / InfoHash.SIZE; i++) {
System.arraycopy(hashList, InfoHash.SIZE * i, tmp, 0, InfoHash.SIZE)
hashListB64 << Base64.encode(tmp)
}
obj.hashList = hashListB64
}
obj.hashList = hashListB64
if (it instanceof DownloadedFile)
obj.sources = it.sources.stream().map({dest -> dest.toBase64()}).collect(Collectors.toSet())
def json = jsonOutput.toJson(obj)
os.writeShort((short)json.length())
os.write(json.getBytes(StandardCharsets.US_ASCII))
}
if (it instanceof DownloadedFile)
obj.sources = it.sources.stream().map({dest -> dest.toBase64()}).collect(Collectors.toSet())
def json = jsonOutput.toJson(obj)
os.writeShort((short)json.length())
os.write(json.getBytes(StandardCharsets.US_ASCII))
os.flush()
} finally {
endpoint?.close()
}
os.flush()
} finally {
endpoint?.close()
} catch (Exception e) {
log.log(Level.WARNING, "problem sending results",e)
}
}
}

View File

@ -5,11 +5,11 @@ import com.muwire.core.InfoHash
class SearchEvent extends Event {
List<String> searchTerms
byte [] searchHash
UUID uuid
List<String> searchTerms
byte [] searchHash
UUID uuid
boolean oobInfohash
String toString() {
def infoHash = null
if (searchHash != null)

View File

@ -1,56 +1,59 @@
package com.muwire.core.search
import com.muwire.core.Constants
import com.muwire.core.SplitPattern
class SearchIndex {
final Map<String, Set<String>> keywords = new HashMap<>()
void add(String string) {
String [] split = split(string)
split.each {
Set<String> existing = keywords.get(it)
if (existing == null) {
existing = new HashSet<>()
keywords.put(it, existing)
}
existing.add(string)
}
}
void remove(String string) {
String [] split = split(string)
split.each {
Set<String> existing = keywords.get it
if (existing != null) {
existing.remove(string)
if (existing.isEmpty()) {
keywords.remove(it)
}
}
}
}
private static String[] split(String source) {
source = source.replaceAll(Constants.SPLIT_PATTERN, " ").toLowerCase()
source.split(" ")
}
String[] search(List<String> terms) {
Set<String> rv = null;
terms.each {
Set<String> forWord = keywords.getOrDefault(it,[])
if (rv == null) {
rv = new HashSet<>(forWord)
} else {
rv.retainAll(forWord)
}
}
if (rv != null)
return rv.asList()
[]
}
final Map<String, Set<String>> keywords = new HashMap<>()
void add(String string) {
String [] split = split(string)
split.each {
Set<String> existing = keywords.get(it)
if (existing == null) {
existing = new HashSet<>()
keywords.put(it, existing)
}
existing.add(string)
}
}
void remove(String string) {
String [] split = split(string)
split.each {
Set<String> existing = keywords.get it
if (existing != null) {
existing.remove(string)
if (existing.isEmpty()) {
keywords.remove(it)
}
}
}
}
private static String[] split(String source) {
source = source.replaceAll(SplitPattern.SPLIT_PATTERN, " ").toLowerCase()
String [] split = source.split(" ")
def rv = []
split.each { if (it.length() > 0) rv << it }
rv.toArray(new String[0])
}
String[] search(List<String> terms) {
Set<String> rv = null;
terms.each {
Set<String> forWord = keywords.getOrDefault(it,[])
if (rv == null) {
rv = new HashSet<>(forWord)
} else {
rv.retainAll(forWord)
}
}
if (rv != null)
return rv.asList()
[]
}
}
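The reworked split() lower-cases, replaces the shared SplitPattern characters with spaces, and now drops empty tokens, so consecutive separators no longer put empty keywords into the index. A standalone sketch of the tokenisation, with a stand-in character class because SplitPattern.SPLIT_PATTERN is defined elsewhere:

// Standalone sketch; the character class below is a stand-in for SplitPattern.SPLIT_PATTERN.
def split = { String source ->
    source = source.replaceAll('[ .,_-]', ' ').toLowerCase()
    def rv = []
    source.split(' ').each { if (it.length() > 0) rv << it }
    rv.toArray(new String[0])
}
assert split('My  Great--File.txt').toList() == ['my', 'great', 'file', 'txt']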

View File

@ -8,17 +8,17 @@ import net.i2p.data.Destination
@Log
public class SearchManager {
private static final int EXPIRE_TIME = 60 * 1000 * 1000
private static final int CHECK_INTERVAL = 60 * 1000
private final EventBus eventBus
private final Persona me
private final ResultsSender resultsSender
private final Map<UUID, QueryEvent> responderAddress = Collections.synchronizedMap(new HashMap<>())
SearchManager(){}
SearchManager(EventBus eventBus, Persona me, ResultsSender resultsSender) {
this.eventBus = eventBus
this.me = me
@ -26,7 +26,7 @@ public class SearchManager {
Timer timer = new Timer("query-expirer", true)
timer.schedule({cleanup()} as TimerTask, CHECK_INTERVAL, CHECK_INTERVAL)
}
void onQueryEvent(QueryEvent event) {
if (responderAddress.containsKey(event.searchEvent.uuid)) {
log.info("Dropping duplicate search uuid $event.searchEvent.uuid")
@ -35,7 +35,7 @@ public class SearchManager {
responderAddress.put(event.searchEvent.uuid, event)
eventBus.publish(event.searchEvent)
}
void onResultsEvent(ResultsEvent event) {
Destination target = responderAddress.get(event.uuid)?.replyTo
if (target == null)
@ -46,11 +46,11 @@ public class SearchManager {
}
resultsSender.sendResults(event.uuid, event.results, target, event.searchEvent.oobInfohash)
}
boolean hasLocalSearch(UUID uuid) {
me.destination.equals(responderAddress.get(uuid)?.replyTo)
}
private void cleanup() {
final long now = System.currentTimeMillis()
synchronized(responderAddress) {

View File

@ -14,7 +14,7 @@ class UIResultEvent extends Event {
long size
InfoHash infohash
int pieceSize
@Override
public String toString() {
super.toString() + "name:$name size:$size sender:${sender.getHumanReadableName()} pieceSize $pieceSize"

View File

@ -18,5 +18,5 @@ class UnexpectedResultsException extends Exception {
public UnexpectedResultsException(String message) {
super(message);
}
}

View File

@ -7,12 +7,12 @@ import net.i2p.data.Destination
class UpsertEvent extends Event {
Set<String> names
byte [] infoHash
Destination leaf
@Override
public String toString() {
"UpsertEvent ${super.toString()} names:$names infoHash:${Base32.encode(infoHash)} leaf:${leaf.toBase32()}"
}
Set<String> names
byte [] infoHash
Destination leaf
@Override
public String toString() {
"UpsertEvent ${super.toString()} names:$names infoHash:${Base32.encode(infoHash)} leaf:${leaf.toBase32()}"
}
}

View File

@ -0,0 +1,31 @@
package com.muwire.core.trust
import java.util.concurrent.ConcurrentHashMap
import com.muwire.core.Persona
import net.i2p.util.ConcurrentHashSet
class RemoteTrustList {
public enum Status { NEW, UPDATING, UPDATED, UPDATE_FAILED }
private final Persona persona
private final Set<Persona> good, bad
volatile long timestamp
volatile boolean forceUpdate
Status status = Status.NEW
RemoteTrustList(Persona persona) {
this.persona = persona
good = new ConcurrentHashSet<>()
bad = new ConcurrentHashSet<>()
}
@Override
public boolean equals(Object o) {
if (!(o instanceof RemoteTrustList))
return false
RemoteTrustList other = (RemoteTrustList)o
persona == other.persona
}
}

View File

@ -5,6 +5,6 @@ import com.muwire.core.Persona
class TrustEvent extends Event {
Persona persona
TrustLevel level
Persona persona
TrustLevel level
}

View File

@ -11,87 +11,87 @@ import net.i2p.util.ConcurrentHashSet
class TrustService extends Service {
final File persistGood, persistBad
final long persistInterval
final Map<Destination, Persona> good = new ConcurrentHashMap<>()
final Map<Destination, Persona> bad = new ConcurrentHashMap<>()
final Timer timer
TrustService() {}
TrustService(File persistGood, File persistBad, long persistInterval) {
this.persistBad = persistBad
this.persistGood = persistGood
this.persistInterval = persistInterval
this.timer = new Timer("trust-persister",true)
}
void start() {
timer.schedule({load()} as TimerTask, 1)
}
void stop() {
timer.cancel()
}
void load() {
if (persistGood.exists()) {
persistGood.eachLine {
final File persistGood, persistBad
final long persistInterval
final Map<Destination, Persona> good = new ConcurrentHashMap<>()
final Map<Destination, Persona> bad = new ConcurrentHashMap<>()
final Timer timer
TrustService() {}
TrustService(File persistGood, File persistBad, long persistInterval) {
this.persistBad = persistBad
this.persistGood = persistGood
this.persistInterval = persistInterval
this.timer = new Timer("trust-persister",true)
}
void start() {
timer.schedule({load()} as TimerTask, 1)
}
void stop() {
timer.cancel()
}
void load() {
if (persistGood.exists()) {
persistGood.eachLine {
byte [] decoded = Base64.decode(it)
Persona persona = new Persona(new ByteArrayInputStream(decoded))
good.put(persona.destination, persona)
}
}
if (persistBad.exists()) {
persistBad.eachLine {
good.put(persona.destination, persona)
}
}
if (persistBad.exists()) {
persistBad.eachLine {
byte [] decoded = Base64.decode(it)
Persona persona = new Persona(new ByteArrayInputStream(decoded))
bad.put(persona.destination, persona)
}
}
timer.schedule({persist()} as TimerTask, persistInterval, persistInterval)
loaded = true
}
private void persist() {
persistGood.delete()
persistGood.withPrintWriter { writer ->
good.each {k,v ->
writer.println v.toBase64()
}
}
persistBad.delete()
persistBad.withPrintWriter { writer ->
bad.each { k,v ->
writer.println v.toBase64()
}
}
}
TrustLevel getLevel(Destination dest) {
if (good.containsKey(dest))
return TrustLevel.TRUSTED
else if (bad.containsKey(dest))
return TrustLevel.DISTRUSTED
TrustLevel.NEUTRAL
}
void onTrustEvent(TrustEvent e) {
switch(e.level) {
case TrustLevel.TRUSTED:
bad.remove(e.persona.destination)
good.put(e.persona.destination, e.persona)
break
case TrustLevel.DISTRUSTED:
good.remove(e.persona.destination)
bad.put(e.persona.destination, e.persona)
break
case TrustLevel.NEUTRAL:
good.remove(e.persona.destination)
bad.remove(e.persona.destination)
break
}
}
}
}
timer.schedule({persist()} as TimerTask, persistInterval, persistInterval)
loaded = true
}
private void persist() {
persistGood.delete()
persistGood.withPrintWriter { writer ->
good.each {k,v ->
writer.println v.toBase64()
}
}
persistBad.delete()
persistBad.withPrintWriter { writer ->
bad.each { k,v ->
writer.println v.toBase64()
}
}
}
TrustLevel getLevel(Destination dest) {
if (good.containsKey(dest))
return TrustLevel.TRUSTED
else if (bad.containsKey(dest))
return TrustLevel.DISTRUSTED
TrustLevel.NEUTRAL
}
void onTrustEvent(TrustEvent e) {
switch(e.level) {
case TrustLevel.TRUSTED:
bad.remove(e.persona.destination)
good.put(e.persona.destination, e.persona)
break
case TrustLevel.DISTRUSTED:
good.remove(e.persona.destination)
bad.put(e.persona.destination, e.persona)
break
case TrustLevel.NEUTRAL:
good.remove(e.persona.destination)
bad.remove(e.persona.destination)
break
}
}
}

View File

@ -0,0 +1,161 @@
package com.muwire.core.trust
import java.nio.charset.StandardCharsets
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.logging.Level
import com.muwire.core.EventBus
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.UILoadedEvent
import com.muwire.core.connection.Endpoint
import com.muwire.core.connection.I2PConnector
import com.muwire.core.util.DataUtil
import groovy.util.logging.Log
import net.i2p.data.Destination
@Log
class TrustSubscriber {
private final EventBus eventBus
private final I2PConnector i2pConnector
private final MuWireSettings settings
private final Map<Destination, RemoteTrustList> remoteTrustLists = new ConcurrentHashMap<>()
private final Object waitLock = new Object()
private volatile boolean shutdown
private volatile Thread thread
private final ExecutorService updateThreads = Executors.newCachedThreadPool()
TrustSubscriber(EventBus eventBus, I2PConnector i2pConnector, MuWireSettings settings) {
this.eventBus = eventBus
this.i2pConnector = i2pConnector
this.settings = settings
}
void onUILoadedEvent(UILoadedEvent e) {
thread = new Thread({checkLoop()} as Runnable, "trust-subscriber")
thread.setDaemon(true)
thread.start()
}
void stop() {
shutdown = true
thread?.interrupt()
updateThreads.shutdownNow()
}
void onTrustSubscriptionEvent(TrustSubscriptionEvent e) {
if (!e.subscribe) {
remoteTrustLists.remove(e.persona.destination)
} else {
RemoteTrustList trustList = remoteTrustLists.putIfAbsent(e.persona.destination, new RemoteTrustList(e.persona))
trustList?.forceUpdate = true
synchronized(waitLock) {
waitLock.notify()
}
}
}
private void checkLoop() {
try {
while(!shutdown) {
synchronized(waitLock) {
waitLock.wait(60 * 1000)
}
final long now = System.currentTimeMillis()
remoteTrustLists.values().each { trustList ->
if (trustList.status == RemoteTrustList.Status.UPDATING)
return
if (!trustList.forceUpdate &&
now - trustList.timestamp < settings.trustListInterval * 60 * 60 * 1000)
return
trustList.forceUpdate = false
updateThreads.submit(new UpdateJob(trustList))
}
}
} catch (InterruptedException e) {
if (!shutdown)
throw e
}
}
private class UpdateJob implements Runnable {
private final RemoteTrustList trustList
UpdateJob(RemoteTrustList trustList) {
this.trustList = trustList
}
public void run() {
trustList.status = RemoteTrustList.Status.UPDATING
eventBus.publish(new TrustSubscriptionUpdatedEvent(trustList : trustList))
if (check(trustList, System.currentTimeMillis()))
trustList.status = RemoteTrustList.Status.UPDATED
else
trustList.status = RemoteTrustList.Status.UPDATE_FAILED
eventBus.publish(new TrustSubscriptionUpdatedEvent(trustList : trustList))
}
}
private boolean check(RemoteTrustList trustList, long now) {
log.info("fetching trust list from ${trustList.persona.getHumanReadableName()}")
Endpoint endpoint = null
try {
endpoint = i2pConnector.connect(trustList.persona.destination)
OutputStream os = endpoint.getOutputStream()
InputStream is = endpoint.getInputStream()
os.write("TRUST\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
os.flush()
String codeString = DataUtil.readTillRN(is)
int space = codeString.indexOf(' ')
if (space > 0)
codeString = codeString.substring(0,space)
int code = Integer.parseInt(codeString.trim())
if (code != 200) {
log.info("couldn't fetch trust list, code $code")
return false
}
// swallow any headers
String header
while (( header = DataUtil.readTillRN(is)) != "");
DataInputStream dis = new DataInputStream(is)
Set<Persona> good = new HashSet<>()
int nGood = dis.readUnsignedShort()
for (int i = 0; i < nGood; i++) {
Persona p = new Persona(dis)
good.add(p)
}
Set<Persona> bad = new HashSet<>()
int nBad = dis.readUnsignedShort()
for (int i = 0; i < nBad; i++) {
Persona p = new Persona(dis)
bad.add(p)
}
trustList.timestamp = now
trustList.good.clear()
trustList.good.addAll(good)
trustList.bad.clear()
trustList.bad.addAll(bad)
return true
} catch (Exception e) {
log.log(Level.WARNING,"exception fetching trust list from ${trustList.persona.getHumanReadableName()}",e)
return false
} finally {
endpoint?.close()
}
}
}
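The fetch in check() is a plain request/response over an I2P streaming connection; an outline of the exchange as the code above reads it (framing only, no real values):

// Outline of the TRUST exchange as implemented by check(); not runnable on its own.
// -> "TRUST\r\n\r\n"                                   (request)
// <- status line, e.g. "200 OK\r\n"                    (anything but 200 aborts the update)
// <- zero or more header lines, then an empty "\r\n"   (headers are read and discarded)
// <- unsigned short N, then N serialized Persona blobs (trusted list)
// <- unsigned short M, then M serialized Persona blobs (distrusted list)

A TrustSubscriptionUpdatedEvent is published before and after each update attempt, so the UI can show the UPDATING / UPDATED / UPDATE_FAILED status.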

View File

@ -0,0 +1,9 @@
package com.muwire.core.trust
import com.muwire.core.Event
import com.muwire.core.Persona
class TrustSubscriptionEvent extends Event {
Persona persona
boolean subscribe
}

View File

@ -0,0 +1,7 @@
package com.muwire.core.trust
import com.muwire.core.Event
class TrustSubscriptionUpdatedEvent extends Event {
RemoteTrustList trustList
}

View File

@ -3,7 +3,15 @@ package com.muwire.core.update
import java.util.logging.Level
import com.muwire.core.EventBus
import com.muwire.core.InfoHash
import com.muwire.core.MuWireSettings
import com.muwire.core.Persona
import com.muwire.core.download.UIDownloadEvent
import com.muwire.core.files.FileDownloadedEvent
import com.muwire.core.files.FileManager
import com.muwire.core.search.QueryEvent
import com.muwire.core.search.SearchEvent
import com.muwire.core.search.UIResultBatchEvent
import groovy.json.JsonOutput
import groovy.json.JsonSlurper
@ -13,6 +21,7 @@ import net.i2p.client.I2PSessionMuxedListener
import net.i2p.client.SendMessageOptions
import net.i2p.client.datagram.I2PDatagramDissector
import net.i2p.client.datagram.I2PDatagramMaker
import net.i2p.data.Base64
import net.i2p.util.VersionComparator
@Log
@ -21,28 +30,54 @@ class UpdateClient {
final I2PSession session
final String myVersion
final MuWireSettings settings
final FileManager fileManager
final Persona me
private final Timer timer
private long lastUpdateCheckTime
UpdateClient(EventBus eventBus, I2PSession session, String myVersion, MuWireSettings settings) {
private volatile InfoHash updateInfoHash
private volatile String version, signer
private volatile boolean updateDownloading
UpdateClient(EventBus eventBus, I2PSession session, String myVersion, MuWireSettings settings, FileManager fileManager, Persona me) {
this.eventBus = eventBus
this.session = session
this.myVersion = myVersion
this.settings = settings
this.fileManager = fileManager
this.me = me
timer = new Timer("update-client",true)
}
void start() {
session.addMuxedSessionListener(new Listener(), I2PSession.PROTO_DATAGRAM, 2)
timer.schedule({checkUpdate()} as TimerTask, 60000, 60 * 60 * 1000)
}
void stop() {
timer.cancel()
}
void onUIResultBatchEvent(UIResultBatchEvent results) {
if (results.results[0].infohash != updateInfoHash)
return
if (updateDownloading)
return
updateDownloading = true
def file = new File(settings.downloadLocation, results.results[0].name)
def downloadEvent = new UIDownloadEvent(result: results.results[0], sources : results.results[0].sources, target : file)
eventBus.publish(downloadEvent)
}
void onFileDownloadedEvent(FileDownloadedEvent e) {
if (e.downloadedFile.infoHash != updateInfoHash)
return
updateDownloading = false
eventBus.publish(new UpdateDownloadedEvent(version : version, signer : signer))
}
private void checkUpdate() {
final long now = System.currentTimeMillis()
if (lastUpdateCheckTime > 0) {
@ -50,20 +85,20 @@ class UpdateClient {
return
}
lastUpdateCheckTime = now
log.info("checking for update")
def ping = [version : 1, myVersion : myVersion]
ping = JsonOutput.toJson(ping)
def maker = new I2PDatagramMaker(session)
ping = maker.makeI2PDatagram(ping.bytes)
def options = new SendMessageOptions()
def options = new SendMessageOptions()
options.setSendLeaseSet(true)
session.sendMessage(UpdateServers.UPDATE_SERVER, ping, 0, ping.length, I2PSession.PROTO_DATAGRAM, 2, 0, options)
session.sendMessage(UpdateServers.UPDATE_SERVER, ping, 0, ping.length, I2PSession.PROTO_DATAGRAM, 2, 0, options)
}
class Listener implements I2PSessionMuxedListener {
final JsonSlurper slurper = new JsonSlurper()
@Override
@ -76,7 +111,7 @@ class UpdateClient {
log.warning "Received unexpected protocol $proto"
return
}
def payload = session.receiveMessage(msgId)
def dissector = new I2PDatagramDissector()
try {
@ -86,29 +121,53 @@ class UpdateClient {
log.warning("received something not from update server " + sender.toBase32())
return
}
log.info("Received something from update server")
payload = dissector.getPayload()
payload = slurper.parse(payload)
if (payload.version == null) {
log.warning("version missing")
return
}
if (payload.signer == null) {
log.warning("signer missing")
}
if (VersionComparator.comp(myVersion, payload.version) >= 0) {
log.info("no new version available")
return
}
log.info("new version $payload.version available, publishing event")
eventBus.publish(new UpdateAvailableEvent(version : payload.version, signer : payload.signer, infoHash : payload.infoHash))
String infoHash
if (settings.updateType == "jar") {
infoHash = payload.infoHash
} else
infoHash = payload[settings.updateType]
if (!settings.autoDownloadUpdate) {
log.info("new version $payload.version available, publishing event")
eventBus.publish(new UpdateAvailableEvent(version : payload.version, signer : payload.signer, infoHash : infoHash))
} else {
log.info("new version $payload.version available")
updateInfoHash = new InfoHash(Base64.decode(infoHash))
if (fileManager.rootToFiles.containsKey(updateInfoHash))
eventBus.publish(new UpdateDownloadedEvent(version : payload.version, signer : payload.signer))
else {
updateDownloading = false
version = payload.version
signer = payload.signer
log.info("starting search for new version hash $payload.infoHash")
def searchEvent = new SearchEvent(searchHash : updateInfoHash.getRoot(), uuid : UUID.randomUUID(), oobInfohash : true)
def queryEvent = new QueryEvent(searchEvent : searchEvent, firstHop : true, replyTo : me.destination,
receivedOn : me.destination, originator : me)
eventBus.publish(queryEvent)
}
}
} catch (Exception e) {
log.log(Level.WARNING,"Invalid datagram",e)
}
@ -127,6 +186,6 @@ class UpdateClient {
public void errorOccurred(I2PSession session, String message, Throwable error) {
log.log(Level.SEVERE, message, error)
}
}
}
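The update check is a datagram exchange: a small JSON ping is sent to the update server once an hour, and the reply carries the new version, the signer, and an infohash per package type (payload.infoHash for the "jar" type, otherwise the value under settings.updateType). Hypothetical payloads with the field names used above, values invented:

import groovy.json.JsonOutput
// Hypothetical update-check payloads; field names match the code above, values are invented.
def ping = [version: 1, myVersion: '0.4.14']
println JsonOutput.toJson(ping)

def reply = [version: '0.4.15', signer: '<signer name>', infoHash: '<base64 root hash>']
println JsonOutput.toJson(reply)

With autoDownloadUpdate enabled the client no longer just publishes UpdateAvailableEvent: it searches for the announced infohash, downloads the first matching result, and publishes UpdateDownloadedEvent once FileDownloadedEvent confirms the file.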

View File

@ -0,0 +1,8 @@
package com.muwire.core.update
import com.muwire.core.Event
class UpdateDownloadedEvent extends Event {
String version
String signer
}

View File

@ -3,5 +3,5 @@ package com.muwire.core.update
import net.i2p.data.Destination
class UpdateServers {
static final Destination UPDATE_SERVER = new Destination("pSWieSRB3czCl3Zz4WpKp4Z8tjv-05zbogRDS7SEnKcSdWOupVwjzQ92GsgQh1VqgoSRk1F8dpZOnHxxz5HFy9D7ri0uFdkMyXdSKoB7IgkkvCfTAyEmeaPwSYnurF3Zk7u286E7YG2rZkQZgJ77tow7ZS0mxFB7Z0Ti-VkZ9~GeGePW~howwNm4iSQACZA0DyTpI8iv5j4I0itPCQRgaGziob~Vfvjk49nd8N4jtaDGo9cEcafikVzQ2OgBgYWL6LRbrrItwuGqsDvITUHWaElUYIDhRQYUq8gYiUA6rwAJputfhFU0J7lIxFR9vVY7YzRvcFckfr0DNI4VQVVlPnRPkUxQa--BlldMaCIppWugjgKLwqiSiHywKpSMlBWgY2z1ry4ueEBo1WEP-mEf88wRk4cFQBCKtctCQnIG2GsnATqTl-VGUAsuzeNWZiFSwXiTy~gQ094yWx-K06fFZUDt4CMiLZVhGlixiInD~34FCRC9LVMtFcqiFB2M-Ql2AAAA")
static final Destination UPDATE_SERVER = new Destination("VJYAiCPZHNLraWvLkeRLxRiT4PHAqNqRO1nH240r7u1noBw8Pa~-lJOhKR7CccPkEN8ejSi4H6XjqKYLC8BKLVLeOgnAbedUVx81MV7DETPDdPEGV4RVu6YDFri7-tJOeqauGHxtlXT44YWuR69xKrTG3u4~iTWgxKnlBDht9Q3aVpSPFD2KqEizfVxolqXI0zmAZ2xMi8jfl0oe4GbgHrD9hR2FYj6yKfdqcUgHVobY4kDdJt-u31QqwWdsQMEj8Y3tR2XcNaITEVPiAjoKgBrYwB4jddWPNaT4XdHz76d9p9Iqes7dhOKq3OKpk6kg-bfIKiEOiA1mY49fn5h8pNShTqV7QBhh4CE4EDT3Szl~WsLdrlHUKJufSi7erEMh3coF7HORpF1wah2Xw7q470t~b8dKGKi7N7xQsqhGruDm66PH9oE9Kt9WBVBq2zORdPRtRM61I7EnrwDlbOkL0y~XpvQ3JKUQKdBQ3QsOJt8CHlhHHXMMbvqhntR61RSDBQAEAAcAAA==")
}

View File

@ -2,4 +2,5 @@ package com.muwire.core.upload
class ContentRequest extends Request {
Range range
int have
}

View File

@ -5,34 +5,58 @@ import java.nio.channels.FileChannel
import java.nio.charset.StandardCharsets
import java.nio.file.Files
import java.nio.file.StandardOpenOption
import java.util.stream.Collectors
import com.muwire.core.Persona
import com.muwire.core.connection.Endpoint
import com.muwire.core.mesh.Mesh
import com.muwire.core.util.DataUtil
import net.i2p.data.Destination
class ContentUploader extends Uploader {
private final File file
private final ContentRequest request
ContentUploader(File file, ContentRequest request, Endpoint endpoint) {
private final Mesh mesh
private final int pieceSize
ContentUploader(File file, ContentRequest request, Endpoint endpoint, Mesh mesh, int pieceSize) {
super(endpoint)
this.file = file
this.request = request
this.mesh = mesh
this.pieceSize = pieceSize
}
@Override
void respond() {
OutputStream os = endpoint.getOutputStream()
Range range = request.getRange()
if (range.start >= file.length() || range.end >= file.length()) {
os.write("416 Range Not Satisfiable\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
boolean satisfiable = true
final long length = file.length()
if (range.start >= length || range.end >= length)
satisfiable = false
if (satisfiable) {
int startPiece = range.start / (0x1 << pieceSize)
int endPiece = range.end / (0x1 << pieceSize)
for (int i = startPiece; i <= endPiece; i++)
satisfiable &= mesh.pieces.isDownloaded(i)
}
if (!satisfiable) {
os.write("416 Range Not Satisfiable\r\n".getBytes(StandardCharsets.US_ASCII))
writeMesh(request.downloader)
os.write("\r\n".getBytes(StandardCharsets.US_ASCII))
os.flush()
return
}
os.write("200 OK\r\n".getBytes(StandardCharsets.US_ASCII))
os.write("Content-Range: $range.start-$range.end\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
os.write("Content-Range: $range.start-$range.end\r\n".getBytes(StandardCharsets.US_ASCII))
writeMesh(request.downloader)
os.write("\r\n".getBytes(StandardCharsets.US_ASCII))
FileChannel channel
FileChannel channel = null
try {
channel = Files.newByteChannel(file.toPath(), EnumSet.of(StandardOpenOption.READ))
mapped = channel.map(FileChannel.MapMode.READ_ONLY, range.start, range.end - range.start + 1)
@ -48,6 +72,21 @@ class ContentUploader extends Uploader {
} finally {
try {channel?.close() } catch (IOException ignored) {}
endpoint.getOutputStream().flush()
synchronized(this) {
DataUtil.tryUnmap(mapped)
mapped = null
}
}
}
private void writeMesh(Persona toExclude) {
String xHave = DataUtil.encodeXHave(mesh.pieces.getDownloaded(), mesh.pieces.nPieces)
endpoint.getOutputStream().write("X-Have: $xHave\r\n".getBytes(StandardCharsets.US_ASCII))
Set<Persona> sources = mesh.getRandom(9, toExclude)
if (!sources.isEmpty()) {
String xAlts = sources.stream().map({ it.toBase64() }).collect(Collectors.joining(","))
endpoint.getOutputStream().write("X-Alt: $xAlts\r\n".getBytes(StandardCharsets.US_ASCII))
}
}
@ -70,4 +109,18 @@ class ContentUploader extends Uploader {
request.downloader.getHumanReadableName()
}
@Override
public int getDonePieces() {
return request.have;
}
@Override
public int getTotalPieces() {
return mesh.pieces.nPieces;
}
@Override
public long getTotalSize() {
return file.length();
}
}
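Both the 200 and the 416 paths now call writeMesh(), so every response advertises the uploader's own piece availability (X-Have) and up to nine alternate sources (X-Alt), and a range is only satisfiable if every piece it touches has been downloaded (pieceSize here is the power-of-two exponent used in `0x1 << pieceSize`). A hypothetical satisfiable response, header values invented:

// Hypothetical 200 response as produced by respond() + writeMesh(); all values invented.
// 200 OK\r\n
// Content-Range: 0-131071\r\n
// X-Have: <base64 bitfield of the uploader's downloaded pieces>\r\n
// X-Alt: <base64 persona>,<base64 persona>\r\n     (omitted when no other sources are known)
// \r\n
// ...131072 bytes of file data follow...

An unsatisfiable range gets "416 Range Not Satisfiable\r\n" followed by the same X-Have/X-Alt headers and the closing blank line.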

View File

@ -11,19 +11,19 @@ import net.i2p.data.Base64
class HashListUploader extends Uploader {
private final InfoHash infoHash
private final HashListRequest request
HashListUploader(Endpoint endpoint, InfoHash infoHash, HashListRequest request) {
super(endpoint)
this.infoHash = infoHash
mapped = ByteBuffer.wrap(infoHash.getHashList())
this.request = request
}
void respond() {
OutputStream os = endpoint.getOutputStream()
os.write("200 OK\r\n".getBytes(StandardCharsets.US_ASCII))
os.write("Content-Range: 0-${mapped.remaining()}\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
byte[]tmp = new byte[0x1 << 13]
while(mapped.hasRemaining()) {
int start = mapped.position()
@ -50,6 +50,19 @@ class HashListUploader extends Uploader {
public String getDownloader() {
request.downloader.getHumanReadableName()
}
@Override
public int getDonePieces() {
return 0;
}
@Override
public int getTotalPieces() {
return 1;
}
@Override
public long getTotalSize() {
return -1;
}
}

View File

@ -2,7 +2,7 @@ package com.muwire.core.upload
class Range {
final long start, end
Range(long start, long end) {
this.start = start
this.end = end

View File

@ -5,27 +5,28 @@ import java.nio.charset.StandardCharsets
import com.muwire.core.Constants
import com.muwire.core.InfoHash
import com.muwire.core.Persona
import com.muwire.core.util.DataUtil
import groovy.util.logging.Log
import net.i2p.data.Base64
@Log
class Request {
private static final byte R = "\r".getBytes(StandardCharsets.US_ASCII)[0]
private static final byte N = "\n".getBytes(StandardCharsets.US_ASCII)[0]
InfoHash infoHash
Persona downloader
Map<String, String> headers
static Request parseContentRequest(InfoHash infoHash, InputStream is) throws IOException {
Map<String, String> headers = parseHeaders(is)
if (!headers.containsKey("Range"))
throw new IOException("Range header not found")
String range = headers.get("Range").trim()
String[] split = range.split("-")
if (split.length != 2)
@ -38,20 +39,26 @@ class Request {
} catch (NumberFormatException nfe) {
throw new IOException(nfe)
}
if (start < 0 || end < start)
throw new IOException("Invalid range $start - $end")
Persona downloader = null
if (headers.containsKey("X-Persona")) {
def encoded = headers["X-Persona"].trim()
def decoded = Base64.decode(encoded)
downloader = new Persona(new ByteArrayInputStream(decoded))
}
new ContentRequest( infoHash : infoHash, range : new Range(start, end),
headers : headers, downloader : downloader)
int have = 0
if (headers.containsKey("X-Have")) {
def encoded = headers["X-Have"].trim()
have = DataUtil.decodeXHave(encoded).size()
}
new ContentRequest( infoHash : infoHash, range : new Range(start, end),
headers : headers, downloader : downloader, have : have)
}
static Request parseHashListRequest(InfoHash infoHash, InputStream is) throws IOException {
Map<String,String> headers = parseHeaders(is)
Persona downloader = null
@ -62,7 +69,7 @@ class Request {
}
new HashListRequest(infoHash : infoHash, headers : headers, downloader : downloader)
}
private static Map<String, String> parseHeaders(InputStream is) {
Map<String,String> headers = new HashMap<>()
byte [] tmp = new byte[Constants.MAX_HEADER_SIZE]
@ -74,7 +81,7 @@ class Request {
byte read = is.read()
if (read == -1)
throw new IOException("Stream closed")
if (!r && read == N)
throw new IOException("Received N before R")
if (read == R) {
@ -83,7 +90,7 @@ class Request {
r = true
continue
}
if (r && !n) {
if (read != N)
throw new IOException("R not followed by N")
@ -94,13 +101,13 @@ class Request {
throw new IOException("Header too long")
tmp[idx++] = read
}
if (idx == 0)
break
String header = new String(tmp, 0, idx, StandardCharsets.US_ASCII)
log.fine("Read header $header")
int keyIdx = header.indexOf(":")
if (keyIdx < 1)
throw new IOException("Header key not found")
@ -112,5 +119,5 @@ class Request {
}
headers
}
}
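A content request therefore consists of header lines terminated by an empty line: Range is mandatory, X-Persona optionally identifies the downloader, and the new X-Have advertises which pieces the downloader already has (its decoded piece count ends up in ContentRequest.have, which UploadManager below turns into a SourceDiscoveredEvent). A hypothetical request as parseContentRequest() would accept it, values invented:

// Hypothetical content request headers parsed by Request.parseContentRequest(); values invented.
// Range: 0-131071\r\n
// X-Persona: <base64-encoded Persona of the downloader>\r\n
// X-Have: <base64 bitfield of pieces the downloader already has>\r\n
// \r\n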

View File

@ -6,7 +6,12 @@ import com.muwire.core.EventBus
import com.muwire.core.InfoHash
import com.muwire.core.SharedFile
import com.muwire.core.connection.Endpoint
import com.muwire.core.download.DownloadManager
import com.muwire.core.download.Downloader
import com.muwire.core.download.SourceDiscoveredEvent
import com.muwire.core.files.FileManager
import com.muwire.core.mesh.Mesh
import com.muwire.core.mesh.MeshManager
import groovy.util.logging.Log
import net.i2p.data.Base64
@ -15,14 +20,19 @@ import net.i2p.data.Base64
public class UploadManager {
private final EventBus eventBus
private final FileManager fileManager
private final MeshManager meshManager
private final DownloadManager downloadManager
public UploadManager() {}
public UploadManager(EventBus eventBus, FileManager fileManager) {
public UploadManager(EventBus eventBus, FileManager fileManager,
MeshManager meshManager, DownloadManager downloadManager) {
this.eventBus = eventBus
this.fileManager = fileManager
this.meshManager = meshManager
this.downloadManager = downloadManager
}
public void processGET(Endpoint e) throws IOException {
byte [] infoHashStringBytes = new byte[44]
DataInputStream dis = new DataInputStream(e.getInputStream())
@ -44,8 +54,10 @@ public class UploadManager {
log.info("Responding to upload request for root $infoHashString")
byte [] infoHashRoot = Base64.decode(infoHashString)
InfoHash infoHash = new InfoHash(infoHashRoot)
Set<SharedFile> sharedFiles = fileManager.getSharedFiles(infoHashRoot)
if (sharedFiles == null || sharedFiles.isEmpty()) {
Downloader downloader = downloadManager.downloaders.get(infoHash)
if (downloader == null && (sharedFiles == null || sharedFiles.isEmpty())) {
log.info "file not found"
e.getOutputStream().write("404 File Not Found\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
e.getOutputStream().flush()
@ -61,13 +73,31 @@ public class UploadManager {
return
}
Request request = Request.parseContentRequest(new InfoHash(infoHashRoot), e.getInputStream())
ContentRequest request = Request.parseContentRequest(infoHash, e.getInputStream())
if (request.downloader != null && request.downloader.destination != e.destination) {
log.info("Downloader persona doesn't match their destination")
e.close()
return
}
Uploader uploader = new ContentUploader(sharedFiles.iterator().next().file, request, e)
if (request.have > 0)
eventBus.publish(new SourceDiscoveredEvent(infoHash : request.infoHash, source : request.downloader))
Mesh mesh
File file
int pieceSize
if (downloader != null) {
mesh = meshManager.get(infoHash)
file = downloader.incompleteFile
pieceSize = downloader.pieceSizePow2
} else {
SharedFile sharedFile = sharedFiles.iterator().next();
mesh = meshManager.getOrCreate(request.infoHash, sharedFile.NPieces, false)
file = sharedFile.file
pieceSize = sharedFile.pieceSize
}
Uploader uploader = new ContentUploader(file, request, e, mesh, pieceSize)
eventBus.publish(new UploadEvent(uploader : uploader))
try {
uploader.respond()
@ -76,24 +106,26 @@ public class UploadManager {
}
}
}
public void processHashList(Endpoint e) {
byte [] infoHashStringBytes = new byte[44]
DataInputStream dis = new DataInputStream(e.getInputStream())
dis.readFully(infoHashStringBytes)
String infoHashString = new String(infoHashStringBytes, StandardCharsets.US_ASCII)
log.info("Responding to hashlist request for root $infoHashString")
byte [] infoHashRoot = Base64.decode(infoHashString)
InfoHash infoHash = new InfoHash(infoHashRoot)
Downloader downloader = downloadManager.downloaders.get(infoHash)
Set<SharedFile> sharedFiles = fileManager.getSharedFiles(infoHashRoot)
if (sharedFiles == null || sharedFiles.isEmpty()) {
if (downloader == null && (sharedFiles == null || sharedFiles.isEmpty())) {
log.info "file not found"
e.getOutputStream().write("404 File Not Found\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
e.getOutputStream().flush()
e.close()
return
}
byte [] rn = new byte[2]
dis.readFully(rn)
if (rn != "\r\n".getBytes(StandardCharsets.US_ASCII)) {
@ -101,21 +133,38 @@ public class UploadManager {
e.close()
return
}
Request request = Request.parseHashListRequest(new InfoHash(infoHashRoot), e.getInputStream())
Request request = Request.parseHashListRequest(infoHash, e.getInputStream())
if (request.downloader != null && request.downloader.destination != e.destination) {
log.info("Downloader persona doesn't match their destination")
e.close()
return
}
Uploader uploader = new HashListUploader(e, sharedFiles.iterator().next().infoHash, request)
InfoHash fullInfoHash
if (downloader == null) {
fullInfoHash = sharedFiles.iterator().next().infoHash
} else {
byte [] hashList = downloader.getInfoHash().getHashList()
if (hashList != null && hashList.length > 0)
fullInfoHash = downloader.getInfoHash()
else {
log.info("infohash not found in downloader")
e.getOutputStream().write("404 File Not Found\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
e.getOutputStream().flush()
e.close()
return
}
}
Uploader uploader = new HashListUploader(e, fullInfoHash, request)
eventBus.publish(new UploadEvent(uploader : uploader))
try {
uploader.respond()
} finally {
eventBus.publish(new UploadFinishedEvent(uploader : uploader))
}
// proceed with content
while(true) {
byte[] get = new byte[4]
@ -130,8 +179,10 @@ public class UploadManager {
log.info("Responding to upload request for root $infoHashString")
infoHashRoot = Base64.decode(infoHashString)
infoHash = new InfoHash(infoHashRoot)
sharedFiles = fileManager.getSharedFiles(infoHashRoot)
if (sharedFiles == null || sharedFiles.isEmpty()) {
downloader = downloadManager.downloaders.get(infoHash)
if (downloader == null && (sharedFiles == null || sharedFiles.isEmpty())) {
log.info "file not found"
e.getOutputStream().write("404 File Not Found\r\n\r\n".getBytes(StandardCharsets.US_ASCII))
e.getOutputStream().flush()
@ -153,7 +204,25 @@ public class UploadManager {
e.close()
return
}
uploader = new ContentUploader(sharedFiles.iterator().next().file, request, e)
if (request.have > 0)
eventBus.publish(new SourceDiscoveredEvent(infoHash : request.infoHash, source : request.downloader))
Mesh mesh
File file
int pieceSize
if (downloader != null) {
mesh = meshManager.get(infoHash)
file = downloader.incompleteFile
pieceSize = downloader.pieceSizePow2
} else {
SharedFile sharedFile = sharedFiles.iterator().next();
mesh = meshManager.getOrCreate(request.infoHash, sharedFile.NPieces, false)
file = sharedFile.file
pieceSize = sharedFile.pieceSize
}
uploader = new ContentUploader(file, request, e, mesh, pieceSize)
eventBus.publish(new UploadEvent(uploader : uploader))
try {
uploader.respond()

View File

@ -11,25 +11,31 @@ import com.muwire.core.connection.Endpoint
abstract class Uploader {
protected final Endpoint endpoint
protected ByteBuffer mapped
Uploader(Endpoint endpoint) {
this.endpoint = endpoint
}
abstract void respond()
public synchronized int getPosition() {
if (mapped == null)
return -1
mapped.position()
}
abstract String getName();
/**
* @return an integer between 0 and 100
*/
abstract int getProgress();
abstract String getDownloader();
abstract int getDonePieces();
abstract int getTotalPieces();
abstract long getTotalSize();
}

View File

@ -1,82 +0,0 @@
package com.muwire.core.util
import java.nio.charset.StandardCharsets
import com.muwire.core.Constants
class DataUtil {
private final static int MAX_SHORT = (0x1 << 16) - 1
static void writeUnsignedShort(int value, OutputStream os) {
if (value > MAX_SHORT || value < 0)
throw new IllegalArgumentException("$value invalid")
byte lsb = (byte) (value & 0xFF)
byte msb = (byte) (value >> 8)
os.write(msb)
os.write(lsb)
}
private final static int MAX_HEADER = 0x7FFFFF
static void packHeader(int length, byte [] header) {
if (header.length != 3)
throw new IllegalArgumentException("header length $header.length")
if (length < 0 || length > MAX_HEADER)
throw new IllegalArgumentException("length $length")
header[2] = (byte) (length & 0xFF)
header[1] = (byte) ((length >> 8) & 0xFF)
header[0] = (byte) ((length >> 16) & 0x7F)
}
static int readLength(byte [] header) {
if (header.length != 3)
throw new IllegalArgumentException("header length $header.length")
return (((int)(header[0] & 0x7F)) << 16) |
(((int)(header[1] & 0xFF) << 8)) |
((int)header[2] & 0xFF)
}
static String readi18nString(byte [] encoded) {
if (encoded.length < 2)
throw new IllegalArgumentException("encoding too short $encoded.length")
int length = ((encoded[0] & 0xFF) << 8) | (encoded[1] & 0xFF)
if (encoded.length != length + 2)
throw new IllegalArgumentException("encoding doesn't match length, expected $length found $encoded.length")
byte [] string = new byte[length]
System.arraycopy(encoded, 2, string, 0, length)
new String(string, StandardCharsets.UTF_8)
}
static byte[] encodei18nString(String string) {
byte [] utf8 = string.getBytes(StandardCharsets.UTF_8)
if (utf8.length > Short.MAX_VALUE)
throw new IllegalArgumentException("String in utf8 too long $utf8.length")
def baos = new ByteArrayOutputStream()
def daos = new DataOutputStream(baos)
daos.writeShort((short) utf8.length)
daos.write(utf8)
daos.close()
baos.toByteArray()
}
public static String readTillRN(InputStream is) {
def baos = new ByteArrayOutputStream()
while(baos.size() < (Constants.MAX_HEADER_SIZE)) {
byte read = is.read()
if (read == -1)
throw new IOException()
if (read == '\r') {
if (is.read() != '\n')
throw new IOException("invalid header")
break
}
baos.write(read)
}
new String(baos.toByteArray(), StandardCharsets.US_ASCII)
}
}

View File

@ -16,10 +16,10 @@ class JULLog extends Log {
I2P_TO_JUL.put(Log.ERROR, Level.SEVERE)
I2P_TO_JUL.put(Log.CRIT, Level.SEVERE)
}
private final Logger delegate
private final Level level
public JULLog(Class<?> cls) {
super(cls)
delegate = Logger.getLogger(cls.getName())
@ -31,7 +31,7 @@ class JULLog extends Log {
delegate = Logger.getLogger(name)
level = findLevel(delegate)
}
private static Level findLevel(Logger log) {
while (log.getLevel() == null)
log = log.getParent()

View File

@ -5,14 +5,14 @@ import net.i2p.util.Log
import net.i2p.util.LogManager
class MuWireLogManager extends LogManager {
private static final Map<Class<?>, Log> classLogs = new HashMap<>()
private static final Map<String, Log> stringLogs = new HashMap<>()
MuWireLogManager() {
super(I2PAppContext.getGlobalContext())
}
@Override
public synchronized Log getLog(Class<?> cls, String name) {
@ -24,7 +24,7 @@ class MuWireLogManager extends LogManager {
}
return rv
}
Log rv = stringLogs.get(name)
if (rv == null) {
rv = new JULLog(name)
@ -32,5 +32,5 @@ class MuWireLogManager extends LogManager {
}
rv
}
}

View File

@ -0,0 +1,11 @@
package com.muwire.core;
import net.i2p.crypto.SigType;
public class Constants {
public static final byte PERSONA_VERSION = (byte)1;
public static final SigType SIG_TYPE = SigType.EdDSA_SHA512_Ed25519;
public static final int MAX_HEADER_SIZE = 0x1 << 14;
public static final int MAX_HEADERS = 16;
}

Some files were not shown because too many files have changed in this diff.