/*
 * This file Copyright (C) Mnemosyne LLC
 *
 * This file is licensed by the GPL version 2. Works owned by the
 * Transmission project are granted a special exemption to clause 2(b)
 * so that the bulk of its code can remain under the MIT license.
 * This exemption does not extend to derived works not owned by
 * the Transmission project.
 *
 * $Id: bandwidth.c 13361 2012-07-01 02:17:35Z jordan $
 */

#include <assert.h>
#include <limits.h>
#include <string.h> /* memset() */

#include "transmission.h"
#include "bandwidth.h"
#include "crypto.h" /* tr_cryptoWeakRandInt() */
#include "peer-io.h"
#include "utils.h"

#define dbgmsg( ... ) \
    do { \
        if( tr_deepLoggingIsActive( ) ) \
            tr_deepLog( __FILE__, __LINE__, NULL, __VA_ARGS__ ); \
    } while( 0 )

/***
****
***/

/**
 * Compute r's transfer rate, in bytes per second, averaged over the most
 * recent interval_msec milliseconds ending at `now` (pass 0 for "now").
 *
 * The result is memoized in r->cache_val / r->cache_time so repeated
 * queries within the same millisecond are free.  r is declared const so
 * callers with a const bandwidth can still read speeds; the cache fields
 * are written through a casted alias, making the cache-only mutation
 * explicit at the two assignment sites.
 */
static unsigned int
getSpeed_Bps( const struct bratecontrol * r, unsigned int interval_msec, uint64_t now )
{
    if( !now )
        now = tr_time_msec();

    if( now != r->cache_time )
    {
        int i = r->newest;
        uint64_t bytes = 0;
        const uint64_t cutoff = now - interval_msec;
        struct bratecontrol * rvolatile = (struct bratecontrol*) r;

        /* walk backwards through the circular history, summing the byte
         * counts of every bucket newer than the cutoff */
        for( ;; )
        {
            if( r->transfers[i].date <= cutoff )
                break;

            bytes += r->transfers[i].size;

            if( --i == -1 ) i = HISTORY_SIZE - 1; /* circular history */
            if( i == r->newest ) break; /* we've come all the way around */
        }

        /* bytes-per-interval -> bytes-per-second */
        rvolatile->cache_val = (unsigned int)(( bytes * 1000u ) / interval_msec);
        rvolatile->cache_time = now;
    }

    return r->cache_val;
}

/**
 * Record that `size` bytes were transferred at time `now` (msec).
 *
 * Transfers within GRANULARITY_MSEC of the newest bucket are coalesced
 * into it; otherwise the ring advances to a fresh bucket.  Either way the
 * speed cache is invalidated so the next getSpeed_Bps() recomputes.
 */
static void
bytesUsed( const uint64_t now, struct bratecontrol * r, size_t size )
{
    if( r->transfers[r->newest].date + GRANULARITY_MSEC >= now )
        r->transfers[r->newest].size += size;
    else
    {
        /* advance to the next slot in the circular history */
        if( ++r->newest == HISTORY_SIZE ) r->newest = 0;
        r->transfers[r->newest].date = now;
        r->transfers[r->newest].size = size;
    }

    /* invalidate cache_val*/
    r->cache_time = 0;
}

/******
81******* 82******* 83******/ 84 85static int 86compareBandwidth( const void * va, const void * vb ) 87{ 88 const tr_bandwidth * a = va; 89 const tr_bandwidth * b = vb; 90 return a->uniqueKey - b->uniqueKey; 91} 92 93/*** 94**** 95***/ 96 97void 98tr_bandwidthConstruct( tr_bandwidth * b, tr_session * session, tr_bandwidth * parent ) 99{ 100 static unsigned int uniqueKey = 0; 101 102 b->session = session; 103 b->children = TR_PTR_ARRAY_INIT; 104 b->magicNumber = BANDWIDTH_MAGIC_NUMBER; 105 b->uniqueKey = uniqueKey++; 106 b->band[TR_UP].honorParentLimits = true; 107 b->band[TR_DOWN].honorParentLimits = true; 108 tr_bandwidthSetParent( b, parent ); 109} 110 111void 112tr_bandwidthDestruct( tr_bandwidth * b ) 113{ 114 assert( tr_isBandwidth( b ) ); 115 116 tr_bandwidthSetParent( b, NULL ); 117 tr_ptrArrayDestruct( &b->children, NULL ); 118 119 memset( b, ~0, sizeof( tr_bandwidth ) ); 120} 121 122/*** 123**** 124***/ 125 126void 127tr_bandwidthSetParent( tr_bandwidth * b, 128 tr_bandwidth * parent ) 129{ 130 assert( tr_isBandwidth( b ) ); 131 assert( b != parent ); 132 133 if( b->parent ) 134 { 135 void * removed; 136 137 assert( tr_isBandwidth( b->parent ) ); 138 139 removed = tr_ptrArrayRemoveSorted( &b->parent->children, b, compareBandwidth ); 140 assert( removed == b ); 141 assert( tr_ptrArrayFindSorted( &b->parent->children, b, compareBandwidth ) == NULL ); 142 143 b->parent = NULL; 144 } 145 146 if( parent ) 147 { 148 assert( tr_isBandwidth( parent ) ); 149 assert( parent->parent != b ); 150 151 assert( tr_ptrArrayFindSorted( &parent->children, b, compareBandwidth ) == NULL ); 152 tr_ptrArrayInsertSorted( &parent->children, b, compareBandwidth ); 153 assert( tr_ptrArrayFindSorted( &parent->children, b, compareBandwidth ) == b ); 154 b->parent = parent; 155 } 156} 157 158/*** 159**** 160***/ 161 162static void 163allocateBandwidth( tr_bandwidth * b, 164 tr_priority_t parent_priority, 165 tr_direction dir, 166 unsigned int period_msec, 167 tr_ptrArray * peer_pool ) 
168{ 169 const tr_priority_t priority = MAX( parent_priority, b->priority ); 170 171 assert( tr_isBandwidth( b ) ); 172 assert( tr_isDirection( dir ) ); 173 174 /* set the available bandwidth */ 175 if( b->band[dir].isLimited ) 176 { 177 const uint64_t nextPulseSpeed = b->band[dir].desiredSpeed_Bps; 178 b->band[dir].bytesLeft = (unsigned int)( nextPulseSpeed * period_msec ) / 1000u; 179 } 180 181 /* add this bandwidth's peer, if any, to the peer pool */ 182 if( b->peer != NULL ) { 183 b->peer->priority = priority; 184 tr_ptrArrayAppend( peer_pool, b->peer ); 185 } 186 187 /* traverse & repeat for the subtree */ 188 if( 1 ) { 189 int i; 190 struct tr_bandwidth ** children = (struct tr_bandwidth**) tr_ptrArrayBase( &b->children ); 191 const int n = tr_ptrArraySize( &b->children ); 192 for( i=0; i<n; ++i ) 193 allocateBandwidth( children[i], priority, dir, period_msec, peer_pool ); 194 } 195} 196 197static void 198phaseOne( tr_ptrArray * peerArray, tr_direction dir ) 199{ 200 int n; 201 int peerCount = tr_ptrArraySize( peerArray ); 202 struct tr_peerIo ** peers = (struct tr_peerIo**) tr_ptrArrayBase( peerArray ); 203 204 /* First phase of IO. Tries to distribute bandwidth fairly to keep faster 205 * peers from starving the others. Loop through the peers, giving each a 206 * small chunk of bandwidth. Keep looping until we run out of bandwidth 207 * and/or peers that can use it */ 208 n = peerCount; 209 dbgmsg( "%d peers to go round-robin for %s", n, (dir==TR_UP?"upload":"download") ); 210 while( n > 0 ) 211 { 212 const int i = tr_cryptoWeakRandInt( n ); /* pick a peer at random */ 213 214 /* value of 3000 bytes chosen so that when using uTP we'll send a full-size 215 * frame right away and leave enough buffered data for the next frame to go 216 * out in a timely manner. 
*/ 217 const size_t increment = 3000; 218 219 const int bytesUsed = tr_peerIoFlush( peers[i], dir, increment ); 220 221 dbgmsg( "peer #%d of %d used %d bytes in this pass", i, n, bytesUsed ); 222 223 if( bytesUsed != (int)increment ) { 224 /* peer is done writing for now; move it to the end of the list */ 225 tr_peerIo * pio = peers[i]; 226 peers[i] = peers[n-1]; 227 peers[n-1] = pio; 228 --n; 229 } 230 } 231} 232 233void 234tr_bandwidthAllocate( tr_bandwidth * b, 235 tr_direction dir, 236 unsigned int period_msec ) 237{ 238 int i, peerCount; 239 tr_ptrArray tmp = TR_PTR_ARRAY_INIT; 240 tr_ptrArray low = TR_PTR_ARRAY_INIT; 241 tr_ptrArray high = TR_PTR_ARRAY_INIT; 242 tr_ptrArray normal = TR_PTR_ARRAY_INIT; 243 struct tr_peerIo ** peers; 244 245 /* allocateBandwidth() is a helper function with two purposes: 246 * 1. allocate bandwidth to b and its subtree 247 * 2. accumulate an array of all the peerIos from b and its subtree. */ 248 allocateBandwidth( b, TR_PRI_LOW, dir, period_msec, &tmp ); 249 peers = (struct tr_peerIo**) tr_ptrArrayBase( &tmp ); 250 peerCount = tr_ptrArraySize( &tmp ); 251 252 for( i=0; i<peerCount; ++i ) 253 { 254 tr_peerIo * io = peers[i]; 255 tr_peerIoRef( io ); 256 257 tr_peerIoFlushOutgoingProtocolMsgs( io ); 258 259 switch( io->priority ) { 260 case TR_PRI_HIGH: tr_ptrArrayAppend( &high, io ); /* fall through */ 261 case TR_PRI_NORMAL: tr_ptrArrayAppend( &normal, io ); /* fall through */ 262 default: tr_ptrArrayAppend( &low, io ); 263 } 264 } 265 266 /* First phase of IO. Tries to distribute bandwidth fairly to keep faster 267 * peers from starving the others. Loop through the peers, giving each a 268 * small chunk of bandwidth. Keep looping until we run out of bandwidth 269 * and/or peers that can use it */ 270 phaseOne( &high, dir ); 271 phaseOne( &normal, dir ); 272 phaseOne( &low, dir ); 273 274 /* Second phase of IO. To help us scale in high bandwidth situations, 275 * enable on-demand IO for peers with bandwidth left to burn. 
276 * This on-demand IO is enabled until (1) the peer runs out of bandwidth, 277 * or (2) the next tr_bandwidthAllocate() call, when we start over again. */ 278 for( i=0; i<peerCount; ++i ) 279 tr_peerIoSetEnabled( peers[i], dir, tr_peerIoHasBandwidthLeft( peers[i], dir ) ); 280 281 for( i=0; i<peerCount; ++i ) 282 tr_peerIoUnref( peers[i] ); 283 284 /* cleanup */ 285 tr_ptrArrayDestruct( &normal, NULL ); 286 tr_ptrArrayDestruct( &high, NULL ); 287 tr_ptrArrayDestruct( &low, NULL ); 288 tr_ptrArrayDestruct( &tmp, NULL ); 289} 290 291void 292tr_bandwidthSetPeer( tr_bandwidth * b, tr_peerIo * peer ) 293{ 294 assert( tr_isBandwidth( b ) ); 295 assert( ( peer == NULL ) || tr_isPeerIo( peer ) ); 296 297 b->peer = peer; 298} 299 300/*** 301**** 302***/ 303 304static unsigned int 305bandwidthClamp( const tr_bandwidth * b, 306 uint64_t now, 307 tr_direction dir, 308 unsigned int byteCount ) 309{ 310 assert( tr_isBandwidth( b ) ); 311 assert( tr_isDirection( dir ) ); 312 313 if( b ) 314 { 315 if( b->band[dir].isLimited ) 316 { 317 byteCount = MIN( byteCount, b->band[dir].bytesLeft ); 318 319 /* if we're getting close to exceeding the speed limit, 320 * clamp down harder on the bytes available */ 321 if( byteCount > 0 ) 322 { 323 double current; 324 double desired; 325 double r; 326 327 if( now == 0 ) 328 now = tr_time_msec( ); 329 330 current = tr_bandwidthGetRawSpeed_Bps( b, now, TR_DOWN ); 331 desired = tr_bandwidthGetDesiredSpeed_Bps( b, TR_DOWN ); 332 r = desired >= 1 ? 
current / desired : 0; 333 334 if( r > 1.0 ) byteCount = 0; 335 else if( r > 0.9 ) byteCount *= 0.8; 336 else if( r > 0.8 ) byteCount *= 0.9; 337 } 338 } 339 340 if( b->parent && b->band[dir].honorParentLimits && ( byteCount > 0 ) ) 341 byteCount = bandwidthClamp( b->parent, now, dir, byteCount ); 342 } 343 344 return byteCount; 345} 346unsigned int 347tr_bandwidthClamp( const tr_bandwidth * b, 348 tr_direction dir, 349 unsigned int byteCount ) 350{ 351 return bandwidthClamp( b, 0, dir, byteCount ); 352} 353 354 355unsigned int 356tr_bandwidthGetRawSpeed_Bps( const tr_bandwidth * b, const uint64_t now, const tr_direction dir ) 357{ 358 assert( tr_isBandwidth( b ) ); 359 assert( tr_isDirection( dir ) ); 360 361 return getSpeed_Bps( &b->band[dir].raw, HISTORY_MSEC, now ); 362} 363 364unsigned int 365tr_bandwidthGetPieceSpeed_Bps( const tr_bandwidth * b, const uint64_t now, const tr_direction dir ) 366{ 367 assert( tr_isBandwidth( b ) ); 368 assert( tr_isDirection( dir ) ); 369 370 return getSpeed_Bps( &b->band[dir].piece, HISTORY_MSEC, now ); 371} 372 373void 374tr_bandwidthUsed( tr_bandwidth * b, 375 tr_direction dir, 376 size_t byteCount, 377 bool isPieceData, 378 uint64_t now ) 379{ 380 struct tr_band * band; 381 382 assert( tr_isBandwidth( b ) ); 383 assert( tr_isDirection( dir ) ); 384 385 band = &b->band[dir]; 386 387 if( band->isLimited && isPieceData ) 388 band->bytesLeft -= MIN( band->bytesLeft, byteCount ); 389 390#ifdef DEBUG_DIRECTION 391if( ( dir == DEBUG_DIRECTION ) && ( band->isLimited ) ) 392fprintf( stderr, "%p consumed %5zu bytes of %5s data... was %6zu, now %6zu left\n", 393 b, byteCount, (isPieceData?"piece":"raw"), oldBytesLeft, band->bytesLeft ); 394#endif 395 396 bytesUsed( now, &band->raw, byteCount ); 397 398 if( isPieceData ) 399 bytesUsed( now, &band->piece, byteCount ); 400 401 if( b->parent != NULL ) 402 tr_bandwidthUsed( b->parent, dir, byteCount, isPieceData, now ); 403} 404