/*
 * Copyright (C) 2012 The Guava Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */

package com.google.common.util.concurrent;

import static java.lang.Math.min;
import static java.util.concurrent.TimeUnit.SECONDS;

import com.google.common.annotations.GwtIncompatible;
import com.google.common.annotations.J2ktIncompatible;
import com.google.common.math.LongMath;
import java.util.concurrent.TimeUnit;

@J2ktIncompatible
@GwtIncompatible
@ElementTypesAreNonnullByDefault
abstract class SmoothRateLimiter extends RateLimiter {
  /*
   * How is the RateLimiter designed, and why?
   *
   * The primary feature of a RateLimiter is its "stable rate", the maximum rate that it should
   * allow in normal conditions. This is enforced by "throttling" incoming requests as needed. For
   * example, we could compute the appropriate throttle time for an incoming request, and make the
   * calling thread wait for that time.
   *
   * The simplest way to maintain a rate of QPS is to keep the timestamp of the last granted
   * request, and ensure that (1/QPS) seconds have elapsed since then. For example, for a rate of
   * QPS=5 (5 tokens per second), if we ensure that a request isn't granted earlier than 200ms after
   * the last one, then we achieve the intended rate. If a request comes and the last request was
   * granted only 100ms ago, then we wait for another 100ms. At this rate, serving 15 fresh permits
   * (i.e. for an acquire(15) request) naturally takes 3 seconds.
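   *
   * As a minimal sketch of that naive approach (illustrative only; NaiveRateLimiter is a made-up
   * name, not part of this class or its API):
   *
   *   class NaiveRateLimiter {
   *     private final long intervalNanos; // (1/QPS) seconds, in nanoseconds
   *     private long lastGrantedNanos;
   *
   *     NaiveRateLimiter(double qps) {
   *       this.intervalNanos = (long) (1e9 / qps);
   *       this.lastGrantedNanos = System.nanoTime() - intervalNanos; // first acquire is free
   *     }
   *
   *     synchronized void acquire() throws InterruptedException {
   *       long waitNanos = lastGrantedNanos + intervalNanos - System.nanoTime();
   *       if (waitNanos > 0) {
   *         java.util.concurrent.TimeUnit.NANOSECONDS.sleep(waitNanos);
   *       }
   *       lastGrantedNanos = System.nanoTime();
   *     }
   *   }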
   *
   * It is important to realize that such a RateLimiter has a very superficial memory of the past:
   * it only remembers the last request. What if the RateLimiter was unused for a long period of
   * time, then a request arrived and was immediately granted? This RateLimiter would immediately
   * forget about that past underutilization. This may result in either underutilization or
   * overflow, depending on the real world consequences of not using the expected rate.
   *
   * Past underutilization could mean that excess resources are available. Then, the RateLimiter
   * should speed up for a while, to take advantage of these resources. This is important when the
   * rate is applied to networking (limiting bandwidth), where past underutilization typically
   * translates to "almost empty buffers", which can be filled immediately.
   *
   * On the other hand, past underutilization could mean that "the server responsible for handling
   * the request has become less ready for future requests", i.e. its caches become stale, and
   * requests become more likely to trigger expensive operations (a more extreme case of this
   * example is when a server has just booted, and it is mostly busy with getting itself up to
   * speed).
   *
   * To deal with such scenarios, we add an extra dimension, that of "past underutilization",
   * modeled by the "storedPermits" variable. This variable is zero when there is no
   * underutilization, and it can grow up to maxStoredPermits, for sufficiently large
   * underutilization. So, the permits requested by an invocation of acquire(permits) are served
   * from:
   *
   * - stored permits (if available)
   *
   * - fresh permits (for any remaining permits)
   *
   * How this works is best explained with an example:
   *
   * For a RateLimiter that produces 1 token per second, every second that goes by with the
   * RateLimiter being unused, we increase storedPermits by 1. Say we leave the RateLimiter unused
   * for 10 seconds (i.e., we expected a request at time X, but we are at time X + 10 seconds before
   * a request actually arrives; this is also related to the point made in the last paragraph), thus
   * storedPermits becomes 10.0 (assuming maxStoredPermits >= 10.0). At that point, a request of
   * acquire(3) arrives. We serve this request out of storedPermits, reducing that to 7.0 (how
   * this is translated to throttling time is discussed later). Immediately after, assume that an
   * acquire(10) request arrives. We serve the request partly from storedPermits, using all the
   * remaining 7.0 permits, and the remaining 3.0 we serve with fresh permits produced by the
   * rate limiter.
   *
   * We already know how much time it takes to serve 3 fresh permits: if the rate is
   * "1 token per second", then this will take 3 seconds. But what does it mean to serve 7 stored
   * permits? As explained above, there is no unique answer. If we are primarily interested in
   * dealing with underutilization, then we want stored permits to be given out /faster/ than fresh
   * ones, because underutilization = free resources for the taking. If we are primarily interested
   * in dealing with overflow, then stored permits could be given out /slower/ than fresh ones.
   * Thus, we require a (different in each case) function that translates storedPermits to
   * throttling time.
   *
   * This role is played by storedPermitsToWaitTime(double storedPermits, double permitsToTake). The
   * underlying model is a continuous function mapping storedPermits (from 0.0 to maxStoredPermits)
   * onto the 1/rate (i.e. intervals) that is effective at the given storedPermits. "storedPermits"
   * essentially measures unused time; we spend unused time buying/storing permits. Rate is
   * "permits / time", thus "1 / rate = time / permits". Thus, "1/rate" (time / permits) times
   * "permits" gives time, i.e., integrals on this function (which is what storedPermitsToWaitTime()
   * computes) correspond to minimum intervals between subsequent requests, for the specified number
   * of requested permits.
   *
   * Here is an example of storedPermitsToWaitTime: If storedPermits == 10.0, and we want 3 permits,
   * we take them from storedPermits, reducing them to 7.0, and compute the throttling for these as
   * a call to storedPermitsToWaitTime(storedPermits = 10.0, permitsToTake = 3.0), which will
   * evaluate the integral of the function from 7.0 to 10.0.
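   *
   * For a linear interval function f(x) = stableInterval + slope * x (the shape SmoothWarmingUp
   * below uses above its threshold), that integral is just a trapezoid area. A hedged sketch
   * (hypothetical helper, not a member of this class):
   *
   *   // area under f between (storedPermits - permitsToTake) and storedPermits
   *   static double waitTimeMicros(
   *       double stableIntervalMicros, double slope, double storedPermits, double permitsToTake) {
   *     double top = stableIntervalMicros + slope * storedPermits;
   *     double bottom = stableIntervalMicros + slope * (storedPermits - permitsToTake);
   *     return permitsToTake * (top + bottom) / 2.0; // width times average height
   *   }
   *
   * With slope == 0 (the horizontal line mentioned below) this reduces to permitsToTake *
   * stableIntervalMicros, i.e. stored permits cost exactly as much as fresh ones.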
   *
   * Using integrals guarantees that the effect of a single acquire(3) is equivalent to {
   * acquire(1); acquire(1); acquire(1); }, or { acquire(2); acquire(1); }, etc, since the integral
   * of the function in [7.0, 10.0] is equivalent to the sum of the integrals of [7.0, 8.0], [8.0,
   * 9.0], [9.0, 10.0] (and so on), no matter what the function is. This guarantees that we
   * correctly handle requests of varying weight (permits), /no matter/ what the actual function is
   * - so we can tweak the latter freely. (The only requirement, obviously, is that we can compute
   * its integrals).
   *
   * Note well that if, for this function, we chose a horizontal line, at height of exactly (1/QPS),
   * then the effect of the function is non-existent: we serve storedPermits at exactly the same
   * cost as fresh ones (1/QPS is the cost for each). We use this trick later.
   *
   * If we pick a function that goes /below/ that horizontal line, it means that we reduce the area
   * of the function, thus time. Thus, the RateLimiter becomes /faster/ after a period of
   * underutilization. If, on the other hand, we pick a function that goes /above/ that horizontal
   * line, then it means that the area (time) is increased, thus storedPermits are more costly than
   * fresh permits, thus the RateLimiter becomes /slower/ after a period of underutilization.
   *
   * Last, but not least: consider a RateLimiter with rate of 1 permit per second, currently
   * completely unused, and an expensive acquire(100) request comes. It would be nonsensical to just
   * wait for 100 seconds, and /then/ start the actual task. Why wait without doing anything? A much
   * better approach is to /allow/ the request right away (as if it was an acquire(1) request
   * instead), and postpone /subsequent/ requests as needed. In this version, we allow starting the
   * task immediately, and postpone by 100 seconds future requests, thus we allow for work to get
   * done in the meantime instead of waiting idly.
   *
   * This has important consequences: it means that the RateLimiter doesn't remember the time of the
   * _last_ request, but it remembers the (expected) time of the _next_ request. This also enables
   * us to tell immediately (see tryAcquire(timeout)) whether a particular timeout is enough to get
   * us to the point of the next scheduling time, since we always maintain that. And what we mean by
   * "an unused RateLimiter" is also defined by that notion: when we observe that the
   * "expected arrival time of the next request" is actually in the past, then the difference (now -
   * past) is the amount of time that the RateLimiter was formally unused, and it is that amount of
   * time which we translate to storedPermits. (We increase storedPermits with the amount of permits
   * that would have been produced in that idle time). So, if rate == 1 permit per second, and
   * arrivals come exactly one second after the previous, then storedPermits is _never_ increased --
   * we would only increase it for arrivals _later_ than the expected one second.
   */
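
  /*
   * A hedged sketch of that last point (the names here are illustrative; the real logic lives in
   * resync() near the bottom of this file): when the expected time of the next request is already
   * in the past, the idle gap is converted into stored permits.
   *
   *   // rate == 1 permit per second; the next request was expected 10 seconds ago
   *   long idleMicros = nowMicros - nextFreeMicros;                // 10,000,000
   *   double newPermits = idleMicros / coolDownIntervalMicros;     // 10.0 permits earned idling
   *   storedPermits = Math.min(maxPermits, storedPermits + newPermits);
   *   nextFreeMicros = nowMicros;                                  // the limiter is caught up
   */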

  /**
   * This implements the following function where coldInterval = coldFactor * stableInterval.
   *
   * <pre>
   *          ^ throttling
   *          |
   *    cold  +                  /
   * interval |                 /.
   *          |                / .
   *          |               /  .   ← "warmup period" is the area of the trapezoid between
   *          |              /   .     thresholdPermits and maxPermits
   *          |             /    .
   *          |            /     .
   *          |           /      .
   *   stable +----------/  WARM .
   * interval |          .   UP  .
   *          |          . PERIOD.
   *          |          .       .
   *        0 +----------+-------+--------------→ storedPermits
   *          0 thresholdPermits maxPermits
   * </pre>
   *
   * Before going into the details of this particular function, let's keep in mind the basics:
   *
   * <ol>
   *   <li>The state of the RateLimiter (storedPermits) is a vertical line in this figure.
   *   <li>When the RateLimiter is not used, this goes right (up to maxPermits).
   *   <li>When the RateLimiter is used, this goes left (down to zero), since if we have
   *       storedPermits, we serve from those first.
   *   <li>When _unused_, we go right at a constant rate! The rate at which we move to the right is
   *       chosen as maxPermits / warmupPeriod. This ensures that the time it takes to go from 0 to
   *       maxPermits is equal to warmupPeriod.
   *   <li>When _used_, the time it takes, as explained in the introductory class note, is equal to
   *       the integral of our function, between X permits and X-K permits, assuming we want to
   *       spend K saved permits.
   * </ol>
   *
   * <p>In summary, the time it takes to move to the left (spend K permits) is equal to the area of
   * the function of width == K.
   *
   * <p>Assuming we have saturated demand, the time to go from maxPermits to thresholdPermits is
   * equal to warmupPeriod. And the time to go from thresholdPermits to 0 is warmupPeriod/2. (The
   * reason that this is warmupPeriod/2 is to maintain the behavior of the original implementation
   * where coldFactor was hard coded as 3.)
   *
   * <p>It remains to calculate thresholdPermits and maxPermits.
   *
   * <ul>
   *   <li>The time to go from thresholdPermits to 0 is equal to the integral of the function
   *       between 0 and thresholdPermits. This is thresholdPermits * stableInterval. By (5) it is
   *       also equal to warmupPeriod/2. Therefore
   *       <blockquote>
   *       thresholdPermits = 0.5 * warmupPeriod / stableInterval
   *       </blockquote>
   *   <li>The time to go from maxPermits to thresholdPermits is equal to the integral of the
   *       function between thresholdPermits and maxPermits. This is the area of the pictured
   *       trapezoid, and it is equal to 0.5 * (stableInterval + coldInterval) * (maxPermits -
   *       thresholdPermits). It is also equal to warmupPeriod, so
   *       <blockquote>
   *       maxPermits = thresholdPermits + 2 * warmupPeriod / (stableInterval + coldInterval)
   *       </blockquote>
   * </ul>
   */
  static final class SmoothWarmingUp extends SmoothRateLimiter {
    private final long warmupPeriodMicros;
    /**
     * The slope of the line from the stable interval (when permits == 0), to the cold interval
     * (when permits == maxPermits).
     */
    private double slope;

    private double thresholdPermits;
    private double coldFactor;

    SmoothWarmingUp(
        SleepingStopwatch stopwatch, long warmupPeriod, TimeUnit timeUnit, double coldFactor) {
      super(stopwatch);
      this.warmupPeriodMicros = timeUnit.toMicros(warmupPeriod);
      this.coldFactor = coldFactor;
    }

    @Override
    void doSetRate(double permitsPerSecond, double stableIntervalMicros) {
      double oldMaxPermits = maxPermits;
      double coldIntervalMicros = stableIntervalMicros * coldFactor;
      thresholdPermits = 0.5 * warmupPeriodMicros / stableIntervalMicros;
      maxPermits =
          thresholdPermits + 2.0 * warmupPeriodMicros / (stableIntervalMicros + coldIntervalMicros);
      slope = (coldIntervalMicros - stableIntervalMicros) / (maxPermits - thresholdPermits);
      if (oldMaxPermits == Double.POSITIVE_INFINITY) {
        // if we don't special-case this, we would get storedPermits == NaN, below
        storedPermits = 0.0;
      } else {
        storedPermits =
            (oldMaxPermits == 0.0)
                ? maxPermits // initial state is cold
                : storedPermits * maxPermits / oldMaxPermits;
      }
    }

    @Override
    long storedPermitsToWaitTime(double storedPermits, double permitsToTake) {
      double availablePermitsAboveThreshold = storedPermits - thresholdPermits;
      long micros = 0;
      // measuring the integral on the right part of the function (the climbing line)
      if (availablePermitsAboveThreshold > 0.0) {
        double permitsAboveThresholdToTake = min(availablePermitsAboveThreshold, permitsToTake);
        // TODO(cpovirk): Figure out a good name for this variable.
        double length =
            permitsToTime(availablePermitsAboveThreshold)
                + permitsToTime(availablePermitsAboveThreshold - permitsAboveThresholdToTake);
        micros = (long) (permitsAboveThresholdToTake * length / 2.0);
        permitsToTake -= permitsAboveThresholdToTake;
      }
      // measuring the integral on the left part of the function (the horizontal line)
      micros += (long) (stableIntervalMicros * permitsToTake);
      return micros;
    }

    private double permitsToTime(double permits) {
      return stableIntervalMicros + permits * slope;
    }

    @Override
    double coolDownIntervalMicros() {
      return warmupPeriodMicros / maxPermits;
    }
  }
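
  /*
   * A worked example of the formulas above (the numbers are chosen for illustration, not taken
   * from the source): for rate = 5 permits/sec (stableInterval = 200,000 µs), warmupPeriod = 2 s
   * (2,000,000 µs), and coldFactor = 3 (the historical hard-coded value mentioned above, so
   * coldInterval = 600,000 µs):
   *
   *   thresholdPermits = 0.5 * 2,000,000 / 200,000                 = 5.0
   *   maxPermits       = 5.0 + 2 * 2,000,000 / (200,000 + 600,000) = 10.0
   *   slope            = (600,000 - 200,000) / (10.0 - 5.0)        = 80,000 µs per permit
   *
   * A fully cold limiter (storedPermits == 10.0) therefore pays 0.5 * (200,000 + 600,000) * 5.0
   * = 2,000,000 µs (the warmup period) to drain from 10 to 5 stored permits, and 5.0 * 200,000 =
   * 1,000,000 µs (warmupPeriod/2) to drain from 5 to 0.
   */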

  /**
   * This implements a "bursty" RateLimiter, where storedPermits are translated to zero throttling.
   * The maximum number of permits that can be saved (when the RateLimiter is unused) is defined in
   * terms of time, in this sense: if a RateLimiter is 2qps, and this time is specified as 10
   * seconds, we can save up to 2 * 10 = 20 permits.
   */
  static final class SmoothBursty extends SmoothRateLimiter {
    /** How many seconds' worth of work (permits) can be saved up if this RateLimiter is unused? */
    final double maxBurstSeconds;

    SmoothBursty(SleepingStopwatch stopwatch, double maxBurstSeconds) {
      super(stopwatch);
      this.maxBurstSeconds = maxBurstSeconds;
    }

    @Override
    void doSetRate(double permitsPerSecond, double stableIntervalMicros) {
      double oldMaxPermits = this.maxPermits;
      maxPermits = maxBurstSeconds * permitsPerSecond;
      if (oldMaxPermits == Double.POSITIVE_INFINITY) {
        // if we don't special-case this, we would get storedPermits == NaN, below
        storedPermits = maxPermits;
      } else {
        storedPermits =
            (oldMaxPermits == 0.0)
                ? 0.0 // initial state
                : storedPermits * maxPermits / oldMaxPermits;
      }
    }

    @Override
    long storedPermitsToWaitTime(double storedPermits, double permitsToTake) {
      return 0L;
    }

    @Override
    double coolDownIntervalMicros() {
      return stableIntervalMicros;
    }
  }
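
  /*
   * Usage sketch (hedged: callers obtain these implementations through the public RateLimiter
   * factory methods, whose exact wiring lives outside this file):
   *
   *   RateLimiter bursty = RateLimiter.create(5.0);                       // smooth 5 qps limiter
   *   RateLimiter warming = RateLimiter.create(5.0, 2, TimeUnit.SECONDS); // 2s warmup period
   *   bursty.acquire();    // may return immediately by spending a stored permit
   *   warming.acquire(10); // granted now; the cost throttles subsequent callers
   */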

  /** The currently stored permits. */
  double storedPermits;

  /** The maximum number of stored permits. */
  double maxPermits;

  /**
   * The interval between two unit requests, at our stable rate. E.g., a stable rate of 5 permits
   * per second has a stable interval of 200ms.
   */
  double stableIntervalMicros;

  /**
   * The time when the next request (no matter its size) will be granted. After granting a request,
   * this is pushed further in the future. Large requests push this further than small requests.
   */
  private long nextFreeTicketMicros = 0L; // could be either in the past or future

  private SmoothRateLimiter(SleepingStopwatch stopwatch) {
    super(stopwatch);
  }

  @Override
  final void doSetRate(double permitsPerSecond, long nowMicros) {
    resync(nowMicros);
    double stableIntervalMicros = SECONDS.toMicros(1L) / permitsPerSecond;
    this.stableIntervalMicros = stableIntervalMicros;
    doSetRate(permitsPerSecond, stableIntervalMicros);
  }

  abstract void doSetRate(double permitsPerSecond, double stableIntervalMicros);

  @Override
  final double doGetRate() {
    return SECONDS.toMicros(1L) / stableIntervalMicros;
  }

  @Override
  final long queryEarliestAvailable(long nowMicros) {
    return nextFreeTicketMicros;
  }

  @Override
  final long reserveEarliestAvailable(int requiredPermits, long nowMicros) {
    resync(nowMicros);
    long returnValue = nextFreeTicketMicros;
    // spend as many stored permits as we can; the rest are charged as fresh permits
    double storedPermitsToSpend = min(requiredPermits, this.storedPermits);
    double freshPermits = requiredPermits - storedPermitsToSpend;
    long waitMicros =
        storedPermitsToWaitTime(this.storedPermits, storedPermitsToSpend)
            + (long) (freshPermits * stableIntervalMicros);

    // the caller waits only until the *old* nextFreeTicketMicros; the new cost is paid later
    this.nextFreeTicketMicros = LongMath.saturatedAdd(nextFreeTicketMicros, waitMicros);
    this.storedPermits -= storedPermitsToSpend;
    return returnValue;
  }

  /**
   * Translates a specified portion of our currently stored permits which we want to spend/acquire,
   * into a throttling time. Conceptually, this evaluates the integral of the underlying function we
   * use, for the range of [(storedPermits - permitsToTake), storedPermits].
   *
   * <p>This always holds: {@code 0 <= permitsToTake <= storedPermits}
   */
  abstract long storedPermitsToWaitTime(double storedPermits, double permitsToTake);

  /**
   * Returns the number of microseconds during cool down that we have to wait to get a new permit.
   */
  abstract double coolDownIntervalMicros();

  /** Updates {@code storedPermits} and {@code nextFreeTicketMicros} based on the current time. */
  void resync(long nowMicros) {
    // if nextFreeTicket is in the past, resync to now
    if (nowMicros > nextFreeTicketMicros) {
      // the idle time since nextFreeTicketMicros is converted into stored permits
      double newPermits = (nowMicros - nextFreeTicketMicros) / coolDownIntervalMicros();
      storedPermits = min(maxPermits, storedPermits + newPermits);
      nextFreeTicketMicros = nowMicros;
    }
  }
}
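
/*
 * Worked example of reserveEarliestAvailable (illustrative numbers, not from the source): take a
 * SmoothBursty limiter at 1 permit/sec (stableIntervalMicros = 1,000,000) with storedPermits =
 * 5.0 and nextFreeTicketMicros == now. A call to reserveEarliestAvailable(7, now) computes:
 *
 *   storedPermitsToSpend = min(7, 5.0) = 5.0   // free, since SmoothBursty returns 0 wait
 *   freshPermits         = 7 - 5.0     = 2.0
 *   waitMicros           = 0 + (long) (2.0 * 1,000,000) = 2,000,000
 *
 * The call returns the old nextFreeTicketMicros (so the caller proceeds immediately), while
 * nextFreeTicketMicros moves 2 seconds into the future, throttling subsequent requests.
 */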