Congestion avoidance and control. Jacobson, V. ACM SIGCOMM Computer Communication Review, 18(4):314–329, ACM Press, New York, NY, USA, 1988. Paper: http://portal.acm.org/citation.cfm?doid=52324.52356  doi: 10/dvz5hs
Abstract: In October of '86, the Internet had the first of what became a series of 'congestion collapses'. During this period, the data throughput from LBL to UC Berkeley (sites separated by 400 yards and three IMP hops) dropped from 32 Kbps to 40 bps. Mike Karels and I were fascinated by this sudden factor-of-thousand drop in bandwidth and embarked on an investigation of why things had gotten so bad. We wondered, in particular, if the 4.3BSD (Berkeley UNIX) TCP was mis-behaving or if it could be tuned to work better under abysmal network conditions. The answer to both of these questions was yes. Since that time, we have put seven new algorithms into the 4BSD TCP: (i) round-trip-time variance estimation, (ii) exponential retransmit timer backoff, (iii) slow-start, (iv) more aggressive receiver ack policy, (v) dynamic window sizing on congestion, (vi) Karn's clamped retransmit backoff, and (vii) fast retransmit. Our measurements and the reports of beta testers suggest that the final product is fairly good at dealing with congested conditions on the Internet. This paper is a brief description of (i)–(v) and the rationale behind them. (vi) is an algorithm recently developed by Phil Karn of Bell Communications Research, described in [KP87]. (vii) is described in a soon-to-be-published RFC. Algorithms (i)–(v) spring from one observation: The flow on a TCP connection (or ISO TP-4 or Xerox NS SPP connection) should obey a 'conservation of packets' principle. And, if this principle were obeyed, congestion collapse would become the exception rather than the rule. Thus congestion control involves finding places that violate conservation and fixing them. By 'conservation of packets' I mean that for a connection 'in equilibrium', i.e., running stably with a full window of data in transit, the packet flow is what a physicist would call 'conservative': a new packet isn't put into the network until an old packet leaves. The physics of flow predicts that systems with this property should be robust in the face of congestion. Observation of the Internet suggests that it was not particularly robust. Why the discrepancy? There are only three ways for packet conservation to fail: (1) the connection doesn't get to equilibrium, or (2) a sender injects a new packet before an old packet has exited, or (3) the equilibrium can't be reached because of resource limits along the path. In the following sections, we treat each of these in turn.
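Items (i) and (ii) in the abstract's list, round-trip-time variance estimation and exponential retransmit timer backoff, are the timer side of the work. The sketch below is a minimal C illustration of that idea, assuming the gains (1/8 for the smoothed mean, 1/4 for the mean deviation), the rto = srtt + 4·rttvar rule, and the doubling cap, i.e. the values that later became the standard TCP retransmission-timer computation; it is not the paper's own appendix code.

```c
/*
 * Illustrative sketch (not the paper's appendix code) of items (i) and (ii)
 * from the abstract: an EWMA round-trip-time estimator with a mean-deviation
 * term, plus exponential backoff of the retransmit timer. The gains (1/8 for
 * the mean, 1/4 for the deviation) and rto = srtt + 4*rttvar are the values
 * that later became standard; treat them as assumptions here.
 */
#include <stdio.h>

struct rtt_estimator {
    double srtt;    /* smoothed round-trip time (seconds) */
    double rttvar;  /* smoothed mean deviation of the RTT */
    double rto;     /* current retransmission timeout */
    int    backoff; /* consecutive timeouts since the last good sample */
};

/* Feed one new RTT measurement into the estimator. */
static void rtt_sample(struct rtt_estimator *e, double measured)
{
    if (e->srtt == 0.0) {                  /* first sample seeds the estimator */
        e->srtt = measured;
        e->rttvar = measured / 2.0;
    } else {
        double err = measured - e->srtt;
        e->srtt   += err / 8.0;                                   /* gain 1/8 */
        e->rttvar += ((err < 0 ? -err : err) - e->rttvar) / 4.0;  /* gain 1/4 */
    }
    e->rto = e->srtt + 4.0 * e->rttvar;
    e->backoff = 0;                        /* a fresh sample resets the backoff */
}

/* On a retransmit timeout, back the timer off exponentially. */
static void rtt_timeout(struct rtt_estimator *e)
{
    if (e->backoff < 6)                    /* cap the doubling (assumption) */
        e->backoff++;
    e->rto = (e->srtt + 4.0 * e->rttvar) * (double)(1 << e->backoff);
}

int main(void)
{
    struct rtt_estimator e = {0};
    double samples[] = {0.20, 0.24, 0.22, 0.90, 0.25}; /* made-up RTTs */
    for (int i = 0; i < 5; i++) {
        rtt_sample(&e, samples[i]);
        printf("sample=%.2fs  srtt=%.3fs  rttvar=%.3fs  rto=%.3fs\n",
               samples[i], e.srtt, e.rttvar, e.rto);
    }
    rtt_timeout(&e);
    printf("after one timeout: rto=%.3fs\n", e.rto);
    return 0;
}
```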
@article{jacobson_congestion_1988,
title = {Congestion avoidance and control},
volume = {18},
issn = {0146-4833},
url = {http://portal.acm.org/citation.cfm?doid=52324.52356},
doi = {10/dvz5hs},
abstract = {In October of '86, the Internet had the first of what became a series of 'congestion collapses'. During this period, the data throughput from LBL to UC Berkeley (sites separated by 400 yards and three IMP hops) dropped from 32 Kbps to 40 bps. Mike Karels and I were fascinated by this sudden factor-of-thousand drop in bandwidth and embarked on an investigation of why things had gotten so bad. We wondered, in particular, if the 4.3BSD (Berkeley UNIX) TCP was mis-behaving or if it could be tuned to work better under abysmal network conditions. The answer to both of these questions was yes. Since that time, we have put seven new algorithms into the 4BSD TCP: (i) round-trip-time variance estimation, (ii) exponential retransmit timer backoff, (iii) slow-start, (iv) more aggressive receiver ack policy, (v) dynamic window sizing on congestion, (vi) Karn's clamped retransmit backoff, and (vii) fast retransmit. Our measurements and the reports of beta testers suggest that the final product is fairly good at dealing with congested conditions on the Internet. This paper is a brief description of (i)–(v) and the rationale behind them. (vi) is an algorithm recently developed by Phil Karn of Bell Communications Research, described in [KP87]. (vii) is described in a soon-to-be-published RFC. Algorithms (i)–(v) spring from one observation: The flow on a TCP connection (or ISO TP-4 or Xerox NS SPP connection) should obey a 'conservation of packets' principle. And, if this principle were obeyed, congestion collapse would become the exception rather than the rule. Thus congestion control involves finding places that violate conservation and fixing them. By 'conservation of packets' I mean that for a connection 'in equilibrium', i.e., running stably with a full window of data in transit, the packet flow is what a physicist would call 'conservative': a new packet isn't put into the network until an old packet leaves. The physics of flow predicts that systems with this property should be robust in the face of congestion. Observation of the Internet suggests that it was not particularly robust. Why the discrepancy? There are only three ways for packet conservation to fail: (1) the connection doesn't get to equilibrium, or (2) a sender injects a new packet before an old packet has exited, or (3) the equilibrium can't be reached because of resource limits along the path. In the following sections, we treat each of these in turn.},
number = {4},
urldate = {2017-02-05},
journal = {ACM SIGCOMM Computer Communication Review},
author = {Jacobson, V.},
year = {1988},
note = {Publisher: ACM Press, New York, NY, USA},
keywords = {Favorites},
pages = {314--329}
}
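Items (iii) and (v) in the abstract, slow-start and dynamic window sizing on congestion, are the window side of the 'conservation of packets' argument: grow the window by one segment per ACK up to a threshold, then by roughly one segment per round trip, and halve the threshold when congestion is signalled. The sketch below illustrates that update rule under assumed constants (a 512-byte segment, an initial threshold of four segments); the variable names are mine, not the 4.3BSD identifiers.

```c
/*
 * Illustrative sketch of items (iii) and (v) from the abstract: slow-start
 * and dynamic window sizing on congestion. Below the threshold the window
 * grows by one segment per ACK (exponential per RTT); above it, by roughly
 * one segment per RTT; a congestion signal halves the threshold and restarts
 * slow-start. Constants and names are assumptions for illustration only.
 */
#include <stdio.h>

#define SMSS 512u   /* sender maximum segment size in bytes (assumed) */

struct cwnd_state {
    unsigned cwnd;      /* congestion window, bytes */
    unsigned ssthresh;  /* slow-start threshold, bytes */
};

/* Called for each ACK that covers new data. */
static void on_ack(struct cwnd_state *s)
{
    if (s->cwnd < s->ssthresh)
        s->cwnd += SMSS;                   /* slow-start: +1 segment per ACK */
    else
        s->cwnd += SMSS * SMSS / s->cwnd;  /* avoidance: ~+1 segment per RTT */
}

/* Called when a retransmit timeout signals congestion. */
static void on_congestion(struct cwnd_state *s)
{
    unsigned flight = s->cwnd;             /* simplification: window ~= data in flight */
    s->ssthresh = flight / 2 > 2 * SMSS ? flight / 2 : 2 * SMSS;
    s->cwnd = SMSS;                        /* back to one segment, slow-start again */
}

int main(void)
{
    struct cwnd_state s = { SMSS, 4 * SMSS };
    for (int ack = 1; ack <= 8; ack++) {
        on_ack(&s);
        printf("ack %d: cwnd=%u ssthresh=%u\n", ack, s.cwnd, s.ssthresh);
    }
    on_congestion(&s);
    printf("after timeout: cwnd=%u ssthresh=%u\n", s.cwnd, s.ssthresh);
    return 0;
}
```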
{"_id":"xCw4TPy8v6y2TZ6eF","bibbaseid":"jacobson-congestionavoidanceandcontrol-1988","downloads":0,"creationDate":"2015-12-04T23:32:54.010Z","title":"Congestion avoidance and control","author_short":["Jacobson, V."],"year":1988,"bibtype":"article","biburl":"https://bibbase.org/zotero/k4rtik","bibdata":{"bibtype":"article","type":"article","title":"Congestion avoidance and control","volume":"18","issn":"01464833","url":"http://portal.acm.org/citation.cfm?doid=52324.52356","doi":"10/dvz5hs","abstract":"In October of '86, the Internet had the first of what became a series of 'congestion collapses'. During this period, the data throughput from LBL to UC Berkeley (sites separated by 400 yards and three IMP hops) dropped from 32 Kbps to 40 bps. Mike Karels1 and I were fascinated by this sudden factor-of-thousand drop in bandwidth and embarked on an investigation of why things had gotten so bad. We wondered, in particular, if the 4.3BSD (Berkeley UNIX) TCP was mis-behaving or if it could be tuned to work better under abysmal network conditions. The answer to both of these questions was yes. Since that time, we have put seven new algorithms into the 4BSD TCP: round-trip-time variance estimation exponential retransmit timer backoff slow-start more aggressive receiver ack policy dynamic window sizing on congestion Karn's clamped retransmit backoff fast retransmit Our measurements and the reports of beta testers suggest that the final product is fairly good at dealing with congested conditions on the Internet. This paper is a brief description of (i) - (v) and the rationale behind them. (vi) is an algorithm recently developed by Phil Karn of Bell Communications Research, described in KP87. (viii) is described in a soon-to-be-published RFC. Algorithms (i) - (v) spring from one observation: The flow on a TCP connection (or ISO TP-4 or Xerox NS SPP connection) should obey a 'conservation of packets' principle. And, if this principle were obeyed, congestion collapse would become the exception rather than the rule. Thus congestion control involves finding places that violate conservation and fixing them. By 'conservation of packets' I mean that for a connection 'in equilibrium', i.e., running stably with a full window of data in transit, the packet flow is what a physicist would call 'conservative': A new packet isn't put into the network until an old packet leaves. The physics of flow predicts that systems with this property should be robust in the face of congestion. Observation of the Internet suggests that it was not particularly robust. Why the discrepancy? There are only three ways for packet conservation to fail: The connection doesn't get to equilibrium, or A sender injects a new packet before an old packet has exited, or The equilibrium can't be reached because of resource limits along the path. 
In the following sections, we treat each of these in turn.","number":"4","urldate":"2017-02-05","journal":"ACM SIGCOMM Computer Communication Review","author":[{"propositions":[],"lastnames":["Jacobson"],"firstnames":["V."],"suffixes":[]}],"year":"1988","pmid":"25246403","note":"arXiv: 1011.1669v3 Publisher: ACM Press Place: New York, New York, USA ISBN: 0897912799","keywords":"Favorites","pages":"314–329","bibtex":"@article{jacobson_congestion_1988,\n\ttitle = {Congestion avoidance and control},\n\tvolume = {18},\n\tissn = {01464833},\n\turl = {http://portal.acm.org/citation.cfm?doid=52324.52356},\n\tdoi = {10/dvz5hs},\n\tabstract = {In October of '86, the Internet had the first of what became a series of 'congestion collapses'. During this period, the data throughput from LBL to UC Berkeley (sites separated by 400 yards and three IMP hops) dropped from 32 Kbps to 40 bps. Mike Karels1 and I were fascinated by this sudden factor-of-thousand drop in bandwidth and embarked on an investigation of why things had gotten so bad. We wondered, in particular, if the 4.3BSD (Berkeley UNIX) TCP was mis-behaving or if it could be tuned to work better under abysmal network conditions. The answer to both of these questions was yes. Since that time, we have put seven new algorithms into the 4BSD TCP: round-trip-time variance estimation exponential retransmit timer backoff slow-start more aggressive receiver ack policy dynamic window sizing on congestion Karn's clamped retransmit backoff fast retransmit Our measurements and the reports of beta testers suggest that the final product is fairly good at dealing with congested conditions on the Internet. This paper is a brief description of (i) - (v) and the rationale behind them. (vi) is an algorithm recently developed by Phil Karn of Bell Communications Research, described in KP87. (viii) is described in a soon-to-be-published RFC. Algorithms (i) - (v) spring from one observation: The flow on a TCP connection (or ISO TP-4 or Xerox NS SPP connection) should obey a 'conservation of packets' principle. And, if this principle were obeyed, congestion collapse would become the exception rather than the rule. Thus congestion control involves finding places that violate conservation and fixing them. By 'conservation of packets' I mean that for a connection 'in equilibrium', i.e., running stably with a full window of data in transit, the packet flow is what a physicist would call 'conservative': A new packet isn't put into the network until an old packet leaves. The physics of flow predicts that systems with this property should be robust in the face of congestion. Observation of the Internet suggests that it was not particularly robust. Why the discrepancy? There are only three ways for packet conservation to fail: The connection doesn't get to equilibrium, or A sender injects a new packet before an old packet has exited, or The equilibrium can't be reached because of resource limits along the path. 
In the following sections, we treat each of these in turn.},\n\tnumber = {4},\n\turldate = {2017-02-05},\n\tjournal = {ACM SIGCOMM Computer Communication Review},\n\tauthor = {Jacobson, V.},\n\tyear = {1988},\n\tpmid = {25246403},\n\tnote = {arXiv: 1011.1669v3\nPublisher: ACM Press\nPlace: New York, New York, USA\nISBN: 0897912799},\n\tkeywords = {Favorites},\n\tpages = {314--329}\n}\n\n","author_short":["Jacobson, V."],"key":"jacobson_congestion_1988","id":"jacobson_congestion_1988","bibbaseid":"jacobson-congestionavoidanceandcontrol-1988","role":"author","urls":{"Paper":"http://portal.acm.org/citation.cfm?doid=52324.52356"},"keyword":["Favorites"],"downloads":0},"search_terms":["congestion","avoidance","control","jacobson"],"keywords":["favorites"],"authorIDs":[],"dataSources":["Z5Dp3qAJiMzxtvKMq"]}