Greg Kroah-Hartman 9a966fb881 Merge 4.9.231 into android-4.9-q
Changes in 4.9.231
	KVM: s390: reduce number of IO pins to 1
	gpu: host1x: Detach driver on unregister
	spi: spidev: fix a race between spidev_release and spidev_remove
	spi: spidev: fix a potential use-after-free in spidev_release()
	s390/kasan: fix early pgm check handler execution
	cifs: update ctime and mtime during truncate
	ARM: imx6: add missing put_device() call in imx6q_suspend_init()
	scsi: mptscsih: Fix read sense data size
	net: cxgb4: fix return error value in t4_prep_fw
	smsc95xx: check return value of smsc95xx_reset
	smsc95xx: avoid memory leak in smsc95xx_bind
	ALSA: compress: fix partial_drain completion state
	arm64: kgdb: Fix single-step exception handling oops
	bnxt_en: fix NULL dereference in case SR-IOV configuration fails
	net: macb: mark device wake capable when "magic-packet" property present
	ALSA: opl3: fix infoleak in opl3
	ALSA: hda - let hs_mic be picked ahead of hp_mic
	ALSA: usb-audio: add quirk for MacroSilicon MS2109
	KVM: arm64: Fix definition of PAGE_HYP_DEVICE
	KVM: x86: bit 8 of non-leaf PDPEs is not reserved
	Revert "ath9k: Fix general protection fault in ath9k_hif_usb_rx_cb"
	btrfs: fix fatal extent_buffer readahead vs releasepage race
	drm/radeon: fix double free
	ARC: entry: fix potential EFA clobber when TIF_SYSCALL_TRACE
	ARC: elf: use right ELF_ARCH
	s390/mm: fix huge pte soft dirty copying
	ipv4: fill fl4_icmp_{type,code} in ping_v4_sendmsg
	l2tp: remove skb_dst_set() from l2tp_xmit_skb()
	llc: make sure applications use ARPHRD_ETHER
	net: Added pointer check for dst->ops->neigh_lookup in dst_neigh_lookup_skb
	net: usb: qmi_wwan: add support for Quectel EG95 LTE modem
	tcp: md5: add missing memory barriers in tcp_md5_do_add()/tcp_md5_hash_key()
	tcp: md5: refine tcp_md5_do_add()/tcp_md5_hash_key() barriers
	genetlink: remove genl_bind
	tcp: make sure listeners don't initialize congestion-control state
	tcp: md5: do not send silly options in SYNCOOKIES
	tcp: md5: allow changing MD5 keys in all socket states
	cgroup: fix cgroup_sk_alloc() for sk_clone_lock()
	cgroup: Fix sock_cgroup_data on big-endian.
	i2c: eg20t: Load module automatically if ID matches
	iio:magnetometer:ak8974: Fix alignment and data leak issues
	iio: magnetometer: ak8974: Fix runtime PM imbalance on error
	iio: mma8452: Add missed iio_device_unregister() call in mma8452_probe()
	iio: pressure: zpa2326: handle pm_runtime_get_sync failure
	iio:pressure:ms5611 Fix buffer element alignment
	iio:health:afe4403 Fix timestamp alignment and prevent data leak.
	spi: fix initial SPI_SR value in spi-fsl-dspi
	net: dsa: bcm_sf2: Fix node reference count
	Revert "usb/ehci-platform: Set PM runtime as active on resume"
	Revert "usb/xhci-plat: Set PM runtime as active on resume"
	Revert "usb/ohci-platform: Fix a warning when hibernating"
	iio:health:afe4404 Fix timestamp alignment and prevent data leak.
	spi: spi-sun6i: sun6i_spi_transfer_one(): fix setting of clock rate
	usb: gadget: udc: atmel: fix uninitialized read in debug printk
	staging: comedi: verify array index is correct before using it
	Revert "thermal: mediatek: fix register index error"
	ARM: dts: socfpga: Align L2 cache-controller nodename with dtschema
	perf stat: Zero all the 'ena' and 'run' array slot stats for interval mode
	mtd: rawnand: brcmnand: fix CS0 layout
	HID: magicmouse: do not set up autorepeat
	usb: core: Add a helper function to check the validity of EP type in URB
	ALSA: line6: Perform sanity check for each URB creation
	ALSA: usb-audio: Fix race against the error recovery URB submission
	USB: c67x00: fix use after free in c67x00_giveback_urb
	usb: dwc2: Fix shutdown callback in platform
	usb: chipidea: core: add wakeup support for extcon
	usb: gadget: function: fix missing spinlock in f_uac1_legacy
	USB: serial: iuu_phoenix: fix memory corruption
	USB: serial: cypress_m8: enable Simply Automated UPB PIM
	USB: serial: ch341: add new Product ID for CH340
	USB: serial: option: add GosunCn GM500 series
	USB: serial: option: add Quectel EG95 LTE modem
	virtio: virtio_console: add missing MODULE_DEVICE_TABLE() for rproc serial
	fuse: Fix parameter for FS_IOC_{GET,SET}FLAGS
	mei: bus: don't clean driver pointer
	Input: i8042 - add Lenovo XiaoXin Air 12 to i8042 nomux list
	uio_pdrv_genirq: fix use without device tree and no interrupt
	timer: Fix wheel index calculation on last level
	MIPS: Fix build for LTS kernel caused by backporting lpj adjustment
	hwmon: (emc2103) fix unable to change fan pwm1_enable attribute
	dmaengine: fsl-edma: Fix NULL pointer exception in fsl_edma_tx_handler
	misc: atmel-ssc: lock with mutex instead of spinlock
	arm64: ptrace: Override SPSR.SS when single-stepping is enabled
	sched/fair: handle case of task_h_load() returning 0
	irqchip/gic: Atomically update affinity
	x86/cpu: Move x86_cache_bits settings
	Linux 4.9.231

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I57b0f3c4ffcdc49231d1c48f3f25fae424daeeb9
2020-07-25 00:36:12 +03:00

/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <net/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/* Must be called with rcu lock held */
static const struct tcp_congestion_ops *__tcp_ca_find_autoload(const char *name)
{
	const struct tcp_congestion_ops *ca = tcp_ca_find(name);

#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	return ca;
}

/* Simple linear search, not much in here. */
struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (e->key == key)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement ssthresh and cong_avoid ops */
	if (!ca->ssthresh || !(ca->cong_avoid || ca->cong_control)) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
		pr_notice("%s already registered or non-unique key\n",
			  ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_debug("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module gets removed entirely.
	 *
	 * A try_module_get() should fail by now as our module is
	 * in "going" state since no refs are held anymore and
	 * the module_exit() handler is being called.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

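/*
 * Illustrative sketch, not part of this file: an out-of-tree congestion
 * control module pairs tcp_register_congestion_control() in its init hook
 * with tcp_unregister_congestion_control() in its exit hook, much like the
 * built-in tcp_reno ops at the bottom of this file.  The "example" name and
 * the reuse of the Reno helpers below are assumptions made for the sketch:
 *
 *	static struct tcp_congestion_ops tcp_example __read_mostly = {
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *	};
 *
 *	static int __init tcp_example_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *
 *	static void __exit tcp_example_unregister(void)
 *	{
 *		tcp_unregister_congestion_control(&tcp_example);
 *	}
 *
 *	module_init(tcp_example_register);
 *	module_exit(tcp_example_unregister);
 *	MODULE_LICENSE("GPL");
 */
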
u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca)
{
	const struct tcp_congestion_ops *ca;
	u32 key = TCP_CA_UNSPEC;

	might_sleep();

	rcu_read_lock();
	ca = __tcp_ca_find_autoload(name);
	if (ca) {
		key = ca->key;
		*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
	}
	rcu_read_unlock();

	return key;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_key_by_name);

char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	const struct tcp_congestion_ops *ca;
	char *ret = NULL;

	rcu_read_lock();
	ca = tcp_ca_find_key(key);
	if (ca)
		ret = strncpy(buffer, ca->name,
			      TCP_CA_NAME_MAX);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_name_by_key);

/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (likely(try_module_get(ca->owner))) {
			icsk->icsk_ca_ops = ca;
			goto out;
		}
		/* Fallback to next available. The last really
		 * guaranteed fallback is Reno from this list.
		 */
	}
out:
	rcu_read_unlock();
	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));

	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

void tcp_init_congestion_control(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_sk(sk)->prior_ssthresh = 0;
	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
	if (tcp_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

static void tcp_reinit_congestion_control(struct sock *sk,
					  const struct tcp_congestion_ops *ca)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = ca;
	icsk->icsk_ca_setsockopt = 1;
	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));

	if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		tcp_init_congestion_control(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		spin_unlock(&tcp_cong_list_lock);

		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		ca->flags |= TCP_CONG_NON_RESTRICTED;	/* default is always allowed */
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

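/*
 * Note added for illustration: at run time the default is normally changed
 * through the net.ipv4.tcp_congestion_control sysctl, whose handler calls
 * tcp_set_default_congestion_control().  Because list_move() places the
 * chosen algorithm at the head of tcp_cong_list, and
 * tcp_assign_congestion_control() picks the first entry it can take a module
 * reference on, newly created sockets pick up the new default immediately.
 */
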
/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;
	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion control */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1 check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2 clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3 mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}

/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;
	int err = 0;

	if (icsk->icsk_ca_dst_locked)
		return -EPERM;

	rcu_read_lock();
	ca = __tcp_ca_find_autoload(name);
	/* No change asking for existing value */
	if (ca == icsk->icsk_ca_ops) {
		icsk->icsk_ca_setsockopt = 1;
		goto out;
	}
	if (!ca)
		err = -ENOENT;
	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
		   ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)))
		err = -EPERM;
	else if (!try_module_get(ca->owner))
		err = -EBUSY;
	else
		tcp_reinit_congestion_control(sk, ca);
out:
	rcu_read_unlock();
	return err;
}

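/*
 * Illustrative sketch, not part of this file: tcp_set_congestion_control()
 * is reached from do_tcp_setsockopt() when an application sets the
 * TCP_CONGESTION socket option.  A minimal userspace caller, assuming a
 * "cubic" module is available on the system, might look like:
 *
 *	#include <string.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <sys/socket.h>
 *
 *	static int set_cc(int fd, const char *name)
 *	{
 *		return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
 *				  name, strlen(name));
 *	}
 *
 *	// e.g. set_cc(fd, "cubic");
 */
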
/* Slow start is used when congestion window is no greater than the slow start
 * threshold. We base on RFC2581 and also handle stretch ACKs properly.
 * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
 * something better;) a packet is only considered (s)acked in its entirety to
 * defend the ACK attacks described in the RFC. Slow start processes a stretch
 * ACK of degree N as if N acks of degree 1 are received back to back except
 * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
 * returns the leftover acks to adjust cwnd in congestion avoidance mode.
 */
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);

	acked -= cwnd - tp->snd_cwnd;
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);

	return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);

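/*
 * Worked example added for illustration (values are hypothetical): with
 * snd_cwnd = 10, snd_ssthresh = 12 and acked = 5, cwnd is clipped to the
 * threshold (min(10 + 5, 12) = 12), so only 2 of the 5 acked segments are
 * consumed here; snd_cwnd becomes 12 and the remaining 3 acked segments are
 * returned for the caller to process in congestion avoidance.
 */
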
/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
 * for every packet that was ACKed.
 */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
	/* If credits accumulated at a higher w, apply them gently now. */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tp->snd_cwnd++;
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		u32 delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tp->snd_cwnd += delta;
	}
	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);

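/*
 * Worked example added for illustration (values are hypothetical): with
 * w = 10, snd_cwnd_cnt = 4 and acked = 8, the counter reaches 12, so
 * delta = 12 / 10 = 1: snd_cwnd grows by one segment and the leftover
 * credit of 2 stays in snd_cwnd_cnt.  On average this yields the intended
 * additive increase of about one segment per w acked segments.
 */
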
/*
 * TCP Reno congestion control
 * This is a special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In "safe" area, increase. */
	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* In dangerous area, increase slowly. */
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
};