Greg Kroah-Hartman cf697f6deb Merge 4.9.163 into android-4.9
Changes in 4.9.163
	USB: serial: option: add Telit ME910 ECM composition
	USB: serial: cp210x: add ID for Ingenico 3070
	USB: serial: ftdi_sio: add ID for Hjelmslund Electronics USB485
	cpufreq: Use struct kobj_attribute instead of struct global_attr
	ncpfs: fix build warning of strncpy
	isdn: isdn_tty: fix build warning of strncpy
	staging: comedi: ni_660x: fix missing break in switch statement
	staging: wilc1000: fix to set correct value for 'vif_num'
	staging: android: ion: fix sys heap pool's gfp_flags
	ip6mr: Do not call __IP6_INC_STATS() from preemptible context
	net-sysfs: Fix mem leak in netdev_register_kobject
	sky2: Disable MSI on Dell Inspiron 1545 and Gateway P-79
	team: Free BPF filter when unregistering netdev
	bnxt_en: Drop oversize TX packets to prevent errors.
	hv_netvsc: Fix IP header checksum for coalesced packets
	net: dsa: mv88e6xxx: Fix u64 statistics
	netlabel: fix out-of-bounds memory accesses
	net: netem: fix skb length BUG_ON in __skb_to_sgvec
	net: nfc: Fix NULL dereference on nfc_llcp_build_tlv fails
	net: sit: fix memory leak in sit_init_net()
	xen-netback: don't populate the hash cache on XenBus disconnect
	xen-netback: fix occasional leak of grant ref mappings under memory pressure
	net: Add __icmp_send helper.
	net: avoid use IPCB in cipso_v4_error
	tun: fix blocking read
	tun: remove unnecessary memory barrier
	net: phy: Micrel KSZ8061: link failure after cable connect
	x86/CPU/AMD: Set the CPB bit unconditionally on F17h
	applicom: Fix potential Spectre v1 vulnerabilities
	MIPS: irq: Allocate accurate order pages for irq stack
	hugetlbfs: fix races and page leaks during migration
	exec: Fix mem leak in kernel_read_file
	media: uvcvideo: Fix 'type' check leading to overflow
	vti4: Fix a ipip packet processing bug in 'IPCOMP' virtual tunnel
	perf core: Fix perf_proc_update_handler() bug
	perf tools: Handle TOPOLOGY headers with no CPU
	IB/{hfi1, qib}: Fix WC.byte_len calculation for UD_SEND_WITH_IMM
	iommu/amd: Call free_iova_fast with pfn in map_sg
	iommu/amd: Unmap all mapped pages in error path of map_sg
	ipvs: Fix signed integer overflow when setsockopt timeout
	iommu/amd: Fix IOMMU page flush when detach device from a domain
	xtensa: SMP: fix ccount_timer_shutdown
	xtensa: SMP: fix secondary CPU initialization
	xtensa: smp_lx200_defconfig: fix vectors clash
	xtensa: SMP: mark each possible CPU as present
	xtensa: SMP: limit number of possible CPUs by NR_CPUS
	net: altera_tse: fix msgdma_tx_completion on non-zero fill_level case
	net: hns: Fix for missing of_node_put() after of_parse_phandle()
	net: hns: Fix wrong read accesses via Clause 45 MDIO protocol
	net: stmmac: dwmac-rk: fix error handling in rk_gmac_powerup()
	gpio: vf610: Mask all GPIO interrupts
	nfs: Fix NULL pointer dereference of dev_name
	qed: Fix VF probe failure while FLR
	scsi: libfc: free skb when receiving invalid flogi resp
	platform/x86: Fix unmet dependency warning for SAMSUNG_Q10
	cifs: fix computation for MAX_SMB2_HDR_SIZE
	arm64: kprobe: Always blacklist the KVM world-switch code
	x86/kexec: Don't setup EFI info if EFI runtime is not enabled
	x86_64: increase stack size for KASAN_EXTRA
	mm, memory_hotplug: is_mem_section_removable do not pass the end of a zone
	mm, memory_hotplug: test_pages_in_a_zone do not pass the end of zone
	fs/drop_caches.c: avoid softlockups in drop_pagecache_sb()
	autofs: drop dentry reference only when it is never used
	autofs: fix error return in autofs_fill_super()
	soc: fsl: qbman: avoid race in clearing QMan interrupt
	ARM: pxa: ssp: unneeded to free devm_ allocated data
	arm64: dts: add msm8996 compatible to gicv3
	usb: phy: fix link errors
	irqchip/mmp: Only touch the PJ4 IRQ & FIQ bits on enable/disable
	drm/sun4i: tcon: Prepare and enable TCON channel 0 clock at init
	dmaengine: at_xdmac: Fix wrongfull report of a channel as in use
	vsock/virtio: fix kernel panic after device hot-unplug
	vsock/virtio: reset connected sockets on device removal
	dmaengine: dmatest: Abort test in case of mapping error
	selftests: netfilter: fix config fragment CONFIG_NF_TABLES_INET
	selftests: netfilter: add simple masq/redirect test cases
	netfilter: nf_nat: skip nat clash resolution for same-origin entries
	s390/qeth: fix use-after-free in error path
	perf symbols: Filter out hidden symbols from labels
	MIPS: Remove function size check in get_frame_info()
	fs: ratelimit __find_get_block_slow() failure message.
	Input: wacom_serial4 - add support for Wacom ArtPad II tablet
	Input: elan_i2c - add id for touchpad found in Lenovo s21e-20
	iscsi_ibft: Fix missing break in switch statement
	scsi: aacraid: Fix missing break in switch statement
	futex,rt_mutex: Restructure rt_mutex_finish_proxy_lock()
	ARM: dts: exynos: Fix pinctrl definition for eMMC RTSN line on Odroid X2/U3
	ARM: dts: exynos: Add minimal clkout parameters to Exynos3250 PMU
	drm: disable uncached DMA optimization for ARM and arm64
	ARM: 8781/1: Fix Thumb-2 syscall return for binutils 2.29+
	ARM: dts: exynos: Do not ignore real-world fuse values for thermal zone 0 on Exynos5420
	perf/x86/intel: Make cpuc allocations consistent
	perf/x86/intel: Generalize dynamic constraint creation
	x86: Add TSX Force Abort CPUID/MSR
	Linux 4.9.163

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
2019-03-14 20:02:42 +07:00

/*
 * Copyright (c) 2016 Citrix Systems Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#define XEN_NETIF_DEFINE_TOEPLITZ

#include "common.h"

#include <linux/vmalloc.h>
#include <linux/rculist.h>
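
/* Remember the hash computed for a given tag. If the tag is already
 * cached the new entry is discarded; otherwise it is added and, once the
 * cache grows beyond xenvif_hash_cache_size, the entry with the lowest
 * sequence number (i.e. the least recently used one) is evicted.
 */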
static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
			    unsigned int len, u32 val)
{
	struct xenvif_hash_cache_entry *new, *entry, *oldest;
	unsigned long flags;
	bool found;

	new = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!new)
		return;

	memcpy(new->tag, tag, len);
	new->len = len;
	new->val = val;

	spin_lock_irqsave(&vif->hash.cache.lock, flags);

	found = false;
	oldest = NULL;
	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
		/* Make sure we don't add duplicate entries */
		if (entry->len == len &&
		    memcmp(entry->tag, tag, len) == 0)
			found = true;
		if (!oldest || entry->seq < oldest->seq)
			oldest = entry;
	}

	if (!found) {
		new->seq = atomic_inc_return(&vif->hash.cache.seq);
		list_add_rcu(&new->link, &vif->hash.cache.list);

		if (++vif->hash.cache.count > xenvif_hash_cache_size) {
			list_del_rcu(&oldest->link);
			vif->hash.cache.count--;
			kfree_rcu(oldest, rcu);
		}
	}

	spin_unlock_irqrestore(&vif->hash.cache.lock, flags);

	if (found)
		kfree(new);
}
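
/* Compute the Toeplitz hash of the given data with the frontend-supplied
 * key and, if caching is enabled, remember the result for later lookups.
 */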
static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
			   unsigned int len)
{
	u32 val;

	val = xen_netif_toeplitz_hash(vif->hash.key,
				      sizeof(vif->hash.key),
				      data, len);

	if (xenvif_hash_cache_size != 0)
		xenvif_add_hash(vif, data, len, val);

	return val;
}
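
/* Drop all cached hash values, e.g. when the key changes or the vif is
 * torn down.
 */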
static void xenvif_flush_hash(struct xenvif *vif)
{
	struct xenvif_hash_cache_entry *entry;
	unsigned long flags;

	if (xenvif_hash_cache_size == 0)
		return;

	spin_lock_irqsave(&vif->hash.cache.lock, flags);

	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
		list_del_rcu(&entry->link);
		vif->hash.cache.count--;
		kfree_rcu(entry, rcu);
	}

	spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
}
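
/* Return the cached hash for the given data if there is one, refreshing
 * its sequence number on a hit; otherwise compute (and cache) it afresh.
 * Tags too large for a cache entry simply hash to 0.
 */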
static u32 xenvif_find_hash(struct xenvif *vif, const u8 *data,
			    unsigned int len)
{
	struct xenvif_hash_cache_entry *entry;
	u32 val;
	bool found;

	if (len >= XEN_NETBK_HASH_TAG_SIZE)
		return 0;

	if (xenvif_hash_cache_size == 0)
		return xenvif_new_hash(vif, data, len);

	rcu_read_lock();

	found = false;

	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
		if (entry->len == len &&
		    memcmp(entry->tag, data, len) == 0) {
			val = entry->val;
			entry->seq = atomic_inc_return(&vif->hash.cache.seq);
			found = true;
			break;
		}
	}

	rcu_read_unlock();

	if (!found)
		val = xenvif_new_hash(vif, data, len);

	return val;
}
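
/* Dissect the skb and, depending on which hash types the frontend has
 * enabled, compute an L3 or L4 hash over the IPv4/IPv6 addresses (plus
 * the TCP ports in the L4 case) and record it in the skb. If no enabled
 * hash type matches, any existing skb hash is cleared.
 */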
void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb)
{
	struct flow_keys flow;
	u32 hash = 0;
	enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
	u32 flags = vif->hash.flags;
	bool has_tcp_hdr;

	/* Quick rejection test: If the network protocol doesn't
	 * correspond to any enabled hash type then there's no point
	 * in parsing the packet header.
	 */
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
			     XEN_NETIF_CTRL_HASH_TYPE_IPV4))
			break;

		goto done;

	case htons(ETH_P_IPV6):
		if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP |
			     XEN_NETIF_CTRL_HASH_TYPE_IPV6))
			break;

		goto done;

	default:
		goto done;
	}

	memset(&flow, 0, sizeof(flow));
	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		goto done;

	has_tcp_hdr = (flow.basic.ip_proto == IPPROTO_TCP) &&
		      !(flow.control.flags & FLOW_DIS_IS_FRAGMENT);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (has_tcp_hdr &&
		    (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)) {
			u8 data[12];

			memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
			memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
			memcpy(&data[8], &flow.ports.src, 2);
			memcpy(&data[10], &flow.ports.dst, 2);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L4;
		} else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4) {
			u8 data[8];

			memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
			memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L3;
		}

		break;

	case htons(ETH_P_IPV6):
		if (has_tcp_hdr &&
		    (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)) {
			u8 data[36];

			memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
			memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
			memcpy(&data[32], &flow.ports.src, 2);
			memcpy(&data[34], &flow.ports.dst, 2);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L4;
		} else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6) {
			u8 data[32];

			memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
			memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L3;
		}

		break;
	}

done:
	if (type == PKT_HASH_TYPE_NONE)
		skb_clear_hash(skb);
	else
		__skb_set_sw_hash(skb, hash, type == PKT_HASH_TYPE_L4);
}
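
/* Control-ring handler: select the hash algorithm requested by the
 * frontend (currently only NONE or TOEPLITZ are accepted).
 */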
u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg)
{
	switch (alg) {
	case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
	case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
		break;

	default:
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
	}

	vif->hash.alg = alg;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags)
{
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;

	*flags = XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
		 XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
		 XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
		 XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags)
{
	if (flags & ~(XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
		      XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
		      XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
		      XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP))
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	vif->hash.flags = flags;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
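
/* Control-ring handler: grant-copy the Toeplitz key from the frontend,
 * zero any unused tail of the key buffer and invalidate the hash cache.
 */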
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len)
{
	u8 *key = vif->hash.key;
	struct gnttab_copy copy_op = {
		.source.u.ref = gref,
		.source.domid = vif->domid,
		.dest.u.gmfn = virt_to_gfn(key),
		.dest.domid = DOMID_SELF,
		.dest.offset = xen_offset_in_page(key),
		.len = len,
		.flags = GNTCOPY_source_gref
	};

	if (len > XEN_NETBK_MAX_HASH_KEY_SIZE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	if (copy_op.len != 0) {
		gnttab_batch_copy(&copy_op, 1);

		if (copy_op.status != GNTST_okay)
			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
	}

	/* Clear any remaining key octets */
	if (len < XEN_NETBK_MAX_HASH_KEY_SIZE)
		memset(key + len, 0, XEN_NETBK_MAX_HASH_KEY_SIZE - len);

	xenvif_flush_hash(vif);

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
{
	if (size > XEN_NETBK_MAX_HASH_MAPPING_SIZE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	vif->hash.size = size;
	memset(vif->hash.mapping, 0, sizeof(u32) * size);

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
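
/* Control-ring handler: grant-copy (part of) the hash-to-queue mapping
 * table from the frontend, rejecting requests whose offset or length
 * fall outside the table and mapping entries that reference a
 * non-existent queue.
 */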
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
			    u32 off)
{
	u32 *mapping = vif->hash.mapping;
	struct gnttab_copy copy_op = {
		.source.u.ref = gref,
		.source.domid = vif->domid,
		.dest.domid = DOMID_SELF,
		.len = len * sizeof(*mapping),
		.flags = GNTCOPY_source_gref
	};

	if ((off + len < off) || (off + len > vif->hash.size) ||
	    len > XEN_PAGE_SIZE / sizeof(*mapping))
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	copy_op.dest.u.gmfn = virt_to_gfn(mapping + off);
	copy_op.dest.offset = xen_offset_in_page(mapping + off);

	while (len-- != 0)
		if (mapping[off++] >= vif->num_queues)
			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	if (copy_op.len != 0) {
		gnttab_batch_copy(&copy_op, 1);

		if (copy_op.status != GNTST_okay)
			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
	}

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
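
/* debugfs helper: print the negotiated algorithm, the enabled hash
 * types, the key and the queue mapping in human-readable form.
 */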
#ifdef CONFIG_DEBUG_FS
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
{
	unsigned int i;

	switch (vif->hash.alg) {
	case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
		seq_puts(m, "Hash Algorithm: TOEPLITZ\n");
		break;

	case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
		seq_puts(m, "Hash Algorithm: NONE\n");
		/* FALLTHRU */
	default:
		return;
	}

	if (vif->hash.flags) {
		seq_puts(m, "\nHash Flags:\n");

		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4)
			seq_puts(m, "- IPv4\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
			seq_puts(m, "- IPv4 + TCP\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6)
			seq_puts(m, "- IPv6\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
			seq_puts(m, "- IPv6 + TCP\n");
	}

	seq_puts(m, "\nHash Key:\n");

	for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) {
		unsigned int j, n;

		n = 8;
		if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE)
			n = XEN_NETBK_MAX_HASH_KEY_SIZE - i;

		seq_printf(m, "[%2u - %2u]: ", i, i + n - 1);

		for (j = 0; j < n; j++, i++)
			seq_printf(m, "%02x ", vif->hash.key[i]);

		seq_puts(m, "\n");
	}

	if (vif->hash.size != 0) {
		seq_puts(m, "\nHash Mapping:\n");

		for (i = 0; i < vif->hash.size; ) {
			unsigned int j, n;

			n = 8;
			if (i + n >= vif->hash.size)
				n = vif->hash.size - i;

			seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);

			for (j = 0; j < n; j++, i++)
				seq_printf(m, "%4u ", vif->hash.mapping[i]);

			seq_puts(m, "\n");
		}
	}
}
#endif /* CONFIG_DEBUG_FS */
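
/* Initialise the per-vif hash cache; the cache must be empty at this
 * point. xenvif_deinit_hash() below flushes it when the vif goes away.
 */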
void xenvif_init_hash(struct xenvif *vif)
{
	if (xenvif_hash_cache_size == 0)
		return;

	BUG_ON(vif->hash.cache.count);

	spin_lock_init(&vif->hash.cache.lock);
	INIT_LIST_HEAD(&vif->hash.cache.list);
}

void xenvif_deinit_hash(struct xenvif *vif)
{
	xenvif_flush_hash(vif);
}