kernel-49/lib/scatterlist.c
Greg Kroah-Hartman 14cea4ffd7 Merge 4.9.215 into android-4.9-q
Changes in 4.9.215
	x86/vdso: Use RDPID in preference to LSL when available
	KVM: x86: emulate RDPID
	ALSA: hda: Use scnprintf() for printing texts for sysfs/procfs
	ecryptfs: fix a memory leak bug in parse_tag_1_packet()
	ecryptfs: fix a memory leak bug in ecryptfs_init_messaging()
	ALSA: usb-audio: Apply sample rate quirk for Audioengine D1
	ext4: don't assume that mmp_nodename/bdevname have NUL
	ext4: fix checksum errors with indexed dirs
	ext4: improve explanation of a mount failure caused by a misconfigured kernel
	Btrfs: fix race between using extent maps and merging them
	btrfs: log message when rw remount is attempted with unclean tree-log
	perf/x86/amd: Add missing L2 misses event spec to AMD Family 17h's event map
	padata: Remove broken queue flushing
	s390/time: Fix clk type in get_tod_clock
	perf/x86/intel: Fix inaccurate period in context switch for auto-reload
	hwmon: (pmbus/ltc2978) Fix PMBus polling of MFR_COMMON definitions.
	jbd2: move the clearing of b_modified flag to the journal_unmap_buffer()
	jbd2: do not clear the BH_Mapped flag when forgetting a metadata buffer
	btrfs: print message when tree-log replay starts
	scsi: qla2xxx: fix a potential NULL pointer dereference
	Revert "KVM: VMX: Add non-canonical check on writes to RTIT address MSRs"
	drm/gma500: Fixup fbdev stolen size usage evaluation
	cpu/hotplug, stop_machine: Fix stop_machine vs hotplug order
	brcmfmac: Fix use after free in brcmf_sdio_readframes()
	gianfar: Fix TX timestamping with a stacked DSA driver
	pinctrl: sh-pfc: sh7264: Fix CAN function GPIOs
	pxa168fb: Fix the function used to release some memory in an error handling path
	media: i2c: mt9v032: fix enum mbus codes and frame sizes
	powerpc/powernv/iov: Ensure the pdn for VFs always contains a valid PE number
	gpio: gpio-grgpio: fix possible sleep-in-atomic-context bugs in grgpio_irq_map/unmap()
	media: sti: bdisp: fix a possible sleep-in-atomic-context bug in bdisp_device_run()
	pinctrl: baytrail: Do not clear IRQ flags on direct-irq enabled pins
	efi/x86: Map the entire EFI vendor string before copying it
	MIPS: Loongson: Fix potential NULL dereference in loongson3_platform_init()
	sparc: Add .exit.data section.
	uio: fix a sleep-in-atomic-context bug in uio_dmem_genirq_irqcontrol()
	usb: gadget: udc: fix possible sleep-in-atomic-context bugs in gr_probe()
	jbd2: clear JBD2_ABORT flag before journal_reset to update log tail info when load journal
	x86/sysfb: Fix check for bad VRAM size
	tracing: Fix tracing_stat return values in error handling paths
	tracing: Fix very unlikely race of registering two stat tracers
	ext4, jbd2: ensure panic when aborting with zero errno
	kconfig: fix broken dependency in randconfig-generated .config
	clk: qcom: rcg2: Don't crash if our parent can't be found; return an error
	drm/amdgpu: remove 4 set but not used variable in amdgpu_atombios_get_connector_info_from_object_table
	regulator: rk808: Lower log level on optional GPIOs being not available
	net/wan/fsl_ucc_hdlc: reject muram offsets above 64K
	PCI/IOV: Fix memory leak in pci_iov_add_virtfn()
	NFC: port100: Convert cpu_to_le16(le16_to_cpu(E1) + E2) to use le16_add_cpu().
	media: v4l2-device.h: Explicitly compare grp{id,mask} to zero in v4l2_device macros
	reiserfs: Fix spurious unlock in reiserfs_fill_super() error handling
	ALSA: usx2y: Adjust indentation in snd_usX2Y_hwdep_dsp_status
	b43legacy: Fix -Wcast-function-type
	ipw2x00: Fix -Wcast-function-type
	iwlegacy: Fix -Wcast-function-type
	rtlwifi: rtl_pci: Fix -Wcast-function-type
	orinoco: avoid assertion in case of NULL pointer
	ACPICA: Disassembler: create buffer fields in ACPI_PARSE_LOAD_PASS1
	scsi: aic7xxx: Adjust indentation in ahc_find_syncrate
	drm/mediatek: handle events when enabling/disabling crtc
	ARM: dts: r8a7779: Add device node for ARM global timer
	x86/vdso: Provide missing include file
	PM / devfreq: rk3399_dmc: Add COMPILE_TEST and HAVE_ARM_SMCCC dependency
	pinctrl: sh-pfc: sh7269: Fix CAN function GPIOs
	RDMA/rxe: Fix error type of mmap_offset
	ALSA: sh: Fix compile warning wrt const
	tools lib api fs: Fix gcc9 stringop-truncation compilation error
	usbip: Fix unsafe unaligned pointer usage
	udf: Fix free space reporting for metadata and virtual partitions
	soc/tegra: fuse: Correct straps' address for older Tegra124 device trees
	rcu: Use WRITE_ONCE() for assignments to ->pprev for hlist_nulls
	Input: edt-ft5x06 - work around first register access error
	wan: ixp4xx_hss: fix compile-testing on 64-bit
	ASoC: atmel: fix build error with CONFIG_SND_ATMEL_SOC_DMA=m
	tty: synclinkmp: Adjust indentation in several functions
	tty: synclink_gt: Adjust indentation in several functions
	driver core: platform: Prevent resouce overflow from causing infinite loops
	driver core: Print device when resources present in really_probe()
	vme: bridges: reduce stack usage
	drm/nouveau/gr/gk20a,gm200-: add terminators to method lists read from fw
	drm/nouveau: Fix copy-paste error in nouveau_fence_wait_uevent_handler
	drm/vmwgfx: prevent memory leak in vmw_cmdbuf_res_add
	usb: musb: omap2430: Get rid of musb .set_vbus for omap2430 glue
	iommu/arm-smmu-v3: Use WRITE_ONCE() when changing validity of an STE
	scsi: iscsi: Don't destroy session if there are outstanding connections
	arm64: fix alternatives with LLVM's integrated assembler
	pwm: omap-dmtimer: Remove PWM chip in .remove before making it unfunctional
	cmd64x: potential buffer overflow in cmd64x_program_timings()
	ide: serverworks: potential overflow in svwks_set_pio_mode()
	remoteproc: Initialize rproc_class before use
	x86/decoder: Add TEST opcode to Group3-2
	s390/ftrace: generate traced function stack frame
	driver core: platform: fix u32 greater or equal to zero comparison
	ALSA: hda - Add docking station support for Lenovo Thinkpad T420s
	powerpc/sriov: Remove VF eeh_dev state when disabling SR-IOV
	jbd2: switch to use jbd2_journal_abort() when failed to submit the commit record
	ARM: 8951/1: Fix Kexec compilation issue.
	hostap: Adjust indentation in prism2_hostapd_add_sta
	iwlegacy: ensure loop counter addr does not wrap and cause an infinite loop
	cifs: fix NULL dereference in match_prepath
	irqchip/gic-v3: Only provision redistributors that are enabled in ACPI
	drm/nouveau/disp/nv50-: prevent oops when no channel method map provided
	ftrace: fpid_next() should increase position index
	trigger_next should increase position index
	radeon: insert 10ms sleep in dce5_crtc_load_lut
	ocfs2: fix a NULL pointer dereference when call ocfs2_update_inode_fsync_trans()
	lib/scatterlist.c: adjust indentation in __sg_alloc_table
	reiserfs: prevent NULL pointer dereference in reiserfs_insert_item()
	bcache: explicity type cast in bset_bkey_last()
	irqchip/gic-v3-its: Reference to its_invall_cmd descriptor when building INVALL
	iwlwifi: mvm: Fix thermal zone registration
	microblaze: Prevent the overflow of the start
	brd: check and limit max_part par
	help_next should increase position index
	selinux: ensure we cleanup the internal AVC counters on error in avc_update()
	enic: prevent waking up stopped tx queues over watchdog reset
	net/sched: matchall: add missing validation of TCA_MATCHALL_FLAGS
	net/sched: flower: add missing validation of TCA_FLOWER_FLAGS
	floppy: check FDC index for errors before assigning it
	vt: selection, handle pending signals in paste_selection
	staging: android: ashmem: Disallow ashmem memory from being remapped
	staging: vt6656: fix sign of rx_dbm to bb_pre_ed_rssi.
	xhci: Force Maximum Packet size for Full-speed bulk devices to valid range.
	usb: uas: fix a plug & unplug racing
	USB: Fix novation SourceControl XL after suspend
	USB: hub: Don't record a connect-change event during reset-resume
	staging: rtl8188eu: Fix potential security hole
	staging: rtl8188eu: Fix potential overuse of kernel memory
	x86/mce/amd: Publish the bank pointer only after setup has succeeded
	x86/mce/amd: Fix kobject lifetime
	tty/serial: atmel: manage shutdown in case of RS485 or ISO7816 mode
	tty: serial: imx: setup the correct sg entry for tx dma
	Revert "ipc,sem: remove uneeded sem_undo_list lock usage in exit_sem()"
	xhci: apply XHCI_PME_STUCK_QUIRK to Intel Comet Lake platforms
	KVM: x86: don't notify userspace IOAPIC on edge-triggered interrupt EOI
	VT_RESIZEX: get rid of field-by-field copyin
	vt: vt_ioctl: fix race in VT_RESIZEX
	lib/stackdepot.c: fix global out-of-bounds in stack_slabs
	KVM: nVMX: Don't emulate instructions in guest mode
	netfilter: xt_bpf: add overflow checks
	ext4: fix a data race in EXT4_I(inode)->i_disksize
	ext4: add cond_resched() to __ext4_find_entry()
	ext4: fix mount failure with quota configured as module
	ext4: rename s_journal_flag_rwsem to s_writepages_rwsem
	ext4: fix race between writepages and enabling EXT4_EXTENTS_FL
	KVM: nVMX: Refactor IO bitmap checks into helper function
	KVM: nVMX: Check IO instruction VM-exit conditions
	KVM: apic: avoid calculating pending eoi from an uninitialized val
	Btrfs: fix btrfs_wait_ordered_range() so that it waits for all ordered extents
	scsi: Revert "RDMA/isert: Fix a recently introduced regression related to logout"
	scsi: Revert "target: iscsi: Wait for all commands to finish before freeing a session"
	usb: gadget: composite: Fix bMaxPower for SuperSpeedPlus
	staging: greybus: use after free in gb_audio_manager_remove_all()
	ecryptfs: replace BUG_ON with error handling code
	ALSA: rawmidi: Avoid bit fields for state flags
	ALSA: seq: Avoid concurrent access to queue flags
	ALSA: seq: Fix concurrent access to queue current tick/time
	netfilter: xt_hashlimit: limit the max size of hashtable
	ata: ahci: Add shutdown to freeze hardware resources of ahci
	xen: Enable interrupts when calling _cond_resched()
	s390/mm: Explicitly compare PAGE_DEFAULT_KEY against zero in storage_key_init_range
	Linux 4.9.215

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I4c663321dde48cd2a324e59acb70c99f75f9344e
2020-03-02 17:01:42 +03:00

759 lines · 19 KiB · C

/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg: The scatterlist
 *
 * Description:
 *   Allows the caller to know how many entries are in sg, taking chaining
 *   into account as well.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg))
		nents++;

	return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg: The scatterlist
 * @len: The total required length
 *
 * Description:
 *   Determines the number of entries in sg that are required to meet
 *   the supplied length, taking chaining into account as well.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl: First entry in the scatterlist
 * @nents: Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl: The SG table
 * @nents: Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg: SG entry
 * @buf: Virtual address for IO
 * @buflen: IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
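
/*
 * Editorial illustration, not part of the original file: a minimal,
 * hypothetical sketch of wrapping one kmalloc'd buffer in a single-entry
 * scatterlist with sg_init_one(). The function name and parameters are
 * invented for the example.
 */
static int example_wrap_single_buffer(void *cmd, unsigned int len)
{
	struct scatterlist sg;

	/* One entry: initialized, pointed at @cmd, and marked as the end. */
	sg_init_one(&sg, cmd, len);

	/*
	 * A real caller would now hand &sg (nents == 1) to something like
	 * dma_map_sg() or a block/crypto submission path.
	 */
	return 0;
}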

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table: The sg table header to use
 * @max_ents: The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn: Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table(). The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table: The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @max_ents: The maximum number of entries the allocator returns per call
 * @first_chunk: first, preallocated scatterlist chunk to use (may be NULL)
 * @gfp_mask: GFP allocation mask
 * @alloc_fn: Allocator to use
 *
 * Description:
 *   This function returns a @table that is @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage. Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
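
/*
 * Editorial illustration, not part of the original file: a hypothetical
 * sketch of the usual sg_alloc_table() pattern - allocate the table, point
 * each entry at a buffer with sg_set_buf(), walk it with for_each_sg(),
 * and release it with sg_free_table(). Function and parameter names are
 * invented for the example; error handling is kept minimal.
 */
static int example_build_table(void **bufs, unsigned int *lens,
			       unsigned int nents)
{
	struct sg_table table;
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(&table, nents, GFP_KERNEL);
	if (ret)
		return ret;

	for_each_sg(table.sgl, sg, table.nents, i)
		sg_set_buf(sg, bufs[i], lens[i]);

	/* ... use the table, e.g. dma_map_sg() in a real driver ... */

	sg_free_table(&table);
	return 0;
}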

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt: The sg table header to use
 * @pages: Pointer to an array of page pointers
 * @n_pages: Number of pages in the pages array
 * @offset: Offset from start of the first page to the start of a buffer
 * @size: Number of valid bytes in the buffer (after offset)
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. A user
 *   may provide an offset at the start and a size of valid data in a buffer
 *   specified by the page array. The returned sg table is released by
 *   sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
			      struct page **pages, unsigned int n_pages,
			      unsigned long offset, unsigned long size,
			      gfp_t gfp_mask)
{
	unsigned int chunks;
	unsigned int i;
	unsigned int cur_page;
	int ret;
	struct scatterlist *s;

	/* compute number of contiguous chunks */
	chunks = 1;
	for (i = 1; i < n_pages; ++i)
		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
			++chunks;

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long chunk_size;
		unsigned int j;

		/* look for the end of the current chunk */
		for (j = cur_page + 1; j < n_pages; ++j)
			if (page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
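
/*
 * Editorial illustration, not part of the original file: a hypothetical
 * sketch of sg_alloc_table_from_pages() for a caller that already holds an
 * array of pinned pages backing one contiguous buffer. The names, the zero
 * page offset and the @size argument are assumptions made for the example.
 */
static int example_table_from_pages(struct page **pages, unsigned int n_pages,
				    unsigned long size)
{
	struct sg_table sgt;
	int ret;

	/* Buffer starts at offset 0 of the first page and spans @size bytes. */
	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, 0, size,
					GFP_KERNEL);
	if (ret)
		return ret;

	/* ... map or walk sgt.sgl (sgt.orig_nents entries) here ... */

	sg_free_table(&sgt);
	return 0;
}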

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iteration flags (SG_MITER_*)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;

		miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
		miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
		miter->__offset &= PAGE_SIZE - 1;
		miter->__remaining = sg->offset + sg->length -
				     (miter->piter.sg_pgoffset << PAGE_SHIFT) -
				     miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance the current location by
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If mapping iterator @miter has been advanced by sg_miter_next(), this
 *   stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped or has not been advanced yet.
 *   Otherwise, preemption disabled if SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start(). On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
 *   until @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be resumed by
 *   calling sg_miter_next() on it. This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC is set. Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
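
/*
 * Editorial illustration, not part of the original file: a hypothetical
 * sketch of the mapping-iterator pattern documented above - sg_miter_start(),
 * a sg_miter_next() loop, then sg_miter_stop() - used here to zero every
 * byte covered by a scatterlist. SG_MITER_TO_SG is set because the list is
 * written to; without SG_MITER_ATOMIC the loop may sleep, so it must run in
 * process context. The function name is invented for the example.
 */
static void example_zero_sglist(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
	while (sg_miter_next(&miter)) {
		/* Each step maps at most one page: miter.addr/miter.length. */
		memset(miter.addr, 0, miter.length);
	}
	sg_miter_stop(&miter);
}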

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 * @to_buffer: transfer direction (true == from an sg list to a
 *	       buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return false;

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);
	local_irq_restore(flags);

	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
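
/*
 * Editorial illustration, not part of the original file: a hypothetical
 * sketch tying the copy helpers together - read the start of a scatterlist
 * into a linear buffer with sg_copy_to_buffer(), then write a few bytes back
 * into the list at a byte offset with sg_pcopy_from_buffer(). Buffer sizes,
 * the 16-byte offset and all names are assumptions made for the example.
 */
static size_t example_copy_roundtrip(struct scatterlist *sgl,
				     unsigned int nents)
{
	static const u8 patch[4] = { 0xde, 0xad, 0xbe, 0xef };
	u8 header[64];
	size_t copied;

	/* Copy up to 64 bytes from the start of the list into @header. */
	copied = sg_copy_to_buffer(sgl, nents, header, sizeof(header));

	/* Overwrite 4 bytes of the list, starting 16 bytes past its start. */
	sg_pcopy_from_buffer(sgl, nents, patch, sizeof(patch), 16);

	return copied;
}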