Also revert commit b91d532928df ("ipv6: set rt6i_protocol properly in the route when it is installed") as it breaks the test systems.

Changes in 4.9.136
xfrm: Validate address prefix lengths in the xfrm selector.
xfrm6: call kfree_skb when skb is toobig
mac80211: Always report TX status
cfg80211: reg: Init wiphy_idx in regulatory_hint_core()
mac80211: fix pending queue hang due to TX_DROP
cfg80211: Address some corner cases in scan result channel updating
mac80211: TDLS: fix skb queue/priority assignment
ARM: 8799/1: mm: fix pci_ioremap_io() offset check
xfrm: validate template mode
ARM: dts: BCM63xx: Fix incorrect interrupt specifiers
net: macb: Clean 64b dma addresses if they are not detected
soc: fsl: qbman: qman: avoid allocating from non existing gen_pool
soc: fsl: qe: Fix copy/paste bug in ucc_get_tdm_sync_shift()
nl80211: Fix possible Spectre-v1 for NL80211_TXRATE_HT
mac80211_hwsim: do not omit multicast announce of first added radio
Bluetooth: SMP: fix crash in unpairing
pxa168fb: prepare the clock
qed: Avoid implicit enum conversion in qed_roce_mode_to_flavor
qed: Avoid constant logical operation warning in qed_vf_pf_acquire
asix: Check for supported Wake-on-LAN modes
ax88179_178a: Check for supported Wake-on-LAN modes
lan78xx: Check for supported Wake-on-LAN modes
sr9800: Check for supported Wake-on-LAN modes
r8152: Check for supported Wake-on-LAN Modes
smsc75xx: Check for Wake-on-LAN modes
smsc95xx: Check for Wake-on-LAN modes
perf/ring_buffer: Prevent concurent ring buffer access
perf/x86/intel/uncore: Fix PCI BDF address of M3UPI on SKX
net: fec: fix rare tx timeout
declance: Fix continuation with the adapter identification message
net: cxgb3_main: fix a missing-check bug
perf symbols: Fix memory corruption because of zero length symbols
mm/memory_hotplug.c: fix overflow in test_pages_in_a_zone()
MIPS: microMIPS: Fix decoding of swsp16 instruction
MIPS: Handle non word sized instructions when examining frame
scsi: aacraid: Fix typo in blink status
f2fs: fix multiple f2fs_add_link() having same name for inline dentry
igb: Remove superfluous reset to PHY and page 0 selection
ACPI: sysfs: Make ACPI GPE mask kernel parameter cover all GPEs
PCI: Disable MSI for HiSilicon Hip06/Hip07 only in Root Port mode
i2c: bcm2835: Avoid possible NULL ptr dereference
efi/fb: Correct PCI_STD_RESOURCE_END usage
ipv6: set rt6i_protocol properly in the route when it is installed
platform/x86: acer-wmi: setup accelerometer when ACPI device was found
IB/ipoib: Do not warn if IPoIB debugfs doesn't exist
IB/core: Fix the validations of a multicast LID in attach or detach operations
orangefs: off by ones in xattr size checks
rxe: Fix a sleep-in-atomic bug in post_one_send
nvme-pci: fix CMB sysfs file removal in reset path
net: phy: marvell: Limit 88m1101 autoneg errata to 88E1145 as well.
net/mlx5: Fix command completion after timeout access invalid structure
tipc: Fix tipc_sk_reinit handling of -EAGAIN
tipc: fix a race condition of releasing subscriber object
bnxt_en: Don't use rtnl lock to protect link change logic in workqueue.
ath10k: fix NAPI enable/disable symmetry for AHB interface
ARM: dts: bcm283x: Reserve first page for firmware
btrfs: fiemap: Cache and merge fiemap extent before submit it to user
ata: sata_rcar: Handle return value of clk_prepare_enable
reset: hi6220: Set module license so that it can be loaded
ASoC: Intel: Skylake: Fix to parse consecutive string tkns in manifest
arch/sparc: increase CONFIG_NODES_SHIFT on SPARC64 to 5
mac80211: fix TX aggregation start/stop callback race
libata: fix error checking in in ata_parse_force_one()
net: ethernet: stmmac: Fix altr_tse_pcs SGMII Initialization
qlcnic: Fix tunnel offload for 82xx adapters
x86/cpu/cyrix: Add alternative Device ID of Geode GX1 SoC
ARM: 8677/1: boot/compressed: fix decompressor header layout for v7-M
gpu: ipu-v3: Fix CSI selection for VDIC
elevator: fix truncation of icq_cache_name
net: stmmac: ensure jumbo_frm error return is correctly checked for -ve value
Btrfs: clear EXTENT_DEFRAG bits in finish_ordered_io
ufs: we need to sync inode before freeing it
net/mlx5e: Fix fixpoint divide exception in mlx5e_am_stats_compare
ip6_tunnel: Correct tos value in collect_md mode
net/mlx5: Fix driver load error flow when firmware is stuck
perf evsel: Fix probing of precise_ip level for default cycles event
perf probe: Fix probe definition for inlined functions
net/mlx5: Fix health work queue spin lock to IRQ safe
usb: renesas_usbhs: gadget: fix spin_lock_init() for &uep->lock
usb: renesas_usbhs: gadget: fix unused-but-set-variable warning
usb: dwc3: omap: remove IRQ_NOAUTOEN used with shared irq
clk: samsung: Fix m2m scaler clock on Exynos542x
ptr_ring: fix up after recent ptr_ring changes
staging: wilc1000: Fix problem with wrong vif index
rds: ib: Fix missing call to rds_ib_dev_put in rds_ib_setup_qp
iio: adc: Revert "axp288: Drop bogus AXP288_ADC_TS_PIN_CTRL register modifications"
qed: Warn PTT usage by wrong hw-function
ocfs2: fix deadlock caused by recursive locking in xattr
net: cdc_ncm: GetNtbFormat endian fix
sctp: use right member as the param of list_for_each_entry
ALSA: hda - No loopback on ALC299 codec
ath10k: convert warning about non-existent OTP board id to debug message
ipv6: fix cleanup ordering for ip6_mr failure
IB/ipoib: Fix lockdep issue found on ipoib_ib_dev_heavy_flush
IB/rxe: put the pool on allocation failure
nbd: only set MSG_MORE when we have more to send
mm/frame_vector.c: release a semaphore in 'get_vaddr_frames()'
IB/mlx5: Avoid passing an invalid QP type to firmware
scsi: qla2xxx: Avoid double completion of abort command
drm: bochs: Don't remove uninitialized fbdev framebuffer
i40e: avoid NVM acquire deadlock during NVM update
Revert "IB/ipoib: Update broadcast object if PKey value was changed in index 0"
Btrfs: incremental send, fix invalid memory access
drm/msm: Fix possible null dereference on failure of get_pages()
module: fix DEBUG_SET_MODULE_RONX typo
iio: pressure: zpa2326: Remove always-true check which confuses gcc
l2tp: remove configurable payload offset
macsec: fix memory leaks when skb_to_sgvec fails
perf/core: Fix locking for children siblings group read
cifs: Use ULL suffix for 64-bit constant
futex: futex_wake_op, do not fail on invalid op
ALSA: hda - Fix incorrect usage of IS_REACHABLE()
test_bpf: Fix testing with CONFIG_BPF_JIT_ALWAYS_ON=y on other arches
xen-netfront: Update features after registering netdev
sparc64: Fix regression in pmdp_invalidate().
xen-netfront: Fix mismatched rtnl_unlock
enic: do not overwrite error code
bonding: ratelimit failed speed/duplex update warning
nvmet: fix space padding in serial number
iio: buffer: fix the function signature to match implementation
x86/paravirt: Fix some warning messages
IB/mlx4: Fix an error handling path in 'mlx4_ib_rereg_user_mr()'
libertas: call into generic suspend code before turning off power
xhci: Fix USB3 NULL pointer dereference at logical disconnect.
perf tests: Fix indexing when invoking subtests
ARM: dts: imx53-qsb: disable 1.2GHz OPP
rxrpc: Don't check RXRPC_CALL_TX_LAST after calling rxrpc_rotate_tx_window()
rxrpc: Only take the rwind and mtu values from latest ACK
net: ena: fix NULL dereference due to untimely napi initialization
fs/fat/fatent.c: add cond_resched() to fat_count_free_clusters()
mtd: spi-nor: Add support for is25wp series chips
Revert "netfilter: ipv6: nf_defrag: drop skb dst before queueing"
perf tools: Disable parallelism for 'make clean'
bridge: do not add port to router list when receives query with source 0.0.0.0
net: bridge: remove ipv6 zero address check in mcast queries
ipv6: mcast: fix a use-after-free in inet6_mc_check
ipv6/ndisc: Preserve IPv6 control buffer if protocol error handlers are called
llc: set SOCK_RCU_FREE in llc_sap_add_socket()
net/ipv6: Fix index counter for unicast addresses in in6_dump_addrs
net: sched: gred: pass the right attribute to gred_change_table_def()
net: socket: fix a missing-check bug
net: stmmac: Fix stmmac_mdio_reset() when building stmmac as modules
net: udp: fix handling of CHECKSUM_COMPLETE packets
r8169: fix NAPI handling under high load
sctp: fix race on sctp_id2asoc
vhost: Fix Spectre V1 vulnerability
ethtool: fix a privilege escalation bug
bonding: fix length of actor system
net: drop skb on failure in ip_check_defrag()
net: fix pskb_trim_rcsum_slow() with odd trim offset
rtnetlink: Disallow FDB configuration for non-Ethernet device
ip6_tunnel: Fix encapsulation layout
Revert "x86/mm: Expand static page table for fixmap space"
crypto: shash - Fix a sleep-in-atomic bug in shash_setkey_unaligned
ahci: don't ignore result code of ahci_reset_controller()
gpio: mxs: Get rid of external API call
xfs: truncate transaction does not modify the inobt
cachefiles: fix the race between cachefiles_bury_object() and rmdir(2)
ptp: fix Spectre v1 vulnerability
drm/edid: Add 6 bpc quirk for BOE panel in HP Pavilion 15-n233sl
RDMA/ucma: Fix Spectre v1 vulnerability
IB/ucm: Fix Spectre v1 vulnerability
cdc-acm: correct counting of UART states in serial state notification
usb: gadget: storage: Fix Spectre v1 vulnerability
USB: fix the usbfs flag sanitization for control transfers
Input: elan_i2c - add ACPI ID for Lenovo IdeaPad 330-15IGM
sched/fair: Fix throttle_list starvation with low CFS quota
x86/percpu: Fix this_cpu_read()
x86/time: Correct the attribute on jiffies' definition
net: fs_enet: do not call phy_stop() in interrupts
posix-timers: Sanitize overrun handling
Linux 4.9.136

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space. One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once. PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped. (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"


LIST_HEAD(static_vmlist);

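/*
 * Editorial note: static_vmlist holds the fixed, boot-time I/O mappings
 * (added early via add_static_vm_early(), e.g. from iotable_init()), kept
 * sorted by virtual address.  find_static_vm_paddr() returns the entry, if
 * any, whose physical range fully covers [paddr, paddr + size) with a
 * matching memory type, so ioremap() can hand back the existing virtual
 * address instead of building a fresh mapping.
 */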
static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;

		if (vm->phys_addr > paddr ||
			paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;

		return svm;
	}

	return NULL;
}

struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;

		/* static_vmlist is ascending order */
		if (vm->addr > vaddr)
			break;

		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}

	return NULL;
}

void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm;
	void *vaddr;

	vm = &svm->vm;
	vm_area_add_early(vm);
	vaddr = vm->addr;

	list_for_each_entry(curr_svm, &static_vmlist, list) {
		vm = &curr_svm->vm;

		if (vm->addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

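/*
 * Editorial note: this brings a task's page tables up to date with init_mm
 * for the vmalloc/ioremap region.  The PGD entries covering
 * VMALLOC_START..VMALLOC_END are copied from the kernel's reference tables;
 * the copy is retried until init_mm.context.vmalloc_seq stops changing, so
 * an increment made by a concurrent unmap_area_sections() is not missed.
 */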
void __check_vmalloc_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.vmalloc_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.vmalloc_seq = seq;
	} while (seq != init_mm.context.vmalloc_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.vmalloc_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
		__check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}

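/*
 * Editorial note: this variant maps the region with 16MB supersections.  A
 * supersection is encoded by repeating the same descriptor over 16
 * consecutive 1MB entries (the 8 pmd pairs written below); bits [23:20] of
 * the descriptor carry physical address bits [35:32], which is how memory
 * above 4GB is reached without LPAE.
 */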
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

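/*
 * Editorial note: this is the core of the ioremap family.  It first tries
 * to satisfy the request from an existing static mapping; otherwise it
 * grabs a vm area and maps it with supersections, 1MB sections or
 * individual pages, depending on alignment and CPU support.  System RAM may
 * only be remapped with its normal MT_MEMORY_RW attributes, since aliasing
 * memory with mismatched attributes causes problems on ARMv6 and later.
 */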
static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;
	phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mapping whenever possible.
	 */
	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
		struct static_vm *svm;

		svm = find_static_vm_paddr(paddr, size, mtype);
		if (svm) {
			addr = (unsigned long)svm->vm.addr;
			addr += paddr - svm->vm.phys_addr;
			return (void __iomem *) (offset + addr);
		}
	}

	/*
	 * Don't allow RAM to be mapped with mismatched attributes - this
	 * causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, paddr,
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	phys_addr_t last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
	__alias(ioremap_cached);

void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
EXPORT_SYMBOL(ioremap_cached);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

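/*
 * Editorial note: illustrative usage (not part of this file).  A driver
 * typically maps its register window once and then uses the MMIO accessors
 * on the returned cookie.  The resource, register offsets and control flag
 * below are hypothetical.
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(MYDEV_CTRL_ENABLE, regs + MYDEV_REG_CTRL);
 *	status = readl(regs + MYDEV_REG_STATUS);
 *	iounmap(regs);
 */
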
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY_RWX;
	else
		mtype = MT_MEMORY_RWX_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (__force void *)arch_ioremap_caller(phys_addr, size,
						   MT_MEMORY_RW,
						   __builtin_return_address(0));
}

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct static_vm *svm;

	/* If this is a static mapping, we must leave it alone */
	svm = find_static_vm_vaddr(addr);
	if (svm)
		return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	{
		struct vm_struct *vm;

		vm = find_vm_area(addr);

		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
			unmap_area_sections((unsigned long)vm->addr, vm->size);
	}
#endif

	vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void iounmap(volatile void __iomem *cookie)
{
	arch_iounmap(cookie);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
	pci_ioremap_mem_type = mem_type;
}

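/*
 * Editorial note: map one 64K window of PCI I/O space at
 * PCI_IO_VIRT_BASE + offset.  Host bridge drivers call this for each I/O
 * resource they provide; the BUG_ON below checks that the whole SZ_64K
 * window fits below IO_SPACE_LIMIT, the check addressed by "ARM: 8799/1:
 * mm: fix pci_ioremap_io() offset check" in the change list above.
 */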
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
	BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);

	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
				  PCI_IO_VIRT_BASE + offset + SZ_64K,
				  phys_addr,
				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);
#endif

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}