Use debug_kmap_atomic in kmap_atomic, kmap_atomic_pfn, and
iomap_atomic_prot_pfn.

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
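For illustration, here is a minimal sketch of the calling pattern that debug_kmap_atomic() is meant to sanity-check: every atomic mapping passes a km_type slot that has to match the calling context and has to be released with the same slot. The helper name and its arguments are hypothetical and not part of this patch.

/* Hypothetical caller, for illustration only -- not part of this patch. */
static void copy_from_highmem_example(struct page *page, void *dst)
{
	void *src;

	/* kmap_atomic() now calls debug_kmap_atomic(KM_USER0) internally */
	src = kmap_atomic(page, KM_USER0);
	memcpy(dst, src, PAGE_SIZE);
	/* must pass the same km_type slot that was used for the mapping */
	kunmap_atomic(src, KM_USER0);
}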
/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * PowerPC version, stolen from the i386 version.
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabytes of physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes of physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/fixmap.h>

extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily; subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
/*
 * We use one full pte table with 4K pages. With 16K/64K/256K pages the
 * pte table covers enough memory (32MB/512MB/2GB respectively) that both
 * FIXMAP and PKMAP can be placed in a single pte table. We use 512 pages
 * for PKMAP in the 16K/64K/256K page size cases.
 */
#ifdef CONFIG_PPC_4K_PAGES
#define PKMAP_ORDER	PTE_SHIFT
#else
#define PKMAP_ORDER	9
#endif
#define LAST_PKMAP	(1 << PKMAP_ORDER)
#ifndef CONFIG_PPC_4K_PAGES
#define PKMAP_BASE	(FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1))
#else
#define PKMAP_BASE	((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
#endif
#define LAST_PKMAP_MASK	(LAST_PKMAP-1)
#define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
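
/*
 * Worked example (illustrative note, not part of the original file): with
 * 64K pages, PKMAP_ORDER is 9, so LAST_PKMAP = 512 and the PKMAP window
 * spans 512 * 64K = 32MB, while a single pte table maps 512MB -- plenty
 * of room for FIXMAP and PKMAP to share one pte table, as described above.
 */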

extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);

static inline void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

static inline void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	unsigned int idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
	local_flush_tlb_page(NULL, vaddr);

	return (void*) vaddr;
}

static inline void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}

static inline void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * Force other mappings to Oops if they try to access
	 * this pte without remapping it first.
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	local_flush_tlb_page(NULL, vaddr);
#endif
	pagefault_enable();
}

static inline struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long) ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

#define flush_cache_kmaps()	flush_cache_all()

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */
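As a usage note on the header above: the sleeping kmap()/kunmap() interface is the preferred one outside of atomic and interrupt context, while kmap_atomic()/kunmap_atomic() is reserved for the limited cases the in-file comment mentions. A minimal sketch of a process-context caller follows; the helper name is hypothetical and not part of the file.

/* Hypothetical process-context caller, for illustration only. */
static void zero_highpage_example(struct page *page)
{
	void *vaddr = kmap(page);	/* may sleep; not usable from interrupt context */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);			/* kunmap() takes the page, not the mapped address */
}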