--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2614,6 +2614,7 @@ void shmem_set_file(struct vm_area_struc
 	vma->vm_ops = &shmem_vm_ops;
 	vma->vm_flags |= VM_CAN_NONLINEAR;
 }
+EXPORT_SYMBOL_GPL(shmem_set_file);
 
 /**
  * shmem_zero_setup - setup a shared anonymous mapping
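shmem_set_file() attaches a tmpfs/shmem file to a VMA and installs shmem's vm_ops; Android's ashmem driver does something very similar in its mmap handler, which is why the export is useful to modules. A minimal, hypothetical module-side sketch (the function and file names here are illustrative, not part of the patch):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/shmem_fs.h>

/* Illustrative only: back a driver mapping with an unlinked shmem file. */
static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
	loff_t size = vma->vm_end - vma->vm_start;
	struct file *shmem_file;

	shmem_file = shmem_file_setup("example-shmem", size, vma->vm_flags);
	if (IS_ERR(shmem_file))
		return PTR_ERR(shmem_file);

	/* Point the vma at the shmem file and install shmem's vm_ops. */
	shmem_set_file(vma, shmem_file);
	return 0;
}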
--- a/fs/file.c
+++ b/fs/file.c
@@ -269,6 +269,7 @@ int expand_files(struct files_struct *fi
 	/* All good, so we try */
 	return expand_fdtable(files, nr);
 }
+EXPORT_SYMBOL_GPL(expand_files);
 
 static int count_open_files(struct fdtable *fdt)
 {
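expand_files() grows a task's descriptor table so that fd number nr fits; it returns a negative errno on failure and a non-negative value on success, and expects files->file_lock to be held (it may drop and retake the lock while allocating). A hedged sketch of a hypothetical module-side caller:

#include <linux/fdtable.h>
#include <linux/spinlock.h>

/* Illustrative only: make sure 'fd' fits in 'files' before installing a file. */
static int example_reserve_fd(struct files_struct *files, unsigned int fd)
{
	int err;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);	/* <0 error, 0 no resize needed, 1 resized */
	spin_unlock(&files->file_lock);

	return err < 0 ? err : 0;
}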
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -501,6 +501,7 @@ struct files_struct *get_files_struct(st
 
 	return files;
 }
+EXPORT_SYMBOL_GPL(get_files_struct);
 
 void put_files_struct(struct files_struct *files)
 {
@@ -522,6 +523,7 @@ void put_files_struct(struct files_struc
 		rcu_read_unlock();
 	}
 }
+EXPORT_SYMBOL_GPL(put_files_struct);
 
 void reset_files_struct(struct files_struct *files)
 {
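get_files_struct() and put_files_struct() take and drop a counted reference on another task's open-file table; a driver that installs or inspects descriptors in a different process (Android's binder is the usual consumer) needs both exports as a pair. A hedged sketch with illustrative names:

#include <linux/errno.h>
#include <linux/fdtable.h>
#include <linux/sched.h>

/* Illustrative only: peek at the size of another task's fd table. */
static int example_max_fds(struct task_struct *task)
{
	struct files_struct *files;
	int max;

	files = get_files_struct(task);		/* NULL if the task is exiting */
	if (!files)
		return -ESRCH;

	rcu_read_lock();
	max = files_fdtable(files)->max_fds;
	rcu_read_unlock();

	put_files_struct(files);		/* drop the reference taken above */
	return max;
}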
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3928,6 +3928,7 @@ int can_nice(const struct task_struct *p
 	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
 		capable(CAP_SYS_NICE));
 }
+EXPORT_SYMBOL_GPL(can_nice);
 
 #ifdef __ARCH_WANT_SYS_NICE
 
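can_nice() reports whether a task may take a given nice value (RLIMIT_NICE or CAP_SYS_NICE). With the export, a module can run the same permission check the scheduler uses before adjusting priorities on a task's behalf; a hypothetical helper:

#include <linux/errno.h>
#include <linux/sched.h>

/* Illustrative only: apply the standard permission check before renicing. */
static int example_set_nice(struct task_struct *p, int nice)
{
	if (!can_nice(p, nice))		/* nonzero means the change is permitted */
		return -EPERM;

	set_user_nice(p, nice);		/* already exported by the scheduler */
	return 0;
}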
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1417,6 +1417,7 @@ static void zap_page_range_single(struct
 	mmu_notifier_invalidate_range_end(mm, address, end);
 	tlb_finish_mmu(&tlb, address, end);
 }
+EXPORT_SYMBOL_GPL(zap_page_range);
 
 /**
  * zap_vma_ptes - remove ptes mapping the vma
@@ -3102,6 +3103,7 @@ static inline int check_stack_guard_page
 	}
 	return 0;
 }
+EXPORT_SYMBOL_GPL(vmtruncate_range);
 
 /*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
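zap_page_range() tears down the page-table entries covering part of a VMA (vmtruncate_range(), exported in the same file, punches the corresponding hole in a file-backed mapping). A hedged sketch of a driver revoking pages it previously mapped to userspace; the caller is assumed to hold the mm's mmap_sem, and the names are illustrative:

#include <linux/mm.h>

/* Illustrative only: drop the PTEs for [start, start + len) in one VMA. */
static void example_revoke_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long len)
{
	/* NULL details means "no special filtering, zap everything". */
	zap_page_range(vma, start, len, NULL);
}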
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1258,6 +1258,7 @@ void unmap_kernel_range(unsigned long ad
 	vunmap_page_range(addr, end);
 	flush_tlb_kernel_range(addr, end);
 }
+EXPORT_SYMBOL_GPL(unmap_kernel_range);
 
 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 {
@@ -1395,6 +1396,7 @@ struct vm_struct *get_vm_area(unsigned l
 	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
 				  -1, GFP_KERNEL, __builtin_return_address(0));
 }
+EXPORT_SYMBOL_GPL(get_vm_area);
 
 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
 				     void *caller)
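get_vm_area() reserves a chunk of vmalloc address space and unmap_kernel_range() removes kernel mappings (and flushes the TLB) for such a range; together with map_vm_area() they let a module wire its own pages into kernel virtual memory. A hedged sketch with illustrative names:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Illustrative only: map nr_pages driver-owned pages into vmalloc space. */
static void *example_map_pages(struct page **pages, unsigned int nr_pages)
{
	struct vm_struct *area;
	struct page **tmp = pages;

	area = get_vm_area(nr_pages * PAGE_SIZE, VM_MAP);
	if (!area)
		return NULL;

	/* map_vm_area() advances 'tmp' as it consumes pages. */
	if (map_vm_area(area, PAGE_KERNEL, &tmp)) {
		free_vm_area(area);
		return NULL;
	}
	return area->addr;
}

/* Illustrative only: undo the mapping; the vm_struct itself would still
 * have to be released (e.g. via remove_vm_area() plus kfree()). */
static void example_unmap_pages(void *addr, unsigned int nr_pages)
{
	unmap_kernel_range((unsigned long)addr, nr_pages * PAGE_SIZE);
}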