# 1 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
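/*
 * Preprocessed (gcc -E) output of epon_main.c, built against linux-2.6.36
 * for MIPS with the Trendchip/tclinux toolchain.  The '# <line> "<file>"'
 * markers throughout record which source or header file each following
 * block of declarations was expanded from.
 */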
# 1 "<built-in>"
# 1 "<command-line>"
# 1 "./include/generated/autoconf.h" 1
# 1 "<command-line>" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
# 31 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
# 1 "include/linux/module.h" 1
# 9 "include/linux/module.h"
# 1 "include/linux/list.h" 1
# 1 "include/linux/types.h" 1
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/types.h" 1
# 21 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/types.h"
# 1 "include/asm-generic/int-ll64.h" 1
# 11 "include/asm-generic/int-ll64.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitsperlong.h" 1
# 1 "include/asm-generic/bitsperlong.h" 1
# 7 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitsperlong.h" 2
# 12 "include/asm-generic/int-ll64.h" 2
typedef __signed__ char __s8;
typedef unsigned char __u8;
typedef __signed__ short __s16;
typedef unsigned short __u16;
typedef __signed__ int __s32;
typedef unsigned int __u32;
__extension__ typedef __signed__ long long __s64;
__extension__ typedef unsigned long long __u64;
# 42 "include/asm-generic/int-ll64.h"
typedef signed char s8;
typedef unsigned char u8;
typedef signed short s16;
typedef unsigned short u16;
typedef signed int s32;
typedef unsigned int u32;
typedef signed long long s64;
typedef unsigned long long u64;
# 22 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/types.h" 2
typedef unsigned short umode_t;
# 40 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/types.h"
typedef u32 dma_addr_t;
typedef u64 dma64_addr_t;
typedef unsigned long phys_t;
# 5 "include/linux/types.h" 2
# 17 "include/linux/types.h"
# 1 "include/linux/posix_types.h" 1
# 1 "include/linux/stddef.h" 1
# 1 "include/linux/compiler.h" 1
# 44 "include/linux/compiler.h"
# 1 "include/linux/compiler-gcc.h" 1
# 90 "include/linux/compiler-gcc.h"
# 1 "include/linux/compiler-gcc4.h" 1
# 91 "include/linux/compiler-gcc.h" 2
# 45 "include/linux/compiler.h" 2
# 62 "include/linux/compiler.h"
struct ftrace_branch_data {
const char *func;
const char *file;
unsigned line;
union {
struct {
unsigned long correct;
unsigned long incorrect;
};
struct {
unsigned long miss;
unsigned long hit;
};
unsigned long miss_hit[2];
};
};
# 5 "include/linux/stddef.h" 2
# 15 "include/linux/stddef.h"
enum {
false = 0,
true = 1
};
# 5 "include/linux/posix_types.h" 2
# 36 "include/linux/posix_types.h"
typedef struct {
unsigned long fds_bits [(1024/(8 * sizeof(unsigned long)))];
} __kernel_fd_set;
typedef void (*__kernel_sighandler_t)(int);
typedef int __kernel_key_t;
typedef int __kernel_mqd_t;
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/posix_types.h" 1
# 12 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/posix_types.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/sgidefs.h" 1
# 13 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/posix_types.h" 2
typedef unsigned long __kernel_ino_t;
typedef unsigned int __kernel_mode_t;
typedef unsigned long __kernel_nlink_t;
typedef long __kernel_off_t;
typedef int __kernel_pid_t;
typedef int __kernel_ipc_pid_t;
typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;
typedef unsigned int __kernel_size_t;
typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
typedef long __kernel_time_t;
typedef long __kernel_suseconds_t;
typedef long __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
typedef long __kernel_daddr_t;
typedef char * __kernel_caddr_t;
typedef unsigned short __kernel_uid16_t;
typedef unsigned short __kernel_gid16_t;
typedef unsigned int __kernel_uid32_t;
typedef unsigned int __kernel_gid32_t;
typedef __kernel_uid_t __kernel_old_uid_t;
typedef __kernel_gid_t __kernel_old_gid_t;
typedef unsigned int __kernel_old_dev_t;
typedef long long __kernel_loff_t;
typedef struct {
long val[2];
} __kernel_fsid_t;
static __inline__ __attribute__((always_inline)) void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
{
unsigned long __tmp = __fd / (8 * sizeof(unsigned long));
unsigned long __rem = __fd % (8 * sizeof(unsigned long));
__fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
}
static __inline__ __attribute__((always_inline)) void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
{
unsigned long __tmp = __fd / (8 * sizeof(unsigned long));
unsigned long __rem = __fd % (8 * sizeof(unsigned long));
__fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
}
static __inline__ __attribute__((always_inline)) int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
{
unsigned long __tmp = __fd / (8 * sizeof(unsigned long));
unsigned long __rem = __fd % (8 * sizeof(unsigned long));
return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
}
static __inline__ __attribute__((always_inline)) void __FD_ZERO(__kernel_fd_set *__p)
{
unsigned long *__tmp = __p->fds_bits;
int __i;
if (__builtin_constant_p((1024/(8 * sizeof(unsigned long))))) {
switch ((1024/(8 * sizeof(unsigned long)))) {
case 16:
__tmp[ 0] = 0; __tmp[ 1] = 0;
__tmp[ 2] = 0; __tmp[ 3] = 0;
__tmp[ 4] = 0; __tmp[ 5] = 0;
__tmp[ 6] = 0; __tmp[ 7] = 0;
__tmp[ 8] = 0; __tmp[ 9] = 0;
__tmp[10] = 0; __tmp[11] = 0;
__tmp[12] = 0; __tmp[13] = 0;
__tmp[14] = 0; __tmp[15] = 0;
return;
case 8:
__tmp[ 0] = 0; __tmp[ 1] = 0;
__tmp[ 2] = 0; __tmp[ 3] = 0;
__tmp[ 4] = 0; __tmp[ 5] = 0;
__tmp[ 6] = 0; __tmp[ 7] = 0;
return;
case 4:
__tmp[ 0] = 0; __tmp[ 1] = 0;
__tmp[ 2] = 0; __tmp[ 3] = 0;
return;
}
}
__i = (1024/(8 * sizeof(unsigned long)));
while (__i) {
__i--;
*__tmp = 0;
__tmp++;
}
}
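/*
 * The __FD_SET/__FD_CLR/__FD_ISSET/__FD_ZERO helpers above manipulate one
 * bit per file descriptor inside __kernel_fd_set.fds_bits[], a 1024-bit
 * array of unsigned longs.  Illustration only: __FD_SET(35, &set) computes
 * word 35/32 == 1 and bit 35%32 == 3, i.e. set.fds_bits[1] |= 0x8.
 */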
# 48 "include/linux/posix_types.h" 2
# 18 "include/linux/types.h" 2
typedef __u32 __kernel_dev_t;
typedef __kernel_fd_set fd_set;
typedef __kernel_dev_t dev_t;
typedef __kernel_ino_t ino_t;
typedef __kernel_mode_t mode_t;
typedef __kernel_nlink_t nlink_t;
typedef __kernel_off_t off_t;
typedef __kernel_pid_t pid_t;
typedef __kernel_daddr_t daddr_t;
typedef __kernel_key_t key_t;
typedef __kernel_suseconds_t suseconds_t;
typedef __kernel_timer_t timer_t;
typedef __kernel_clockid_t clockid_t;
typedef __kernel_mqd_t mqd_t;
typedef _Bool bool;
typedef __kernel_uid32_t uid_t;
typedef __kernel_gid32_t gid_t;
typedef __kernel_uid16_t uid16_t;
typedef __kernel_gid16_t gid16_t;
typedef unsigned long uintptr_t;
# 55 "include/linux/types.h"
typedef __kernel_loff_t loff_t;
# 64 "include/linux/types.h"
typedef __kernel_size_t size_t;
typedef __kernel_ssize_t ssize_t;
typedef __kernel_ptrdiff_t ptrdiff_t;
typedef __kernel_time_t time_t;
typedef __kernel_clock_t clock_t;
typedef __kernel_caddr_t caddr_t;
typedef unsigned char u_char;
typedef unsigned short u_short;
typedef unsigned int u_int;
typedef unsigned long u_long;
typedef unsigned char unchar;
typedef unsigned short ushort;
typedef unsigned int uint;
typedef unsigned long ulong;
typedef __u8 u_int8_t;
typedef __s8 int8_t;
typedef __u16 u_int16_t;
typedef __s16 int16_t;
typedef __u32 u_int32_t;
typedef __s32 int32_t;
typedef __u8 uint8_t;
typedef __u16 uint16_t;
typedef __u32 uint32_t;
typedef __u64 uint64_t;
typedef __u64 u_int64_t;
typedef __s64 int64_t;
# 151 "include/linux/types.h"
typedef unsigned long sector_t;
typedef unsigned long blkcnt_t;
# 181 "include/linux/types.h"
typedef __u16 __le16;
typedef __u16 __be16;
typedef __u32 __le32;
typedef __u32 __be32;
typedef __u64 __le64;
typedef __u64 __be64;
typedef __u16 __sum16;
typedef __u32 __wsum;
typedef unsigned gfp_t;
typedef unsigned fmode_t;
typedef u32 phys_addr_t;
typedef phys_addr_t resource_size_t;
typedef struct {
int counter;
} atomic_t;
struct list_head {
struct list_head *next, *prev;
};
struct hlist_head {
struct hlist_node *first;
};
struct hlist_node {
struct hlist_node *next, **pprev;
};
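/*
 * struct list_head is the kernel's intrusive, circular doubly-linked list
 * node; struct hlist_head/hlist_node form the pointer-saving variant used
 * for hash-table buckets (single head pointer plus a pprev back-link).
 */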
struct ustat {
__kernel_daddr_t f_tfree;
__kernel_ino_t f_tinode;
char f_fname[6];
char f_fpack[6];
};
# 5 "include/linux/list.h" 2
# 1 "include/linux/poison.h" 1
# 7 "include/linux/list.h" 2
# 1 "include/linux/prefetch.h" 1
# 14 "include/linux/prefetch.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/processor.h" 1
# 14 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/processor.h"
# 1 "include/linux/cpumask.h" 1
# 9 "include/linux/cpumask.h"
# 1 "include/linux/kernel.h" 1
# 12 "include/linux/kernel.h"
# 1 "/opt/trendchip/mips-linux-glibc-4.9.3/usr/lib/gcc/mips-buildroot-linux-gnu/4.9.3/include/stdarg.h" 1 3 4
# 40 "/opt/trendchip/mips-linux-glibc-4.9.3/usr/lib/gcc/mips-buildroot-linux-gnu/4.9.3/include/stdarg.h" 3 4
typedef __builtin_va_list __gnuc_va_list;
# 98 "/opt/trendchip/mips-linux-glibc-4.9.3/usr/lib/gcc/mips-buildroot-linux-gnu/4.9.3/include/stdarg.h" 3 4
typedef __gnuc_va_list va_list;
# 13 "include/linux/kernel.h" 2
# 1 "include/linux/linkage.h" 1
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/linkage.h" 1
# 6 "include/linux/linkage.h" 2
# 14 "include/linux/kernel.h" 2
# 1 "include/linux/bitops.h" 1
# 13 "include/linux/bitops.h"
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h" 1
# 17 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h"
# 1 "include/linux/irqflags.h" 1
# 14 "include/linux/irqflags.h"
# 1 "include/linux/typecheck.h" 1
# 15 "include/linux/irqflags.h" 2
# 57 "include/linux/irqflags.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irqflags.h" 1
# 17 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irqflags.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/hazards.h" 1
# 17 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/hazards.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cpu-features.h" 1
# 12 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cpu-features.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cpu.h" 1
# 190 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cpu.h"
enum cpu_type_enum {
CPU_UNKNOWN,
CPU_R2000, CPU_R3000, CPU_R3000A, CPU_R3041, CPU_R3051, CPU_R3052,
CPU_R3081, CPU_R3081E,
CPU_R6000, CPU_R6000A,
CPU_R4000PC, CPU_R4000SC, CPU_R4000MC, CPU_R4200, CPU_R4300, CPU_R4310,
CPU_R4400PC, CPU_R4400SC, CPU_R4400MC, CPU_R4600, CPU_R4640, CPU_R4650,
CPU_R4700, CPU_R5000, CPU_R5000A, CPU_R5500, CPU_NEVADA, CPU_R5432,
CPU_R10000, CPU_R12000, CPU_R14000, CPU_VR41XX, CPU_VR4111, CPU_VR4121,
CPU_VR4122, CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000,
CPU_SR71000, CPU_RM9000, CPU_TX49XX,
CPU_R8000,
CPU_TX3912, CPU_TX3922, CPU_TX3927,
CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
CPU_ALCHEMY, CPU_PR4450, CPU_BCM3302, CPU_BCM4710,
CPU_BCM6338, CPU_BCM6345, CPU_BCM6348, CPU_BCM6358,
CPU_JZRISC,
CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
CPU_LAST
};
# 13 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cpu-features.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cpu-info.h" 1
# 17 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cpu-info.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cache.h" 1
# 12 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cache.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mach-generic/kmalloc.h" 1
# 13 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cache.h" 2
# 18 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cpu-info.h" 2
struct cache_desc {
unsigned int waysize;
unsigned short sets;
unsigned char ways;
unsigned char linesz;
unsigned char waybit;
unsigned char flags;
};
# 41 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cpu-info.h"
struct cpuinfo_mips {
unsigned int udelay_val;
unsigned int asid_cache;
unsigned long options;
unsigned long ases;
unsigned int processor_id;
unsigned int fpu_id;
unsigned int cputype;
int isa_level;
int tlbsize;
struct cache_desc icache;
struct cache_desc dcache;
struct cache_desc scache;
struct cache_desc tcache;
int srsets;
int core;
# 71 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cpu-info.h"
int vpe_id;
int tc_id;
void *data;
unsigned int watch_reg_count;
unsigned int watch_reg_use_cnt;
u16 watch_reg_masks[4];
} __attribute__((aligned((1 << 5))));
extern struct cpuinfo_mips cpu_data[];
extern void cpu_probe(void);
extern void cpu_report(void);
extern const char *__cpu_name[];
# 14 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cpu-features.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/tc3162/cpu-feature-overrides.h" 1
# 15 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cpu-features.h" 2
# 18 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/hazards.h" 2
# 30 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/hazards.h"
extern void mips_ihb(void);
__asm__(".macro " "_ssnop" "; " "sll $0, $0, 1" "; .endm"); static inline __attribute__((always_inline)) void _ssnop(void) { __asm__ __volatile__ ("_ssnop"); }
__asm__(".macro " "_ehb" "; " "sll $0, $0, 3" "; .endm"); static inline __attribute__((always_inline)) void _ehb(void) { __asm__ __volatile__ ("_ehb"); }
# 51 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/hazards.h"
__asm__(".macro " "mtc0_tlbw_hazard" "; " "_ehb" "; .endm"); static inline __attribute__((always_inline)) void mtc0_tlbw_hazard(void) { __asm__ __volatile__ ("mtc0_tlbw_hazard"); }
__asm__(".macro " "tlbw_use_hazard" "; " "_ehb" "; .endm"); static inline __attribute__((always_inline)) void tlbw_use_hazard(void) { __asm__ __volatile__ ("tlbw_use_hazard"); }
__asm__(".macro " "tlb_probe_hazard" "; " "_ehb" "; .endm"); static inline __attribute__((always_inline)) void tlb_probe_hazard(void) { __asm__ __volatile__ ("tlb_probe_hazard"); }
__asm__(".macro " "irq_enable_hazard" "; " "_ehb" "; .endm"); static inline __attribute__((always_inline)) void irq_enable_hazard(void) { __asm__ __volatile__ ("irq_enable_hazard"); }
__asm__(".macro " "irq_disable_hazard" "; " "_ehb" "; .endm"); static inline __attribute__((always_inline)) void irq_disable_hazard(void) { __asm__ __volatile__ ("irq_disable_hazard"); }
__asm__(".macro " "back_to_back_c0_hazard" "; " "_ehb" "; .endm"); static inline __attribute__((always_inline)) void back_to_back_c0_hazard(void) { __asm__ __volatile__ ("back_to_back_c0_hazard"); }
# 258 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/hazards.h"
__asm__(".macro " "enable_fpu_hazard" "; " "_ehb" "; .endm"); static inline __attribute__((always_inline)) void enable_fpu_hazard(void) { __asm__ __volatile__ ("enable_fpu_hazard"); }
__asm__(".macro " "disable_fpu_hazard" "; " "_ehb" "; .endm"); static inline __attribute__((always_inline)) void disable_fpu_hazard(void) { __asm__ __volatile__ ("disable_fpu_hazard"); }
# 18 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irqflags.h" 2
__asm__(
" .macro raw_local_irq_enable \n"
" .set push \n"
" .set reorder \n"
" .set noat \n"
" mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
" ori $1, 0x400 \n"
" xori $1, 0x400 \n"
" mtc0 $1, $2, 1 \n"
# 37 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irqflags.h"
" irq_enable_hazard \n"
" .set pop \n"
" .endm");
extern void smtc_ipi_replay(void);
static inline __attribute__((always_inline)) void raw_local_irq_enable(void)
{
smtc_ipi_replay();
__asm__ __volatile__(
"raw_local_irq_enable"
:
:
: "memory");
}
# 78 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irqflags.h"
__asm__(
" .macro raw_local_irq_disable\n"
" .set push \n"
" .set noat \n"
" mfc0 $1, $2, 1 \n"
" ori $1, 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
# 96 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irqflags.h"
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline __attribute__((always_inline)) void raw_local_irq_disable(void)
{
__asm__ __volatile__(
"raw_local_irq_disable"
:
:
: "memory");
}
__asm__(
" .macro raw_local_save_flags flags \n"
" .set push \n"
" .set reorder \n"
" mfc0 \\flags, $2, 1 \n"
" .set pop \n"
" .endm \n");
__asm__(
" .macro raw_local_irq_save result \n"
" .set push \n"
" .set reorder \n"
" .set noat \n"
" mfc0 \\result, $2, 1 \n"
" ori $1, \\result, 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
" andi \\result, \\result, 0x400 \n"
# 147 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irqflags.h"
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
# 158 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irqflags.h"
__asm__(
" .macro raw_local_irq_restore flags \n"
" .set push \n"
" .set noreorder \n"
" .set noat \n"
"mfc0 $1, $2, 1 \n"
"andi \\flags, 0x400 \n"
"ori $1, 0x400 \n"
"xori $1, 0x400 \n"
"or \\flags, $1 \n"
"mtc0 \\flags, $2, 1 \n"
# 194 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irqflags.h"
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline __attribute__((always_inline)) void raw_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
# 213 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irqflags.h"
if (__builtin_expect(!!(!(flags & 0x0400)), 0))
smtc_ipi_replay();
__asm__ __volatile__(
"raw_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
}
static inline __attribute__((always_inline)) void __raw_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
__asm__ __volatile__(
"raw_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
}
static inline __attribute__((always_inline)) int raw_irqs_disabled_flags(unsigned long flags)
{
return flags & 0x400;
}
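/*
 * This irqflags implementation is the SMTC (MIPS MT) variant: interrupts
 * are masked per thread context by setting bit 0x400 (TCStatus.IXMT) via
 * mfc0/mtc0 on CP0 register $2, select 1, rather than by touching
 * Status.IE.  raw_irqs_disabled_flags() therefore just tests that bit.
 */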
# 58 "include/linux/irqflags.h" 2
# 18 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/barrier.h" 1
# 20 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bug.h" 1
# 9 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bug.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/break.h" 1
# 10 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bug.h" 2
static inline __attribute__((always_inline)) void __attribute__((noreturn)) BUG(void)
{
__asm__ __volatile__("break %0" : : "i" (512));
__builtin_unreachable();
}
static inline __attribute__((always_inline)) void __BUG_ON(unsigned long condition)
{
if (__builtin_constant_p(condition)) {
if (condition)
BUG();
else
return;
}
__asm__ __volatile__("tne $0, %0, %1"
: : "r" (condition), "i" (512));
}
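/*
 * BUG() raises a MIPS 'break' exception with code 512; __BUG_ON() uses a
 * conditional trap ('tne $0, cond, 512'), which fires when the condition
 * is non-zero, so a non-constant check costs a single instruction.
 */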
# 41 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bug.h"
# 1 "include/asm-generic/bug.h" 1
# 64 "include/asm-generic/bug.h"
extern void warn_slowpath_fmt(const char *file, const int line,
const char *fmt, ...) __attribute__((format(printf, 3, 4)));
extern void warn_slowpath_fmt_taint(const char *file, const int line,
unsigned taint, const char *fmt, ...)
__attribute__((format(printf, 4, 5)));
extern void warn_slowpath_null(const char *file, const int line);
# 42 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bug.h" 2
# 21 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/byteorder.h" 1
# 12 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/byteorder.h"
# 1 "include/linux/byteorder/big_endian.h" 1
# 12 "include/linux/byteorder/big_endian.h"
# 1 "include/linux/swab.h" 1
# 46 "include/linux/swab.h"
static inline __attribute__((always_inline)) __attribute__((__const__)) __u16 __fswab16(__u16 val)
{
return ((__u16)( (((__u16)(val) & (__u16)0x00ffU) << 8) | (((__u16)(val) & (__u16)0xff00U) >> 8)));
}
static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswab32(__u32 val)
{
return ((__u32)( (((__u32)(val) & (__u32)0x000000ffUL) << 24) | (((__u32)(val) & (__u32)0x0000ff00UL) << 8) | (((__u32)(val) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(val) & (__u32)0xff000000UL) >> 24)));
}
static inline __attribute__((always_inline)) __attribute__((__const__)) __u64 __fswab64(__u64 val)
{
return ((__u64)( (((__u64)(val) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(val) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(val) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(val) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(val) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(val) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(val) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(val) & (__u64)0xff00000000000000ULL) >> 56)));
}
static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswahw32(__u32 val)
{
return ((__u32)( (((__u32)(val) & (__u32)0x0000ffffUL) << 16) | (((__u32)(val) & (__u32)0xffff0000UL) >> 16)));
}
static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswahb32(__u32 val)
{
return ((__u32)( (((__u32)(val) & (__u32)0x00ff00ffUL) << 8) | (((__u32)(val) & (__u32)0xff00ff00UL) >> 8)));
}
# 148 "include/linux/swab.h"
static inline __attribute__((always_inline)) __u16 __swab16p(const __u16 *p)
{
return (__builtin_constant_p((__u16)(*p)) ? ((__u16)( (((__u16)(*p) & (__u16)0x00ffU) << 8) | (((__u16)(*p) & (__u16)0xff00U) >> 8))) : __fswab16(*p));
}
static inline __attribute__((always_inline)) __u32 __swab32p(const __u32 *p)
{
return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x000000ffUL) << 24) | (((__u32)(*p) & (__u32)0x0000ff00UL) << 8) | (((__u32)(*p) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(*p) & (__u32)0xff000000UL) >> 24))) : __fswab32(*p));
}
static inline __attribute__((always_inline)) __u64 __swab64p(const __u64 *p)
{
return (__builtin_constant_p((__u64)(*p)) ? ((__u64)( (((__u64)(*p) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(*p) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(*p) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(*p) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(*p) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(*p) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(*p) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(*p) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(*p));
}
static inline __attribute__((always_inline)) __u32 __swahw32p(const __u32 *p)
{
return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x0000ffffUL) << 16) | (((__u32)(*p) & (__u32)0xffff0000UL) >> 16))) : __fswahw32(*p));
}
static inline __attribute__((always_inline)) __u32 __swahb32p(const __u32 *p)
{
return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x00ff00ffUL) << 8) | (((__u32)(*p) & (__u32)0xff00ff00UL) >> 8))) : __fswahb32(*p));
}
static inline __attribute__((always_inline)) void __swab16s(__u16 *p)
{
*p = __swab16p(p);
}
static inline __attribute__((always_inline)) void __swab32s(__u32 *p)
{
*p = __swab32p(p);
}
static inline __attribute__((always_inline)) void __swab64s(__u64 *p)
{
*p = __swab64p(p);
}
static inline __attribute__((always_inline)) void __swahw32s(__u32 *p)
{
*p = __swahw32p(p);
}
static inline __attribute__((always_inline)) void __swahb32s(__u32 *p)
{
*p = __swahb32p(p);
}
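/*
 * __fswab16/32/64 byte-swap a 16/32/64-bit value, e.g.
 * __fswab32(0x11223344) == 0x44332211; __fswahw32 swaps 16-bit halfwords
 * and __fswahb32 swaps bytes within each halfword.  The __swab*p/__swab*s
 * helpers below pick a compile-time-constant expansion via
 * __builtin_constant_p() and fall back to these inlines otherwise.
 */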
# 13 "include/linux/byteorder/big_endian.h" 2
# 43 "include/linux/byteorder/big_endian.h"
static inline __attribute__((always_inline)) __le64 __cpu_to_le64p(const __u64 *p)
{
return ( __le64)__swab64p(p);
}
static inline __attribute__((always_inline)) __u64 __le64_to_cpup(const __le64 *p)
{
return __swab64p((__u64 *)p);
}
static inline __attribute__((always_inline)) __le32 __cpu_to_le32p(const __u32 *p)
{
return ( __le32)__swab32p(p);
}
static inline __attribute__((always_inline)) __u32 __le32_to_cpup(const __le32 *p)
{
return __swab32p((__u32 *)p);
}
static inline __attribute__((always_inline)) __le16 __cpu_to_le16p(const __u16 *p)
{
return ( __le16)__swab16p(p);
}
static inline __attribute__((always_inline)) __u16 __le16_to_cpup(const __le16 *p)
{
return __swab16p((__u16 *)p);
}
static inline __attribute__((always_inline)) __be64 __cpu_to_be64p(const __u64 *p)
{
return ( __be64)*p;
}
static inline __attribute__((always_inline)) __u64 __be64_to_cpup(const __be64 *p)
{
return ( __u64)*p;
}
static inline __attribute__((always_inline)) __be32 __cpu_to_be32p(const __u32 *p)
{
return ( __be32)*p;
}
static inline __attribute__((always_inline)) __u32 __be32_to_cpup(const __be32 *p)
{
return ( __u32)*p;
}
static inline __attribute__((always_inline)) __be16 __cpu_to_be16p(const __u16 *p)
{
return ( __be16)*p;
}
static inline __attribute__((always_inline)) __u16 __be16_to_cpup(const __be16 *p)
{
return ( __u16)*p;
}
# 105 "include/linux/byteorder/big_endian.h"
# 1 "include/linux/byteorder/generic.h" 1
# 143 "include/linux/byteorder/generic.h"
static inline __attribute__((always_inline)) void le16_add_cpu(__le16 *var, u16 val)
{
*var = (( __le16)(__builtin_constant_p((__u16)(((__builtin_constant_p((__u16)(( __u16)(__le16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__le16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__le16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__le16)(*var))) + val))) ? ((__u16)( (((__u16)(((__builtin_constant_p((__u16)(( __u16)(__le16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__le16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__le16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__le16)(*var))) + val)) & (__u16)0x00ffU) << 8) | (((__u16)(((__builtin_constant_p((__u16)(( __u16)(__le16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__le16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__le16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__le16)(*var))) + val)) & (__u16)0xff00U) >> 8))) : __fswab16(((__builtin_constant_p((__u16)(( __u16)(__le16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__le16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__le16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__le16)(*var))) + val))));
}
static inline __attribute__((always_inline)) void le32_add_cpu(__le32 *var, u32 val)
{
*var = (( __le32)(__builtin_constant_p((__u32)(((__builtin_constant_p((__u32)(( __u32)(__le32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__le32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__le32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__le32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__le32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__le32)(*var))) + val))) ? ((__u32)( (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__le32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__le32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__le32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__le32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__le32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__le32)(*var))) + val)) & (__u32)0x000000ffUL) << 24) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__le32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__le32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__le32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__le32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__le32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__le32)(*var))) + val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__le32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__le32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__le32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__le32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__le32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__le32)(*var))) + val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__le32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__le32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__le32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__le32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__le32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__le32)(*var))) + val)) & (__u32)0xff000000UL) >> 24))) : __fswab32(((__builtin_constant_p((__u32)(( __u32)(__le32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__le32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__le32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__le32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__le32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__le32)(*var))) + val))));
}
static inline __attribute__((always_inline)) void le64_add_cpu(__le64 *var, u64 val)
{
*var = (( __le64)(__builtin_constant_p((__u64)(((__builtin_constant_p((__u64)(( __u64)(__le64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__le64)(*var))) + val))) ? ((__u64)( (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__le64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__le64)(*var))) + val)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__le64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__le64)(*var))) + val)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__le64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__le64)(*var))) + val)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__le64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__le64)(*var))) + val)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__le64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__le64)(*var))) + val)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__le64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__le64)(*var))) + val)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__le64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__le64)(*var))) + val)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__le64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__le64)(*var))) + val)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(((__builtin_constant_p((__u64)(( __u64)(__le64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__le64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__le64)(*var))) + val))));
}
static inline __attribute__((always_inline)) void be16_add_cpu(__be16 *var, u16 val)
{
*var = (( __be16)(__u16)((( __u16)(__be16)(*var)) + val));
}
static inline __attribute__((always_inline)) void be32_add_cpu(__be32 *var, u32 val)
{
*var = (( __be32)(__u32)((( __u32)(__be32)(*var)) + val));
}
static inline __attribute__((always_inline)) void be64_add_cpu(__be64 *var, u64 val)
{
*var = (( __be64)(__u64)((( __u64)(__be64)(*var)) + val));
}
# 106 "include/linux/byteorder/big_endian.h" 2
# 13 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/byteorder.h" 2
# 22 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/war.h" 1
# 12 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/war.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/tc3162/war.h" 1
# 13 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/war.h" 2
# 25 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h" 2
# 58 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h"
static inline __attribute__((always_inline)) void set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
unsigned short bit = nr & 31UL;
unsigned long temp;
if (1 && 0) {
__asm__ __volatile__(
" .set mips3 \n"
"1: " "ll " "%0, %1 # set_bit \n"
" or %0, %2 \n"
" " "sc " "%0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
: "ir" (1UL << bit), "m" (*m));
} else if (1 && __builtin_constant_p(bit)) {
__asm__ __volatile__(
"1: " "ll " "%0, %1 # set_bit \n"
" " "ins " "%0, %4, %2, 1 \n"
" " "sc " "%0, %1 \n"
" beqz %0, 2f \n"
" .subsection 2 \n"
"2: b 1b \n"
" .previous \n"
: "=&r" (temp), "=m" (*m)
: "ir" (bit), "m" (*m), "r" (~0));
} else if (1) {
__asm__ __volatile__(
" .set mips3 \n"
"1: " "ll " "%0, %1 # set_bit \n"
" or %0, %2 \n"
" " "sc " "%0, %1 \n"
" beqz %0, 2f \n"
" .subsection 2 \n"
"2: b 1b \n"
" .previous \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
: "ir" (1UL << bit), "m" (*m));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> 5;
mask = 1UL << bit;
__asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory");
*a |= mask;
raw_local_irq_restore(flags);
}
}
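/*
 * set_bit() and the other atomic bit ops below use a MIPS ll/sc
 * (load-linked / store-conditional) retry loop: reload the word, modify
 * it, and branch back to 1b whenever the 'sc' store fails.  The
 * 'if (1 && 0)' / 'else if (1)' constants are cpu-feature tests that the
 * preprocessor has already folded, so only one branch survives dead-code
 * elimination; the interrupt-disabling fallback is unreachable here.
 */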
# 123 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h"
static inline __attribute__((always_inline)) void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
unsigned short bit = nr & 31UL;
unsigned long temp;
if (1 && 0) {
__asm__ __volatile__(
" .set mips3 \n"
"1: " "ll " "%0, %1 # clear_bit \n"
" and %0, %2 \n"
" " "sc " "%0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
: "ir" (~(1UL << bit)), "m" (*m));
} else if (1 && __builtin_constant_p(bit)) {
__asm__ __volatile__(
"1: " "ll " "%0, %1 # clear_bit \n"
" " "ins " "%0, $0, %2, 1 \n"
" " "sc " "%0, %1 \n"
" beqz %0, 2f \n"
" .subsection 2 \n"
"2: b 1b \n"
" .previous \n"
: "=&r" (temp), "=m" (*m)
: "ir" (bit), "m" (*m));
} else if (1) {
__asm__ __volatile__(
" .set mips3 \n"
"1: " "ll " "%0, %1 # clear_bit \n"
" and %0, %2 \n"
" " "sc " "%0, %1 \n"
" beqz %0, 2f \n"
" .subsection 2 \n"
"2: b 1b \n"
" .previous \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
: "ir" (~(1UL << bit)), "m" (*m));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> 5;
mask = 1UL << bit;
__asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory");
*a &= ~mask;
raw_local_irq_restore(flags);
}
}
# 186 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h"
static inline __attribute__((always_inline)) void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
__asm__ __volatile__(" \n" : : :"memory");
clear_bit(nr, addr);
}
# 201 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h"
static inline __attribute__((always_inline)) void change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned short bit = nr & 31UL;
if (1 && 0) {
unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
unsigned long temp;
__asm__ __volatile__(
" .set mips3 \n"
"1: " "ll " "%0, %1 # change_bit \n"
" xor %0, %2 \n"
" " "sc " "%0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
: "ir" (1UL << bit), "m" (*m));
} else if (1) {
unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
unsigned long temp;
__asm__ __volatile__(
" .set mips3 \n"
"1: " "ll " "%0, %1 # change_bit \n"
" xor %0, %2 \n"
" " "sc " "%0, %1 \n"
" beqz %0, 2f \n"
" .subsection 2 \n"
"2: b 1b \n"
" .previous \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
: "ir" (1UL << bit), "m" (*m));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> 5;
mask = 1UL << bit;
__asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory");
*a ^= mask;
raw_local_irq_restore(flags);
}
}
# 255 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h"
static inline __attribute__((always_inline)) int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & 31UL;
unsigned long res;
__asm__ __volatile__(" \n" : : :"memory");
if (1 && 0) {
unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
unsigned long temp;
__asm__ __volatile__(
" .set mips3 \n"
"1: " "ll " "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " "sc " "%2, %1 \n"
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << bit), "m" (*m)
: "memory");
} else if (1) {
unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
unsigned long temp;
__asm__ __volatile__(
" .set push \n"
" .set noreorder \n"
" .set mips3 \n"
"1: " "ll " "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " "sc " "%2, %1 \n"
" beqz %2, 2f \n"
" and %2, %0, %3 \n"
" .subsection 2 \n"
"2: b 1b \n"
" nop \n"
" .previous \n"
" .set pop \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << bit), "m" (*m)
: "memory");
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> 5;
mask = 1UL << bit;
__asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory");
res = (mask & *a);
*a |= mask;
raw_local_irq_restore(flags);
}
__asm__ __volatile__(" \n" : : :"memory");
return res != 0;
}
# 325 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h"
static inline __attribute__((always_inline)) int test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & 31UL;
unsigned long res;
if (1 && 0) {
unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
unsigned long temp;
__asm__ __volatile__(
" .set mips3 \n"
"1: " "ll " "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " "sc " "%2, %1 \n"
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << bit), "m" (*m)
: "memory");
} else if (1) {
unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
unsigned long temp;
__asm__ __volatile__(
" .set push \n"
" .set noreorder \n"
" .set mips3 \n"
"1: " "ll " "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " "sc " "%2, %1 \n"
" beqz %2, 2f \n"
" and %2, %0, %3 \n"
" .subsection 2 \n"
"2: b 1b \n"
" nop \n"
" .previous \n"
" .set pop \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << bit), "m" (*m)
: "memory");
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> 5;
mask = 1UL << bit;
__asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory");
res = (mask & *a);
*a |= mask;
raw_local_irq_restore(flags);
}
__asm__ __volatile__(" \n" : : :"memory");
return res != 0;
}
# 392 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h"
static inline __attribute__((always_inline)) int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & 31UL;
unsigned long res;
__asm__ __volatile__(" \n" : : :"memory");
if (1 && 0) {
unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
unsigned long temp;
__asm__ __volatile__(
" .set mips3 \n"
"1: " "ll " "%0, %1 # test_and_clear_bit \n"
" or %2, %0, %3 \n"
" xor %2, %3 \n"
" " "sc " "%2, %1 \n"
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << bit), "m" (*m)
: "memory");
} else if (1 && __builtin_constant_p(nr)) {
unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
unsigned long temp;
__asm__ __volatile__(
"1: " "ll " "%0, %1 # test_and_clear_bit \n"
" " "ext " "%2, %0, %3, 1 \n"
" " "ins " "%0, $0, %3, 1 \n"
" " "sc " "%0, %1 \n"
" beqz %0, 2f \n"
" .subsection 2 \n"
"2: b 1b \n"
" .previous \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "ir" (bit), "m" (*m)
: "memory");
} else if (1) {
unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
unsigned long temp;
__asm__ __volatile__(
" .set push \n"
" .set noreorder \n"
" .set mips3 \n"
"1: " "ll " "%0, %1 # test_and_clear_bit \n"
" or %2, %0, %3 \n"
" xor %2, %3 \n"
" " "sc " "%2, %1 \n"
" beqz %2, 2f \n"
" and %2, %0, %3 \n"
" .subsection 2 \n"
"2: b 1b \n"
" nop \n"
" .previous \n"
" .set pop \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << bit), "m" (*m)
: "memory");
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> 5;
mask = 1UL << bit;
__asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory");
res = (mask & *a);
*a &= ~mask;
raw_local_irq_restore(flags);
}
__asm__ __volatile__(" \n" : : :"memory");
return res != 0;
}
# 482 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h"
static inline __attribute__((always_inline)) int test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & 31UL;
unsigned long res;
__asm__ __volatile__(" \n" : : :"memory");
if (1 && 0) {
unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
unsigned long temp;
__asm__ __volatile__(
" .set mips3 \n"
"1: " "ll " "%0, %1 # test_and_change_bit \n"
" xor %2, %0, %3 \n"
" " "sc " "%2, %1 \n"
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << bit), "m" (*m)
: "memory");
} else if (1) {
unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
unsigned long temp;
__asm__ __volatile__(
" .set push \n"
" .set noreorder \n"
" .set mips3 \n"
"1: " "ll " "%0, %1 # test_and_change_bit \n"
" xor %2, %0, %3 \n"
" " "sc " "\t%2, %1 \n"
" beqz %2, 2f \n"
" and %2, %0, %3 \n"
" .subsection 2 \n"
"2: b 1b \n"
" nop \n"
" .previous \n"
" .set pop \n"
: "=&r" (temp), "=m" (*m), "=&r" (res)
: "r" (1UL << bit), "m" (*m)
: "memory");
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> 5;
mask = 1UL << bit;
__asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory");
res = (mask & *a);
*a ^= mask;
raw_local_irq_restore(flags);
}
__asm__ __volatile__(" \n" : : :"memory");
return res != 0;
}
# 1 "include/asm-generic/bitops/non-atomic.h" 1
# 15 "include/asm-generic/bitops/non-atomic.h"
static inline __attribute__((always_inline)) void __set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = (1UL << ((nr) % 32));
unsigned long *p = ((unsigned long *)addr) + ((nr) / 32);
*p |= mask;
}
static inline __attribute__((always_inline)) void __clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = (1UL << ((nr) % 32));
unsigned long *p = ((unsigned long *)addr) + ((nr) / 32);
*p &= ~mask;
}
# 40 "include/asm-generic/bitops/non-atomic.h"
static inline __attribute__((always_inline)) void __change_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = (1UL << ((nr) % 32));
unsigned long *p = ((unsigned long *)addr) + ((nr) / 32);
*p ^= mask;
}
# 57 "include/asm-generic/bitops/non-atomic.h"
static inline __attribute__((always_inline)) int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = (1UL << ((nr) % 32));
unsigned long *p = ((unsigned long *)addr) + ((nr) / 32);
unsigned long old = *p;
*p = old | mask;
return (old & mask) != 0;
}
# 76 "include/asm-generic/bitops/non-atomic.h"
static inline __attribute__((always_inline)) int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
unsigned long mask = (1UL << ((nr) % 32));
unsigned long *p = ((unsigned long *)addr) + ((nr) / 32);
unsigned long old = *p;
*p = old & ~mask;
return (old & mask) != 0;
}
static inline __attribute__((always_inline)) int __test_and_change_bit(int nr,
volatile unsigned long *addr)
{
unsigned long mask = (1UL << ((nr) % 32));
unsigned long *p = ((unsigned long *)addr) + ((nr) / 32);
unsigned long old = *p;
*p = old ^ mask;
return (old & mask) != 0;
}
static inline __attribute__((always_inline)) int test_bit(int nr, const volatile unsigned long *addr)
{
return 1UL & (addr[((nr) / 32)] >> (nr & (32 -1)));
}
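/*
 * The double-underscore __set_bit/__clear_bit/__change_bit and
 * __test_and_* helpers are the non-atomic counterparts of the ll/sc
 * versions above: plain read-modify-write, valid only when the caller
 * already guarantees exclusion.  test_bit() simply reads the bit.
 */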
# 545 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h" 2
# 555 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h"
static inline __attribute__((always_inline)) void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
__asm__ __volatile__("": : :"memory");
__clear_bit(nr, addr);
}
static inline __attribute__((always_inline)) unsigned long __fls(unsigned long word)
{
int num;
if (32 == 32 &&
__builtin_constant_p(((cpu_data[0].isa_level & 0x00000020) | (cpu_data[0].isa_level & 0x00000040) | (cpu_data[0].isa_level & 0x00000080) | (cpu_data[0].isa_level & 0x00000100))) && ((cpu_data[0].isa_level & 0x00000020) | (cpu_data[0].isa_level & 0x00000040) | (cpu_data[0].isa_level & 0x00000080) | (cpu_data[0].isa_level & 0x00000100))) {
__asm__(
" .set push \n"
" .set mips32 \n"
" clz %0, %1 \n"
" .set pop \n"
: "=r" (num)
: "r" (word));
return 31 - num;
}
if (32 == 64 &&
__builtin_constant_p(((cpu_data[0].isa_level & 0x00000080) | (cpu_data[0].isa_level & 0x00000100))) && ((cpu_data[0].isa_level & 0x00000080) | (cpu_data[0].isa_level & 0x00000100))) {
__asm__(
" .set push \n"
" .set mips64 \n"
" dclz %0, %1 \n"
" .set pop \n"
: "=r" (num)
: "r" (word));
return 63 - num;
}
num = 32 - 1;
if (!(word & (~0ul << (32 -16)))) {
num -= 16;
word <<= 16;
}
if (!(word & (~0ul << (32 -8)))) {
num -= 8;
word <<= 8;
}
if (!(word & (~0ul << (32 -4)))) {
num -= 4;
word <<= 4;
}
if (!(word & (~0ul << (32 -2)))) {
num -= 2;
word <<= 2;
}
if (!(word & (~0ul << (32 -1))))
num -= 1;
return num;
}
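/*
 * __fls() returns the zero-based index of the most significant set bit
 * (undefined for 0), using the MIPS 'clz' instruction when the ISA level
 * allows it: __fls(x) == 31 - clz(x) on 32-bit, e.g. __fls(0x80000000)
 * == 31.  The shift-and-test chain below is the generic fallback.
 */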
# 631 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h"
static inline __attribute__((always_inline)) unsigned long __ffs(unsigned long word)
{
return __fls(word & -word);
}
# 643 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h"
static inline __attribute__((always_inline)) int fls(int x)
{
int r;
if (__builtin_constant_p(((cpu_data[0].isa_level & 0x00000020) | (cpu_data[0].isa_level & 0x00000040) | (cpu_data[0].isa_level & 0x00000080) | (cpu_data[0].isa_level & 0x00000100))) && ((cpu_data[0].isa_level & 0x00000020) | (cpu_data[0].isa_level & 0x00000040) | (cpu_data[0].isa_level & 0x00000080) | (cpu_data[0].isa_level & 0x00000100))) {
__asm__("clz %0, %1" : "=r" (x) : "r" (x));
return 32 - x;
}
r = 32;
if (!x)
return 0;
if (!(x & 0xffff0000u)) {
x <<= 16;
r -= 16;
}
if (!(x & 0xff000000u)) {
x <<= 8;
r -= 8;
}
if (!(x & 0xf0000000u)) {
x <<= 4;
r -= 4;
}
if (!(x & 0xc0000000u)) {
x <<= 2;
r -= 2;
}
if (!(x & 0x80000000u)) {
x <<= 1;
r -= 1;
}
return r;
}
# 1 "include/asm-generic/bitops/fls64.h" 1
# 18 "include/asm-generic/bitops/fls64.h"
static inline __attribute__((always_inline)) __attribute__((always_inline)) int fls64(__u64 x)
{
__u32 h = x >> 32;
if (h)
return fls(h) + 32;
return fls(x);
}
# 680 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h" 2
# 689 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h"
static inline __attribute__((always_inline)) int ffs(int word)
{
if (!word)
return 0;
return fls(word & -word);
}
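/*
 * fls()/ffs() use the conventional one-based kernel semantics:
 * fls(0) == 0, fls(1) == 1, fls(0x80000000) == 32, and
 * ffs(x) == fls(x & -x), so ffs(8) == 4 and ffs(0) == 0.
 */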
# 1 "include/asm-generic/bitops/ffz.h" 1
# 698 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h" 2
# 1 "include/asm-generic/bitops/find.h" 1
# 699 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h" 2
# 1 "include/asm-generic/bitops/sched.h" 1
# 12 "include/asm-generic/bitops/sched.h"
static inline __attribute__((always_inline)) int sched_find_first_bit(const unsigned long *b)
{
if (b[0])
return __ffs(b[0]);
if (b[1])
return __ffs(b[1]) + 32;
if (b[2])
return __ffs(b[2]) + 64;
return __ffs(b[3]) + 96;
}
# 703 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/arch_hweight.h" 1
# 35 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/arch_hweight.h"
# 1 "include/asm-generic/bitops/arch_hweight.h" 1
static inline __attribute__((always_inline)) unsigned int __arch_hweight32(unsigned int w)
{
return __sw_hweight32(w);
}
static inline __attribute__((always_inline)) unsigned int __arch_hweight16(unsigned int w)
{
return __sw_hweight16(w);
}
static inline __attribute__((always_inline)) unsigned int __arch_hweight8(unsigned int w)
{
return __sw_hweight8(w);
}
static inline __attribute__((always_inline)) unsigned long __arch_hweight64(__u64 w)
{
return __sw_hweight64(w);
}
# 36 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/arch_hweight.h" 2
# 705 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h" 2
# 1 "include/asm-generic/bitops/const_hweight.h" 1
# 706 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h" 2
# 1 "include/asm-generic/bitops/ext2-non-atomic.h" 1
# 1 "include/asm-generic/bitops/le.h" 1
# 45 "include/asm-generic/bitops/le.h"
extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
unsigned long size, unsigned long offset);
extern unsigned long generic_find_next_le_bit(const unsigned long *addr,
unsigned long size, unsigned long offset);
# 5 "include/asm-generic/bitops/ext2-non-atomic.h" 2
# 708 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h" 2
# 1 "include/asm-generic/bitops/ext2-atomic.h" 1
# 709 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h" 2
# 1 "include/asm-generic/bitops/minix.h" 1
# 710 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/bitops.h" 2
# 23 "include/linux/bitops.h" 2
static __inline__ __attribute__((always_inline)) int get_bitmask_order(unsigned int count)
{
int order;
order = fls(count);
return order;
}
static __inline__ __attribute__((always_inline)) int get_count_order(unsigned int count)
{
int order;
order = fls(count) - 1;
if (count & (count - 1))
order++;
return order;
}
static inline __attribute__((always_inline)) unsigned long hweight_long(unsigned long w)
{
return sizeof(w) == 4 ? (__builtin_constant_p(w) ? ((( (!!((w) & (1ULL << 0))) + (!!((w) & (1ULL << 1))) + (!!((w) & (1ULL << 2))) + (!!((w) & (1ULL << 3))) + (!!((w) & (1ULL << 4))) + (!!((w) & (1ULL << 5))) + (!!((w) & (1ULL << 6))) + (!!((w) & (1ULL << 7))) ) + ( (!!(((w) >> 8) & (1ULL << 0))) + (!!(((w) >> 8) & (1ULL << 1))) + (!!(((w) >> 8) & (1ULL << 2))) + (!!(((w) >> 8) & (1ULL << 3))) + (!!(((w) >> 8) & (1ULL << 4))) + (!!(((w) >> 8) & (1ULL << 5))) + (!!(((w) >> 8) & (1ULL << 6))) + (!!(((w) >> 8) & (1ULL << 7))) )) + (( (!!(((w) >> 16) & (1ULL << 0))) + (!!(((w) >> 16) & (1ULL << 1))) + (!!(((w) >> 16) & (1ULL << 2))) + (!!(((w) >> 16) & (1ULL << 3))) + (!!(((w) >> 16) & (1ULL << 4))) + (!!(((w) >> 16) & (1ULL << 5))) + (!!(((w) >> 16) & (1ULL << 6))) + (!!(((w) >> 16) & (1ULL << 7))) ) + ( (!!((((w) >> 16) >> 8) & (1ULL << 0))) + (!!((((w) >> 16) >> 8) & (1ULL << 1))) + (!!((((w) >> 16) >> 8) & (1ULL << 2))) + (!!((((w) >> 16) >> 8) & (1ULL << 3))) + (!!((((w) >> 16) >> 8) & (1ULL << 4))) + (!!((((w) >> 16) >> 8) & (1ULL << 5))) + (!!((((w) >> 16) >> 8) & (1ULL << 6))) + (!!((((w) >> 16) >> 8) & (1ULL << 7))) ))) : __arch_hweight32(w)) : (__builtin_constant_p(w) ? (((( (!!((w) & (1ULL << 0))) + (!!((w) & (1ULL << 1))) + (!!((w) & (1ULL << 2))) + (!!((w) & (1ULL << 3))) + (!!((w) & (1ULL << 4))) + (!!((w) & (1ULL << 5))) + (!!((w) & (1ULL << 6))) + (!!((w) & (1ULL << 7))) ) + ( (!!(((w) >> 8) & (1ULL << 0))) + (!!(((w) >> 8) & (1ULL << 1))) + (!!(((w) >> 8) & (1ULL << 2))) + (!!(((w) >> 8) & (1ULL << 3))) + (!!(((w) >> 8) & (1ULL << 4))) + (!!(((w) >> 8) & (1ULL << 5))) + (!!(((w) >> 8) & (1ULL << 6))) + (!!(((w) >> 8) & (1ULL << 7))) )) + (( (!!(((w) >> 16) & (1ULL << 0))) + (!!(((w) >> 16) & (1ULL << 1))) + (!!(((w) >> 16) & (1ULL << 2))) + (!!(((w) >> 16) & (1ULL << 3))) + (!!(((w) >> 16) & (1ULL << 4))) + (!!(((w) >> 16) & (1ULL << 5))) + (!!(((w) >> 16) & (1ULL << 6))) + (!!(((w) >> 16) & (1ULL << 7))) ) + ( (!!((((w) >> 16) >> 8) & (1ULL << 0))) + (!!((((w) >> 16) >> 8) & (1ULL << 1))) + (!!((((w) >> 16) >> 8) & (1ULL << 2))) + (!!((((w) >> 16) >> 8) & (1ULL << 3))) + (!!((((w) >> 16) >> 8) & (1ULL << 4))) + (!!((((w) >> 16) >> 8) & (1ULL << 5))) + (!!((((w) >> 16) >> 8) & (1ULL << 6))) + (!!((((w) >> 16) >> 8) & (1ULL << 7))) ))) + ((( (!!(((w) >> 32) & (1ULL << 0))) + (!!(((w) >> 32) & (1ULL << 1))) + (!!(((w) >> 32) & (1ULL << 2))) + (!!(((w) >> 32) & (1ULL << 3))) + (!!(((w) >> 32) & (1ULL << 4))) + (!!(((w) >> 32) & (1ULL << 5))) + (!!(((w) >> 32) & (1ULL << 6))) + (!!(((w) >> 32) & (1ULL << 7))) ) + ( (!!((((w) >> 32) >> 8) & (1ULL << 0))) + (!!((((w) >> 32) >> 8) & (1ULL << 1))) + (!!((((w) >> 32) >> 8) & (1ULL << 2))) + (!!((((w) >> 32) >> 8) & (1ULL << 3))) + (!!((((w) >> 32) >> 8) & (1ULL << 4))) + (!!((((w) >> 32) >> 8) & (1ULL << 5))) + (!!((((w) >> 32) >> 8) & (1ULL << 6))) + (!!((((w) >> 32) >> 8) & (1ULL << 7))) )) + (( (!!((((w) >> 32) >> 16) & (1ULL << 0))) + (!!((((w) >> 32) >> 16) & (1ULL << 1))) + (!!((((w) >> 32) >> 16) & (1ULL << 2))) + (!!((((w) >> 32) >> 16) & (1ULL << 3))) + (!!((((w) >> 32) >> 16) & (1ULL << 4))) + (!!((((w) >> 32) >> 16) & (1ULL << 5))) + (!!((((w) >> 32) >> 16) & (1ULL << 6))) + (!!((((w) >> 32) >> 16) & (1ULL << 7))) ) + ( (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 0))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 1))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 2))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 3))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 4))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 5))) + (!!(((((w) >> 
32) >> 16) >> 8) & (1ULL << 6))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 7))) )))) : __arch_hweight64(w));
}
static inline __attribute__((always_inline)) __u32 rol32(__u32 word, unsigned int shift)
{
return (word << shift) | (word >> (32 - shift));
}
static inline __attribute__((always_inline)) __u32 ror32(__u32 word, unsigned int shift)
{
return (word >> shift) | (word << (32 - shift));
}
static inline __attribute__((always_inline)) __u16 rol16(__u16 word, unsigned int shift)
{
return (word << shift) | (word >> (16 - shift));
}
static inline __attribute__((always_inline)) __u16 ror16(__u16 word, unsigned int shift)
{
return (word >> shift) | (word << (16 - shift));
}
static inline __attribute__((always_inline)) __u8 rol8(__u8 word, unsigned int shift)
{
return (word << shift) | (word >> (8 - shift));
}
static inline __attribute__((always_inline)) __u8 ror8(__u8 word, unsigned int shift)
{
return (word >> shift) | (word << (8 - shift));
}
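/*
 * The rol/ror helpers above rotate a word left/right by 'shift' bits,
 * e.g. rol32(0x80000001, 1) == 0x00000003. Callers avoid shifts of 0 or
 * of the full word width, since one of the two component shifts would
 * then equal the type width, which is undefined in C.
 */
/* Illustrative sketch (not from the original source; hypothetical helper
 * name): rotating by 16 swaps the upper and lower halves of a 32-bit word. */
static inline __attribute__((always_inline)) __u32 example_swap_halves(__u32 x)
{
 return rol32(x, 16);
}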
static inline __attribute__((always_inline)) unsigned fls_long(unsigned long l)
{
if (sizeof(l) == 4)
return fls(l);
return fls64(l);
}
# 127 "include/linux/bitops.h"
static inline __attribute__((always_inline)) unsigned long __ffs64(u64 word)
{
if (((u32)word) == 0UL)
return __ffs((u32)(word >> 32)) + 32;
return __ffs((unsigned long)word);
}
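/*
 * __ffs64() above returns the bit number of the least significant set bit
 * of a 64-bit word; on this 32-bit build it consults the high half only
 * when the low 32 bits are all zero, e.g. __ffs64(0x100000000ULL) == 32.
 * As with __ffs(), the result is undefined for a zero argument.
 */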
# 170 "include/linux/bitops.h"
extern unsigned long find_last_bit(const unsigned long *addr,
unsigned long size);
# 182 "include/linux/bitops.h"
extern unsigned long find_next_bit(const unsigned long *addr,
unsigned long size, unsigned long offset);
# 192 "include/linux/bitops.h"
extern unsigned long find_next_zero_bit(const unsigned long *addr,
unsigned long size,
unsigned long offset);
# 18 "include/linux/kernel.h" 2
# 1 "include/linux/log2.h" 1
# 21 "include/linux/log2.h"
extern __attribute__((const, noreturn))
int ____ilog2_NaN(void);
# 31 "include/linux/log2.h"
static inline __attribute__((always_inline)) __attribute__((const))
int __ilog2_u32(u32 n)
{
return fls(n) - 1;
}
static inline __attribute__((always_inline)) __attribute__((const))
int __ilog2_u64(u64 n)
{
return fls64(n) - 1;
}
static inline __attribute__((always_inline)) __attribute__((const))
int is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
}
static inline __attribute__((always_inline)) __attribute__((const))
unsigned long __roundup_pow_of_two(unsigned long n)
{
return 1UL << fls_long(n - 1);
}
static inline __attribute__((always_inline)) __attribute__((const))
unsigned long __rounddown_pow_of_two(unsigned long n)
{
return 1UL << (fls_long(n) - 1);
}
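/*
 * The helpers above round to powers of two via fls_long(), e.g.
 * __roundup_pow_of_two(5) == 8 and __rounddown_pow_of_two(5) == 4, while
 * is_power_of_2() rejects 0 explicitly.
 */
/* Illustrative sketch (not from the original source; hypothetical helper
 * name): size a hash table to the next power of two, assuming
 * 1 <= want <= 0x80000000UL so the shift in __roundup_pow_of_two() stays
 * within the 32-bit unsigned long of this build. */
static inline __attribute__((always_inline)) unsigned long example_hash_buckets(unsigned long want)
{
 return __roundup_pow_of_two(want);
}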
# 19 "include/linux/kernel.h" 2
# 1 "include/linux/dynamic_debug.h" 1
extern long long dynamic_debug_enabled;
extern long long dynamic_debug_enabled2;
struct _ddebug {
const char *modname;
const char *function;
const char *filename;
const char *format;
char primary_hash;
char secondary_hash;
unsigned int lineno:24;
unsigned int flags:8;
} __attribute__((aligned(8)));
int ddebug_add_module(struct _ddebug *tab, unsigned int n,
const char *modname);
# 76 "include/linux/dynamic_debug.h"
static inline __attribute__((always_inline)) int ddebug_remove_module(const char *mod)
{
return 0;
}
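/*
 * The ddebug_remove_module() stub above returning 0 suggests that dynamic
 * debug support is compiled out in this configuration; struct _ddebug and
 * ddebug_add_module() remain declared above regardless.
 */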
# 21 "include/linux/kernel.h" 2
extern const char linux_banner[];
extern const char linux_proc_banner[];
# 132 "include/linux/kernel.h"
extern int console_printk[];
struct completion;
struct pt_regs;
struct user;
# 165 "include/linux/kernel.h"
static inline __attribute__((always_inline)) void __might_sleep(const char *file, int line,
int preempt_offset) { }
# 180 "include/linux/kernel.h"
static inline __attribute__((always_inline)) void might_fault(void)
{
do { do { } while (0); } while (0);
}
struct va_format {
const char *fmt;
va_list *va;
};
extern struct atomic_notifier_head panic_notifier_list;
extern long (*panic_blink)(int state);
void panic(const char * fmt, ...)
__attribute__ ((noreturn, format (printf, 1, 2))) __attribute__((__cold__));
extern void oops_enter(void);
extern void oops_exit(void);
void print_oops_end_marker(void);
extern int oops_may_print(void);
void do_exit(long error_code)
__attribute__((noreturn));
void complete_and_exit(struct completion *, long)
__attribute__((noreturn));
extern unsigned long simple_strtoul(const char *,char **,unsigned int);
extern long simple_strtol(const char *,char **,unsigned int);
extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
extern long long simple_strtoll(const char *,char **,unsigned int);
extern int strict_strtoul(const char *, unsigned int, unsigned long *);
extern int strict_strtol(const char *, unsigned int, long *);
extern int strict_strtoull(const char *, unsigned int, unsigned long long *);
extern int strict_strtoll(const char *, unsigned int, long long *);
extern int sprintf(char * buf, const char * fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern int vsprintf(char *buf, const char *, va_list)
__attribute__ ((format (printf, 2, 0)));
extern int snprintf(char * buf, size_t size, const char * fmt, ...)
__attribute__ ((format (printf, 3, 4)));
extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
__attribute__ ((format (printf, 3, 0)));
extern int scnprintf(char * buf, size_t size, const char * fmt, ...)
__attribute__ ((format (printf, 3, 4)));
extern int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
__attribute__ ((format (printf, 3, 0)));
extern char *kasprintf(gfp_t gfp, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
extern int sscanf(const char *, const char *, ...)
__attribute__ ((format (scanf, 2, 3)));
extern int vsscanf(const char *, const char *, va_list)
__attribute__ ((format (scanf, 2, 0)));
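/* Illustrative sketch (not from the original source; hypothetical helper
 * name): append two fields to a fixed-size buffer with size >= 1, relying
 * on scnprintf() returning the number of characters actually stored
 * (unlike snprintf(), which returns the length that would have been
 * written). */
static inline __attribute__((always_inline)) int example_dump_two(char *buf, size_t size, int a, int b)
{
 int len = 0;

 len += scnprintf(buf + len, size - len, "a=%d ", a);
 len += scnprintf(buf + len, size - len, "b=%d", b);
 return len;
}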
extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(const char *ptr, char **retptr);
extern int core_kernel_text(unsigned long addr);
extern int __kernel_text_address(unsigned long addr);
extern int kernel_text_address(unsigned long addr);
extern int func_ptr_is_kernel_text(void *ptr);
struct pid;
extern struct pid *session_of_pgrp(struct pid *pgrp);
# 276 "include/linux/kernel.h"
int vprintk(const char *fmt, va_list args)
__attribute__ ((format (printf, 1, 0)));
int printk(const char * fmt, ...)
__attribute__ ((format (printf, 1, 2))) __attribute__((__cold__));
extern int __printk_ratelimit(const char *func);
extern int printk_timed_ratelimit(unsigned long *caller_jiffies,
unsigned int interval_msec);
extern int printk_delay_msec;
# 300 "include/linux/kernel.h"
void log_buf_kexec_setup(void);
# 325 "include/linux/kernel.h"
static inline __attribute__((always_inline)) __attribute__ ((format (printf, 1, 2)))
int no_printk(const char *s, ...) { return 0; }
extern int printk_needs_cpu(int cpu);
extern void printk_tick(void);
extern void __attribute__((format(printf, 1, 2)))
early_printk(const char *fmt, ...);
unsigned long int_sqrt(unsigned long);
static inline __attribute__((always_inline)) void console_silent(void)
{
(console_printk[0]) = 0;
}
static inline __attribute__((always_inline)) void console_verbose(void)
{
if ((console_printk[0]))
(console_printk[0]) = 15;
}
extern void bust_spinlocks(int yes);
extern void wake_up_klogd(void);
extern int oops_in_progress;
extern int panic_timeout;
extern int panic_on_oops;
extern int panic_on_unrecovered_nmi;
extern int panic_on_io_nmi;
extern const char *print_tainted(void);
extern void add_taint(unsigned flag);
extern int test_taint(unsigned flag);
extern unsigned long get_taint(void);
extern int root_mountflags;
extern enum system_states {
SYSTEM_BOOTING,
SYSTEM_RUNNING,
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
SYSTEM_SUSPEND_DISK,
} system_state;
# 383 "include/linux/kernel.h"
extern void dump_stack(void) __attribute__((__cold__));
enum {
DUMP_PREFIX_NONE,
DUMP_PREFIX_ADDRESS,
DUMP_PREFIX_OFFSET
};
extern void hex_dump_to_buffer(const void *buf, size_t len,
int rowsize, int groupsize,
char *linebuf, size_t linebuflen, int ascii);
extern void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
const void *buf, size_t len, int ascii);
extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
const void *buf, size_t len);
extern const char hex_asc[];
static inline __attribute__((always_inline)) char *pack_hex_byte(char *buf, u8 byte)
{
*buf++ = hex_asc[((byte) & 0xf0) >> 4];
*buf++ = hex_asc[((byte) & 0x0f)];
return buf;
}
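/*
 * pack_hex_byte() above emits two lowercase hex digits from hex_asc[] and
 * returns the advanced buffer pointer, e.g. pack_hex_byte(buf, 0xA5)
 * stores 'a' then '5'.
 */
/* Illustrative sketch (not from the original source; hypothetical helper
 * name): format a 6-byte identifier as 12 hex digits; 'out' is assumed to
 * hold at least 13 bytes including the terminating NUL. */
static inline __attribute__((always_inline)) void example_hex_id(char *out, const u8 *id)
{
 int i;

 for (i = 0; i < 6; i++)
  out = pack_hex_byte(out, id[i]);
 *out = '\0';
}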
extern int hex_to_bin(char ch);
# 559 "include/linux/kernel.h"
static inline __attribute__((always_inline)) void tracing_on(void) { }
static inline __attribute__((always_inline)) void tracing_off(void) { }
static inline __attribute__((always_inline)) void tracing_off_permanent(void) { }
static inline __attribute__((always_inline)) int tracing_is_on(void) { return 0; }
enum ftrace_dump_mode {
DUMP_NONE,
DUMP_ALL,
DUMP_ORIG,
};
# 651 "include/linux/kernel.h"
static inline __attribute__((always_inline)) int
trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
static inline __attribute__((always_inline)) void tracing_start(void) { }
static inline __attribute__((always_inline)) void tracing_stop(void) { }
static inline __attribute__((always_inline)) void ftrace_off_permanent(void) { }
static inline __attribute__((always_inline)) void trace_dump_stack(void) { }
static inline __attribute__((always_inline)) int
trace_printk(const char *fmt, ...)
{
return 0;
}
static inline __attribute__((always_inline)) int
ftrace_vprintk(const char *fmt, va_list ap)
{
return 0;
}
static inline __attribute__((always_inline)) void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
# 787 "include/linux/kernel.h"
struct sysinfo;
extern int do_sysinfo(struct sysinfo *info);
struct sysinfo {
long uptime;
unsigned long loads[3];
unsigned long totalram;
unsigned long freeram;
unsigned long sharedram;
unsigned long bufferram;
unsigned long totalswap;
unsigned long freeswap;
unsigned short procs;
unsigned short pad;
unsigned long totalhigh;
unsigned long freehigh;
unsigned int mem_unit;
char _f[20-2*sizeof(long)-sizeof(int)];
};
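/*
 * do_sysinfo() fills struct sysinfo above: uptime is in seconds, the
 * memory and swap fields are counted in units of mem_unit bytes, and
 * loads[] holds the 1/5/15 minute load averages as fixed-point values
 * scaled by 1 << 16.
 */
/* Illustrative sketch (not from the original source; hypothetical helper
 * name): total RAM in bytes, assuming the product fits in 64 bits. */
static inline __attribute__((always_inline)) u64 example_totalram_bytes(const struct sysinfo *si)
{
 return (u64)si->totalram * si->mem_unit;
}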
# 10 "include/linux/cpumask.h" 2
# 1 "include/linux/threads.h" 1
# 11 "include/linux/cpumask.h" 2
# 1 "include/linux/bitmap.h" 1
# 1 "include/linux/string.h" 1
# 15 "include/linux/string.h"
extern char *strndup_user(const char *, long);
extern void *memdup_user(const void *, size_t);
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/string.h" 1
# 23 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/string.h"
static __inline__ __attribute__((always_inline)) char *strcpy(char *__dest, __const__ char *__src)
{
char *__xdest = __dest;
__asm__ __volatile__(
".set\tnoreorder\n\t"
".set\tnoat\n"
"1:\tlbu\t$1,(%1)\n\t"
"addiu\t%1,1\n\t"
"sb\t$1,(%0)\n\t"
"bnez\t$1,1b\n\t"
"addiu\t%0,1\n\t"
".set\tat\n\t"
".set\treorder"
: "=r" (__dest), "=r" (__src)
: "0" (__dest), "1" (__src)
: "memory");
return __xdest;
}
static __inline__ __attribute__((always_inline)) char *strncpy(char *__dest, __const__ char *__src, size_t __n)
{
char *__xdest = __dest;
if (__n == 0)
return __xdest;
__asm__ __volatile__(
".set\tnoreorder\n\t"
".set\tnoat\n"
"1:\tlbu\t$1,(%1)\n\t"
"subu\t%2,1\n\t"
"sb\t$1,(%0)\n\t"
"beqz\t$1,2f\n\t"
"addiu\t%0,1\n\t"
"bnez\t%2,1b\n\t"
"addiu\t%1,1\n"
"2:\n\t"
".set\tat\n\t"
".set\treorder"
: "=r" (__dest), "=r" (__src), "=r" (__n)
: "0" (__dest), "1" (__src), "2" (__n)
: "memory");
return __xdest;
}
static __inline__ __attribute__((always_inline)) int strcmp(__const__ char *__cs, __const__ char *__ct)
{
int __res;
__asm__ __volatile__(
".set\tnoreorder\n\t"
".set\tnoat\n\t"
"lbu\t%2,(%0)\n"
"1:\tlbu\t$1,(%1)\n\t"
"addiu\t%0,1\n\t"
"bne\t$1,%2,2f\n\t"
"addiu\t%1,1\n\t"
"bnez\t%2,1b\n\t"
"lbu\t%2,(%0)\n\t"
"move\t%2,$1\n"
"2:\tsubu\t%2,$1\n"
"3:\t.set\tat\n\t"
".set\treorder"
: "=r" (__cs), "=r" (__ct), "=r" (__res)
: "0" (__cs), "1" (__ct));
return __res;
}
static __inline__ __attribute__((always_inline)) int
strncmp(__const__ char *__cs, __const__ char *__ct, size_t __count)
{
int __res;
__asm__ __volatile__(
".set\tnoreorder\n\t"
".set\tnoat\n"
"1:\tlbu\t%3,(%0)\n\t"
"beqz\t%2,2f\n\t"
"lbu\t$1,(%1)\n\t"
"subu\t%2,1\n\t"
"bne\t$1,%3,3f\n\t"
"addiu\t%0,1\n\t"
"bnez\t%3,1b\n\t"
"addiu\t%1,1\n"
"2:\n\t"
"move\t%3,$1\n"
"3:\tsubu\t%3,$1\n\t"
".set\tat\n\t"
".set\treorder"
: "=r" (__cs), "=r" (__ct), "=r" (__count), "=r" (__res)
: "0" (__cs), "1" (__ct), "2" (__count));
return __res;
}
extern void *memset(void *__s, int __c, size_t __count);
extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
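/*
 * The strcpy/strncpy/strcmp/strncmp helpers above are hand-written MIPS
 * inline assembly that walks the strings one byte at a time in register $1
 * under ".set noat"; the copying variants also declare a "memory" clobber
 * because they store through the destination pointer. memset, memcpy and
 * memmove are only declared here and are provided out of line by the
 * architecture's library code.
 */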
# 22 "include/linux/string.h" 2
# 30 "include/linux/string.h"
size_t strlcpy(char *, const char *, size_t);
extern char * strcat(char *, const char *);
extern char * strncat(char *, const char *, __kernel_size_t);
extern size_t strlcat(char *, const char *, __kernel_size_t);
# 48 "include/linux/string.h"
extern int strnicmp(const char *, const char *, __kernel_size_t);
extern int strcasecmp(const char *s1, const char *s2);
extern int strncasecmp(const char *s1, const char *s2, size_t n);
extern char * strchr(const char *,int);
extern char * strnchr(const char *, size_t, int);
extern char * strrchr(const char *,int);
extern char * __attribute__((warn_unused_result)) skip_spaces(const char *);
extern char *strim(char *);
static inline __attribute__((always_inline)) __attribute__((warn_unused_result)) char *strstrip(char *str)
{
return strim(str);
}
extern char * strstr(const char *, const char *);
extern char * strnstr(const char *, const char *, size_t);
extern __kernel_size_t strlen(const char *);
extern __kernel_size_t strnlen(const char *,__kernel_size_t);
extern char * strpbrk(const char *,const char *);
extern char * strsep(char **,const char *);
extern __kernel_size_t strspn(const char *,const char *);
extern __kernel_size_t strcspn(const char *,const char *);
# 106 "include/linux/string.h"
extern void * memcpy4(void *,const void *,__kernel_size_t);
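/*
 * memcpy4() is not part of the stock string API and appears to be a
 * platform-specific copy variant added by this BSP; its exact semantics
 * (presumably a word-oriented copy) are defined by the platform's library
 * code rather than by this header.
 */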
extern void * memscan(void *,int,__kernel_size_t);
extern int memcmp(const void *,const void *,__kernel_size_t);
extern void * memchr(const void *,int,__kernel_size_t);
extern char *kstrdup(const char *s, gfp_t gfp);
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);
extern int sysfs_streq(const char *s1, const char *s2);
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
const void *from, size_t available);
static inline __attribute__((always_inline)) int strstarts(const char *str, const char *prefix)
{
return strncmp(str, prefix, strlen(prefix)) == 0;
}
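/* Illustrative sketch (not from the original source; hypothetical helper
 * name): strstarts() above reports whether 'str' begins with 'prefix',
 * e.g. strstarts("eth0", "eth") is non-zero; this helper skips the prefix
 * when present and otherwise returns the string unchanged. */
static inline __attribute__((always_inline)) const char *example_skip_prefix(const char *s, const char *prefix)
{
 return strstarts(s, prefix) ? s + strlen(prefix) : s;
}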
# 9 "include/linux/bitmap.h" 2
# 90 "include/linux/bitmap.h"
extern int __bitmap_empty(const unsigned long *bitmap, int bits);
extern int __bitmap_full(const unsigned long *bitmap, int bits);
extern int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
int bits);
extern void __bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern void __bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_weight(const unsigned long *bitmap, int bits);
extern void bitmap_set(unsigned long *map, int i, int len);
extern void bitmap_clear(unsigned long *map, int start, int nr);
extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
unsigned long align_mask);
extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
unsigned long *dst, int nbits);
extern int bitmap_parse_user(const char *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
extern int bitmap_scnlistprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int bitmap_parselist(const char *buf, unsigned long *maskp,
int nmaskbits);
extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, int bits);
extern int bitmap_bitremap(int oldbit,
const unsigned long *old, const unsigned long *new, int bits);
extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, int bits);
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
int sz, int bits);
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
# 154 "include/linux/bitmap.h"
static inline __attribute__((always_inline)) void bitmap_zero(unsigned long *dst, int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 32))
*dst = 0UL;
else {
int len = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(unsigned long);
memset(dst, 0, len);
}
}
static inline __attribute__((always_inline)) void bitmap_fill(unsigned long *dst, int nbits)
{
size_t nlongs = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)));
if (!(__builtin_constant_p(nbits) && (nbits) <= 32)) {
int len = (nlongs - 1) * sizeof(unsigned long);
memset(dst, 0xff, len);
}
dst[nlongs - 1] = ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL );
}
static inline __attribute__((always_inline)) void bitmap_copy(unsigned long *dst, const unsigned long *src,
int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 32))
*dst = *src;
else {
int len = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(unsigned long);
memcpy(dst, src, len);
}
}
static inline __attribute__((always_inline)) int bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 32))
return (*dst = *src1 & *src2) != 0;
return __bitmap_and(dst, src1, src2, nbits);
}
static inline __attribute__((always_inline)) void bitmap_or(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 32))
*dst = *src1 | *src2;
else
__bitmap_or(dst, src1, src2, nbits);
}
static inline __attribute__((always_inline)) void bitmap_xor(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 32))
*dst = *src1 ^ *src2;
else
__bitmap_xor(dst, src1, src2, nbits);
}
static inline __attribute__((always_inline)) int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 32))
return (*dst = *src1 & ~(*src2)) != 0;
return __bitmap_andnot(dst, src1, src2, nbits);
}
static inline __attribute__((always_inline)) void bitmap_complement(unsigned long *dst, const unsigned long *src,
int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 32))
*dst = ~(*src) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL );
else
__bitmap_complement(dst, src, nbits);
}
static inline __attribute__((always_inline)) int bitmap_equal(const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 32))
return ! ((*src1 ^ *src2) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
else
return __bitmap_equal(src1, src2, nbits);
}
static inline __attribute__((always_inline)) int bitmap_intersects(const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 32))
return ((*src1 & *src2) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL )) != 0;
else
return __bitmap_intersects(src1, src2, nbits);
}
static inline __attribute__((always_inline)) int bitmap_subset(const unsigned long *src1,
const unsigned long *src2, int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 32))
return ! ((*src1 & ~(*src2)) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
else
return __bitmap_subset(src1, src2, nbits);
}
static inline __attribute__((always_inline)) int bitmap_empty(const unsigned long *src, int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 32))
return ! (*src & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
else
return __bitmap_empty(src, nbits);
}
static inline __attribute__((always_inline)) int bitmap_full(const unsigned long *src, int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 32))
return ! (~(*src) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
else
return __bitmap_full(src, nbits);
}
static inline __attribute__((always_inline)) int bitmap_weight(const unsigned long *src, int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 32))
return hweight_long(*src & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ));
return __bitmap_weight(src, nbits);
}
static inline __attribute__((always_inline)) void bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int n, int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 32))
*dst = *src >> n;
else
__bitmap_shift_right(dst, src, n, nbits);
}
static inline __attribute__((always_inline)) void bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int n, int nbits)
{
if ((__builtin_constant_p(nbits) && (nbits) <= 32))
*dst = (*src << n) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL );
else
__bitmap_shift_left(dst, src, n, nbits);
}
static inline __attribute__((always_inline)) int bitmap_parse(const char *buf, unsigned int buflen,
unsigned long *maskp, int nmaskbits)
{
return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
}
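/*
 * On this 32-bit build a bitmap of up to 32 bits is a single unsigned
 * long, so the constant-nbits fast paths above collapse to one-word
 * operations.
 */
/* Illustrative sketch (not from the original source; the LLID bookkeeping
 * is a hypothetical example): find the first free slot in a 16-entry map. */
static inline __attribute__((always_inline)) int example_first_free_llid(void)
{
 unsigned long used[1];

 bitmap_zero(used, 16); /* all slots free */
 bitmap_set(used, 0, 2); /* slots 0 and 1 taken */
 return (int)find_next_zero_bit(used, 16, 0); /* yields 2 */
}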
# 12 "include/linux/cpumask.h" 2
typedef struct cpumask { unsigned long bits[(((4) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; } cpumask_t;
# 27 "include/linux/cpumask.h"
extern int nr_cpu_ids;
# 78 "include/linux/cpumask.h"
extern const struct cpumask *const cpu_possible_mask;
extern const struct cpumask *const cpu_online_mask;
extern const struct cpumask *const cpu_present_mask;
extern const struct cpumask *const cpu_active_mask;
# 104 "include/linux/cpumask.h"
static inline __attribute__((always_inline)) unsigned int cpumask_check(unsigned int cpu)
{
return cpu;
}
# 157 "include/linux/cpumask.h"
static inline __attribute__((always_inline)) unsigned int cpumask_first(const struct cpumask *srcp)
{
return find_next_bit((((srcp)->bits)), (4), 0);
}
# 169 "include/linux/cpumask.h"
static inline __attribute__((always_inline)) unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
if (n != -1)
cpumask_check(n);
return find_next_bit(((srcp)->bits), 4, n+1);
}
# 184 "include/linux/cpumask.h"
static inline __attribute__((always_inline)) unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
if (n != -1)
cpumask_check(n);
return find_next_zero_bit(((srcp)->bits), 4, n+1);
}
int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
# 254 "include/linux/cpumask.h"
static inline __attribute__((always_inline)) void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
set_bit(cpumask_check(cpu), ((dstp)->bits));
}
static inline __attribute__((always_inline)) void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
clear_bit(cpumask_check(cpu), ((dstp)->bits));
}
# 286 "include/linux/cpumask.h"
static inline __attribute__((always_inline)) int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_set_bit(cpumask_check(cpu), ((cpumask)->bits));
}
# 298 "include/linux/cpumask.h"
static inline __attribute__((always_inline)) int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
return test_and_clear_bit(cpumask_check(cpu), ((cpumask)->bits));
}
static inline __attribute__((always_inline)) void cpumask_setall(struct cpumask *dstp)
{
bitmap_fill(((dstp)->bits), 4);
}
static inline __attribute__((always_inline)) void cpumask_clear(struct cpumask *dstp)
{
bitmap_zero(((dstp)->bits), 4);
}
static inline __attribute__((always_inline)) int cpumask_and(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_and(((dstp)->bits), ((src1p)->bits),
((src2p)->bits), 4);
}
static inline __attribute__((always_inline)) void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
const struct cpumask *src2p)
{
bitmap_or(((dstp)->bits), ((src1p)->bits),
((src2p)->bits), 4);
}
static inline __attribute__((always_inline)) void cpumask_xor(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
bitmap_xor(((dstp)->bits), ((src1p)->bits),
((src2p)->bits), 4);
}
static inline __attribute__((always_inline)) int cpumask_andnot(struct cpumask *dstp,
const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_andnot(((dstp)->bits), ((src1p)->bits),
((src2p)->bits), 4);
}
static inline __attribute__((always_inline)) void cpumask_complement(struct cpumask *dstp,
const struct cpumask *srcp)
{
bitmap_complement(((dstp)->bits), ((srcp)->bits),
4);
}
static inline __attribute__((always_inline)) int cpumask_equal(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_equal(((src1p)->bits), ((src2p)->bits),
4);
}
static inline __attribute__((always_inline)) int cpumask_intersects(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_intersects(((src1p)->bits), ((src2p)->bits),
4);
}
static inline __attribute__((always_inline)) int cpumask_subset(const struct cpumask *src1p,
const struct cpumask *src2p)
{
return bitmap_subset(((src1p)->bits), ((src2p)->bits),
4);
}
static inline __attribute__((always_inline)) int cpumask_empty(const struct cpumask *srcp)
{
return bitmap_empty(((srcp)->bits), 4);
}
static inline __attribute__((always_inline)) int cpumask_full(const struct cpumask *srcp)
{
return bitmap_full(((srcp)->bits), 4);
}
static inline __attribute__((always_inline)) unsigned int cpumask_weight(const struct cpumask *srcp)
{
return bitmap_weight(((srcp)->bits), 4);
}
static inline __attribute__((always_inline)) void cpumask_shift_right(struct cpumask *dstp,
const struct cpumask *srcp, int n)
{
bitmap_shift_right(((dstp)->bits), ((srcp)->bits), n,
4);
}
static inline __attribute__((always_inline)) void cpumask_shift_left(struct cpumask *dstp,
const struct cpumask *srcp, int n)
{
bitmap_shift_left(((dstp)->bits), ((srcp)->bits), n,
4);
}
static inline __attribute__((always_inline)) void cpumask_copy(struct cpumask *dstp,
const struct cpumask *srcp)
{
bitmap_copy(((dstp)->bits), ((srcp)->bits), 4);
}
# 529 "include/linux/cpumask.h"
static inline __attribute__((always_inline)) int cpumask_scnprintf(char *buf, int len,
const struct cpumask *srcp)
{
return bitmap_scnprintf(buf, len, ((srcp)->bits), 4);
}
# 543 "include/linux/cpumask.h"
static inline __attribute__((always_inline)) int cpumask_parse_user(const char *buf, int len,
struct cpumask *dstp)
{
return bitmap_parse_user(buf, len, ((dstp)->bits), 4);
}
# 558 "include/linux/cpumask.h"
static inline __attribute__((always_inline)) int cpulist_scnprintf(char *buf, int len,
const struct cpumask *srcp)
{
return bitmap_scnlistprintf(buf, len, ((srcp)->bits),
4);
}
# 573 "include/linux/cpumask.h"
static inline __attribute__((always_inline)) int cpulist_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parselist(buf, ((dstp)->bits), 4);
}
static inline __attribute__((always_inline)) size_t cpumask_size(void)
{
return (((4) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(long);
}
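/*
 * The literal 4 threaded through the cpumask helpers above is the
 * configured CPU count for this build, so the whole mask fits in a single
 * 32-bit word and every operation reduces to a one-word bitmap op.
 */
/* Illustrative sketch (not from the original source; hypothetical helper
 * name): build a mask of the first two CPUs and count its bits. */
static inline __attribute__((always_inline)) unsigned int example_low_cpu_count(void)
{
 struct cpumask m;

 cpumask_clear(&m);
 cpumask_set_cpu(0, &m);
 cpumask_set_cpu(1, &m);
 return cpumask_weight(&m); /* yields 2 */
}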
# 618 "include/linux/cpumask.h"
typedef struct cpumask cpumask_var_t[1];
static inline __attribute__((always_inline)) int alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return true;
}
static inline __attribute__((always_inline)) int alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
return true;
}
static inline __attribute__((always_inline)) int zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
cpumask_clear(*mask);
return true;
}
static inline __attribute__((always_inline)) int zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
int node)
{
cpumask_clear(*mask);
return true;
}
static inline __attribute__((always_inline)) void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}
static inline __attribute__((always_inline)) void free_cpumask_var(cpumask_var_t mask)
{
}
static inline __attribute__((always_inline)) void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}
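/*
 * With off-stack cpumasks disabled, cpumask_var_t above is simply a
 * one-element array of struct cpumask: the alloc helpers always report
 * success without allocating, the zalloc variants merely clear the mask,
 * and the free helpers are no-ops.
 */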
extern const unsigned long cpu_all_bits[(((4) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
# 670 "include/linux/cpumask.h"
void set_cpu_possible(unsigned int cpu, int possible);
void set_cpu_present(unsigned int cpu, int present);
void set_cpu_online(unsigned int cpu, int online);
void set_cpu_active(unsigned int cpu, int active);
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);
# 692 "include/linux/cpumask.h"
static inline __attribute__((always_inline)) int __check_is_bitmap(const unsigned long *bitmap)
{
return 1;
}
# 704 "include/linux/cpumask.h"
extern const unsigned long
cpu_bit_bitmap[32 +1][(((4) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
static inline __attribute__((always_inline)) const struct cpumask *get_cpu_mask(unsigned int cpu)
{
const unsigned long *p = cpu_bit_bitmap[1 + cpu % 32];
p -= cpu / 32;
return ((struct cpumask *)(1 ? (p) : (void *)sizeof(__check_is_bitmap(p))));
}
# 781 "include/linux/cpumask.h"
int __first_cpu(const cpumask_t *srcp);
int __next_cpu(int n, const cpumask_t *srcp);
int __any_online_cpu(const cpumask_t *mask);
# 811 "include/linux/cpumask.h"
static inline __attribute__((always_inline)) void __cpu_set(int cpu, volatile cpumask_t *dstp)
{
set_bit(cpu, dstp->bits);
}
static inline __attribute__((always_inline)) void __cpu_clear(int cpu, volatile cpumask_t *dstp)
{
clear_bit(cpu, dstp->bits);
}
static inline __attribute__((always_inline)) void __cpus_setall(cpumask_t *dstp, int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
static inline __attribute__((always_inline)) void __cpus_clear(cpumask_t *dstp, int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
static inline __attribute__((always_inline)) int __cpu_test_and_set(int cpu, cpumask_t *addr)
{
return test_and_set_bit(cpu, addr->bits);
}
static inline __attribute__((always_inline)) int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) int __cpus_equal(const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) int __cpus_intersects(const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) int __cpus_subset(const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) int __cpus_empty(const cpumask_t *srcp, int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
static inline __attribute__((always_inline)) int __cpus_weight(const cpumask_t *srcp, int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
static inline __attribute__((always_inline)) void __cpus_shift_left(cpumask_t *dstp,
const cpumask_t *srcp, int n, int nbits)
{
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
# 15 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/processor.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cachectl.h" 1
# 18 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/processor.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mipsregs.h" 1
# 1414 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mipsregs.h"
static inline __attribute__((always_inline)) void tlb_probe(void)
{
__asm__ __volatile__(
".set noreorder\n\t"
"tlbp\n\t"
".set reorder");
}
static inline __attribute__((always_inline)) void tlb_read(void)
{
# 1441 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mipsregs.h"
__asm__ __volatile__(
".set noreorder\n\t"
"tlbr\n\t"
".set reorder");
# 1457 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mipsregs.h"
}
static inline __attribute__((always_inline)) void tlb_write_indexed(void)
{
__asm__ __volatile__(
".set noreorder\n\t"
"tlbwi\n\t"
".set reorder");
}
static inline __attribute__((always_inline)) void tlb_write_random(void)
{
__asm__ __volatile__(
".set noreorder\n\t"
"tlbwr\n\t"
".set reorder");
}
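/*
 * Each of the TLB helpers above wraps a single privileged MIPS instruction
 * (tlbp, tlbr, tlbwi, tlbwr) between ".set noreorder" and ".set reorder"
 * so the assembler does not schedule anything into the sequence; callers
 * are expected to have loaded the relevant CP0 registers (EntryHi, Index,
 * EntryLo0/1, PageMask) beforehand.
 */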
# 1529 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mipsregs.h"
static inline __attribute__((always_inline)) unsigned int __dmt(void)
{
int res;
__asm__ __volatile__(
" .set push \n"
" .set mips32r2 \n"
" .set noat \n"
" .word 0x41610BC1 # dmt $1 \n"
" ehb \n"
" move %0, $1 \n"
" .set pop \n"
: "=r" (res));
do { unsigned long tmp; __asm__ __volatile__( " .set mips64r2 \n" " dla %0, 1f \n" " jr.hb %0 \n" " .set mips0 \n" "1: \n" : "=r" (tmp)); } while (0);
return res;
}
static inline __attribute__((always_inline)) void __emt(unsigned int previous)
{
if ((previous & (1UL << 15)))
__asm__ __volatile__(
" .set mips32r2 \n"
" .word 0x41600be1 # emt \n"
" ehb \n"
" .set mips0 \n");
}
static inline __attribute__((always_inline)) void __ehb(void)
{
__asm__ __volatile__(
" .set mips32r2 \n"
" ehb \n" " .set mips0 \n");
}
# 1636 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mipsregs.h"
static inline __attribute__((always_inline)) unsigned int set_c0_status(unsigned int set) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; if (0 == 0) __asm__ __volatile__( "mfc0\t%0, " "$12" "\n\t" : "=r" (__res)); else __asm__ __volatile__( ".set\tmips32\n\t" "mfc0\t%0, " "$12" ", " "0" "\n\t" ".set\tmips0\n\t" : "=r" (__res)); __res; }); new = res | set; do { do { if (0 == 0) __asm__ __volatile__( "mtc0\t%z0, " "$12" "\n\t" : : "Jr" ((unsigned int)(new))); else __asm__ __volatile__( ".set\tmips32\n\t" "mtc0\t%z0, " "$12" ", " "0" "\n\t" ".set\tmips0" : : "Jr" ((unsigned int)(new))); } while (0); __ehb(); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; } static inline __attribute__((always_inline)) unsigned int clear_c0_status(unsigned int clear) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; if (0 == 0) __asm__ __volatile__( "mfc0\t%0, " "$12" "\n\t" : "=r" (__res)); else __asm__ __volatile__( ".set\tmips32\n\t" "mfc0\t%0, " "$12" ", " "0" "\n\t" ".set\tmips0\n\t" : "=r" (__res)); __res; }); new = res & ~clear; do { do { if (0 == 0) __asm__ __volatile__( "mtc0\t%z0, " "$12" "\n\t" : : "Jr" ((unsigned int)(new))); else __asm__ __volatile__( ".set\tmips32\n\t" "mtc0\t%z0, " "$12" ", " "0" "\n\t" ".set\tmips0" : : "Jr" ((unsigned int)(new))); } while (0); __ehb(); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; } static inline __attribute__((always_inline)) unsigned int change_c0_status(unsigned int change, unsigned int newbits) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; if (0 == 0) __asm__ __volatile__( "mfc0\t%0, " "$12" "\n\t" : "=r" (__res)); else __asm__ __volatile__( ".set\tmips32\n\t" "mfc0\t%0, " "$12" ", " "0" "\n\t" ".set\tmips0\n\t" : "=r" (__res)); __res; }); new = res & ~change; new |= (newbits & change); do { do { if (0 == 0) __asm__ __volatile__( "mtc0\t%z0, " "$12" "\n\t" : : "Jr" ((unsigned int)(new))); else __asm__ __volatile__( ".set\tmips32\n\t" "mtc0\t%z0, " "$12" ", " "0" "\n\t" ".set\tmips0" : : "Jr" ((unsigned int)(new))); } while (0); __ehb(); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while 
(0); return res; }
static inline __attribute__((always_inline)) unsigned int set_c0_cause(unsigned int set) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; if (0 == 0) __asm__ __volatile__( "mfc0\t%0, " "$13" "\n\t" : "=r" (__res)); else __asm__ __volatile__( ".set\tmips32\n\t" "mfc0\t%0, " "$13" ", " "0" "\n\t" ".set\tmips0\n\t" : "=r" (__res)); __res; }); new = res | set; do { if (0 == 0) __asm__ __volatile__( "mtc0\t%z0, " "$13" "\n\t" : : "Jr" ((unsigned int)(new))); else __asm__ __volatile__( ".set\tmips32\n\t" "mtc0\t%z0, " "$13" ", " "0" "\n\t" ".set\tmips0" : : "Jr" ((unsigned int)(new))); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; } static inline __attribute__((always_inline)) unsigned int clear_c0_cause(unsigned int clear) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; if (0 == 0) __asm__ __volatile__( "mfc0\t%0, " "$13" "\n\t" : "=r" (__res)); else __asm__ __volatile__( ".set\tmips32\n\t" "mfc0\t%0, " "$13" ", " "0" "\n\t" ".set\tmips0\n\t" : "=r" (__res)); __res; }); new = res & ~clear; do { if (0 == 0) __asm__ __volatile__( "mtc0\t%z0, " "$13" "\n\t" : : "Jr" ((unsigned int)(new))); else __asm__ __volatile__( ".set\tmips32\n\t" "mtc0\t%z0, " "$13" ", " "0" "\n\t" ".set\tmips0" : : "Jr" ((unsigned int)(new))); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; } static inline __attribute__((always_inline)) unsigned int change_c0_cause(unsigned int change, unsigned int newbits) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; if (0 == 0) __asm__ __volatile__( "mfc0\t%0, " "$13" "\n\t" : "=r" (__res)); else __asm__ __volatile__( ".set\tmips32\n\t" "mfc0\t%0, " "$13" ", " "0" "\n\t" ".set\tmips0\n\t" : "=r" (__res)); __res; }); new = res & ~change; new |= (newbits & change); do { if (0 == 0) __asm__ __volatile__( "mtc0\t%z0, " "$13" "\n\t" : : "Jr" ((unsigned int)(new))); else __asm__ __volatile__( ".set\tmips32\n\t" "mtc0\t%z0, " "$13" ", " "0" "\n\t" ".set\tmips0" : : "Jr" ((unsigned int)(new))); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; }
static inline __attribute__((always_inline)) unsigned int set_c0_config(unsigned int set) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; if (0 == 0) __asm__ __volatile__( "mfc0\t%0, " "$16" "\n\t" : "=r" (__res)); else __asm__ __volatile__( ".set\tmips32\n\t" "mfc0\t%0, " "$16" ", " "0" "\n\t" ".set\tmips0\n\t" : "=r" (__res)); __res; }); new = res | set; do { if (0 == 0) __asm__ __volatile__( "mtc0\t%z0, " "$16" "\n\t" : : "Jr" ((unsigned int)(new))); else __asm__ __volatile__( ".set\tmips32\n\t" "mtc0\t%z0, " "$16" ", " "0" "\n\t" ".set\tmips0" : : "Jr" ((unsigned int)(new))); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; } static inline __attribute__((always_inline)) unsigned int clear_c0_config(unsigned int clear) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; if (0 == 0) __asm__ __volatile__( "mfc0\t%0, " "$16" "\n\t" : "=r" (__res)); else __asm__ __volatile__( ".set\tmips32\n\t" "mfc0\t%0, " "$16" ", " "0" "\n\t" ".set\tmips0\n\t" : "=r" (__res)); __res; }); new = res & ~clear; do { if (0 == 0) __asm__ __volatile__( "mtc0\t%z0, " "$16" "\n\t" : : "Jr" ((unsigned int)(new))); else __asm__ __volatile__( ".set\tmips32\n\t" "mtc0\t%z0, " "$16" ", " "0" "\n\t" ".set\tmips0" : : "Jr" ((unsigned int)(new))); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; } static inline __attribute__((always_inline)) unsigned int change_c0_config(unsigned int change, unsigned int newbits) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; if (0 == 0) __asm__ __volatile__( "mfc0\t%0, " "$16" "\n\t" : "=r" (__res)); else __asm__ __volatile__( ".set\tmips32\n\t" "mfc0\t%0, " "$16" ", " "0" "\n\t" ".set\tmips0\n\t" : "=r" (__res)); __res; }); new = res & ~change; new |= (newbits & change); do { if (0 == 0) __asm__ __volatile__( "mtc0\t%z0, " "$16" "\n\t" : : "Jr" ((unsigned int)(new))); else __asm__ __volatile__( ".set\tmips32\n\t" "mtc0\t%z0, " "$16" ", " "0" "\n\t" ".set\tmips0" : : "Jr" ((unsigned int)(new))); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; }
static inline __attribute__((always_inline)) unsigned int set_c0_intcontrol(unsigned int set) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; __asm__ __volatile__( "cfc0\t%0, " "$20" "\n\t" : "=r" (__res)); __res; }); new = res | set; do { __asm__ __volatile__( "ctc0\t%z0, " "$20" "\n\t" : : "Jr" ((unsigned int)(new))); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; } static inline __attribute__((always_inline)) unsigned int clear_c0_intcontrol(unsigned int clear) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; __asm__ __volatile__( "cfc0\t%0, " "$20" "\n\t" : "=r" (__res)); __res; }); new = res & ~clear; do { __asm__ __volatile__( "ctc0\t%z0, " "$20" "\n\t" : : "Jr" ((unsigned int)(new))); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; } static inline __attribute__((always_inline)) unsigned int change_c0_intcontrol(unsigned int change, unsigned int newbits) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; __asm__ __volatile__( "cfc0\t%0, " "$20" "\n\t" : "=r" (__res)); __res; }); new = res & ~change; new |= (newbits & change); do { __asm__ __volatile__( "ctc0\t%z0, " "$20" "\n\t" : : "Jr" ((unsigned int)(new))); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; }
static inline __attribute__((always_inline)) unsigned int set_c0_intctl(unsigned int set) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; if (1 == 0) __asm__ __volatile__( "mfc0\t%0, " "$12" "\n\t" : "=r" (__res)); else __asm__ __volatile__( ".set\tmips32\n\t" "mfc0\t%0, " "$12" ", " "1" "\n\t" ".set\tmips0\n\t" : "=r" (__res)); __res; }); new = res | set; do { if (1 == 0) __asm__ __volatile__( "mtc0\t%z0, " "$12" "\n\t" : : "Jr" ((unsigned int)(new))); else __asm__ __volatile__( ".set\tmips32\n\t" "mtc0\t%z0, " "$12" ", " "1" "\n\t" ".set\tmips0" : : "Jr" ((unsigned int)(new))); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; } static inline __attribute__((always_inline)) unsigned int clear_c0_intctl(unsigned int clear) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; if (1 == 0) __asm__ __volatile__( "mfc0\t%0, " "$12" "\n\t" : "=r" (__res)); else __asm__ __volatile__( ".set\tmips32\n\t" "mfc0\t%0, " "$12" ", " "1" "\n\t" ".set\tmips0\n\t" : "=r" (__res)); __res; }); new = res & ~clear; do { if (1 == 0) __asm__ __volatile__( "mtc0\t%z0, " "$12" "\n\t" : : "Jr" ((unsigned int)(new))); else __asm__ __volatile__( ".set\tmips32\n\t" "mtc0\t%z0, " "$12" ", " "1" "\n\t" ".set\tmips0" : : "Jr" ((unsigned int)(new))); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; } static inline __attribute__((always_inline)) unsigned int change_c0_intctl(unsigned int change, unsigned int newbits) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; if (1 == 0) __asm__ __volatile__( "mfc0\t%0, " "$12" "\n\t" : "=r" (__res)); else __asm__ __volatile__( ".set\tmips32\n\t" "mfc0\t%0, " "$12" ", " "1" "\n\t" ".set\tmips0\n\t" : "=r" (__res)); __res; }); new = res & ~change; new |= (newbits & change); do { if (1 == 0) __asm__ __volatile__( "mtc0\t%z0, " "$12" "\n\t" : : "Jr" ((unsigned int)(new))); else __asm__ __volatile__( ".set\tmips32\n\t" "mtc0\t%z0, " "$12" ", " "1" "\n\t" ".set\tmips0" : : "Jr" ((unsigned int)(new))); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; }
static inline __attribute__((always_inline)) unsigned int set_c0_srsmap(unsigned int set) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; if (3 == 0) __asm__ __volatile__( "mfc0\t%0, " "$12" "\n\t" : "=r" (__res)); else __asm__ __volatile__( ".set\tmips32\n\t" "mfc0\t%0, " "$12" ", " "3" "\n\t" ".set\tmips0\n\t" : "=r" (__res)); __res; }); new = res | set; do { if (3 == 0) __asm__ __volatile__( "mtc0\t%z0, " "$12" "\n\t" : : "Jr" ((unsigned int)(new))); else __asm__ __volatile__( ".set\tmips32\n\t" "mtc0\t%z0, " "$12" ", " "3" "\n\t" ".set\tmips0" : : "Jr" ((unsigned int)(new))); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; } static inline __attribute__((always_inline)) unsigned int clear_c0_srsmap(unsigned int clear) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; if (3 == 0) __asm__ __volatile__( "mfc0\t%0, " "$12" "\n\t" : "=r" (__res)); else __asm__ __volatile__( ".set\tmips32\n\t" "mfc0\t%0, " "$12" ", " "3" "\n\t" ".set\tmips0\n\t" : "=r" (__res)); __res; }); new = res & ~clear; do { if (3 == 0) __asm__ __volatile__( "mtc0\t%z0, " "$12" "\n\t" : : "Jr" ((unsigned int)(new))); else __asm__ __volatile__( ".set\tmips32\n\t" "mtc0\t%z0, " "$12" ", " "3" "\n\t" ".set\tmips0" : : "Jr" ((unsigned int)(new))); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; } static inline __attribute__((always_inline)) unsigned int change_c0_srsmap(unsigned int change, unsigned int newbits) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; if (3 == 0) __asm__ __volatile__( "mfc0\t%0, " "$12" "\n\t" : "=r" (__res)); else __asm__ __volatile__( ".set\tmips32\n\t" "mfc0\t%0, " "$12" ", " "3" "\n\t" ".set\tmips0\n\t" : "=r" (__res)); __res; }); new = res & ~change; new |= (newbits & change); do { if (3 == 0) __asm__ __volatile__( "mtc0\t%z0, " "$12" "\n\t" : : "Jr" ((unsigned int)(new))); else __asm__ __volatile__( ".set\tmips32\n\t" "mtc0\t%z0, " "$12" ", " "3" "\n\t" ".set\tmips0" : : "Jr" ((unsigned int)(new))); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; }
# 21 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/processor.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/prefetch.h" 1
# 22 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/processor.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/system.h" 1
# 19 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/system.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/addrspace.h" 1
# 13 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/addrspace.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mach-generic/spaces.h" 1
# 13 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mach-generic/spaces.h"
# 1 "include/linux/const.h" 1
# 14 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mach-generic/spaces.h" 2
# 14 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/addrspace.h" 2
# 20 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/system.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cmpxchg.h" 1
# 73 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cmpxchg.h"
extern void __cmpxchg_called_with_bad_pointer(void);
# 120 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cmpxchg.h"
# 1 "include/asm-generic/cmpxchg-local.h" 1
extern unsigned long wrong_size_cmpxchg(volatile void *ptr);
static inline __attribute__((always_inline)) unsigned long __cmpxchg_local_generic(volatile void *ptr,
unsigned long old, unsigned long new, int size)
{
unsigned long flags, prev;
if (size == 8 && sizeof(unsigned long) != 8)
wrong_size_cmpxchg(ptr);
do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0);
switch (size) {
case 1: prev = *(u8 *)ptr;
if (prev == old)
*(u8 *)ptr = (u8)new;
break;
case 2: prev = *(u16 *)ptr;
if (prev == old)
*(u16 *)ptr = (u16)new;
break;
case 4: prev = *(u32 *)ptr;
if (prev == old)
*(u32 *)ptr = (u32)new;
break;
case 8: prev = *(u64 *)ptr;
if (prev == old)
*(u64 *)ptr = (u64)new;
break;
default:
wrong_size_cmpxchg(ptr);
}
do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0);
return prev;
}
static inline __attribute__((always_inline)) u64 __cmpxchg64_local_generic(volatile void *ptr,
u64 old, u64 new)
{
u64 prev;
unsigned long flags;
do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0);
prev = *(u64 *)ptr;
if (prev == old)
*(u64 *)ptr = new;
do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0);
return prev;
}
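/*
 * The two helpers above are include/asm-generic/cmpxchg-local.h: they
 * emulate a CPU-local compare-and-exchange by disabling interrupts around a
 * plain load/compare/store, which is safe only against the local CPU, not
 * against other CPUs (those paths use the LL/SC cmpxchg further on).  A
 * minimal sketch, assuming a hypothetical token counter; kept inside this
 * comment so nothing new is compiled:
 *
 *     static unsigned long tokens = 16;
 *     unsigned long seen = tokens;
 *     if (__cmpxchg_local_generic(&tokens, seen, seen - 1,
 *                                 sizeof(tokens)) == seen) {
 *             // consumed one token without racing local interrupt context
 *     }
 */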
# 121 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cmpxchg.h" 2
# 22 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/system.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/dsp.h" 1
# 26 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/dsp.h"
static inline __attribute__((always_inline)) void __init_dsp(void)
{
do { __asm__ __volatile__( " .set push \n" " .set noat \n" " move $1, %0 \n" " # mthi $1, $ac1 \n" " .word 0x00200811 \n" " .set pop \n" : : "r" (0)); } while (0);
do { __asm__ __volatile__( " .set push \n" " .set noat \n" " move $1, %0 \n" " # mtlo $1, $ac1 \n" " .word 0x00200813 \n" " .set pop \n" : : "r" (0)); } while (0);
do { __asm__ __volatile__( " .set push \n" " .set noat \n" " move $1, %0 \n" " # mthi $1, $ac2 \n" " .word 0x00201011 \n" " .set pop \n" : : "r" (0)); } while (0);
do { __asm__ __volatile__( " .set push \n" " .set noat \n" " move $1, %0 \n" " # mtlo $1, $ac2 \n" " .word 0x00201013 \n" " .set pop \n" : : "r" (0)); } while (0);
do { __asm__ __volatile__( " .set push \n" " .set noat \n" " move $1, %0 \n" " # mthi $1, $ac3 \n" " .word 0x00201811 \n" " .set pop \n" : : "r" (0)); } while (0);
do { __asm__ __volatile__( " .set push \n" " .set noat \n" " move $1, %0 \n" " # mtlo $1, $ac3 \n" " .word 0x00201813 \n" " .set pop \n" : : "r" (0)); } while (0);
do { __asm__ __volatile__( " .set push \n" " .set noat \n" " move $1, %0 \n" " # wrdsp $1, %x1 \n" " .word 0x7c2004f8 | (%x1 << 11) \n" " .set pop \n" : : "r" (0x00000000), "i" (0x3ff)); } while (0);
}
static inline __attribute__((always_inline)) void init_dsp(void)
{
if ((cpu_data[0].ases & 0x00000010))
__init_dsp();
}
# 24 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/system.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/watch.h" 1
# 15 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/watch.h"
void mips_install_watch_registers(void);
void mips_read_watch_registers(void);
void mips_clear_watch_registers(void);
void mips_probe_watch_registers(struct cpuinfo_mips *c);
# 25 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/system.h" 2
extern void *resume(void *last, void *next, void *next_ti);
struct task_struct;
extern unsigned int ll_bit;
extern struct task_struct *ll_task;
# 94 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/system.h"
static inline __attribute__((always_inline)) unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
__u32 retval;
__asm__ __volatile__(" \n" : : :"memory");
if (1 && 0) {
unsigned long dummy;
__asm__ __volatile__(
" .set mips3 \n"
"1: ll %0, %3 # xchg_u32 \n"
" .set mips0 \n"
" move %2, %z4 \n"
" .set mips3 \n"
" sc %2, %1 \n"
" beqzl %2, 1b \n"
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
: "memory");
} else if (1) {
unsigned long dummy;
__asm__ __volatile__(
" .set mips3 \n"
"1: ll %0, %3 # xchg_u32 \n"
" .set mips0 \n"
" move %2, %z4 \n"
" .set mips3 \n"
" sc %2, %1 \n"
" beqz %2, 2f \n"
" .subsection 2 \n"
"2: b 1b \n"
" .previous \n"
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
: "memory");
} else {
unsigned long flags;
__asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory");
retval = *m;
*m = val;
raw_local_irq_restore(flags);
}
__asm__ __volatile__(" \n" : : :"memory");
return retval;
}
# 197 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/system.h"
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
static inline __attribute__((always_inline)) unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
case 4:
return __xchg_u32(ptr, x);
case 8:
return __xchg_u64_unsupported_on_32bit_kernels(ptr, x);
}
return x;
}
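/*
 * __xchg_u32() above is the MIPS LL/SC exchange loop; the constant-folded
 * "if (1 && 0) ... else if (1)" branches are the pre-evaluated
 * kernel_uses_llsc / R10000_LLSC_WAR tests, so only the plain ll/sc variant
 * is live here and the interrupt-disable fallback is dead code.  __xchg()
 * dispatches on operand size; a 64-bit exchange on this 32-bit kernel
 * resolves to the deliberately unresolvable
 * __xchg_u64_unsupported_on_32bit_kernels().  A minimal sketch, assuming a
 * hypothetical flag word (callers normally reach this through the xchg()
 * macro, which is not visible in this expansion); kept in a comment:
 *
 *     static volatile int pending;
 *     int was_pending = __xchg(1, &pending, sizeof(pending));
 */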
# 221 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/system.h"
extern void set_handler(unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);
typedef void (*vi_handler_t)(void);
extern void *set_vi_handler(int n, vi_handler_t addr);
extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);
extern unsigned long arch_align_stack(unsigned long sp);
# 23 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/processor.h" 2
# 32 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/processor.h"
extern void (*cpu_wait)(void);
extern unsigned int vced_count, vcei_count;
# 99 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/processor.h"
typedef __u64 fpureg_t;
# 108 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/processor.h"
struct mips_fpu_struct {
fpureg_t fpr[32];
unsigned int fcr31;
};
typedef __u32 dspreg_t;
struct mips_dsp_state {
dspreg_t dspr[6];
unsigned int dspcontrol;
};
# 130 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/processor.h"
struct mips3264_watch_reg_state {
unsigned long watchlo[4];
u16 watchhi[4];
};
union mips_watch_reg_state {
struct mips3264_watch_reg_state mips3264;
};
# 197 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/processor.h"
typedef struct {
unsigned long seg;
} mm_segment_t;
struct mips_abi;
struct thread_struct {
unsigned long reg16;
unsigned long reg17, reg18, reg19, reg20, reg21, reg22, reg23;
unsigned long reg29, reg30, reg31;
unsigned long cp0_status;
struct mips_fpu_struct fpu;
unsigned long emulated_fp;
cpumask_t user_cpus_allowed;
struct mips_dsp_state dsp;
union mips_watch_reg_state watch;
unsigned long cp0_badvaddr;
unsigned long cp0_baduaddr;
unsigned long error_code;
unsigned long irix_trampoline;
unsigned long irix_oldctx;
struct mips_abi *abi;
};
# 315 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/processor.h"
struct task_struct;
extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
extern unsigned long thread_saved_pc(struct task_struct *tsk);
extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp);
unsigned long get_wchan(struct task_struct *p);
# 15 "include/linux/prefetch.h" 2
# 53 "include/linux/prefetch.h"
static inline __attribute__((always_inline)) void prefetch_range(void *addr, size_t len)
{
char *cp;
char *end = addr + len;
for (cp = addr; cp < end; cp += (4*(1 << 5)))
__builtin_prefetch((cp), 0, 1);
}
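/*
 * prefetch_range() above walks the buffer issuing __builtin_prefetch() every
 * (4*(1 << 5)) == 128 bytes, i.e. every fourth 32-byte L1 line on this
 * configuration.  A minimal sketch, assuming a hypothetical receive buffer;
 * kept in a comment:
 *
 *     char rx_frame[1518];
 *     prefetch_range(rx_frame, sizeof(rx_frame));   // warm the cache before parsing
 */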
# 8 "include/linux/list.h" 2
# 25 "include/linux/list.h"
static inline __attribute__((always_inline)) void INIT_LIST_HEAD(struct list_head *list)
{
list->next = list;
list->prev = list;
}
# 38 "include/linux/list.h"
static inline __attribute__((always_inline)) void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
next->prev = new;
new->next = next;
new->prev = prev;
prev->next = new;
}
# 61 "include/linux/list.h"
static inline __attribute__((always_inline)) void list_add(struct list_head *new, struct list_head *head)
{
__list_add(new, head, head->next);
}
# 75 "include/linux/list.h"
static inline __attribute__((always_inline)) void list_add_tail(struct list_head *new, struct list_head *head)
{
__list_add(new, head->prev, head);
}
# 87 "include/linux/list.h"
static inline __attribute__((always_inline)) void __list_del(struct list_head * prev, struct list_head * next)
{
next->prev = prev;
prev->next = next;
}
# 100 "include/linux/list.h"
static inline __attribute__((always_inline)) void list_del(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
entry->next = ((void *) 0x00100100 + 0);
entry->prev = ((void *) 0x00200200 + 0);
}
# 117 "include/linux/list.h"
static inline __attribute__((always_inline)) void list_replace(struct list_head *old,
struct list_head *new)
{
new->next = old->next;
new->next->prev = new;
new->prev = old->prev;
new->prev->next = new;
}
static inline __attribute__((always_inline)) void list_replace_init(struct list_head *old,
struct list_head *new)
{
list_replace(old, new);
INIT_LIST_HEAD(old);
}
static inline __attribute__((always_inline)) void list_del_init(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
INIT_LIST_HEAD(entry);
}
static inline __attribute__((always_inline)) void list_move(struct list_head *list, struct list_head *head)
{
__list_del(list->prev, list->next);
list_add(list, head);
}
static inline __attribute__((always_inline)) void list_move_tail(struct list_head *list,
struct list_head *head)
{
__list_del(list->prev, list->next);
list_add_tail(list, head);
}
static inline __attribute__((always_inline)) int list_is_last(const struct list_head *list,
const struct list_head *head)
{
return list->next == head;
}
static inline __attribute__((always_inline)) int list_empty(const struct list_head *head)
{
return head->next == head;
}
# 199 "include/linux/list.h"
static inline __attribute__((always_inline)) int list_empty_careful(const struct list_head *head)
{
struct list_head *next = head->next;
return (next == head) && (next == head->prev);
}
static inline __attribute__((always_inline)) void list_rotate_left(struct list_head *head)
{
struct list_head *first;
if (!list_empty(head)) {
first = head->next;
list_move_tail(first, head);
}
}
static inline __attribute__((always_inline)) int list_is_singular(const struct list_head *head)
{
return !list_empty(head) && (head->next == head->prev);
}
static inline __attribute__((always_inline)) void __list_cut_position(struct list_head *list,
struct list_head *head, struct list_head *entry)
{
struct list_head *new_first = entry->next;
list->next = head->next;
list->next->prev = list;
list->prev = entry;
entry->next = list;
head->next = new_first;
new_first->prev = head;
}
# 254 "include/linux/list.h"
static inline __attribute__((always_inline)) void list_cut_position(struct list_head *list,
struct list_head *head, struct list_head *entry)
{
if (list_empty(head))
return;
if (list_is_singular(head) &&
(head->next != entry && head != entry))
return;
if (entry == head)
INIT_LIST_HEAD(list);
else
__list_cut_position(list, head, entry);
}
static inline __attribute__((always_inline)) void __list_splice(const struct list_head *list,
struct list_head *prev,
struct list_head *next)
{
struct list_head *first = list->next;
struct list_head *last = list->prev;
first->prev = prev;
prev->next = first;
last->next = next;
next->prev = last;
}
static inline __attribute__((always_inline)) void list_splice(const struct list_head *list,
struct list_head *head)
{
if (!list_empty(list))
__list_splice(list, head, head->next);
}
static inline __attribute__((always_inline)) void list_splice_tail(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list))
__list_splice(list, head->prev, head);
}
# 313 "include/linux/list.h"
static inline __attribute__((always_inline)) void list_splice_init(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list)) {
__list_splice(list, head, head->next);
INIT_LIST_HEAD(list);
}
}
# 330 "include/linux/list.h"
static inline __attribute__((always_inline)) void list_splice_tail_init(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list)) {
__list_splice(list, head->prev, head);
INIT_LIST_HEAD(list);
}
}
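/*
 * The inlines above are the standard include/linux/list.h circular
 * doubly-linked list: list_add()/list_add_tail() splice a node in behind or
 * ahead of the head, and list_del() poisons ->next/->prev with
 * 0x00100100/0x00200200 so a use-after-delete faults early.  A minimal
 * sketch, assuming a hypothetical event type and queue; kept in a comment:
 *
 *     struct onu_event {
 *             struct list_head node;
 *             int code;
 *     };
 *     static struct list_head ev_q = { &ev_q, &ev_q };   // LIST_HEAD_INIT spelled out
 *
 *     static void queue_event(struct onu_event *ev)
 *     {
 *             list_add_tail(&ev->node, &ev_q);            // FIFO order
 *     }
 */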
# 569 "include/linux/list.h"
static inline __attribute__((always_inline)) void INIT_HLIST_NODE(struct hlist_node *h)
{
h->next = ((void *)0);
h->pprev = ((void *)0);
}
static inline __attribute__((always_inline)) int hlist_unhashed(const struct hlist_node *h)
{
return !h->pprev;
}
static inline __attribute__((always_inline)) int hlist_empty(const struct hlist_head *h)
{
return !h->first;
}
static inline __attribute__((always_inline)) void __hlist_del(struct hlist_node *n)
{
struct hlist_node *next = n->next;
struct hlist_node **pprev = n->pprev;
*pprev = next;
if (next)
next->pprev = pprev;
}
static inline __attribute__((always_inline)) void hlist_del(struct hlist_node *n)
{
__hlist_del(n);
n->next = ((void *) 0x00100100 + 0);
n->pprev = ((void *) 0x00200200 + 0);
}
static inline __attribute__((always_inline)) void hlist_del_init(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
__hlist_del(n);
INIT_HLIST_NODE(n);
}
}
static inline __attribute__((always_inline)) void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
n->next = first;
if (first)
first->pprev = &n->next;
h->first = n;
n->pprev = &h->first;
}
static inline __attribute__((always_inline)) void hlist_add_before(struct hlist_node *n,
struct hlist_node *next)
{
n->pprev = next->pprev;
n->next = next;
next->pprev = &n->next;
*(n->pprev) = n;
}
static inline __attribute__((always_inline)) void hlist_add_after(struct hlist_node *n,
struct hlist_node *next)
{
next->next = n->next;
n->next = next;
next->pprev = &n->next;
if(next->next)
next->next->pprev = &next->next;
}
static inline __attribute__((always_inline)) void hlist_move_list(struct hlist_head *old,
struct hlist_head *new)
{
new->first = old->first;
if (new->first)
new->first->pprev = &new->first;
old->first = ((void *)0);
}
# 10 "include/linux/module.h" 2
# 1 "include/linux/stat.h" 1
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/stat.h" 1
# 18 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/stat.h"
struct stat {
unsigned st_dev;
long st_pad1[3];
ino_t st_ino;
mode_t st_mode;
nlink_t st_nlink;
uid_t st_uid;
gid_t st_gid;
unsigned st_rdev;
long st_pad2[2];
off_t st_size;
long st_pad3;
time_t st_atime;
long st_atime_nsec;
time_t st_mtime;
long st_mtime_nsec;
time_t st_ctime;
long st_ctime_nsec;
long st_blksize;
long st_blocks;
long st_pad4[14];
};
struct stat64 {
unsigned long st_dev;
unsigned long st_pad0[3];
unsigned long long st_ino;
mode_t st_mode;
nlink_t st_nlink;
uid_t st_uid;
gid_t st_gid;
unsigned long st_rdev;
unsigned long st_pad1[3];
long long st_size;
time_t st_atime;
unsigned long st_atime_nsec;
time_t st_mtime;
unsigned long st_mtime_nsec;
time_t st_ctime;
unsigned long st_ctime_nsec;
unsigned long st_blksize;
unsigned long st_pad2;
long long st_blocks;
};
# 7 "include/linux/stat.h" 2
# 60 "include/linux/stat.h"
# 1 "include/linux/time.h" 1
# 1 "include/linux/cache.h" 1
# 8 "include/linux/time.h" 2
# 1 "include/linux/seqlock.h" 1
# 29 "include/linux/seqlock.h"
# 1 "include/linux/spinlock.h" 1
# 50 "include/linux/spinlock.h"
# 1 "include/linux/preempt.h" 1
# 9 "include/linux/preempt.h"
# 1 "include/linux/thread_info.h" 1
# 12 "include/linux/thread_info.h"
struct timespec;
struct compat_timespec;
struct restart_block {
long (*fn)(struct restart_block *);
union {
struct {
unsigned long arg0, arg1, arg2, arg3;
};
struct {
u32 *uaddr;
u32 val;
u32 flags;
u32 bitset;
u64 time;
u32 *uaddr2;
} futex;
struct {
clockid_t index;
struct timespec *rmtp;
u64 expires;
} nanosleep;
struct {
struct pollfd *ufds;
int nfds;
int has_timeout;
unsigned long tv_sec;
unsigned long tv_nsec;
} poll;
};
};
extern long do_no_restart_syscall(struct restart_block *parm);
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/thread_info.h" 1
# 24 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/thread_info.h"
struct thread_info {
struct task_struct *task;
struct exec_domain *exec_domain;
unsigned long flags;
unsigned long tp_value;
__u32 cpu;
int preempt_count;
mm_segment_t addr_limit;
struct restart_block restart_block;
struct pt_regs *regs;
};
# 60 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/thread_info.h"
register struct thread_info *__current_thread_info __asm__("$28");
# 57 "include/linux/thread_info.h" 2
# 65 "include/linux/thread_info.h"
static inline __attribute__((always_inline)) void set_ti_thread_flag(struct thread_info *ti, int flag)
{
set_bit(flag, (unsigned long *)&ti->flags);
}
static inline __attribute__((always_inline)) void clear_ti_thread_flag(struct thread_info *ti, int flag)
{
clear_bit(flag, (unsigned long *)&ti->flags);
}
static inline __attribute__((always_inline)) int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
{
return test_and_set_bit(flag, (unsigned long *)&ti->flags);
}
static inline __attribute__((always_inline)) int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
{
return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
}
static inline __attribute__((always_inline)) int test_ti_thread_flag(struct thread_info *ti, int flag)
{
return test_bit(flag, (unsigned long *)&ti->flags);
}
# 122 "include/linux/thread_info.h"
static inline __attribute__((always_inline)) void set_restore_sigmask(void)
{
set_ti_thread_flag(__current_thread_info, 9);
set_ti_thread_flag(__current_thread_info, 1);
}
# 10 "include/linux/preempt.h" 2
# 51 "include/linux/spinlock.h" 2
# 1 "include/linux/stringify.h" 1
# 56 "include/linux/spinlock.h" 2
# 1 "include/linux/bottom_half.h" 1
extern void local_bh_disable(void);
extern void _local_bh_enable(void);
extern void local_bh_enable(void);
extern void local_bh_enable_ip(unsigned long ip);
# 57 "include/linux/spinlock.h" 2
# 80 "include/linux/spinlock.h"
# 1 "include/linux/spinlock_types.h" 1
# 13 "include/linux/spinlock_types.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/spinlock_types.h" 1
# 12 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/spinlock_types.h"
typedef union {
u32 lock;
struct {
u16 ticket;
u16 serving_now;
} h;
} arch_spinlock_t;
typedef struct {
volatile unsigned int lock;
} arch_rwlock_t;
# 14 "include/linux/spinlock_types.h" 2
# 1 "include/linux/lockdep.h" 1
# 12 "include/linux/lockdep.h"
struct task_struct;
struct lockdep_map;
extern int prove_locking;
extern int lock_stat;
# 337 "include/linux/lockdep.h"
static inline __attribute__((always_inline)) void lockdep_off(void)
{
}
static inline __attribute__((always_inline)) void lockdep_on(void)
{
}
# 378 "include/linux/lockdep.h"
struct lock_class_key { };
# 428 "include/linux/lockdep.h"
extern void early_init_irq_lock_class(void);
# 440 "include/linux/lockdep.h"
static inline __attribute__((always_inline)) void early_boot_irqs_off(void)
{
}
static inline __attribute__((always_inline)) void early_boot_irqs_on(void)
{
}
static inline __attribute__((always_inline)) void print_irqtrace_events(struct task_struct *curr)
{
}
# 19 "include/linux/spinlock_types.h" 2
typedef struct raw_spinlock {
arch_spinlock_t raw_lock;
# 32 "include/linux/spinlock_types.h"
} raw_spinlock_t;
# 64 "include/linux/spinlock_types.h"
typedef struct spinlock {
union {
struct raw_spinlock rlock;
# 75 "include/linux/spinlock_types.h"
};
} spinlock_t;
# 94 "include/linux/spinlock_types.h"
# 1 "include/linux/rwlock_types.h" 1
# 11 "include/linux/rwlock_types.h"
typedef struct {
arch_rwlock_t raw_lock;
# 23 "include/linux/rwlock_types.h"
} rwlock_t;
# 95 "include/linux/spinlock_types.h" 2
# 81 "include/linux/spinlock.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/spinlock.h" 1
# 37 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/spinlock.h"
static inline __attribute__((always_inline)) int arch_spin_is_locked(arch_spinlock_t *lock)
{
u32 counters = (*(volatile typeof(lock->lock) *)&(lock->lock));
return ((counters >> 16) ^ counters) & 0xffff;
}
static inline __attribute__((always_inline)) int arch_spin_is_contended(arch_spinlock_t *lock)
{
u32 counters = (*(volatile typeof(lock->lock) *)&(lock->lock));
return (((counters >> 16) - counters) & 0xffff) > 1;
}
static inline __attribute__((always_inline)) void arch_spin_lock(arch_spinlock_t *lock)
{
int my_ticket;
int tmp;
int inc = 0x10000;
if (0) {
__asm__ __volatile__ (
" .set push # arch_spin_lock \n"
" .set noreorder \n"
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
" addu %[my_ticket], %[ticket], %[inc] \n"
" sc %[my_ticket], %[ticket_ptr] \n"
" beqzl %[my_ticket], 1b \n"
" nop \n"
" srl %[my_ticket], %[ticket], 16 \n"
" andi %[ticket], %[ticket], 0xffff \n"
" andi %[my_ticket], %[my_ticket], 0xffff \n"
" bne %[ticket], %[my_ticket], 4f \n"
" subu %[ticket], %[my_ticket], %[ticket] \n"
"2: \n"
" .subsection 2 \n"
"4: andi %[ticket], %[ticket], 0xffff \n"
" sll %[ticket], 5 \n"
" \n"
"6: bnez %[ticket], 6b \n"
" subu %[ticket], 1 \n"
" \n"
" lhu %[ticket], %[serving_now_ptr] \n"
" beq %[ticket], %[my_ticket], 2b \n"
" subu %[ticket], %[my_ticket], %[ticket] \n"
" b 4b \n"
" subu %[ticket], %[ticket], 1 \n"
" .previous \n"
" .set pop \n"
: [ticket_ptr] "+m" (lock->lock),
[serving_now_ptr] "+m" (lock->h.serving_now),
[ticket] "=&r" (tmp),
[my_ticket] "=&r" (my_ticket)
: [inc] "r" (inc));
} else {
__asm__ __volatile__ (
" .set push # arch_spin_lock \n"
" .set noreorder \n"
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
" addu %[my_ticket], %[ticket], %[inc] \n"
" sc %[my_ticket], %[ticket_ptr] \n"
" beqz %[my_ticket], 1b \n"
" srl %[my_ticket], %[ticket], 16 \n"
" andi %[ticket], %[ticket], 0xffff \n"
" andi %[my_ticket], %[my_ticket], 0xffff \n"
" bne %[ticket], %[my_ticket], 4f \n"
" subu %[ticket], %[my_ticket], %[ticket] \n"
"2: \n"
" .subsection 2 \n"
"4: andi %[ticket], %[ticket], 0x1fff \n"
" sll %[ticket], 5 \n"
" \n"
"6: bnez %[ticket], 6b \n"
" subu %[ticket], 1 \n"
" \n"
" lhu %[ticket], %[serving_now_ptr] \n"
" beq %[ticket], %[my_ticket], 2b \n"
" subu %[ticket], %[my_ticket], %[ticket] \n"
" b 4b \n"
" subu %[ticket], %[ticket], 1 \n"
" .previous \n"
" .set pop \n"
: [ticket_ptr] "+m" (lock->lock),
[serving_now_ptr] "+m" (lock->h.serving_now),
[ticket] "=&r" (tmp),
[my_ticket] "=&r" (my_ticket)
: [inc] "r" (inc));
}
__asm__ __volatile__(" \n" : : :"memory");
}
static inline __attribute__((always_inline)) void arch_spin_unlock(arch_spinlock_t *lock)
{
unsigned int serving_now = lock->h.serving_now + 1;
__asm__ __volatile__( ".set push\n\t" ".set noreorder\n\t" ".set mips2\n\t" "sync\n\t" ".set pop" : : : "memory");
lock->h.serving_now = (u16)serving_now;
__asm__ __volatile__( ".set push\n\t" ".set noreorder\n\t" ".set mips2\n\t" "sync\n\t" ".set pop" : : : "memory");
}
static inline __attribute__((always_inline)) unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
int tmp, tmp2, tmp3;
int inc = 0x10000;
if (0) {
__asm__ __volatile__ (
" .set push # arch_spin_trylock \n"
" .set noreorder \n"
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
" srl %[my_ticket], %[ticket], 16 \n"
" andi %[my_ticket], %[my_ticket], 0xffff \n"
" andi %[now_serving], %[ticket], 0xffff \n"
" bne %[my_ticket], %[now_serving], 3f \n"
" addu %[ticket], %[ticket], %[inc] \n"
" sc %[ticket], %[ticket_ptr] \n"
" beqzl %[ticket], 1b \n"
" li %[ticket], 1 \n"
"2: \n"
" .subsection 2 \n"
"3: b 2b \n"
" li %[ticket], 0 \n"
" .previous \n"
" .set pop \n"
: [ticket_ptr] "+m" (lock->lock),
[ticket] "=&r" (tmp),
[my_ticket] "=&r" (tmp2),
[now_serving] "=&r" (tmp3)
: [inc] "r" (inc));
} else {
__asm__ __volatile__ (
" .set push # arch_spin_trylock \n"
" .set noreorder \n"
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
" srl %[my_ticket], %[ticket], 16 \n"
" andi %[my_ticket], %[my_ticket], 0xffff \n"
" andi %[now_serving], %[ticket], 0xffff \n"
" bne %[my_ticket], %[now_serving], 3f \n"
" addu %[ticket], %[ticket], %[inc] \n"
" sc %[ticket], %[ticket_ptr] \n"
" beqz %[ticket], 1b \n"
" li %[ticket], 1 \n"
"2: \n"
" .subsection 2 \n"
"3: b 2b \n"
" li %[ticket], 0 \n"
" .previous \n"
" .set pop \n"
: [ticket_ptr] "+m" (lock->lock),
[ticket] "=&r" (tmp),
[my_ticket] "=&r" (tmp2),
[now_serving] "=&r" (tmp3)
: [inc] "r" (inc));
}
__asm__ __volatile__(" \n" : : :"memory");
return tmp;
}
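/*
 * The MIPS ticket spinlock above packs two 16-bit counters into
 * arch_spinlock_t.lock: the upper half is the next ticket to hand out (the
 * ll/sc loop adds 0x10000 to take one), the lower half is h.serving_now, so
 * arch_spin_is_locked() simply checks whether the halves differ and
 * arch_spin_unlock() only bumps serving_now between sync barriers.  A
 * minimal sketch of the raw interface (real callers use the spin_lock()
 * wrappers further down); kept in a comment:
 *
 *     arch_spinlock_t l = { .lock = 0 };   // halves equal => unlocked
 *     arch_spin_lock(&l);
 *     // ... critical section ...
 *     arch_spin_unlock(&l);
 */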
# 227 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/spinlock.h"
static inline __attribute__((always_inline)) void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
if (0) {
__asm__ __volatile__(
" .set noreorder # arch_read_lock \n"
"1: ll %1, %2 \n"
" bltz %1, 1b \n"
" addu %1, 1 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
" nop \n"
" .set reorder \n"
: "=m" (rw->lock), "=&r" (tmp)
: "m" (rw->lock)
: "memory");
} else {
__asm__ __volatile__(
" .set noreorder # arch_read_lock \n"
"1: ll %1, %2 \n"
" bltz %1, 2f \n"
" addu %1, 1 \n"
" sc %1, %0 \n"
" beqz %1, 1b \n"
" nop \n"
" .subsection 2 \n"
"2: ll %1, %2 \n"
" bltz %1, 2b \n"
" addu %1, 1 \n"
" b 1b \n"
" nop \n"
" .previous \n"
" .set reorder \n"
: "=m" (rw->lock), "=&r" (tmp)
: "m" (rw->lock)
: "memory");
}
__asm__ __volatile__(" \n" : : :"memory");
}
static inline __attribute__((always_inline)) void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int tmp;
__asm__ __volatile__(" \n" : : :"memory");
if (0) {
__asm__ __volatile__(
"1: ll %1, %2 # arch_read_unlock \n"
" sub %1, 1 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
: "=m" (rw->lock), "=&r" (tmp)
: "m" (rw->lock)
: "memory");
} else {
__asm__ __volatile__(
" .set noreorder # arch_read_unlock \n"
"1: ll %1, %2 \n"
" sub %1, 1 \n"
" sc %1, %0 \n"
" beqz %1, 2f \n"
" nop \n"
" .subsection 2 \n"
"2: b 1b \n"
" nop \n"
" .previous \n"
" .set reorder \n"
: "=m" (rw->lock), "=&r" (tmp)
: "m" (rw->lock)
: "memory");
}
}
static inline __attribute__((always_inline)) void arch_write_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
if (0) {
__asm__ __volatile__(
" .set noreorder # arch_write_lock \n"
"1: ll %1, %2 \n"
" bnez %1, 1b \n"
" lui %1, 0x8000 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
" nop \n"
" .set reorder \n"
: "=m" (rw->lock), "=&r" (tmp)
: "m" (rw->lock)
: "memory");
} else {
__asm__ __volatile__(
" .set noreorder # arch_write_lock \n"
"1: ll %1, %2 \n"
" bnez %1, 2f \n"
" lui %1, 0x8000 \n"
" sc %1, %0 \n"
" beqz %1, 2f \n"
" nop \n"
" .subsection 2 \n"
"2: ll %1, %2 \n"
" bnez %1, 2b \n"
" lui %1, 0x8000 \n"
" b 1b \n"
" nop \n"
" .previous \n"
" .set reorder \n"
: "=m" (rw->lock), "=&r" (tmp)
: "m" (rw->lock)
: "memory");
}
__asm__ __volatile__(" \n" : : :"memory");
}
static inline __attribute__((always_inline)) void arch_write_unlock(arch_rwlock_t *rw)
{
__asm__ __volatile__("": : :"memory");
__asm__ __volatile__(
" # arch_write_unlock \n"
" sw $0, %0 \n"
: "=m" (rw->lock)
: "m" (rw->lock)
: "memory");
}
static inline __attribute__((always_inline)) int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
if (0) {
__asm__ __volatile__(
" .set noreorder # arch_read_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bltz %1, 2f \n"
" addu %1, 1 \n"
" sc %1, %0 \n"
" .set reorder \n"
" beqzl %1, 1b \n"
" nop \n"
" \n"
" li %2, 1 \n"
"2: \n"
: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
: "m" (rw->lock)
: "memory");
} else {
__asm__ __volatile__(
" .set noreorder # arch_read_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bltz %1, 2f \n"
" addu %1, 1 \n"
" sc %1, %0 \n"
" beqz %1, 1b \n"
" nop \n"
" .set reorder \n"
" \n"
" li %2, 1 \n"
"2: \n"
: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
: "m" (rw->lock)
: "memory");
}
return ret;
}
static inline __attribute__((always_inline)) int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
if (0) {
__asm__ __volatile__(
" .set noreorder # arch_write_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bnez %1, 2f \n"
" lui %1, 0x8000 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
" nop \n"
" \n"
" li %2, 1 \n"
" .set reorder \n"
"2: \n"
: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
: "m" (rw->lock)
: "memory");
} else {
__asm__ __volatile__(
" .set noreorder # arch_write_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bnez %1, 2f \n"
" lui %1, 0x8000 \n"
" sc %1, %0 \n"
" beqz %1, 3f \n"
" li %2, 1 \n"
"2: \n"
" \n"
" .subsection 2 \n"
"3: b 1b \n"
" li %2, 0 \n"
" .previous \n"
" .set reorder \n"
: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
: "m" (rw->lock)
: "memory");
}
return ret;
}
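/*
 * The rwlock above keeps its whole state in one word: readers atomically
 * increment it, a writer tries to install 0x80000000 (the "lui %1, 0x8000"),
 * and the bltz/bnez tests treat a negative value as "writer held" and any
 * non-zero value as "not free for a writer".  arch_write_unlock() is just a
 * store of zero behind a barrier.  A minimal sketch, raw interface only;
 * kept in a comment:
 *
 *     arch_rwlock_t rw = { 0 };
 *     arch_read_lock(&rw);          // many concurrent readers allowed
 *     arch_read_unlock(&rw);
 *     if (arch_write_trylock(&rw)) {
 *             // exclusive access
 *             arch_write_unlock(&rw);
 *     }
 */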
# 87 "include/linux/spinlock.h" 2
# 121 "include/linux/spinlock.h"
static inline __attribute__((always_inline)) void smp_mb__after_lock(void) { __asm__ __volatile__("": : :"memory"); }
# 136 "include/linux/spinlock.h"
static inline __attribute__((always_inline)) void do_raw_spin_lock(raw_spinlock_t *lock)
{
(void)0;
arch_spin_lock(&lock->raw_lock);
}
static inline __attribute__((always_inline)) void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
{
(void)0;
arch_spin_lock(&lock->raw_lock);
}
static inline __attribute__((always_inline)) int do_raw_spin_trylock(raw_spinlock_t *lock)
{
return arch_spin_trylock(&(lock)->raw_lock);
}
static inline __attribute__((always_inline)) void do_raw_spin_unlock(raw_spinlock_t *lock)
{
arch_spin_unlock(&lock->raw_lock);
(void)0;
}
# 255 "include/linux/spinlock.h"
# 1 "include/linux/rwlock.h" 1
# 256 "include/linux/spinlock.h" 2
# 1 "include/linux/spinlock_api_smp.h" 1
# 18 "include/linux/spinlock_api_smp.h"
int in_lock_functions(unsigned long addr);
void __attribute__((section(".spinlock.text"))) _raw_spin_lock(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
;
void __attribute__((section(".spinlock.text")))
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
;
void __attribute__((section(".spinlock.text"))) _raw_spin_lock_bh(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_spin_lock_irq(raw_spinlock_t *lock)
;
unsigned long __attribute__((section(".spinlock.text"))) _raw_spin_lock_irqsave(raw_spinlock_t *lock)
;
unsigned long __attribute__((section(".spinlock.text")))
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
;
int __attribute__((section(".spinlock.text"))) _raw_spin_trylock(raw_spinlock_t *lock);
int __attribute__((section(".spinlock.text"))) _raw_spin_trylock_bh(raw_spinlock_t *lock);
void __attribute__((section(".spinlock.text"))) _raw_spin_unlock(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_spin_unlock_bh(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_spin_unlock_irq(raw_spinlock_t *lock) ;
void __attribute__((section(".spinlock.text")))
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
;
# 86 "include/linux/spinlock_api_smp.h"
static inline __attribute__((always_inline)) int __raw_spin_trylock(raw_spinlock_t *lock)
{
do { } while (0);
if (do_raw_spin_trylock(lock)) {
do { } while (0);
return 1;
}
do { } while (0);
return 0;
}
# 104 "include/linux/spinlock_api_smp.h"
static inline __attribute__((always_inline)) unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
unsigned long flags;
do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0);
do { } while (0);
do { } while (0);
# 119 "include/linux/spinlock_api_smp.h"
do_raw_spin_lock_flags(lock, &flags);
return flags;
}
static inline __attribute__((always_inline)) void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
do { raw_local_irq_disable(); do { } while (0); } while (0);
do { } while (0);
do { } while (0);
do_raw_spin_lock(lock);
}
static inline __attribute__((always_inline)) void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
local_bh_disable();
do { } while (0);
do { } while (0);
do_raw_spin_lock(lock);
}
static inline __attribute__((always_inline)) void __raw_spin_lock(raw_spinlock_t *lock)
{
do { } while (0);
do { } while (0);
do_raw_spin_lock(lock);
}
static inline __attribute__((always_inline)) void __raw_spin_unlock(raw_spinlock_t *lock)
{
do { } while (0);
do_raw_spin_unlock(lock);
do { } while (0);
}
static inline __attribute__((always_inline)) void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
unsigned long flags)
{
do { } while (0);
do_raw_spin_unlock(lock);
do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0);
do { } while (0);
}
static inline __attribute__((always_inline)) void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
do { } while (0);
do_raw_spin_unlock(lock);
do { do { } while (0); raw_local_irq_enable(); } while (0);
do { } while (0);
}
static inline __attribute__((always_inline)) void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
do { } while (0);
do_raw_spin_unlock(lock);
do { } while (0);
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
static inline __attribute__((always_inline)) int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
local_bh_disable();
do { } while (0);
if (do_raw_spin_trylock(lock)) {
do { } while (0);
return 1;
}
do { } while (0);
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
return 0;
}
# 1 "include/linux/rwlock_api_smp.h" 1
# 18 "include/linux/rwlock_api_smp.h"
void __attribute__((section(".spinlock.text"))) _raw_read_lock(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_write_lock(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_read_lock_bh(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_write_lock_bh(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_read_lock_irq(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_write_lock_irq(rwlock_t *lock) ;
unsigned long __attribute__((section(".spinlock.text"))) _raw_read_lock_irqsave(rwlock_t *lock)
;
unsigned long __attribute__((section(".spinlock.text"))) _raw_write_lock_irqsave(rwlock_t *lock)
;
int __attribute__((section(".spinlock.text"))) _raw_read_trylock(rwlock_t *lock);
int __attribute__((section(".spinlock.text"))) _raw_write_trylock(rwlock_t *lock);
void __attribute__((section(".spinlock.text"))) _raw_read_unlock(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_write_unlock(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_read_unlock_bh(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_write_unlock_bh(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_read_unlock_irq(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text"))) _raw_write_unlock_irq(rwlock_t *lock) ;
void __attribute__((section(".spinlock.text")))
_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
;
void __attribute__((section(".spinlock.text")))
_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
;
# 117 "include/linux/rwlock_api_smp.h"
static inline __attribute__((always_inline)) int __raw_read_trylock(rwlock_t *lock)
{
do { } while (0);
if (arch_read_trylock(&(lock)->raw_lock)) {
do { } while (0);
return 1;
}
do { } while (0);
return 0;
}
static inline __attribute__((always_inline)) int __raw_write_trylock(rwlock_t *lock)
{
do { } while (0);
if (arch_write_trylock(&(lock)->raw_lock)) {
do { } while (0);
return 1;
}
do { } while (0);
return 0;
}
# 146 "include/linux/rwlock_api_smp.h"
static inline __attribute__((always_inline)) void __raw_read_lock(rwlock_t *lock)
{
do { } while (0);
do { } while (0);
do {(void)0; arch_read_lock(&(lock)->raw_lock); } while (0);
}
static inline __attribute__((always_inline)) unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
{
unsigned long flags;
do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0);
do { } while (0);
do { } while (0);
do {(void)0; arch_read_lock(&((lock))->raw_lock); } while (0);
return flags;
}
static inline __attribute__((always_inline)) void __raw_read_lock_irq(rwlock_t *lock)
{
do { raw_local_irq_disable(); do { } while (0); } while (0);
do { } while (0);
do { } while (0);
do {(void)0; arch_read_lock(&(lock)->raw_lock); } while (0);
}
static inline __attribute__((always_inline)) void __raw_read_lock_bh(rwlock_t *lock)
{
local_bh_disable();
do { } while (0);
do { } while (0);
do {(void)0; arch_read_lock(&(lock)->raw_lock); } while (0);
}
static inline __attribute__((always_inline)) unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
{
unsigned long flags;
do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0);
do { } while (0);
do { } while (0);
do {(void)0; arch_write_lock(&((lock))->raw_lock); } while (0);
return flags;
}
static inline __attribute__((always_inline)) void __raw_write_lock_irq(rwlock_t *lock)
{
do { raw_local_irq_disable(); do { } while (0); } while (0);
do { } while (0);
do { } while (0);
do {(void)0; arch_write_lock(&(lock)->raw_lock); } while (0);
}
static inline __attribute__((always_inline)) void __raw_write_lock_bh(rwlock_t *lock)
{
local_bh_disable();
do { } while (0);
do { } while (0);
do {(void)0; arch_write_lock(&(lock)->raw_lock); } while (0);
}
static inline __attribute__((always_inline)) void __raw_write_lock(rwlock_t *lock)
{
do { } while (0);
do { } while (0);
do {(void)0; arch_write_lock(&(lock)->raw_lock); } while (0);
}
static inline __attribute__((always_inline)) void __raw_write_unlock(rwlock_t *lock)
{
do { } while (0);
do {arch_write_unlock(&(lock)->raw_lock); (void)0; } while (0);
do { } while (0);
}
static inline __attribute__((always_inline)) void __raw_read_unlock(rwlock_t *lock)
{
do { } while (0);
do {arch_read_unlock(&(lock)->raw_lock); (void)0; } while (0);
do { } while (0);
}
static inline __attribute__((always_inline)) void
__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
do { } while (0);
do {arch_read_unlock(&(lock)->raw_lock); (void)0; } while (0);
do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0);
do { } while (0);
}
static inline __attribute__((always_inline)) void __raw_read_unlock_irq(rwlock_t *lock)
{
do { } while (0);
do {arch_read_unlock(&(lock)->raw_lock); (void)0; } while (0);
do { do { } while (0); raw_local_irq_enable(); } while (0);
do { } while (0);
}
static inline __attribute__((always_inline)) void __raw_read_unlock_bh(rwlock_t *lock)
{
do { } while (0);
do {arch_read_unlock(&(lock)->raw_lock); (void)0; } while (0);
do { } while (0);
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
static inline __attribute__((always_inline)) void __raw_write_unlock_irqrestore(rwlock_t *lock,
unsigned long flags)
{
do { } while (0);
do {arch_write_unlock(&(lock)->raw_lock); (void)0; } while (0);
do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0);
do { } while (0);
}
static inline __attribute__((always_inline)) void __raw_write_unlock_irq(rwlock_t *lock)
{
do { } while (0);
do {arch_write_unlock(&(lock)->raw_lock); (void)0; } while (0);
do { do { } while (0); raw_local_irq_enable(); } while (0);
do { } while (0);
}
static inline __attribute__((always_inline)) void __raw_write_unlock_bh(rwlock_t *lock)
{
do { } while (0);
do {arch_write_unlock(&(lock)->raw_lock); (void)0; } while (0);
do { } while (0);
local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
# 195 "include/linux/spinlock_api_smp.h" 2
# 262 "include/linux/spinlock.h" 2
# 270 "include/linux/spinlock.h"
static inline __attribute__((always_inline)) raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
return &lock->rlock;
}
static inline __attribute__((always_inline)) void spin_lock(spinlock_t *lock)
{
_raw_spin_lock(&lock->rlock);
}
static inline __attribute__((always_inline)) void spin_lock_bh(spinlock_t *lock)
{
_raw_spin_lock_bh(&lock->rlock);
}
static inline __attribute__((always_inline)) int spin_trylock(spinlock_t *lock)
{
return (_raw_spin_trylock(&lock->rlock));
}
# 306 "include/linux/spinlock.h"
static inline __attribute__((always_inline)) void spin_lock_irq(spinlock_t *lock)
{
_raw_spin_lock_irq(&lock->rlock);
}
# 320 "include/linux/spinlock.h"
static inline __attribute__((always_inline)) void spin_unlock(spinlock_t *lock)
{
__raw_spin_unlock(&lock->rlock);
}
static inline __attribute__((always_inline)) void spin_unlock_bh(spinlock_t *lock)
{
_raw_spin_unlock_bh(&lock->rlock);
}
static inline __attribute__((always_inline)) void spin_unlock_irq(spinlock_t *lock)
{
__raw_spin_unlock_irq(&lock->rlock);
}
static inline __attribute__((always_inline)) void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _raw_spin_unlock_irqrestore(&lock->rlock, flags); } while (0);
}
static inline __attribute__((always_inline)) int spin_trylock_bh(spinlock_t *lock)
{
return (_raw_spin_trylock_bh(&lock->rlock));
}
static inline __attribute__((always_inline)) int spin_trylock_irq(spinlock_t *lock)
{
return ({ do { raw_local_irq_disable(); do { } while (0); } while (0); (_raw_spin_trylock(&lock->rlock)) ? 1 : ({ do { do { } while (0); raw_local_irq_enable(); } while (0); 0; }); });
}
static inline __attribute__((always_inline)) void spin_unlock_wait(spinlock_t *lock)
{
while (arch_spin_is_locked(&(&lock->rlock)->raw_lock)) { __asm__ __volatile__("": : :"memory"); };
}
static inline __attribute__((always_inline)) int spin_is_locked(spinlock_t *lock)
{
return arch_spin_is_locked(&(&lock->rlock)->raw_lock);
}
static inline __attribute__((always_inline)) int spin_is_contended(spinlock_t *lock)
{
return arch_spin_is_contended(&(&lock->rlock)->raw_lock);
}
static inline __attribute__((always_inline)) int spin_can_lock(spinlock_t *lock)
{
return (!arch_spin_is_locked(&(&lock->rlock)->raw_lock));
}
static inline __attribute__((always_inline)) void assert_spin_locked(spinlock_t *lock)
{
__BUG_ON((unsigned long)(!arch_spin_is_locked(&(&lock->rlock)->raw_lock)));
}
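/*
 * spin_lock()/spin_unlock() and friends above are the spinlock_t wrappers
 * the rest of this module actually calls; they forward either to the inline
 * __raw_* helpers or to the out-of-line _raw_* entry points declared above,
 * which end in the arch_ ticket-lock code expanded earlier.  A minimal
 * sketch, assuming a hypothetical statistics lock (spin_lock_init() /
 * DEFINE_SPINLOCK() in unexpanded source; the zero-filled static is shown
 * only for illustration); kept in a comment:
 *
 *     static spinlock_t stats_lock;
 *     static unsigned long tx_frames;
 *
 *     static void account_tx(void)
 *     {
 *             spin_lock_bh(&stats_lock);   // also keeps softirq users out
 *             tx_frames++;
 *             spin_unlock_bh(&stats_lock);
 *     }
 */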
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/atomic.h" 1
# 50 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/atomic.h"
static __inline__ __attribute__((always_inline)) void atomic_add(int i, atomic_t * v)
{
if (1 && 0) {
int temp;
__asm__ __volatile__(
" .set mips3 \n"
"1: ll %0, %1 # atomic_add \n"
" addu %0, %2 \n"
" sc %0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter));
} else if (1) {
int temp;
__asm__ __volatile__(
" .set mips3 \n"
"1: ll %0, %1 # atomic_add \n"
" addu %0, %2 \n"
" sc %0, %1 \n"
" beqz %0, 2f \n"
" .subsection 2 \n"
"2: b 1b \n"
" .previous \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter));
} else {
unsigned long flags;
__asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory");
v->counter += i;
raw_local_irq_restore(flags);
}
}
# 95 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/atomic.h"
static __inline__ __attribute__((always_inline)) void atomic_sub(int i, atomic_t * v)
{
if (1 && 0) {
int temp;
__asm__ __volatile__(
" .set mips3 \n"
"1: ll %0, %1 # atomic_sub \n"
" subu %0, %2 \n"
" sc %0, %1 \n"
" beqzl %0, 1b \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter));
} else if (1) {
int temp;
__asm__ __volatile__(
" .set mips3 \n"
"1: ll %0, %1 # atomic_sub \n"
" subu %0, %2 \n"
" sc %0, %1 \n"
" beqz %0, 2f \n"
" .subsection 2 \n"
"2: b 1b \n"
" .previous \n"
" .set mips0 \n"
: "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter));
} else {
unsigned long flags;
__asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory");
v->counter -= i;
raw_local_irq_restore(flags);
}
}
static __inline__ __attribute__((always_inline)) int atomic_add_return(int i, atomic_t * v)
{
int result;
__asm__ __volatile__(" \n" : : :"memory");
if (1 && 0) {
int temp;
__asm__ __volatile__(
" .set mips3 \n"
"1: ll %1, %2 # atomic_add_return \n"
" addu %0, %1, %3 \n"
" sc %0, %2 \n"
" beqzl %0, 1b \n"
" addu %0, %1, %3 \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
: "memory");
} else if (1) {
int temp;
__asm__ __volatile__(
" .set mips3 \n"
"1: ll %1, %2 # atomic_add_return \n"
" addu %0, %1, %3 \n"
" sc %0, %2 \n"
" beqz %0, 2f \n"
" addu %0, %1, %3 \n"
" .subsection 2 \n"
"2: b 1b \n"
" .previous \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
: "memory");
} else {
unsigned long flags;
__asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory");
result = v->counter;
result += i;
v->counter = result;
raw_local_irq_restore(flags);
}
__asm__ __volatile__(" \n" : : :"memory");
return result;
}
static __inline__ __attribute__((always_inline)) int atomic_sub_return(int i, atomic_t * v)
{
int result;
__asm__ __volatile__(" \n" : : :"memory");
if (1 && 0) {
int temp;
__asm__ __volatile__(
" .set mips3 \n"
"1: ll %1, %2 # atomic_sub_return \n"
" subu %0, %1, %3 \n"
" sc %0, %2 \n"
" beqzl %0, 1b \n"
" subu %0, %1, %3 \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
: "memory");
} else if (1) {
int temp;
__asm__ __volatile__(
" .set mips3 \n"
"1: ll %1, %2 # atomic_sub_return \n"
" subu %0, %1, %3 \n"
" sc %0, %2 \n"
" beqz %0, 2f \n"
" subu %0, %1, %3 \n"
" .subsection 2 \n"
"2: b 1b \n"
" .previous \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
: "memory");
} else {
unsigned long flags;
__asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory");
result = v->counter;
result -= i;
v->counter = result;
raw_local_irq_restore(flags);
}
__asm__ __volatile__(" \n" : : :"memory");
return result;
}
# 248 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/atomic.h"
static __inline__ __attribute__((always_inline)) int atomic_sub_if_positive(int i, atomic_t * v)
{
int result;
__asm__ __volatile__(" \n" : : :"memory");
if (1 && 0) {
int temp;
__asm__ __volatile__(
" .set mips3 \n"
"1: ll %1, %2 # atomic_sub_if_positive\n"
" subu %0, %1, %3 \n"
" bltz %0, 1f \n"
" sc %0, %2 \n"
" .set noreorder \n"
" beqzl %0, 1b \n"
" subu %0, %1, %3 \n"
" .set reorder \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
: "memory");
} else if (1) {
int temp;
__asm__ __volatile__(
" .set mips3 \n"
"1: ll %1, %2 # atomic_sub_if_positive\n"
" subu %0, %1, %3 \n"
" bltz %0, 1f \n"
" sc %0, %2 \n"
" .set noreorder \n"
" beqz %0, 2f \n"
" subu %0, %1, %3 \n"
" .set reorder \n"
" .subsection 2 \n"
"2: b 1b \n"
" .previous \n"
"1: \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
: "memory");
} else {
unsigned long flags;
__asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory");
result = v->counter;
result -= i;
if (result >= 0)
v->counter = result;
raw_local_irq_restore(flags);
}
__asm__ __volatile__(" \n" : : :"memory");
return result;
}
# 321 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/atomic.h"
static __inline__ __attribute__((always_inline)) int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = (*(volatile int *)&(v)->counter);
for (;;) {
if (__builtin_expect(!!(c == (u)), 0))
break;
old = (({ __typeof__(&(((v))->counter)) __ptr = (&(((v))->counter)); __typeof__(*(&(((v))->counter))) __old = ((c)); __typeof__(*(&(((v))->counter))) __new = ((c + (a))); __typeof__(*(&(((v))->counter))) __res = 0; __asm__ __volatile__(" \n" : : :"memory"); switch (sizeof(*(__ptr))) { case 4: __res = ({ __typeof(*(__ptr)) __ret; if (1 && 0) { __asm__ __volatile__( " .set push \n" " .set noat \n" " .set mips3 \n" "1: " "ll" " %0, %2 # __cmpxchg_asm \n" " bne %0, %z3, 2f \n" " .set mips0 \n" " move $1, %z4 \n" " .set mips3 \n" " " "sc" " $1, %1 \n" " beqzl $1, 1b \n" "2: \n" " .set pop \n" : "=&r" (__ret), "=R" (*__ptr) : "R" (*__ptr), "Jr" (__old), "Jr" (__new) : "memory"); } else if (1) { __asm__ __volatile__( " .set push \n" " .set noat \n" " .set mips3 \n" "1: " "ll" " %0, %2 # __cmpxchg_asm \n" " bne %0, %z3, 2f \n" " .set mips0 \n" " move $1, %z4 \n" " .set mips3 \n" " " "sc" " $1, %1 \n" " beqz $1, 3f \n" "2: \n" " .subsection 2 \n" "3: b 1b \n" " .previous \n" " .set pop \n" : "=&r" (__ret), "=R" (*__ptr) : "R" (*__ptr), "Jr" (__old), "Jr" (__new) : "memory"); } else { unsigned long __flags; __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); __ret = *__ptr; if (__ret == __old) *__ptr = __new; raw_local_irq_restore(__flags); } __ret; }); break; case 8: if (sizeof(long) == 8) { __res = ({ __typeof(*(__ptr)) __ret; if (1 && 0) { __asm__ __volatile__( " .set push \n" " .set noat \n" " .set mips3 \n" "1: " "lld" " %0, %2 # __cmpxchg_asm \n" " bne %0, %z3, 2f \n" " .set mips0 \n" " move $1, %z4 \n" " .set mips3 \n" " " "scd" " $1, %1 \n" " beqzl $1, 1b \n" "2: \n" " .set pop \n" : "=&r" (__ret), "=R" (*__ptr) : "R" (*__ptr), "Jr" (__old), "Jr" (__new) : "memory"); } else if (1) { __asm__ __volatile__( " .set push \n" " .set noat \n" " .set mips3 \n" "1: " "lld" " %0, %2 # __cmpxchg_asm \n" " bne %0, %z3, 2f \n" " .set mips0 \n" " move $1, %z4 \n" " .set mips3 \n" " " "scd" " $1, %1 \n" " beqz $1, 3f \n" "2: \n" " .subsection 2 \n" "3: b 1b \n" " .previous \n" " .set pop \n" : "=&r" (__ret), "=R" (*__ptr) : "R" (*__ptr), "Jr" (__old), "Jr" (__new) : "memory"); } else { unsigned long __flags; __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); __ret = *__ptr; if (__ret == __old) *__ptr = __new; raw_local_irq_restore(__flags); } __ret; }); break; } default: __cmpxchg_called_with_bad_pointer(); break; } __asm__ __volatile__(" \n" : : :"memory"); __res; }));
if (__builtin_expect(!!(old == c), 1))
break;
c = old;
}
return c != (u);
}
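/*
 * atomic_add_unless() above sits on the one-line cmpxchg() expansion: it
 * re-reads v->counter and retries the LL/SC compare-exchange until either
 * the add lands or the value equals u, returning non-zero only if it added.
 * A minimal sketch of the usual "take a reference unless it already hit
 * zero" pattern, assuming a hypothetical refcount; kept in a comment:
 *
 *     static atomic_t refcnt = { 1 };
 *     if (atomic_add_unless(&refcnt, 1, 0)) {
 *             // object still live; we now hold an extra reference
 *     }
 */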
# 787 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/atomic.h"
# 1 "include/asm-generic/atomic64.h" 1
# 15 "include/asm-generic/atomic64.h"
typedef struct {
long long counter;
} atomic64_t;
extern long long atomic64_read(const atomic64_t *v);
extern void atomic64_set(atomic64_t *v, long long i);
extern void atomic64_add(long long a, atomic64_t *v);
extern long long atomic64_add_return(long long a, atomic64_t *v);
extern void atomic64_sub(long long a, atomic64_t *v);
extern long long atomic64_sub_return(long long a, atomic64_t *v);
extern long long atomic64_dec_if_positive(atomic64_t *v);
extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
extern long long atomic64_xchg(atomic64_t *v, long long new);
extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
# 788 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/atomic.h" 2
# 800 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/atomic.h"
# 1 "include/asm-generic/atomic-long.h" 1
# 141 "include/asm-generic/atomic-long.h"
typedef atomic_t atomic_long_t;
static inline __attribute__((always_inline)) long atomic_long_read(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)(*(volatile int *)&(v)->counter);
}
static inline __attribute__((always_inline)) void atomic_long_set(atomic_long_t *l, long i)
{
atomic_t *v = (atomic_t *)l;
((v)->counter = (i));
}
static inline __attribute__((always_inline)) void atomic_long_inc(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_add(1, (v));
}
static inline __attribute__((always_inline)) void atomic_long_dec(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_sub(1, (v));
}
static inline __attribute__((always_inline)) void atomic_long_add(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_add(i, v);
}
static inline __attribute__((always_inline)) void atomic_long_sub(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_sub(i, v);
}
static inline __attribute__((always_inline)) int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (atomic_sub_return((i), (v)) == 0);
}
static inline __attribute__((always_inline)) int atomic_long_dec_and_test(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (atomic_sub_return(1, (v)) == 0);
}
static inline __attribute__((always_inline)) int atomic_long_inc_and_test(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (atomic_add_return(1, (v)) == 0);
}
static inline __attribute__((always_inline)) int atomic_long_add_negative(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (atomic_add_return(i, (v)) < 0);
}
static inline __attribute__((always_inline)) long atomic_long_add_return(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_add_return(i, v);
}
static inline __attribute__((always_inline)) long atomic_long_sub_return(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_sub_return(i, v);
}
static inline __attribute__((always_inline)) long atomic_long_inc_return(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_add_return(1, (v));
}
static inline __attribute__((always_inline)) long atomic_long_dec_return(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_sub_return(1, (v));
}
static inline __attribute__((always_inline)) long atomic_long_add_unless(atomic_long_t *l, long a, long u)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_add_unless(v, a, u);
}
# 801 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/atomic.h" 2
# 385 "include/linux/spinlock.h" 2
# 393 "include/linux/spinlock.h"
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
# 30 "include/linux/seqlock.h" 2
typedef struct {
unsigned sequence;
spinlock_t lock;
} seqlock_t;
# 60 "include/linux/seqlock.h"
static inline __attribute__((always_inline)) void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
++sl->sequence;
__asm__ __volatile__("": : :"memory");
}
static inline __attribute__((always_inline)) void write_sequnlock(seqlock_t *sl)
{
__asm__ __volatile__("": : :"memory");
sl->sequence++;
spin_unlock(&sl->lock);
}
static inline __attribute__((always_inline)) int write_tryseqlock(seqlock_t *sl)
{
int ret = spin_trylock(&sl->lock);
if (ret) {
++sl->sequence;
__asm__ __volatile__("": : :"memory");
}
return ret;
}
static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned read_seqbegin(const seqlock_t *sl)
{
unsigned ret;
repeat:
ret = sl->sequence;
__asm__ __volatile__("": : :"memory");
if (__builtin_expect(!!(ret & 1), 0)) {
__asm__ __volatile__("": : :"memory");
goto repeat;
}
return ret;
}
static inline __attribute__((always_inline)) __attribute__((always_inline)) int read_seqretry(const seqlock_t *sl, unsigned start)
{
__asm__ __volatile__("": : :"memory");
return (sl->sequence != start);
}
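/*
 * The seqlock above pairs a spinlock for writers with a sequence counter for
 * lockless readers: write_seqlock() makes the sequence odd while an update
 * is in flight, and read_seqbegin()/read_seqretry() spin or retry until the
 * reader sees the same even value before and after its snapshot.  A minimal
 * reader/writer sketch, assuming a hypothetical counter protected by a
 * hypothetical seqlock; kept in a comment:
 *
 *     static seqlock_t stats_seq;   // DEFINE_SEQLOCK() in unexpanded source
 *     static u64 rx_octets;
 *
 *     // writer: write_seqlock(&stats_seq); rx_octets += len; write_sequnlock(&stats_seq);
 *
 *     u64 snapshot;
 *     unsigned seq;
 *     do {
 *             seq = read_seqbegin(&stats_seq);
 *             snapshot = rx_octets;
 *     } while (read_seqretry(&stats_seq, seq));
 */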
# 121 "include/linux/seqlock.h"
typedef struct seqcount {
unsigned sequence;
} seqcount_t;
static inline __attribute__((always_inline)) unsigned read_seqcount_begin(const seqcount_t *s)
{
unsigned ret;
repeat:
ret = s->sequence;
__asm__ __volatile__("": : :"memory");
if (__builtin_expect(!!(ret & 1), 0)) {
__asm__ __volatile__("": : :"memory");
goto repeat;
}
return ret;
}
static inline __attribute__((always_inline)) int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
__asm__ __volatile__("": : :"memory");
return s->sequence != start;
}
static inline __attribute__((always_inline)) void write_seqcount_begin(seqcount_t *s)
{
s->sequence++;
__asm__ __volatile__("": : :"memory");
}
static inline __attribute__((always_inline)) void write_seqcount_end(seqcount_t *s)
{
__asm__ __volatile__("": : :"memory");
s->sequence++;
}
# 9 "include/linux/time.h" 2
# 1 "include/linux/math64.h" 1
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/div64.h" 1
# 12 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/div64.h"
# 1 "include/asm-generic/div64.h" 1
# 35 "include/asm-generic/div64.h"
extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
# 13 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/div64.h" 2
# 6 "include/linux/math64.h" 2
# 41 "include/linux/math64.h"
static inline __attribute__((always_inline)) u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
*remainder = ({ uint32_t __base = (divisor); uint32_t __rem; (void)(((typeof((dividend)) *)0) == ((uint64_t *)0)); if (__builtin_expect(!!(((dividend) >> 32) == 0), 1)) { __rem = (uint32_t)(dividend) % __base; (dividend) = (uint32_t)(dividend) / __base; } else __rem = __div64_32(&(dividend), __base); __rem; });
return dividend;
}
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
extern u64 div64_u64(u64 dividend, u64 divisor);
# 66 "include/linux/math64.h"
static inline __attribute__((always_inline)) u64 div_u64(u64 dividend, u32 divisor)
{
u32 remainder;
return div_u64_rem(dividend, divisor, &remainder);
}
static inline __attribute__((always_inline)) s64 div_s64(s64 dividend, s32 divisor)
{
s32 remainder;
return div_s64_rem(dividend, divisor, &remainder);
}
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
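/*
 * __iter_div_u64_rem(): divide by repeated subtraction.  Only sensible when
 * the quotient is expected to be tiny (timespec_add_ns() below relies on
 * this), since the cost is linear in the quotient.
 */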
static inline __attribute__((always_inline)) __attribute__((always_inline)) u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
u32 ret = 0;
while (dividend >= divisor) {
asm("" : "+rm"(dividend));
dividend -= divisor;
ret++;
}
*remainder = dividend;
return ret;
}
# 10 "include/linux/time.h" 2
struct timespec {
__kernel_time_t tv_sec;
long tv_nsec;
};
struct timeval {
__kernel_time_t tv_sec;
__kernel_suseconds_t tv_usec;
};
struct timezone {
int tz_minuteswest;
int tz_dsttime;
};
extern struct timezone sys_tz;
# 45 "include/linux/time.h"
static inline __attribute__((always_inline)) int timespec_equal(const struct timespec *a,
const struct timespec *b)
{
return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
}
static inline __attribute__((always_inline)) int timespec_compare(const struct timespec *lhs, const struct timespec *rhs)
{
if (lhs->tv_sec < rhs->tv_sec)
return -1;
if (lhs->tv_sec > rhs->tv_sec)
return 1;
return lhs->tv_nsec - rhs->tv_nsec;
}
static inline __attribute__((always_inline)) int timeval_compare(const struct timeval *lhs, const struct timeval *rhs)
{
if (lhs->tv_sec < rhs->tv_sec)
return -1;
if (lhs->tv_sec > rhs->tv_sec)
return 1;
return lhs->tv_usec - rhs->tv_usec;
}
extern unsigned long mktime(const unsigned int year, const unsigned int mon,
const unsigned int day, const unsigned int hour,
const unsigned int min, const unsigned int sec);
extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec);
extern struct timespec timespec_add_safe(const struct timespec lhs,
const struct timespec rhs);
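/*
 * timespec_add()/timespec_sub() feed the raw component sums through
 * set_normalized_timespec(), which keeps tv_nsec within [0, NSEC_PER_SEC).
 */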
static inline __attribute__((always_inline)) struct timespec timespec_add(struct timespec lhs,
struct timespec rhs)
{
struct timespec ts_delta;
set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec,
lhs.tv_nsec + rhs.tv_nsec);
return ts_delta;
}
static inline __attribute__((always_inline)) struct timespec timespec_sub(struct timespec lhs,
struct timespec rhs)
{
struct timespec ts_delta;
set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
lhs.tv_nsec - rhs.tv_nsec);
return ts_delta;
}
extern seqlock_t xtime_lock;
extern void read_persistent_clock(struct timespec *ts);
extern void read_boot_clock(struct timespec *ts);
extern int update_persistent_clock(struct timespec now);
extern int no_sync_cmos_clock;
void timekeeping_init(void);
extern int timekeeping_suspended;
unsigned long get_seconds(void);
struct timespec current_kernel_time(void);
struct timespec __current_kernel_time(void);
struct timespec __get_wall_to_monotonic(void);
struct timespec get_monotonic_coarse(void);
# 146 "include/linux/time.h"
static inline __attribute__((always_inline)) u32 arch_gettimeoffset(void) { return 0; }
extern void do_gettimeofday(struct timeval *tv);
extern int do_settimeofday(struct timespec *tv);
extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
extern long do_utimes(int dfd, const char *filename, struct timespec *times, int flags);
struct itimerval;
extern int do_setitimer(int which, struct itimerval *value,
struct itimerval *ovalue);
extern unsigned int alarm_setitimer(unsigned int seconds);
extern int do_getitimer(int which, struct itimerval *value);
extern void getnstimeofday(struct timespec *tv);
extern void getrawmonotonic(struct timespec *ts);
extern void getboottime(struct timespec *ts);
extern void monotonic_to_bootbased(struct timespec *ts);
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
extern int timekeeping_valid_for_hres(void);
extern u64 timekeeping_max_deferment(void);
extern void update_wall_time(void);
extern void timekeeping_leap_insert(int leapsecond);
struct tms;
extern void do_sys_times(struct tms *);
struct tm {
int tm_sec;
int tm_min;
int tm_hour;
int tm_mday;
int tm_mon;
long tm_year;
int tm_wday;
int tm_yday;
};
void time_to_tm(time_t totalsecs, int offset, struct tm *result);
# 208 "include/linux/time.h"
static inline __attribute__((always_inline)) s64 timespec_to_ns(const struct timespec *ts)
{
return ((s64) ts->tv_sec * 1000000000L) + ts->tv_nsec;
}
# 220 "include/linux/time.h"
static inline __attribute__((always_inline)) s64 timeval_to_ns(const struct timeval *tv)
{
return ((s64) tv->tv_sec * 1000000000L) +
tv->tv_usec * 1000L;
}
extern struct timespec ns_to_timespec(const s64 nsec);
extern struct timeval ns_to_timeval(const s64 nsec);
# 250 "include/linux/time.h"
static inline __attribute__((always_inline)) __attribute__((always_inline)) void timespec_add_ns(struct timespec *a, u64 ns)
{
a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, 1000000000L, &ns);
a->tv_nsec = ns;
}
# 273 "include/linux/time.h"
struct itimerspec {
struct timespec it_interval;
struct timespec it_value;
};
struct itimerval {
struct timeval it_interval;
struct timeval it_value;
};
# 61 "include/linux/stat.h" 2
struct kstat {
u64 ino;
dev_t dev;
umode_t mode;
unsigned int nlink;
uid_t uid;
gid_t gid;
dev_t rdev;
loff_t size;
struct timespec atime;
struct timespec mtime;
struct timespec ctime;
unsigned long blksize;
unsigned long long blocks;
};
# 11 "include/linux/module.h" 2
# 1 "include/linux/kmod.h" 1
# 22 "include/linux/kmod.h"
# 1 "include/linux/gfp.h" 1
# 1 "include/linux/mmzone.h" 1
# 9 "include/linux/mmzone.h"
# 1 "include/linux/wait.h" 1
# 26 "include/linux/wait.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/current.h" 1
# 1 "include/asm-generic/current.h" 1
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/current.h" 2
# 27 "include/linux/wait.h" 2
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
struct __wait_queue {
unsigned int flags;
void *private;
wait_queue_func_t func;
struct list_head task_list;
};
struct wait_bit_key {
void *flags;
int bit_nr;
};
struct wait_bit_queue {
struct wait_bit_key key;
wait_queue_t wait;
};
struct __wait_queue_head {
spinlock_t lock;
struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;
struct task_struct;
# 80 "include/linux/wait.h"
extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);
# 98 "include/linux/wait.h"
static inline __attribute__((always_inline)) void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
q->flags = 0;
q->private = p;
q->func = default_wake_function;
}
static inline __attribute__((always_inline)) void init_waitqueue_func_entry(wait_queue_t *q,
wait_queue_func_t func)
{
q->flags = 0;
q->private = ((void *)0);
q->func = func;
}
static inline __attribute__((always_inline)) int waitqueue_active(wait_queue_head_t *q)
{
return !list_empty(&q->task_list);
}
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
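/*
 * The __add_wait_queue*() helpers assume the queue lock is already held.
 * The 0x01 flag is WQ_FLAG_EXCLUSIVE: exclusive waiters are queued at the
 * tail and wake-ups stop after waking the requested number of them, while
 * non-exclusive waiters sit at the head and are always woken.
 */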
static inline __attribute__((always_inline)) void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
list_add(&new->task_list, &head->task_list);
}
static inline __attribute__((always_inline)) void __add_wait_queue_exclusive(wait_queue_head_t *q,
wait_queue_t *wait)
{
wait->flags |= 0x01;
__add_wait_queue(q, wait);
}
static inline __attribute__((always_inline)) void __add_wait_queue_tail(wait_queue_head_t *head,
wait_queue_t *new)
{
list_add_tail(&new->task_list, &head->task_list);
}
static inline __attribute__((always_inline)) void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
wait_queue_t *wait)
{
wait->flags |= 0x01;
__add_wait_queue_tail(q, wait);
}
static inline __attribute__((always_inline)) void __remove_wait_queue(wait_queue_head_t *head,
wait_queue_t *old)
{
list_del(&old->task_list);
}
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);
# 574 "include/linux/wait.h"
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
signed long timeout);
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
# 634 "include/linux/wait.h"
static inline __attribute__((always_inline)) int wait_on_bit(void *word, int bit,
int (*action)(void *), unsigned mode)
{
if (!test_bit(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit, action, mode);
}
# 658 "include/linux/wait.h"
static inline __attribute__((always_inline)) int wait_on_bit_lock(void *word, int bit,
int (*action)(void *), unsigned mode)
{
if (!test_and_set_bit(bit, word))
return 0;
return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}
# 10 "include/linux/mmzone.h" 2
# 1 "include/linux/numa.h" 1
# 14 "include/linux/mmzone.h" 2
# 1 "include/linux/init.h" 1
# 131 "include/linux/init.h"
typedef int (*initcall_t)(void);
typedef void (*exitcall_t)(void);
extern initcall_t __con_initcall_start[], __con_initcall_end[];
extern initcall_t __security_initcall_start[], __security_initcall_end[];
typedef void (*ctor_fn_t)(void);
extern int do_one_initcall(initcall_t fn);
extern char __attribute__ ((__section__(".init.data"))) boot_command_line[];
extern char *saved_command_line;
extern unsigned int reset_devices;
void setup_arch(char **);
void prepare_namespace(void);
extern void (*late_time_init)(void);
extern int initcall_debug;
# 15 "include/linux/mmzone.h" 2
# 1 "include/linux/nodemask.h" 1
# 96 "include/linux/nodemask.h"
typedef struct { unsigned long bits[((((1 << 0)) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;
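/*
 * The __nodes_*() helpers are thin wrappers over the generic bitmap API.
 * With (1 << 0) == 1 possible node in this configuration, most of them
 * reduce to trivial single-word bit operations at compile time.
 */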
static inline __attribute__((always_inline)) void __node_set(int node, volatile nodemask_t *dstp)
{
set_bit(node, dstp->bits);
}
static inline __attribute__((always_inline)) void __node_clear(int node, volatile nodemask_t *dstp)
{
clear_bit(node, dstp->bits);
}
static inline __attribute__((always_inline)) void __nodes_setall(nodemask_t *dstp, int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
static inline __attribute__((always_inline)) void __nodes_clear(nodemask_t *dstp, int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
static inline __attribute__((always_inline)) int __node_test_and_set(int node, nodemask_t *addr)
{
return test_and_set_bit(node, addr->bits);
}
static inline __attribute__((always_inline)) void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, int nbits)
{
bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
const nodemask_t *src2p, int nbits)
{
bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) void __nodes_complement(nodemask_t *dstp,
const nodemask_t *srcp, int nbits)
{
bitmap_complement(dstp->bits, srcp->bits, nbits);
}
static inline __attribute__((always_inline)) int __nodes_equal(const nodemask_t *src1p,
const nodemask_t *src2p, int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) int __nodes_intersects(const nodemask_t *src1p,
const nodemask_t *src2p, int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) int __nodes_subset(const nodemask_t *src1p,
const nodemask_t *src2p, int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
static inline __attribute__((always_inline)) int __nodes_empty(const nodemask_t *srcp, int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
static inline __attribute__((always_inline)) int __nodes_full(const nodemask_t *srcp, int nbits)
{
return bitmap_full(srcp->bits, nbits);
}
static inline __attribute__((always_inline)) int __nodes_weight(const nodemask_t *srcp, int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
static inline __attribute__((always_inline)) void __nodes_shift_right(nodemask_t *dstp,
const nodemask_t *srcp, int n, int nbits)
{
bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}
static inline __attribute__((always_inline)) void __nodes_shift_left(nodemask_t *dstp,
const nodemask_t *srcp, int n, int nbits)
{
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
static inline __attribute__((always_inline)) int __first_node(const nodemask_t *srcp)
{
return ({ int __min1 = ((1 << 0)); int __min2 = (find_next_bit((srcp->bits), ((1 << 0)), 0)); __min1 < __min2 ? __min1: __min2; });
}
static inline __attribute__((always_inline)) int __next_node(int n, const nodemask_t *srcp)
{
return ({ int __min1 = ((1 << 0)); int __min2 = (find_next_bit(srcp->bits, (1 << 0), n+1)); __min1 < __min2 ? __min1: __min2; });
}
static inline __attribute__((always_inline)) void init_nodemask_of_node(nodemask_t *mask, int node)
{
__nodes_clear(&(*mask), (1 << 0));
__node_set((node), &(*mask));
}
# 264 "include/linux/nodemask.h"
static inline __attribute__((always_inline)) int __first_unset_node(const nodemask_t *maskp)
{
return ({ int __min1 = ((1 << 0)); int __min2 = (find_next_zero_bit((maskp->bits), ((1 << 0)), 0)); __min1 < __min2 ? __min1: __min2; })
;
}
# 298 "include/linux/nodemask.h"
static inline __attribute__((always_inline)) int __nodemask_scnprintf(char *buf, int len,
const nodemask_t *srcp, int nbits)
{
return bitmap_scnprintf(buf, len, srcp->bits, nbits);
}
static inline __attribute__((always_inline)) int __nodemask_parse_user(const char *buf, int len,
nodemask_t *dstp, int nbits)
{
return bitmap_parse_user(buf, len, dstp->bits, nbits);
}
static inline __attribute__((always_inline)) int __nodelist_scnprintf(char *buf, int len,
const nodemask_t *srcp, int nbits)
{
return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
}
static inline __attribute__((always_inline)) int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
{
return bitmap_parselist(buf, dstp->bits, nbits);
}
static inline __attribute__((always_inline)) int __node_remap(int oldbit,
const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
}
static inline __attribute__((always_inline)) void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}
static inline __attribute__((always_inline)) void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
const nodemask_t *relmapp, int nbits)
{
bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
}
static inline __attribute__((always_inline)) void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
int sz, int nbits)
{
bitmap_fold(dstp->bits, origp->bits, sz, nbits);
}
# 372 "include/linux/nodemask.h"
enum node_states {
N_POSSIBLE,
N_ONLINE,
N_NORMAL_MEMORY,
N_HIGH_MEMORY = N_NORMAL_MEMORY,
N_CPU,
NR_NODE_STATES
};
extern nodemask_t node_states[NR_NODE_STATES];
# 435 "include/linux/nodemask.h"
static inline __attribute__((always_inline)) int node_state(int node, enum node_states state)
{
return node == 0;
}
static inline __attribute__((always_inline)) void node_set_state(int node, enum node_states state)
{
}
static inline __attribute__((always_inline)) void node_clear_state(int node, enum node_states state)
{
}
static inline __attribute__((always_inline)) int num_node_state(enum node_states state)
{
return 1;
}
# 491 "include/linux/nodemask.h"
struct nodemask_scratch {
nodemask_t mask1;
nodemask_t mask2;
};
# 17 "include/linux/mmzone.h" 2
# 1 "include/linux/pageblock-flags.h" 1
# 29 "include/linux/pageblock-flags.h"
enum pageblock_bits {
PB_migrate,
PB_migrate_end = PB_migrate + 3 - 1,
NR_PAGEBLOCK_BITS
};
# 60 "include/linux/pageblock-flags.h"
struct page;
unsigned long get_pageblock_flags_group(struct page *page,
int start_bitidx, int end_bitidx);
void set_pageblock_flags_group(struct page *page, unsigned long flags,
int start_bitidx, int end_bitidx);
# 18 "include/linux/mmzone.h" 2
# 1 "include/generated/bounds.h" 1
# 19 "include/linux/mmzone.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/page.h" 1
# 45 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/page.h"
# 1 "include/linux/pfn.h" 1
# 46 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/page.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h" 1
# 23 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h"
# 1 "include/asm-generic/iomap.h" 1
# 28 "include/asm-generic/iomap.h"
extern unsigned int ioread8(void *);
extern unsigned int ioread16(void *);
extern unsigned int ioread16be(void *);
extern unsigned int ioread32(void *);
extern unsigned int ioread32be(void *);
extern void iowrite8(u8, void *);
extern void iowrite16(u16, void *);
extern void iowrite16be(u16, void *);
extern void iowrite32(u32, void *);
extern void iowrite32be(u32, void *);
# 51 "include/asm-generic/iomap.h"
extern void ioread8_rep(void *port, void *buf, unsigned long count);
extern void ioread16_rep(void *port, void *buf, unsigned long count);
extern void ioread32_rep(void *port, void *buf, unsigned long count);
extern void iowrite8_rep(void *port, const void *buf, unsigned long count);
extern void iowrite16_rep(void *port, const void *buf, unsigned long count);
extern void iowrite32_rep(void *port, const void *buf, unsigned long count);
extern void *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void *);
struct pci_dev;
extern void *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
extern void pci_iounmap(struct pci_dev *dev, void *);
# 24 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/page.h" 1
# 25 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable-bits.h" 1
# 156 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable-bits.h"
static inline __attribute__((always_inline)) uint64_t pte_to_entrylo(unsigned long pte_val)
{
if (0) {
int sa;
sa = 31 - (0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))));
# 170 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable-bits.h"
return (pte_val >> ((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1)) |
((pte_val & (({if (!0) BUG(); 1 << (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))); }) | ({if (!0) BUG(); 1 << (0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))); }))) << sa);
}
return pte_val >> ((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1);
}
# 26 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mach-generic/ioremap.h" 1
# 18 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mach-generic/ioremap.h"
static inline __attribute__((always_inline)) phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size)
{
return phys_addr;
}
static inline __attribute__((always_inline)) void *plat_ioremap(phys_t offset, unsigned long size,
unsigned long flags)
{
return ((void *)0);
}
static inline __attribute__((always_inline)) int plat_iounmap(const volatile void *addr)
{
return 0;
}
# 30 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mach-generic/mangle-port.h" 1
# 12 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mach-generic/mangle-port.h"
extern unsigned char io_swap_noneed;
# 31 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h" 2
# 61 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h"
extern const unsigned long mips_io_port_base;
# 72 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h"
static inline __attribute__((always_inline)) void set_io_port_base(unsigned long base)
{
* (unsigned long *) &mips_io_port_base = base;
__asm__ __volatile__("": : :"memory");
}
# 117 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h"
static inline __attribute__((always_inline)) unsigned long virt_to_phys(volatile const void *address)
{
return (unsigned long)address - ((0x80000000UL) + (0UL)) + (0UL);
}
# 134 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h"
static inline __attribute__((always_inline)) void * phys_to_virt(unsigned long address)
{
return (void *)(address + ((0x80000000UL) + (0UL)) - (0UL));
}
static inline __attribute__((always_inline)) unsigned long isa_virt_to_bus(volatile void * address)
{
return (unsigned long)address - ((0x80000000UL) + (0UL));
}
static inline __attribute__((always_inline)) void * isa_bus_to_virt(unsigned long address)
{
return (void *)(address + ((0x80000000UL) + (0UL)));
}
# 168 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h"
extern void * __ioremap(phys_t offset, phys_t size, unsigned long flags);
extern void __iounmap(const volatile void *addr);
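/*
 * __ioremap_mode(): try plat_ioremap() first; for compile-time-constant,
 * low-512MB, uncached requests return a KSEG1 (0xa0000000-based) address
 * directly, otherwise fall back to the real __ioremap().
 */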
static inline __attribute__((always_inline)) void * __ioremap_mode(phys_t offset, unsigned long size,
unsigned long flags)
{
void *addr = plat_ioremap(offset, size, flags);
if (addr)
return addr;
if (0) {
u64 base = (0xa0000000UL);
if (flags == (2<<(((((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1) + 1) + 1) + 1)))
base = (u64) (0xa0000000UL);
return (void *) (unsigned long) (base + offset);
} else if (__builtin_constant_p(offset) &&
__builtin_constant_p(size) && __builtin_constant_p(flags)) {
phys_t phys_addr, last_addr;
phys_addr = fixup_bigphys_addr(offset, size);
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr)
return ((void *)0);
if ((!((phys_t)(phys_addr) & (phys_t) ~0x1fffffffULL)) && (!((phys_t)(last_addr) & (phys_t) ~0x1fffffffULL)) &&
flags == (2<<(((((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1) + 1) + 1) + 1)))
return (void *)
(unsigned long)((((int)(int)(phys_addr)) & 0x1fffffff) | 0xa0000000);
}
return __ioremap(offset, size, flags);
}
# 282 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h"
static inline __attribute__((always_inline)) void iounmap(const volatile void *addr)
{
if (plat_iounmap(addr))
return;
if (0 ||
(__builtin_constant_p(addr) && (((unsigned long)(addr) & ~0x1fffffffUL) == 0xa0000000)))
return;
__iounmap(addr);
}
# 571 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h"
static inline __attribute__((always_inline)) void __raw_writeb(u8 val, volatile void *mem) { volatile u8 *__mem; u8 __val; do { } while (0); __mem = (void *)((unsigned long)(mem)); __val = (val); if (sizeof(u8) != sizeof(u64) || sizeof(u64) == sizeof(long)) *__mem = __val; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; u8 __tmp; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __writeq""\n\t" "dsll32 %L0, %L0, 0" "\n\t" "dsrl32 %L0, %L0, 0" "\n\t" "dsll32 %M0, %M0, 0" "\n\t" "or %L0, %L0, %M0" "\n\t" "sd %L0, %2" "\n\t" ".set mips0" "\n" : "=r" (__tmp) : "0" (__val), "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else BUG(); } static inline __attribute__((always_inline)) u8 __raw_readb(const volatile void *mem) { volatile u8 *__mem; u8 __val; __mem = (void *)((unsigned long)(mem)); if (sizeof(u8) != sizeof(u64) || sizeof(u64) == sizeof(long)) __val = *__mem; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __readq" "\n\t" "ld %L0, %1" "\n\t" "dsra32 %M0, %L0, 0" "\n\t" "sll %L0, %L0, 0" "\n\t" ".set mips0" "\n" : "=r" (__val) : "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else { __val = 0; BUG(); } return (__val); } static inline __attribute__((always_inline)) void writeb(u8 val, volatile void *mem) { volatile u8 *__mem; u8 __val; do { } while (0); __mem = (void *)((unsigned long)(mem)); __val = (val); if (sizeof(u8) != sizeof(u64) || sizeof(u64) == sizeof(long)) *__mem = __val; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; u8 __tmp; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __writeq""\n\t" "dsll32 %L0, %L0, 0" "\n\t" "dsrl32 %L0, %L0, 0" "\n\t" "dsll32 %M0, %M0, 0" "\n\t" "or %L0, %L0, %M0" "\n\t" "sd %L0, %2" "\n\t" ".set mips0" "\n" : "=r" (__tmp) : "0" (__val), "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else BUG(); } static inline __attribute__((always_inline)) u8 readb(const volatile void *mem) { volatile u8 *__mem; u8 __val; __mem = (void *)((unsigned long)(mem)); if (sizeof(u8) != sizeof(u64) 
|| sizeof(u64) == sizeof(long)) __val = *__mem; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __readq" "\n\t" "ld %L0, %1" "\n\t" "dsra32 %M0, %L0, 0" "\n\t" "sll %L0, %L0, 0" "\n\t" ".set mips0" "\n" : "=r" (__val) : "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else { __val = 0; BUG(); } return (__val); } static inline __attribute__((always_inline)) void __mem_writeb(u8 val, volatile void *mem) { volatile u8 *__mem; u8 __val; do { } while (0); __mem = (void *)((unsigned long)(mem)); __val = (val); if (sizeof(u8) != sizeof(u64) || sizeof(u64) == sizeof(long)) *__mem = __val; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; u8 __tmp; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __writeq""\n\t" "dsll32 %L0, %L0, 0" "\n\t" "dsrl32 %L0, %L0, 0" "\n\t" "dsll32 %M0, %M0, 0" "\n\t" "or %L0, %L0, %M0" "\n\t" "sd %L0, %2" "\n\t" ".set mips0" "\n" : "=r" (__tmp) : "0" (__val), "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else BUG(); } static inline __attribute__((always_inline)) u8 __mem_readb(const volatile void *mem) { volatile u8 *__mem; u8 __val; __mem = (void *)((unsigned long)(mem)); if (sizeof(u8) != sizeof(u64) || sizeof(u64) == sizeof(long)) __val = *__mem; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __readq" "\n\t" "ld %L0, %1" "\n\t" "dsra32 %M0, %L0, 0" "\n\t" "sll %L0, %L0, 0" "\n\t" ".set mips0" "\n" : "=r" (__val) : "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else { __val = 0; BUG(); } return (__val); }
static inline __attribute__((always_inline)) void __raw_writew(u16 val, volatile void *mem) { volatile u16 *__mem; u16 __val; do { } while (0); __mem = (void *)((unsigned long)(mem)); __val = (val); if (sizeof(u16) != sizeof(u64) || sizeof(u64) == sizeof(long)) *__mem = __val; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; u16 __tmp; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __writeq""\n\t" "dsll32 %L0, %L0, 0" "\n\t" "dsrl32 %L0, %L0, 0" "\n\t" "dsll32 %M0, %M0, 0" "\n\t" "or %L0, %L0, %M0" "\n\t" "sd %L0, %2" "\n\t" ".set mips0" "\n" : "=r" (__tmp) : "0" (__val), "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else BUG(); } static inline __attribute__((always_inline)) u16 __raw_readw(const volatile void *mem) { volatile u16 *__mem; u16 __val; __mem = (void *)((unsigned long)(mem)); if (sizeof(u16) != sizeof(u64) || sizeof(u64) == sizeof(long)) __val = *__mem; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __readq" "\n\t" "ld %L0, %1" "\n\t" "dsra32 %M0, %L0, 0" "\n\t" "sll %L0, %L0, 0" "\n\t" ".set mips0" "\n" : "=r" (__val) : "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else { __val = 0; BUG(); } return (__val); } static inline __attribute__((always_inline)) void writew(u16 val, volatile void *mem) { volatile u16 *__mem; u16 __val; do { } while (0); __mem = (void *)((unsigned long)(mem)); __val = (io_swap_noneed ? (val) : (__builtin_constant_p((__u16)(( __u16)(__le16)(val))) ? 
((__u16)( (((__u16)(( __u16)(__le16)(val)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__le16)(val)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__le16)(val))) ); if (sizeof(u16) != sizeof(u64) || sizeof(u64) == sizeof(long)) *__mem = __val; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; u16 __tmp; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __writeq""\n\t" "dsll32 %L0, %L0, 0" "\n\t" "dsrl32 %L0, %L0, 0" "\n\t" "dsll32 %M0, %M0, 0" "\n\t" "or %L0, %L0, %M0" "\n\t" "sd %L0, %2" "\n\t" ".set mips0" "\n" : "=r" (__tmp) : "0" (__val), "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else BUG(); } static inline __attribute__((always_inline)) u16 readw(const volatile void *mem) { volatile u16 *__mem; u16 __val; __mem = (void *)((unsigned long)(mem)); if (sizeof(u16) != sizeof(u64) || sizeof(u64) == sizeof(long)) __val = *__mem; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __readq" "\n\t" "ld %L0, %1" "\n\t" "dsra32 %M0, %L0, 0" "\n\t" "sll %L0, %L0, 0" "\n\t" ".set mips0" "\n" : "=r" (__val) : "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else { __val = 0; BUG(); } return (io_swap_noneed ? (__val) : (__builtin_constant_p((__u16)(( __u16)(__le16)(__val))) ? ((__u16)( (((__u16)(( __u16)(__le16)(__val)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__le16)(__val)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__le16)(__val))) ); } static inline __attribute__((always_inline)) void __mem_writew(u16 val, volatile void *mem) { volatile u16 *__mem; u16 __val; do { } while (0); __mem = (void *)((unsigned long)(mem)); __val = (io_swap_noneed ? (( __le16)(__builtin_constant_p((__u16)((val))) ? 
((__u16)( (((__u16)((val)) & (__u16)0x00ffU) << 8) | (((__u16)((val)) & (__u16)0xff00U) >> 8))) : __fswab16((val)))) : (val) ); if (sizeof(u16) != sizeof(u64) || sizeof(u64) == sizeof(long)) *__mem = __val; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; u16 __tmp; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __writeq""\n\t" "dsll32 %L0, %L0, 0" "\n\t" "dsrl32 %L0, %L0, 0" "\n\t" "dsll32 %M0, %M0, 0" "\n\t" "or %L0, %L0, %M0" "\n\t" "sd %L0, %2" "\n\t" ".set mips0" "\n" : "=r" (__tmp) : "0" (__val), "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else BUG(); } static inline __attribute__((always_inline)) u16 __mem_readw(const volatile void *mem) { volatile u16 *__mem; u16 __val; __mem = (void *)((unsigned long)(mem)); if (sizeof(u16) != sizeof(u64) || sizeof(u64) == sizeof(long)) __val = *__mem; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __readq" "\n\t" "ld %L0, %1" "\n\t" "dsra32 %M0, %L0, 0" "\n\t" "sll %L0, %L0, 0" "\n\t" ".set mips0" "\n" : "=r" (__val) : "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else { __val = 0; BUG(); } return (io_swap_noneed ? (( __le16)(__builtin_constant_p((__u16)((__val))) ? ((__u16)( (((__u16)((__val)) & (__u16)0x00ffU) << 8) | (((__u16)((__val)) & (__u16)0xff00U) >> 8))) : __fswab16((__val)))) : (__val) ); }
static inline __attribute__((always_inline)) void __raw_writel(u32 val, volatile void *mem) { volatile u32 *__mem; u32 __val; do { } while (0); __mem = (void *)((unsigned long)(mem)); __val = (val); if (sizeof(u32) != sizeof(u64) || sizeof(u64) == sizeof(long)) *__mem = __val; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; u32 __tmp; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __writeq""\n\t" "dsll32 %L0, %L0, 0" "\n\t" "dsrl32 %L0, %L0, 0" "\n\t" "dsll32 %M0, %M0, 0" "\n\t" "or %L0, %L0, %M0" "\n\t" "sd %L0, %2" "\n\t" ".set mips0" "\n" : "=r" (__tmp) : "0" (__val), "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else BUG(); } static inline __attribute__((always_inline)) u32 __raw_readl(const volatile void *mem) { volatile u32 *__mem; u32 __val; __mem = (void *)((unsigned long)(mem)); if (sizeof(u32) != sizeof(u64) || sizeof(u64) == sizeof(long)) __val = *__mem; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __readq" "\n\t" "ld %L0, %1" "\n\t" "dsra32 %M0, %L0, 0" "\n\t" "sll %L0, %L0, 0" "\n\t" ".set mips0" "\n" : "=r" (__val) : "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else { __val = 0; BUG(); } return (__val); } static inline __attribute__((always_inline)) void writel(u32 val, volatile void *mem) { volatile u32 *__mem; u32 __val; do { } while (0); __mem = (void *)((unsigned long)(mem)); __val = (io_swap_noneed ? (val) : (__builtin_constant_p((__u32)(( __u32)(__le32)(val))) ? 
((__u32)( (((__u32)(( __u32)(__le32)(val)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__le32)(val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__le32)(val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__le32)(val)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__le32)(val))) ); if (sizeof(u32) != sizeof(u64) || sizeof(u64) == sizeof(long)) *__mem = __val; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; u32 __tmp; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __writeq""\n\t" "dsll32 %L0, %L0, 0" "\n\t" "dsrl32 %L0, %L0, 0" "\n\t" "dsll32 %M0, %M0, 0" "\n\t" "or %L0, %L0, %M0" "\n\t" "sd %L0, %2" "\n\t" ".set mips0" "\n" : "=r" (__tmp) : "0" (__val), "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else BUG(); } static inline __attribute__((always_inline)) u32 readl(const volatile void *mem) { volatile u32 *__mem; u32 __val; __mem = (void *)((unsigned long)(mem)); if (sizeof(u32) != sizeof(u64) || sizeof(u64) == sizeof(long)) __val = *__mem; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __readq" "\n\t" "ld %L0, %1" "\n\t" "dsra32 %M0, %L0, 0" "\n\t" "sll %L0, %L0, 0" "\n\t" ".set mips0" "\n" : "=r" (__val) : "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else { __val = 0; BUG(); } return (io_swap_noneed ? (__val) : (__builtin_constant_p((__u32)(( __u32)(__le32)(__val))) ? ((__u32)( (((__u32)(( __u32)(__le32)(__val)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__le32)(__val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__le32)(__val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__le32)(__val)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__le32)(__val))) ); } static inline __attribute__((always_inline)) void __mem_writel(u32 val, volatile void *mem) { volatile u32 *__mem; u32 __val; do { } while (0); __mem = (void *)((unsigned long)(mem)); __val = (io_swap_noneed ? (( __le32)(__builtin_constant_p((__u32)((val))) ? 
((__u32)( (((__u32)((val)) & (__u32)0x000000ffUL) << 24) | (((__u32)((val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((val)) & (__u32)0xff000000UL) >> 24))) : __fswab32((val)))) : (val) ); if (sizeof(u32) != sizeof(u64) || sizeof(u64) == sizeof(long)) *__mem = __val; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; u32 __tmp; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __writeq""\n\t" "dsll32 %L0, %L0, 0" "\n\t" "dsrl32 %L0, %L0, 0" "\n\t" "dsll32 %M0, %M0, 0" "\n\t" "or %L0, %L0, %M0" "\n\t" "sd %L0, %2" "\n\t" ".set mips0" "\n" : "=r" (__tmp) : "0" (__val), "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else BUG(); } static inline __attribute__((always_inline)) u32 __mem_readl(const volatile void *mem) { volatile u32 *__mem; u32 __val; __mem = (void *)((unsigned long)(mem)); if (sizeof(u32) != sizeof(u64) || sizeof(u64) == sizeof(long)) __val = *__mem; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __readq" "\n\t" "ld %L0, %1" "\n\t" "dsra32 %M0, %L0, 0" "\n\t" "sll %L0, %L0, 0" "\n\t" ".set mips0" "\n" : "=r" (__val) : "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else { __val = 0; BUG(); } return (io_swap_noneed ? (( __le32)(__builtin_constant_p((__u32)((__val))) ? ((__u32)( (((__u32)((__val)) & (__u32)0x000000ffUL) << 24) | (((__u32)((__val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((__val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((__val)) & (__u32)0xff000000UL) >> 24))) : __fswab32((__val)))) : (__val) ); }
static inline __attribute__((always_inline)) void __raw_writeq(u64 val, volatile void *mem) { volatile u64 *__mem; u64 __val; do { } while (0); __mem = (void *)((unsigned long)(mem)); __val = (val); if (sizeof(u64) != sizeof(u64) || sizeof(u64) == sizeof(long)) *__mem = __val; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; u64 __tmp; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __writeq""\n\t" "dsll32 %L0, %L0, 0" "\n\t" "dsrl32 %L0, %L0, 0" "\n\t" "dsll32 %M0, %M0, 0" "\n\t" "or %L0, %L0, %M0" "\n\t" "sd %L0, %2" "\n\t" ".set mips0" "\n" : "=r" (__tmp) : "0" (__val), "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else BUG(); } static inline __attribute__((always_inline)) u64 __raw_readq(const volatile void *mem) { volatile u64 *__mem; u64 __val; __mem = (void *)((unsigned long)(mem)); if (sizeof(u64) != sizeof(u64) || sizeof(u64) == sizeof(long)) __val = *__mem; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __readq" "\n\t" "ld %L0, %1" "\n\t" "dsra32 %M0, %L0, 0" "\n\t" "sll %L0, %L0, 0" "\n\t" ".set mips0" "\n" : "=r" (__val) : "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else { __val = 0; BUG(); } return (__val); } static inline __attribute__((always_inline)) void writeq(u64 val, volatile void *mem) { volatile u64 *__mem; u64 __val; do { } while (0); __mem = (void *)((unsigned long)(mem)); __val = (io_swap_noneed ? (val) : (__builtin_constant_p((__u64)(( __u64)(__le64)(val))) ? 
((__u64)( (((__u64)(( __u64)(__le64)(val)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__le64)(val)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__le64)(val)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__le64)(val)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__le64)(val)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__le64)(val)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__le64)(val)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__le64)(val)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__le64)(val))) ); if (sizeof(u64) != sizeof(u64) || sizeof(u64) == sizeof(long)) *__mem = __val; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; u64 __tmp; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __writeq""\n\t" "dsll32 %L0, %L0, 0" "\n\t" "dsrl32 %L0, %L0, 0" "\n\t" "dsll32 %M0, %M0, 0" "\n\t" "or %L0, %L0, %M0" "\n\t" "sd %L0, %2" "\n\t" ".set mips0" "\n" : "=r" (__tmp) : "0" (__val), "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else BUG(); } static inline __attribute__((always_inline)) u64 readq(const volatile void *mem) { volatile u64 *__mem; u64 __val; __mem = (void *)((unsigned long)(mem)); if (sizeof(u64) != sizeof(u64) || sizeof(u64) == sizeof(long)) __val = *__mem; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __readq" "\n\t" "ld %L0, %1" "\n\t" "dsra32 %M0, %L0, 0" "\n\t" "sll %L0, %L0, 0" "\n\t" ".set mips0" "\n" : "=r" (__val) : "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else { __val = 0; BUG(); } return (io_swap_noneed ? (__val) : (__builtin_constant_p((__u64)(( __u64)(__le64)(__val))) ? 
((__u64)( (((__u64)(( __u64)(__le64)(__val)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__le64)(__val)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__le64)(__val)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__le64)(__val)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__le64)(__val)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__le64)(__val)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__le64)(__val)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__le64)(__val)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__le64)(__val))) ); } static inline __attribute__((always_inline)) void __mem_writeq(u64 val, volatile void *mem) { volatile u64 *__mem; u64 __val; do { } while (0); __mem = (void *)((unsigned long)(mem)); __val = (io_swap_noneed ? (( __le32)(__builtin_constant_p((__u32)((val))) ? ((__u32)( (((__u32)((val)) & (__u32)0x000000ffUL) << 24) | (((__u32)((val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((val)) & (__u32)0xff000000UL) >> 24))) : __fswab32((val)))) : (val) ); if (sizeof(u64) != sizeof(u64) || sizeof(u64) == sizeof(long)) *__mem = __val; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; u64 __tmp; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __writeq""\n\t" "dsll32 %L0, %L0, 0" "\n\t" "dsrl32 %L0, %L0, 0" "\n\t" "dsll32 %M0, %M0, 0" "\n\t" "or %L0, %L0, %M0" "\n\t" "sd %L0, %2" "\n\t" ".set mips0" "\n" : "=r" (__tmp) : "0" (__val), "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else BUG(); } static inline __attribute__((always_inline)) u64 __mem_readq(const volatile void *mem) { volatile u64 *__mem; u64 __val; __mem = (void *)((unsigned long)(mem)); if (sizeof(u64) != sizeof(u64) || sizeof(u64) == sizeof(long)) __val = *__mem; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __readq" "\n\t" "ld %L0, %1" "\n\t" "dsra32 %M0, %L0, 0" "\n\t" "sll %L0, %L0, 0" "\n\t" ".set mips0" "\n" : "=r" (__val) : "m" (*__mem)); if (1) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else { __val = 0; BUG(); } return (io_swap_noneed ? (( __le32)(__builtin_constant_p((__u32)((__val))) ? ((__u32)( (((__u32)((__val)) & (__u32)0x000000ffUL) << 24) | (((__u32)((__val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((__val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((__val)) & (__u32)0xff000000UL) >> 24))) : __fswab32((__val)))) : (__val) ); }
# 584 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h"
static inline __attribute__((always_inline)) void outb(u8 val, unsigned long port) { volatile u8 *__addr; u8 __val; do { } while (0); __addr = (void *)(mips_io_port_base + port); __val = (val); ((void)(sizeof(struct { int:-!!(sizeof(u8) > sizeof(unsigned long)); }))); *__addr = __val; ; } static inline __attribute__((always_inline)) u8 inb(unsigned long port) { volatile u8 *__addr; u8 __val; __addr = (void *)(mips_io_port_base + port); ((void)(sizeof(struct { int:-!!(sizeof(u8) > sizeof(unsigned long)); }))); __val = *__addr; ; return (__val); } static inline __attribute__((always_inline)) void outb_p(u8 val, unsigned long port) { volatile u8 *__addr; u8 __val; do { } while (0); __addr = (void *)(mips_io_port_base + port); __val = (val); ((void)(sizeof(struct { int:-!!(sizeof(u8) > sizeof(unsigned long)); }))); *__addr = __val; ; } static inline __attribute__((always_inline)) u8 inb_p(unsigned long port) { volatile u8 *__addr; u8 __val; __addr = (void *)(mips_io_port_base + port); ((void)(sizeof(struct { int:-!!(sizeof(u8) > sizeof(unsigned long)); }))); __val = *__addr; ; return (__val); } static inline __attribute__((always_inline)) void __mem_outb(u8 val, unsigned long port) { volatile u8 *__addr; u8 __val; do { } while (0); __addr = (void *)(mips_io_port_base + port); __val = (val); ((void)(sizeof(struct { int:-!!(sizeof(u8) > sizeof(unsigned long)); }))); *__addr = __val; ; } static inline __attribute__((always_inline)) u8 __mem_inb(unsigned long port) { volatile u8 *__addr; u8 __val; __addr = (void *)(mips_io_port_base + port); ((void)(sizeof(struct { int:-!!(sizeof(u8) > sizeof(unsigned long)); }))); __val = *__addr; ; return (__val); } static inline __attribute__((always_inline)) void __mem_outb_p(u8 val, unsigned long port) { volatile u8 *__addr; u8 __val; do { } while (0); __addr = (void *)(mips_io_port_base + port); __val = (val); ((void)(sizeof(struct { int:-!!(sizeof(u8) > sizeof(unsigned long)); }))); *__addr = __val; ; } static inline __attribute__((always_inline)) u8 __mem_inb_p(unsigned long port) { volatile u8 *__addr; u8 __val; __addr = (void *)(mips_io_port_base + port); ((void)(sizeof(struct { int:-!!(sizeof(u8) > sizeof(unsigned long)); }))); __val = *__addr; ; return (__val); }
static inline __attribute__((always_inline)) void outw(u16 val, unsigned long port) { volatile u16 *__addr; u16 __val; do { } while (0); __addr = (void *)(mips_io_port_base + port); __val = (io_swap_noneed ? (val) : (__builtin_constant_p((__u16)(( __u16)(__le16)(val))) ? ((__u16)( (((__u16)(( __u16)(__le16)(val)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__le16)(val)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__le16)(val))) ); ((void)(sizeof(struct { int:-!!(sizeof(u16) > sizeof(unsigned long)); }))); *__addr = __val; ; } static inline __attribute__((always_inline)) u16 inw(unsigned long port) { volatile u16 *__addr; u16 __val; __addr = (void *)(mips_io_port_base + port); ((void)(sizeof(struct { int:-!!(sizeof(u16) > sizeof(unsigned long)); }))); __val = *__addr; ; return (io_swap_noneed ? (__val) : (__builtin_constant_p((__u16)(( __u16)(__le16)(__val))) ? ((__u16)( (((__u16)(( __u16)(__le16)(__val)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__le16)(__val)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__le16)(__val))) ); } static inline __attribute__((always_inline)) void outw_p(u16 val, unsigned long port) { volatile u16 *__addr; u16 __val; do { } while (0); __addr = (void *)(mips_io_port_base + port); __val = (io_swap_noneed ? (val) : (__builtin_constant_p((__u16)(( __u16)(__le16)(val))) ? ((__u16)( (((__u16)(( __u16)(__le16)(val)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__le16)(val)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__le16)(val))) ); ((void)(sizeof(struct { int:-!!(sizeof(u16) > sizeof(unsigned long)); }))); *__addr = __val; ; } static inline __attribute__((always_inline)) u16 inw_p(unsigned long port) { volatile u16 *__addr; u16 __val; __addr = (void *)(mips_io_port_base + port); ((void)(sizeof(struct { int:-!!(sizeof(u16) > sizeof(unsigned long)); }))); __val = *__addr; ; return (io_swap_noneed ? (__val) : (__builtin_constant_p((__u16)(( __u16)(__le16)(__val))) ? ((__u16)( (((__u16)(( __u16)(__le16)(__val)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__le16)(__val)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__le16)(__val))) ); } static inline __attribute__((always_inline)) void __mem_outw(u16 val, unsigned long port) { volatile u16 *__addr; u16 __val; do { } while (0); __addr = (void *)(mips_io_port_base + port); __val = (io_swap_noneed ? (( __le16)(__builtin_constant_p((__u16)((val))) ? ((__u16)( (((__u16)((val)) & (__u16)0x00ffU) << 8) | (((__u16)((val)) & (__u16)0xff00U) >> 8))) : __fswab16((val)))) : (val) ); ((void)(sizeof(struct { int:-!!(sizeof(u16) > sizeof(unsigned long)); }))); *__addr = __val; ; } static inline __attribute__((always_inline)) u16 __mem_inw(unsigned long port) { volatile u16 *__addr; u16 __val; __addr = (void *)(mips_io_port_base + port); ((void)(sizeof(struct { int:-!!(sizeof(u16) > sizeof(unsigned long)); }))); __val = *__addr; ; return (io_swap_noneed ? (( __le16)(__builtin_constant_p((__u16)((__val))) ? ((__u16)( (((__u16)((__val)) & (__u16)0x00ffU) << 8) | (((__u16)((__val)) & (__u16)0xff00U) >> 8))) : __fswab16((__val)))) : (__val) ); } static inline __attribute__((always_inline)) void __mem_outw_p(u16 val, unsigned long port) { volatile u16 *__addr; u16 __val; do { } while (0); __addr = (void *)(mips_io_port_base + port); __val = (io_swap_noneed ? (( __le16)(__builtin_constant_p((__u16)((val))) ? 
((__u16)( (((__u16)((val)) & (__u16)0x00ffU) << 8) | (((__u16)((val)) & (__u16)0xff00U) >> 8))) : __fswab16((val)))) : (val) ); ((void)(sizeof(struct { int:-!!(sizeof(u16) > sizeof(unsigned long)); }))); *__addr = __val; ; } static inline __attribute__((always_inline)) u16 __mem_inw_p(unsigned long port) { volatile u16 *__addr; u16 __val; __addr = (void *)(mips_io_port_base + port); ((void)(sizeof(struct { int:-!!(sizeof(u16) > sizeof(unsigned long)); }))); __val = *__addr; ; return (io_swap_noneed ? (( __le16)(__builtin_constant_p((__u16)((__val))) ? ((__u16)( (((__u16)((__val)) & (__u16)0x00ffU) << 8) | (((__u16)((__val)) & (__u16)0xff00U) >> 8))) : __fswab16((__val)))) : (__val) ); }
static inline __attribute__((always_inline)) void outl(u32 val, unsigned long port) { volatile u32 *__addr; u32 __val; do { } while (0); __addr = (void *)(mips_io_port_base + port); __val = (io_swap_noneed ? (val) : (__builtin_constant_p((__u32)(( __u32)(__le32)(val))) ? ((__u32)( (((__u32)(( __u32)(__le32)(val)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__le32)(val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__le32)(val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__le32)(val)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__le32)(val))) ); ((void)(sizeof(struct { int:-!!(sizeof(u32) > sizeof(unsigned long)); }))); *__addr = __val; ; } static inline __attribute__((always_inline)) u32 inl(unsigned long port) { volatile u32 *__addr; u32 __val; __addr = (void *)(mips_io_port_base + port); ((void)(sizeof(struct { int:-!!(sizeof(u32) > sizeof(unsigned long)); }))); __val = *__addr; ; return (io_swap_noneed ? (__val) : (__builtin_constant_p((__u32)(( __u32)(__le32)(__val))) ? ((__u32)( (((__u32)(( __u32)(__le32)(__val)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__le32)(__val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__le32)(__val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__le32)(__val)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__le32)(__val))) ); } static inline __attribute__((always_inline)) void outl_p(u32 val, unsigned long port) { volatile u32 *__addr; u32 __val; do { } while (0); __addr = (void *)(mips_io_port_base + port); __val = (io_swap_noneed ? (val) : (__builtin_constant_p((__u32)(( __u32)(__le32)(val))) ? ((__u32)( (((__u32)(( __u32)(__le32)(val)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__le32)(val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__le32)(val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__le32)(val)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__le32)(val))) ); ((void)(sizeof(struct { int:-!!(sizeof(u32) > sizeof(unsigned long)); }))); *__addr = __val; ; } static inline __attribute__((always_inline)) u32 inl_p(unsigned long port) { volatile u32 *__addr; u32 __val; __addr = (void *)(mips_io_port_base + port); ((void)(sizeof(struct { int:-!!(sizeof(u32) > sizeof(unsigned long)); }))); __val = *__addr; ; return (io_swap_noneed ? (__val) : (__builtin_constant_p((__u32)(( __u32)(__le32)(__val))) ? ((__u32)( (((__u32)(( __u32)(__le32)(__val)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__le32)(__val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__le32)(__val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__le32)(__val)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__le32)(__val))) ); } static inline __attribute__((always_inline)) void __mem_outl(u32 val, unsigned long port) { volatile u32 *__addr; u32 __val; do { } while (0); __addr = (void *)(mips_io_port_base + port); __val = (io_swap_noneed ? (( __le32)(__builtin_constant_p((__u32)((val))) ? ((__u32)( (((__u32)((val)) & (__u32)0x000000ffUL) << 24) | (((__u32)((val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((val)) & (__u32)0xff000000UL) >> 24))) : __fswab32((val)))) : (val) ); ((void)(sizeof(struct { int:-!!(sizeof(u32) > sizeof(unsigned long)); }))); *__addr = __val; ; } static inline __attribute__((always_inline)) u32 __mem_inl(unsigned long port) { volatile u32 *__addr; u32 __val; __addr = (void *)(mips_io_port_base + port); ((void)(sizeof(struct { int:-!!(sizeof(u32) > sizeof(unsigned long)); }))); __val = *__addr; ; return (io_swap_noneed ? 
(( __le32)(__builtin_constant_p((__u32)((__val))) ? ((__u32)( (((__u32)((__val)) & (__u32)0x000000ffUL) << 24) | (((__u32)((__val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((__val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((__val)) & (__u32)0xff000000UL) >> 24))) : __fswab32((__val)))) : (__val) ); } static inline __attribute__((always_inline)) void __mem_outl_p(u32 val, unsigned long port) { volatile u32 *__addr; u32 __val; do { } while (0); __addr = (void *)(mips_io_port_base + port); __val = (io_swap_noneed ? (( __le32)(__builtin_constant_p((__u32)((val))) ? ((__u32)( (((__u32)((val)) & (__u32)0x000000ffUL) << 24) | (((__u32)((val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((val)) & (__u32)0xff000000UL) >> 24))) : __fswab32((val)))) : (val) ); ((void)(sizeof(struct { int:-!!(sizeof(u32) > sizeof(unsigned long)); }))); *__addr = __val; ; } static inline __attribute__((always_inline)) u32 __mem_inl_p(unsigned long port) { volatile u32 *__addr; u32 __val; __addr = (void *)(mips_io_port_base + port); ((void)(sizeof(struct { int:-!!(sizeof(u32) > sizeof(unsigned long)); }))); __val = *__addr; ; return (io_swap_noneed ? (( __le32)(__builtin_constant_p((__u32)((__val))) ? ((__u32)( (((__u32)((__val)) & (__u32)0x000000ffUL) << 24) | (((__u32)((__val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)((__val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)((__val)) & (__u32)0xff000000UL) >> 24))) : __fswab32((__val)))) : (__val) ); }
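/*
 * The block above is the programmed-I/O family from asm/io.h: outb()/inb(),
 * outw()/inw(), outl()/inl(), their *_p() pause variants and the __mem_*
 * forms.  Each one simply dereferences mips_io_port_base + port; the 16- and
 * 32-bit variants run the value through the little-endian conversion that is
 * selected (or skipped) by io_swap_noneed.
 */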
# 595 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h"
static inline __attribute__((always_inline)) void ____raw_writeq(u64 val, volatile void *mem) { volatile u64 *__mem; u64 __val; do { } while (0); __mem = (void *)((unsigned long)(mem)); __val = (val); if (sizeof(u64) != sizeof(u64) || sizeof(u64) == sizeof(long)) *__mem = __val; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; u64 __tmp; if (0) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __writeq""\n\t" "dsll32 %L0, %L0, 0" "\n\t" "dsrl32 %L0, %L0, 0" "\n\t" "dsll32 %M0, %M0, 0" "\n\t" "or %L0, %L0, %M0" "\n\t" "sd %L0, %2" "\n\t" ".set mips0" "\n" : "=r" (__tmp) : "0" (__val), "m" (*__mem)); if (0) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else BUG(); } static inline __attribute__((always_inline)) u64 ____raw_readq(const volatile void *mem) { volatile u64 *__mem; u64 __val; __mem = (void *)((unsigned long)(mem)); if (sizeof(u64) != sizeof(u64) || sizeof(u64) == sizeof(long)) __val = *__mem; else if ((cpu_data[0].isa_level & (0x00000004 | 0x00000008 | 0x00000010 | 0x00000080 | 0x00000100))) { unsigned long __flags; if (0) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (__flags) : : "memory"); do { } while (0); } while (0); __asm__ __volatile__( ".set mips3" "\t\t# __readq" "\n\t" "ld %L0, %1" "\n\t" "dsra32 %M0, %L0, 0" "\n\t" "sll %L0, %L0, 0" "\n\t" ".set mips0" "\n" : "=r" (__val) : "m" (*__mem)); if (0) do { ({ unsigned long __dummy; typeof(__flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(__flags)) { raw_local_irq_restore(__flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(__flags); } } while (0); } else { __val = 0; BUG(); } return (__val); }
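/*
 * ____raw_writeq()/____raw_readq() above are the raw, non-swapping
 * counterparts of __mem_writeq()/__mem_readq(); note that their irq-save
 * paths are compiled out (the `if (0)` guards), so the 64-bit access is
 * issued without disabling interrupts.
 */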
# 679 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h"
static inline __attribute__((always_inline)) void writesb(volatile void *mem, const void *addr, unsigned int count) { const volatile u8 *__addr = addr; while (count--) { __mem_writeb(*__addr, mem); __addr++; } } static inline __attribute__((always_inline)) void readsb(volatile void *mem, void *addr, unsigned int count) { volatile u8 *__addr = addr; while (count--) { *__addr = __mem_readb(mem); __addr++; } } static inline __attribute__((always_inline)) void outsb(unsigned long port, const void *addr, unsigned int count) { const volatile u8 *__addr = addr; while (count--) { __mem_outb(*__addr, port); __addr++; } } static inline __attribute__((always_inline)) void insb(unsigned long port, void *addr, unsigned int count) { volatile u8 *__addr = addr; while (count--) { *__addr = __mem_inb(port); __addr++; } }
static inline __attribute__((always_inline)) void writesw(volatile void *mem, const void *addr, unsigned int count) { const volatile u16 *__addr = addr; while (count--) { __mem_writew(*__addr, mem); __addr++; } } static inline __attribute__((always_inline)) void readsw(volatile void *mem, void *addr, unsigned int count) { volatile u16 *__addr = addr; while (count--) { *__addr = __mem_readw(mem); __addr++; } } static inline __attribute__((always_inline)) void outsw(unsigned long port, const void *addr, unsigned int count) { const volatile u16 *__addr = addr; while (count--) { __mem_outw(*__addr, port); __addr++; } } static inline __attribute__((always_inline)) void insw(unsigned long port, void *addr, unsigned int count) { volatile u16 *__addr = addr; while (count--) { *__addr = __mem_inw(port); __addr++; } }
static inline __attribute__((always_inline)) void writesl(volatile void *mem, const void *addr, unsigned int count) { const volatile u32 *__addr = addr; while (count--) { __mem_writel(*__addr, mem); __addr++; } } static inline __attribute__((always_inline)) void readsl(volatile void *mem, void *addr, unsigned int count) { volatile u32 *__addr = addr; while (count--) { *__addr = __mem_readl(mem); __addr++; } } static inline __attribute__((always_inline)) void outsl(unsigned long port, const void *addr, unsigned int count) { const volatile u32 *__addr = addr; while (count--) { __mem_outl(*__addr, port); __addr++; } } static inline __attribute__((always_inline)) void insl(unsigned long port, void *addr, unsigned int count) { volatile u32 *__addr = addr; while (count--) { *__addr = __mem_inl(port); __addr++; } }
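/*
 * writes{b,w,l}()/reads{b,w,l}() and outs{b,w,l}()/ins{b,w,l}() above are
 * plain repeaters: they walk a buffer one element at a time and issue one
 * __mem_* access per element.
 *
 * Illustrative sketch only, not part of the original epon_main.c.  It shows
 * the typical use of writesl() for feeding a word-wide FIFO; the register
 * pointer is assumed to have been mapped by the caller, and the function
 * name is invented for this example.
 */
static inline __attribute__((always_inline)) void example_fifo_push(volatile void *fifo_reg, const u32 *buf, unsigned int words)
{
        writesl(fifo_reg, buf, words); /* one 32-bit store per element */
}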
# 694 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h"
static inline __attribute__((always_inline)) void memset_io(volatile void *addr, unsigned char val, int count)
{
memset((void *) addr, val, count);
}
static inline __attribute__((always_inline)) void memcpy_fromio(void *dst, const volatile void *src, int count)
{
memcpy(dst, (void *) src, count);
}
static inline __attribute__((always_inline)) void memcpy_toio(volatile void *dst, const void *src, int count)
{
memcpy((void *) dst, src, count);
}
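/*
 * On this configuration memset_io(), memcpy_fromio() and memcpy_toio() are
 * thin wrappers: the target is an already-mapped kernel address, so they
 * fall straight through to memset()/memcpy().
 */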
# 729 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/io.h"
extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
# 47 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/page.h" 2
extern void build_clear_page(void);
extern void build_copy_page(void);
# 58 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/page.h"
extern void clear_page(void * page);
extern void copy_page(void * to, void * from);
extern unsigned long shm_align_mask;
static inline __attribute__((always_inline)) unsigned long pages_do_alias(unsigned long addr1,
unsigned long addr2)
{
return (addr1 ^ addr2) & shm_align_mask;
}
struct page;
static inline __attribute__((always_inline)) void clear_user_page(void *addr, unsigned long vaddr,
struct page *page)
{
extern void (*flush_data_cache_page)(unsigned long addr);
clear_page(addr);
if (pages_do_alias((unsigned long) addr, vaddr & (~((1 << 12) - 1))))
flush_data_cache_page((unsigned long)addr);
}
extern void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *to);
struct vm_area_struct;
extern void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma);
# 103 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/page.h"
typedef struct { unsigned long pte; } pte_t;
typedef struct page *pgtable_t;
# 117 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/page.h"
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
# 210 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/page.h"
# 1 "include/asm-generic/memory_model.h" 1
# 211 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/page.h" 2
# 1 "include/asm-generic/getorder.h" 1
# 9 "include/asm-generic/getorder.h"
static inline __attribute__((always_inline)) __attribute__((__const__)) int get_order(unsigned long size)
{
int order;
size = (size - 1) >> (12 - 1);
order = -1;
do {
size >>= 1;
order++;
} while (size);
return order;
}
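/*
 * get_order() above rounds a byte count up to a power-of-two number of
 * 4 KiB pages (PAGE_SHIFT is 12 in this build) and returns the exponent:
 * 4 KiB -> order 0, 8 KiB -> order 1, 16 KiB -> order 2, 100 KiB -> order 5.
 */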
# 212 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/page.h" 2
# 21 "include/linux/mmzone.h" 2
# 50 "include/linux/mmzone.h"
extern int page_group_by_mobility_disabled;
static inline __attribute__((always_inline)) int get_pageblock_migratetype(struct page *page)
{
return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}
struct free_area {
struct list_head free_list[5];
unsigned long nr_free;
};
struct pglist_data;
# 71 "include/linux/mmzone.h"
struct zone_padding {
char x[0];
} __attribute__((__aligned__(1 << (5))));
enum zone_stat_item {
NR_FREE_PAGES,
NR_LRU_BASE,
NR_INACTIVE_ANON = NR_LRU_BASE,
NR_ACTIVE_ANON,
NR_INACTIVE_FILE,
NR_ACTIVE_FILE,
NR_UNEVICTABLE,
NR_MLOCK,
NR_ANON_PAGES,
NR_FILE_MAPPED,
NR_FILE_PAGES,
NR_FILE_DIRTY,
NR_WRITEBACK,
NR_SLAB_RECLAIMABLE,
NR_SLAB_UNRECLAIMABLE,
NR_PAGETABLE,
NR_KERNEL_STACK,
NR_UNSTABLE_NFS,
NR_BOUNCE,
NR_VMSCAN_WRITE,
NR_WRITEBACK_TEMP,
NR_ISOLATED_ANON,
NR_ISOLATED_FILE,
NR_SHMEM,
# 115 "include/linux/mmzone.h"
NR_VM_ZONE_STAT_ITEMS };
# 130 "include/linux/mmzone.h"
enum lru_list {
LRU_INACTIVE_ANON = 0,
LRU_ACTIVE_ANON = 0 + 1,
LRU_INACTIVE_FILE = 0 + 2,
LRU_ACTIVE_FILE = 0 + 2 + 1,
LRU_UNEVICTABLE,
NR_LRU_LISTS
};
static inline __attribute__((always_inline)) int is_file_lru(enum lru_list l)
{
return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
}
static inline __attribute__((always_inline)) int is_active_lru(enum lru_list l)
{
return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
}
static inline __attribute__((always_inline)) int is_unevictable_lru(enum lru_list l)
{
return (l == LRU_UNEVICTABLE);
}
enum zone_watermarks {
WMARK_MIN,
WMARK_LOW,
WMARK_HIGH,
NR_WMARK
};
struct per_cpu_pages {
int count;
int high;
int batch;
struct list_head lists[3];
};
struct per_cpu_pageset {
struct per_cpu_pages pcp;
s8 stat_threshold;
s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
};
enum zone_type {
# 226 "include/linux/mmzone.h"
ZONE_NORMAL,
# 238 "include/linux/mmzone.h"
ZONE_MOVABLE,
__MAX_NR_ZONES
};
# 262 "include/linux/mmzone.h"
struct zone_reclaim_stat {
# 271 "include/linux/mmzone.h"
unsigned long recent_rotated[2];
unsigned long recent_scanned[2];
unsigned long nr_saved_scan[NR_LRU_LISTS];
};
struct zone {
unsigned long watermark[NR_WMARK];
unsigned long percpu_drift_mark;
# 301 "include/linux/mmzone.h"
unsigned long lowmem_reserve[2];
# 311 "include/linux/mmzone.h"
struct per_cpu_pageset *pageset;
spinlock_t lock;
int all_unreclaimable;
struct free_area free_area[11];
unsigned long *pageblock_flags;
# 341 "include/linux/mmzone.h"
struct zone_padding _pad1_;
spinlock_t lru_lock;
struct zone_lru {
struct list_head list;
} lru[NR_LRU_LISTS];
struct zone_reclaim_stat reclaim_stat;
unsigned long pages_scanned;
unsigned long flags;
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
unsigned int inactive_ratio;
struct zone_padding _pad2_;
# 391 "include/linux/mmzone.h"
wait_queue_head_t * wait_table;
unsigned long wait_table_hash_nr_entries;
unsigned long wait_table_bits;
struct pglist_data *zone_pgdat;
unsigned long zone_start_pfn;
# 412 "include/linux/mmzone.h"
unsigned long spanned_pages;
unsigned long present_pages;
const char *name;
} __attribute__((__aligned__(1 << (5))));
typedef enum {
ZONE_RECLAIM_LOCKED,
ZONE_OOM_LOCKED,
} zone_flags_t;
static inline __attribute__((always_inline)) void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
set_bit(flag, &zone->flags);
}
static inline __attribute__((always_inline)) int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
{
return test_and_set_bit(flag, &zone->flags);
}
static inline __attribute__((always_inline)) void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
clear_bit(flag, &zone->flags);
}
static inline __attribute__((always_inline)) int zone_is_reclaim_locked(const struct zone *zone)
{
return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}
static inline __attribute__((always_inline)) int zone_is_oom_locked(const struct zone *zone)
{
return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
unsigned long zone_nr_free_pages(struct zone *zone);
# 545 "include/linux/mmzone.h"
struct zonelist_cache;
struct zoneref {
struct zone *zone;
int zone_idx;
};
# 574 "include/linux/mmzone.h"
struct zonelist {
struct zonelist_cache *zlcache_ptr;
struct zoneref _zonerefs[((1 << 0) * 2) + 1];
};
struct node_active_region {
unsigned long start_pfn;
unsigned long end_pfn;
int nid;
};
extern struct page *mem_map;
# 606 "include/linux/mmzone.h"
struct bootmem_data;
typedef struct pglist_data {
struct zone node_zones[2];
struct zonelist node_zonelists[1];
int nr_zones;
struct page *node_mem_map;
struct page_cgroup *node_page_cgroup;
struct bootmem_data *bdata;
# 630 "include/linux/mmzone.h"
unsigned long node_start_pfn;
unsigned long node_present_pages;
unsigned long node_spanned_pages;
int node_id;
wait_queue_head_t kswapd_wait;
struct task_struct *kswapd;
int kswapd_max_order;
} pg_data_t;
# 649 "include/linux/mmzone.h"
# 1 "include/linux/memory_hotplug.h" 1
# 1 "include/linux/mmzone.h" 1
# 5 "include/linux/memory_hotplug.h" 2
# 1 "include/linux/notifier.h" 1
# 12 "include/linux/notifier.h"
# 1 "include/linux/errno.h" 1
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/errno.h" 1
# 15 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/errno.h"
# 1 "include/asm-generic/errno-base.h" 1
# 16 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/errno.h" 2
# 5 "include/linux/errno.h" 2
# 13 "include/linux/notifier.h" 2
# 1 "include/linux/mutex.h" 1
# 48 "include/linux/mutex.h"
struct mutex {
atomic_t count;
spinlock_t wait_lock;
struct list_head wait_list;
struct thread_info *owner;
# 63 "include/linux/mutex.h"
};
struct mutex_waiter {
struct list_head list;
struct task_struct *task;
};
# 115 "include/linux/mutex.h"
extern void __mutex_init(struct mutex *lock, const char *name,
struct lock_class_key *key);
static inline __attribute__((always_inline)) int mutex_is_locked(struct mutex *lock)
{
return (*(volatile int *)&(&lock->count)->counter) != 1;
}
# 144 "include/linux/mutex.h"
extern void mutex_lock(struct mutex *lock);
extern int __attribute__((warn_unused_result)) mutex_lock_interruptible(struct mutex *lock);
extern int __attribute__((warn_unused_result)) mutex_lock_killable(struct mutex *lock);
# 159 "include/linux/mutex.h"
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
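/*
 * Hypothetical sketch, not taken from the module: the usual pairing for the
 * sleeping-mutex API declared above.  The helper name and the elided
 * critical section are illustrative only.
 */
static inline __attribute__((always_inline)) void example_locked_update(struct mutex *lock)
{
        mutex_lock(lock);
        /* ... critical section touching data guarded by *lock ... */
        mutex_unlock(lock);
}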
# 14 "include/linux/notifier.h" 2
# 1 "include/linux/rwsem.h" 1
# 17 "include/linux/rwsem.h"
struct rw_semaphore;
# 1 "include/linux/rwsem-spinlock.h" 1
# 22 "include/linux/rwsem-spinlock.h"
struct rwsem_waiter;
# 31 "include/linux/rwsem-spinlock.h"
struct rw_semaphore {
__s32 activity;
spinlock_t wait_lock;
struct list_head wait_list;
};
# 53 "include/linux/rwsem-spinlock.h"
extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
struct lock_class_key *key);
# 63 "include/linux/rwsem-spinlock.h"
extern void __down_read(struct rw_semaphore *sem);
extern int __down_read_trylock(struct rw_semaphore *sem);
extern void __down_write(struct rw_semaphore *sem);
extern void __down_write_nested(struct rw_semaphore *sem, int subclass);
extern int __down_write_trylock(struct rw_semaphore *sem);
extern void __up_read(struct rw_semaphore *sem);
extern void __up_write(struct rw_semaphore *sem);
extern void __downgrade_write(struct rw_semaphore *sem);
extern int rwsem_is_locked(struct rw_semaphore *sem);
# 21 "include/linux/rwsem.h" 2
extern void down_read(struct rw_semaphore *sem);
extern int down_read_trylock(struct rw_semaphore *sem);
extern void down_write(struct rw_semaphore *sem);
extern int down_write_trylock(struct rw_semaphore *sem);
extern void up_read(struct rw_semaphore *sem);
extern void up_write(struct rw_semaphore *sem);
extern void downgrade_write(struct rw_semaphore *sem);
# 15 "include/linux/notifier.h" 2
# 1 "include/linux/srcu.h" 1
# 32 "include/linux/srcu.h"
struct srcu_struct_array {
int c[2];
};
struct srcu_struct {
int completed;
struct srcu_struct_array *per_cpu_ref;
struct mutex mutex;
};
# 70 "include/linux/srcu.h"
int init_srcu_struct(struct srcu_struct *sp);
void cleanup_srcu_struct(struct srcu_struct *sp);
int __srcu_read_lock(struct srcu_struct *sp) ;
void __srcu_read_unlock(struct srcu_struct *sp, int idx) ;
void synchronize_srcu(struct srcu_struct *sp);
void synchronize_srcu_expedited(struct srcu_struct *sp);
long srcu_batches_completed(struct srcu_struct *sp);
# 103 "include/linux/srcu.h"
static inline __attribute__((always_inline)) int srcu_read_lock_held(struct srcu_struct *sp)
{
return 1;
}
# 125 "include/linux/srcu.h"
static inline __attribute__((always_inline)) int srcu_read_lock(struct srcu_struct *sp)
{
int retval = __srcu_read_lock(sp);
do { } while (0);
return retval;
}
# 140 "include/linux/srcu.h"
static inline __attribute__((always_inline)) void srcu_read_unlock(struct srcu_struct *sp, int idx)
{
do { } while (0);
__srcu_read_unlock(sp, idx);
}
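/*
 * Hypothetical sketch, not taken from the module: an SRCU read-side section
 * using the helpers defined above.  srcu_read_lock() returns an index that
 * must be handed back to srcu_read_unlock().
 */
static inline __attribute__((always_inline)) void example_srcu_reader(struct srcu_struct *sp)
{
        int idx = srcu_read_lock(sp);
        /* ... read-side critical section ... */
        srcu_read_unlock(sp, idx);
}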
# 16 "include/linux/notifier.h" 2
# 50 "include/linux/notifier.h"
struct notifier_block {
int (*notifier_call)(struct notifier_block *, unsigned long, void *);
struct notifier_block *next;
int priority;
};
struct atomic_notifier_head {
spinlock_t lock;
struct notifier_block *head;
};
struct blocking_notifier_head {
struct rw_semaphore rwsem;
struct notifier_block *head;
};
struct raw_notifier_head {
struct notifier_block *head;
};
struct srcu_notifier_head {
struct mutex mutex;
struct srcu_struct srcu;
struct notifier_block *head;
};
# 89 "include/linux/notifier.h"
extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
# 115 "include/linux/notifier.h"
extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
struct notifier_block *nb);
extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
struct notifier_block *nb);
extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
struct notifier_block *nb);
extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
struct notifier_block *nb);
extern int blocking_notifier_chain_cond_register(
struct blocking_notifier_head *nh,
struct notifier_block *nb);
extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
struct notifier_block *nb);
extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
struct notifier_block *nb);
extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
struct notifier_block *nb);
extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
struct notifier_block *nb);
extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v);
extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v);
extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
unsigned long val, void *v);
extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
unsigned long val, void *v, int nr_to_call, int *nr_calls);
extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v);
extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v, int nr_to_call, int *nr_calls);
# 165 "include/linux/notifier.h"
static inline __attribute__((always_inline)) int notifier_from_errno(int err)
{
if (err)
return 0x8000 | (0x0001 - err);
return 0x0001;
}
static inline __attribute__((always_inline)) int notifier_to_errno(int ret)
{
ret &= ~0x8000;
return ret > 0x0001 ? 0x0001 - ret : 0;
}
# 271 "include/linux/notifier.h"
extern struct blocking_notifier_head reboot_notifier_list;
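/*
 * Hypothetical sketch, not taken from the module: registering a callback on
 * the reboot notifier chain declared just above.  The function and variable
 * names are invented for the example; 0x0001 is the NOTIFY_OK value implied
 * by notifier_from_errno()/notifier_to_errno() earlier in this header.
 */
static __attribute__((unused)) int example_reboot_event(struct notifier_block *nb, unsigned long event, void *data)
{
        return 0x0001; /* NOTIFY_OK */
}
static struct notifier_block example_reboot_nb __attribute__((unused)) = {
        .notifier_call = example_reboot_event,
};
static inline __attribute__((always_inline)) int example_register_reboot(void)
{
        return blocking_notifier_chain_register(&reboot_notifier_list, &example_reboot_nb);
}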
# 7 "include/linux/memory_hotplug.h" 2
struct page;
struct zone;
struct pglist_data;
struct mem_section;
# 164 "include/linux/memory_hotplug.h"
static inline __attribute__((always_inline)) void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline __attribute__((always_inline)) void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline __attribute__((always_inline)) void pgdat_resize_init(struct pglist_data *pgdat) {}
static inline __attribute__((always_inline)) unsigned zone_span_seqbegin(struct zone *zone)
{
return 0;
}
static inline __attribute__((always_inline)) int zone_span_seqretry(struct zone *zone, unsigned iv)
{
return 0;
}
static inline __attribute__((always_inline)) void zone_span_writelock(struct zone *zone) {}
static inline __attribute__((always_inline)) void zone_span_writeunlock(struct zone *zone) {}
static inline __attribute__((always_inline)) void zone_seqlock_init(struct zone *zone) {}
static inline __attribute__((always_inline)) int mhp_notimplemented(const char *func)
{
printk("<4>" "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
dump_stack();
return -89;
}
static inline __attribute__((always_inline)) void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
}
# 198 "include/linux/memory_hotplug.h"
static inline __attribute__((always_inline)) int is_mem_section_removable(unsigned long pfn,
unsigned long nr_pages)
{
return 0;
}
extern int mem_online_node(int nid);
extern int add_memory(int nid, u64 start, u64 size);
extern int arch_add_memory(int nid, u64 start, u64 size);
extern int remove_memory(u64 start, u64 size);
extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
int nr_pages);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
# 650 "include/linux/mmzone.h" 2
extern struct mutex zonelists_mutex;
void build_all_zonelists(void *data);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
int classzone_idx, int alloc_flags);
enum memmap_context {
MEMMAP_EARLY,
MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
unsigned long size,
enum memmap_context context);
static inline __attribute__((always_inline)) void memory_present(int nid, unsigned long start, unsigned long end) {}
static inline __attribute__((always_inline)) int local_memory_node(int node_id) { return node_id; };
# 685 "include/linux/mmzone.h"
static inline __attribute__((always_inline)) int populated_zone(struct zone *zone)
{
return (!!zone->present_pages);
}
extern int movable_zone;
static inline __attribute__((always_inline)) int zone_movable_is_highmem(void)
{
return 0;
}
static inline __attribute__((always_inline)) int is_highmem_idx(enum zone_type idx)
{
return 0;
}
static inline __attribute__((always_inline)) int is_normal_idx(enum zone_type idx)
{
return (idx == ZONE_NORMAL);
}
static inline __attribute__((always_inline)) int is_highmem(struct zone *zone)
{
return 0;
}
static inline __attribute__((always_inline)) int is_normal(struct zone *zone)
{
return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}
static inline __attribute__((always_inline)) int is_dma32(struct zone *zone)
{
return 0;
}
static inline __attribute__((always_inline)) int is_dma(struct zone *zone)
{
return 0;
}
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[2 -1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
extern int numa_zonelist_order_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
extern char numa_zonelist_order[];
extern struct pglist_data contig_page_data;
# 788 "include/linux/mmzone.h"
extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);
# 820 "include/linux/mmzone.h"
static inline __attribute__((always_inline)) struct zone *zonelist_zone(struct zoneref *zoneref)
{
return zoneref->zone;
}
static inline __attribute__((always_inline)) int zonelist_zone_idx(struct zoneref *zoneref)
{
return zoneref->zone_idx;
}
static inline __attribute__((always_inline)) int zonelist_node_idx(struct zoneref *zoneref)
{
return 0;
}
# 853 "include/linux/mmzone.h"
struct zoneref *next_zones_zonelist(struct zoneref *z,
enum zone_type highest_zoneidx,
nodemask_t *nodes,
struct zone **zone);
# 870 "include/linux/mmzone.h"
static inline __attribute__((always_inline)) struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
enum zone_type highest_zoneidx,
nodemask_t *nodes,
struct zone **zone)
{
return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
zone);
}
# 1094 "include/linux/mmzone.h"
void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) node_memmap_size_bytes(int, unsigned long, unsigned long);
# 1128 "include/linux/mmzone.h"
static inline __attribute__((always_inline)) int memmap_valid_within(unsigned long pfn,
struct page *page, struct zone *zone)
{
return 1;
}
# 5 "include/linux/gfp.h" 2
# 1 "include/linux/topology.h" 1
# 33 "include/linux/topology.h"
# 1 "include/linux/smp.h" 1
# 14 "include/linux/smp.h"
extern void cpu_idle(void);
struct call_single_data {
struct list_head list;
void (*func) (void *info);
void *info;
u16 flags;
u16 priv;
};
extern unsigned int total_cpus;
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
int wait);
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/smp.h" 1
# 16 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/smp.h"
# 1 "include/linux/smp.h" 1
# 17 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/smp.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/smp-ops.h" 1
# 18 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/smp-ops.h"
struct task_struct;
struct plat_smp_ops {
void (*send_ipi_single)(int cpu, unsigned int action);
void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action);
void (*init_secondary)(void);
void (*smp_finish)(void);
void (*cpus_done)(void);
void (*boot_secondary)(int cpu, struct task_struct *idle);
void (*smp_setup)(void);
void (*prepare_cpus)(unsigned int max_cpus);
};
extern void register_smp_ops(struct plat_smp_ops *ops);
static inline __attribute__((always_inline)) void plat_smp_setup(void)
{
extern struct plat_smp_ops *mp_ops;
mp_ops->smp_setup();
}
# 59 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/smp-ops.h"
extern struct plat_smp_ops up_smp_ops;
extern struct plat_smp_ops cmp_smp_ops;
extern struct plat_smp_ops vsmp_smp_ops;
# 22 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/smp.h" 2
extern int smp_num_siblings;
extern cpumask_t cpu_sibling_map[];
extern int __cpu_number_map[4];
extern int __cpu_logical_map[4];
# 44 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/smp.h"
extern volatile cpumask_t cpu_callin_map;
extern void smp_bootstrap(void);
static inline __attribute__((always_inline)) void smp_send_reschedule(int cpu)
{
extern struct plat_smp_ops *mp_ops;
mp_ops->send_ipi_single(cpu, 0x1);
}
# 78 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/smp.h"
extern void smp_call_function_interrupt(void);
static inline __attribute__((always_inline)) void arch_send_call_function_single_ipi(int cpu)
{
extern struct plat_smp_ops *mp_ops;
mp_ops->send_ipi_mask(&(*get_cpu_mask(cpu)), 0x2);
}
static inline __attribute__((always_inline)) void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
extern struct plat_smp_ops *mp_ops;
mp_ops->send_ipi_mask(mask, 0x2);
}
# 37 "include/linux/smp.h" 2
# 46 "include/linux/smp.h"
extern void smp_send_stop(void);
extern void smp_send_reschedule(int cpu);
extern void smp_prepare_cpus(unsigned int max_cpus);
extern int __cpu_up(unsigned int cpunum);
extern void smp_cpus_done(unsigned int max_cpus);
int smp_call_function(void(*func)(void *info), void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
void (*func)(void *info), void *info, int wait);
void __smp_call_function_single(int cpuid, struct call_single_data *data,
int wait);
int smp_call_function_any(const struct cpumask *mask,
void (*func)(void *info), void *info, int wait);
void generic_smp_call_function_single_interrupt(void);
void generic_smp_call_function_interrupt(void);
void ipi_call_lock(void);
void ipi_call_unlock(void);
void ipi_call_lock_irq(void);
void ipi_call_unlock_irq(void);
int on_each_cpu(void (*func) (void *info), void *info, int wait);
# 113 "include/linux/smp.h"
void smp_prepare_boot_cpu(void);
extern unsigned int setup_max_cpus;
# 183 "include/linux/smp.h"
extern void arch_disable_smp_support(void);
void smp_setup_processor_id(void);
# 34 "include/linux/topology.h" 2
# 1 "include/linux/percpu.h" 1
# 10 "include/linux/percpu.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/percpu.h" 1
# 1 "include/asm-generic/percpu.h" 1
# 1 "include/linux/percpu-defs.h" 1
# 7 "include/asm-generic/percpu.h" 2
# 18 "include/asm-generic/percpu.h"
extern unsigned long __per_cpu_offset[4];
# 5 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/percpu.h" 2
# 11 "include/linux/percpu.h" 2
# 74 "include/linux/percpu.h"
extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;
struct pcpu_group_info {
int nr_units;
unsigned long base_offset;
unsigned int *cpu_map;
};
struct pcpu_alloc_info {
size_t static_size;
size_t reserved_size;
size_t dyn_size;
size_t unit_size;
size_t atom_size;
size_t alloc_size;
size_t __ai_size;
int nr_groups;
struct pcpu_group_info groups[];
};
enum pcpu_fc {
PCPU_FC_AUTO,
PCPU_FC_EMBED,
PCPU_FC_PAGE,
PCPU_FC_NR,
};
extern const char *pcpu_fc_names[PCPU_FC_NR];
extern enum pcpu_fc pcpu_chosen_fc;
typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
extern struct pcpu_alloc_info * __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_alloc_alloc_info(int nr_groups,
int nr_units);
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_free_alloc_info(struct pcpu_alloc_info *ai);
extern int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
void *base_addr);
# 142 "include/linux/percpu.h"
extern void *__alloc_reserved_percpu(size_t size, size_t align);
extern int is_kernel_percpu_address(unsigned long addr);
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) setup_per_cpu_areas(void);
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) percpu_init_late(void);
# 171 "include/linux/percpu.h"
extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
# 237 "include/linux/percpu.h"
extern void __bad_size_call_parameter(void);
# 35 "include/linux/topology.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/topology.h" 1
# 11 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/topology.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mach-generic/topology.h" 1
# 1 "include/asm-generic/topology.h" 1
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mach-generic/topology.h" 2
# 12 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/topology.h" 2
# 36 "include/linux/topology.h" 2
# 49 "include/linux/topology.h"
int arch_update_cpu_topology(void);
# 247 "include/linux/topology.h"
static inline __attribute__((always_inline)) int numa_node_id(void)
{
return ((void)((__current_thread_info->cpu)),0);
}
# 297 "include/linux/topology.h"
static inline __attribute__((always_inline)) int numa_mem_id(void)
{
return numa_node_id();
}
# 8 "include/linux/gfp.h" 2
# 1 "include/linux/mmdebug.h" 1
# 9 "include/linux/gfp.h" 2
struct vm_area_struct;
# 143 "include/linux/gfp.h"
static inline __attribute__((always_inline)) int allocflags_to_migratetype(gfp_t gfp_flags)
{
({ int __ret_warn_on = !!((gfp_flags & ((( gfp_t)0x80000u)|(( gfp_t)0x08u))) == ((( gfp_t)0x80000u)|(( gfp_t)0x08u))); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_slowpath_null("include/linux/gfp.h", 145); __builtin_expect(!!(__ret_warn_on), 0); });
if (__builtin_expect(!!(page_group_by_mobility_disabled), 0))
return 0;
return (((gfp_flags & (( gfp_t)0x08u)) != 0) << 1) |
((gfp_flags & (( gfp_t)0x80000u)) != 0);
}
# 238 "include/linux/gfp.h"
static inline __attribute__((always_inline)) enum zone_type gfp_zone(gfp_t flags)
{
enum zone_type z;
int bit = flags & ((( gfp_t)0x01u)|(( gfp_t)0x02u)|(( gfp_t)0x04u)|(( gfp_t)0x08u));
z = (( (ZONE_NORMAL << 0 * 1) | (ZONE_NORMAL << (( gfp_t)0x01u) * 1) | (ZONE_NORMAL << (( gfp_t)0x02u) * 1) | (ZONE_NORMAL << (( gfp_t)0x04u) * 1) | (ZONE_NORMAL << (( gfp_t)0x08u) * 1) | (ZONE_NORMAL << ((( gfp_t)0x08u) | (( gfp_t)0x01u)) * 1) | (ZONE_MOVABLE << ((( gfp_t)0x08u) | (( gfp_t)0x02u)) * 1) | (ZONE_NORMAL << ((( gfp_t)0x08u) | (( gfp_t)0x04u)) * 1)) >> (bit * 1)) &
((1 << 1) - 1);
if (__builtin_constant_p(bit))
((void)sizeof(char[1 - 2 * !!((( 1 << ((( gfp_t)0x01u) | (( gfp_t)0x02u)) | 1 << ((( gfp_t)0x01u) | (( gfp_t)0x04u)) | 1 << ((( gfp_t)0x04u) | (( gfp_t)0x02u)) | 1 << ((( gfp_t)0x01u) | (( gfp_t)0x04u) | (( gfp_t)0x02u)) | 1 << ((( gfp_t)0x08u) | (( gfp_t)0x02u) | (( gfp_t)0x01u)) | 1 << ((( gfp_t)0x08u) | (( gfp_t)0x04u) | (( gfp_t)0x01u)) | 1 << ((( gfp_t)0x08u) | (( gfp_t)0x04u) | (( gfp_t)0x02u)) | 1 << ((( gfp_t)0x08u) | (( gfp_t)0x04u) | (( gfp_t)0x01u) | (( gfp_t)0x02u))) >> bit) & 1)]));
else {
}
return z;
}
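/*
 * gfp_zone() above decodes the target zone from the low GFP flag bits via a
 * lookup table packed into a single integer.  With only ZONE_NORMAL and
 * ZONE_MOVABLE configured (see enum zone_type earlier), every combination
 * maps to ZONE_NORMAL except 0x08|0x02 (__GFP_MOVABLE | __GFP_HIGHMEM),
 * which selects ZONE_MOVABLE.
 */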
# 263 "include/linux/gfp.h"
static inline __attribute__((always_inline)) int gfp_zonelist(gfp_t flags)
{
if (0 && __builtin_expect(!!(flags & (( gfp_t)0x40000u)), 0))
return 1;
return 0;
}
# 280 "include/linux/gfp.h"
static inline __attribute__((always_inline)) struct zonelist *node_zonelist(int nid, gfp_t flags)
{
return (&contig_page_data)->node_zonelists + gfp_zonelist(flags);
}
static inline __attribute__((always_inline)) void arch_free_page(struct page *page, int order) { }
static inline __attribute__((always_inline)) void arch_alloc_page(struct page *page, int order) { }
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, nodemask_t *nodemask);
static inline __attribute__((always_inline)) struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist)
{
return __alloc_pages_nodemask(gfp_mask, order, zonelist, ((void *)0));
}
static inline __attribute__((always_inline)) struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
unsigned int order)
{
if (nid < 0)
nid = numa_node_id();
return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
static inline __attribute__((always_inline)) struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
unsigned int order)
{
do { (void)(nid < 0 || nid >= (1 << 0)); } while (0);
return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
# 338 "include/linux/gfp.h"
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
void free_pages_exact(void *virt, size_t size);
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_cold_page(struct page *page, int cold);
void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(void);
void drain_local_pages(void *dummy);
extern gfp_t gfp_allowed_mask;
extern void set_gfp_allowed_mask(gfp_t mask);
extern gfp_t clear_gfp_allowed_mask(gfp_t mask);
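/*
 * Hypothetical sketch, not taken from the module: the usual pairing of
 * get_order() (defined earlier in this file) with the page allocator
 * declared above.  Function names are invented for the example.
 */
static inline __attribute__((always_inline)) unsigned long example_alloc_buffer(size_t bytes, gfp_t gfp)
{
        return __get_free_pages(gfp, get_order(bytes));
}
static inline __attribute__((always_inline)) void example_free_buffer(unsigned long addr, size_t bytes)
{
        free_pages(addr, get_order(bytes));
}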
# 23 "include/linux/kmod.h" 2
# 1 "include/linux/workqueue.h" 1
# 1 "include/linux/timer.h" 1
# 1 "include/linux/ktime.h" 1
# 25 "include/linux/ktime.h"
# 1 "include/linux/jiffies.h" 1
# 1 "include/linux/timex.h" 1
# 64 "include/linux/timex.h"
struct timex {
unsigned int modes;
long offset;
long freq;
long maxerror;
long esterror;
int status;
long constant;
long precision;
long tolerance;
struct timeval time;
long tick;
long ppsfreq;
long jitter;
int shift;
long stabil;
long jitcnt;
long calcnt;
long errcnt;
long stbcnt;
int tai;
int :32; int :32; int :32; int :32;
int :32; int :32; int :32; int :32;
int :32; int :32; int :32;
};
# 171 "include/linux/timex.h"
# 1 "include/linux/param.h" 1
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/param.h" 1
# 14 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/param.h"
# 1 "include/asm-generic/param.h" 1
# 15 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/param.h" 2
# 5 "include/linux/param.h" 2
# 172 "include/linux/timex.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/timex.h" 1
# 34 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/timex.h"
typedef unsigned int cycles_t;
static inline __attribute__((always_inline)) cycles_t get_cycles(void)
{
return 0;
}
# 174 "include/linux/timex.h" 2
# 233 "include/linux/timex.h"
extern unsigned long tick_usec;
extern unsigned long tick_nsec;
extern int time_status;
extern void ntp_init(void);
extern void ntp_clear(void);
static inline __attribute__((always_inline)) int ntp_synced(void)
{
return !(time_status & 0x0040);
}
# 266 "include/linux/timex.h"
extern u64 tick_length;
extern void second_overflow(void);
extern void update_ntp_one_tick(void);
extern int do_adjtimex(struct timex *);
int read_current_timer(unsigned long *timer_val);
# 9 "include/linux/jiffies.h" 2
# 81 "include/linux/jiffies.h"
extern u64 __attribute__((section(".data"))) jiffies_64;
extern unsigned long volatile __attribute__((section(".data"))) jiffies;
u64 get_jiffies_64(void);
# 183 "include/linux/jiffies.h"
extern unsigned long preset_lpj;
# 296 "include/linux/jiffies.h"
extern unsigned int jiffies_to_msecs(const unsigned long j);
extern unsigned int jiffies_to_usecs(const unsigned long j);
extern unsigned long msecs_to_jiffies(const unsigned int m);
extern unsigned long usecs_to_jiffies(const unsigned int u);
extern unsigned long timespec_to_jiffies(const struct timespec *value);
extern void jiffies_to_timespec(const unsigned long jiffies,
struct timespec *value);
extern unsigned long timeval_to_jiffies(const struct timeval *value);
extern void jiffies_to_timeval(const unsigned long jiffies,
struct timeval *value);
extern clock_t jiffies_to_clock_t(long x);
extern unsigned long clock_t_to_jiffies(unsigned long x);
extern u64 jiffies_64_to_clock_t(u64 x);
extern u64 nsec_to_clock_t(u64 x);
extern unsigned long nsecs_to_jiffies(u64 n);
# 26 "include/linux/ktime.h" 2
# 46 "include/linux/ktime.h"
union ktime {
s64 tv64;
struct {
s32 sec, nsec;
} tv;
};
typedef union ktime ktime_t;
# 151 "include/linux/ktime.h"
static inline __attribute__((always_inline)) ktime_t ktime_set(const long secs, const unsigned long nsecs)
{
return (ktime_t) { .tv = { .sec = secs, .nsec = nsecs } };
}
# 163 "include/linux/ktime.h"
static inline __attribute__((always_inline)) ktime_t ktime_sub(const ktime_t lhs, const ktime_t rhs)
{
ktime_t res;
res.tv64 = lhs.tv64 - rhs.tv64;
if (res.tv.nsec < 0)
res.tv.nsec += 1000000000L;
return res;
}
# 181 "include/linux/ktime.h"
static inline __attribute__((always_inline)) ktime_t ktime_add(const ktime_t add1, const ktime_t add2)
{
ktime_t res;
res.tv64 = add1.tv64 + add2.tv64;
# 194 "include/linux/ktime.h"
if (res.tv.nsec >= 1000000000L)
res.tv64 += (u32)-1000000000L;
return res;
}
# 207 "include/linux/ktime.h"
extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec);
# 216 "include/linux/ktime.h"
extern ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec);
static inline __attribute__((always_inline)) ktime_t timespec_to_ktime(const struct timespec ts)
{
return (ktime_t) { .tv = { .sec = (s32)ts.tv_sec,
.nsec = (s32)ts.tv_nsec } };
}
static inline __attribute__((always_inline)) ktime_t timeval_to_ktime(const struct timeval tv)
{
return (ktime_t) { .tv = { .sec = (s32)tv.tv_sec,
.nsec = (s32)tv.tv_usec * 1000 } };
}
static inline __attribute__((always_inline)) struct timespec ktime_to_timespec(const ktime_t kt)
{
return (struct timespec) { .tv_sec = (time_t) kt.tv.sec,
.tv_nsec = (long) kt.tv.nsec };
}
static inline __attribute__((always_inline)) struct timeval ktime_to_timeval(const ktime_t kt)
{
return (struct timeval) {
.tv_sec = (time_t) kt.tv.sec,
.tv_usec = (suseconds_t) (kt.tv.nsec / 1000L) };
}
static inline __attribute__((always_inline)) s64 ktime_to_ns(const ktime_t kt)
{
return (s64) kt.tv.sec * 1000000000L + kt.tv.nsec;
}
# 287 "include/linux/ktime.h"
static inline __attribute__((always_inline)) int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
{
return cmp1.tv64 == cmp2.tv64;
}
static inline __attribute__((always_inline)) s64 ktime_to_us(const ktime_t kt)
{
struct timeval tv = ktime_to_timeval(kt);
return (s64) tv.tv_sec * 1000000L + tv.tv_usec;
}
static inline __attribute__((always_inline)) s64 ktime_to_ms(const ktime_t kt)
{
struct timeval tv = ktime_to_timeval(kt);
return (s64) tv.tv_sec * 1000L + tv.tv_usec / 1000L;
}
static inline __attribute__((always_inline)) s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
{
return ktime_to_us(ktime_sub(later, earlier));
}
static inline __attribute__((always_inline)) ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
{
return ktime_add_ns(kt, usec * 1000);
}
static inline __attribute__((always_inline)) ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
{
return ktime_sub_ns(kt, usec * 1000);
}
extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
# 331 "include/linux/ktime.h"
extern void ktime_get_ts(struct timespec *ts);
static inline __attribute__((always_inline)) ktime_t ns_to_ktime(u64 ns)
{
static const ktime_t ktime_zero = { .tv64 = 0 };
return ktime_add_ns(ktime_zero, ns);
}
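/*
 * Hypothetical sketch, not taken from the module: measuring an elapsed
 * interval in microseconds with the ktime helpers above.  ktime_get_ts()
 * fills a timespec that timespec_to_ktime() converts for ktime_us_delta().
 */
static inline __attribute__((always_inline)) s64 example_elapsed_us(const ktime_t start)
{
        struct timespec now;
        ktime_get_ts(&now);
        return ktime_us_delta(timespec_to_ktime(now), start);
}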
# 6 "include/linux/timer.h" 2
# 1 "include/linux/debugobjects.h" 1
enum debug_obj_state {
ODEBUG_STATE_NONE,
ODEBUG_STATE_INIT,
ODEBUG_STATE_INACTIVE,
ODEBUG_STATE_ACTIVE,
ODEBUG_STATE_DESTROYED,
ODEBUG_STATE_NOTAVAILABLE,
ODEBUG_STATE_MAX,
};
struct debug_obj_descr;
# 27 "include/linux/debugobjects.h"
struct debug_obj {
struct hlist_node node;
enum debug_obj_state state;
unsigned int astate;
void *object;
struct debug_obj_descr *descr;
};
# 47 "include/linux/debugobjects.h"
struct debug_obj_descr {
const char *name;
int (*fixup_init) (void *addr, enum debug_obj_state state);
int (*fixup_activate) (void *addr, enum debug_obj_state state);
int (*fixup_destroy) (void *addr, enum debug_obj_state state);
int (*fixup_free) (void *addr, enum debug_obj_state state);
};
# 77 "include/linux/debugobjects.h"
static inline __attribute__((always_inline)) void
debug_object_init (void *addr, struct debug_obj_descr *descr) { }
static inline __attribute__((always_inline)) void
debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { }
static inline __attribute__((always_inline)) void
debug_object_activate (void *addr, struct debug_obj_descr *descr) { }
static inline __attribute__((always_inline)) void
debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { }
static inline __attribute__((always_inline)) void
debug_object_destroy (void *addr, struct debug_obj_descr *descr) { }
static inline __attribute__((always_inline)) void
debug_object_free (void *addr, struct debug_obj_descr *descr) { }
static inline __attribute__((always_inline)) void debug_objects_early_init(void) { }
static inline __attribute__((always_inline)) void debug_objects_mem_init(void) { }
static inline __attribute__((always_inline)) void
debug_check_no_obj_freed(const void *address, unsigned long size) { }
# 8 "include/linux/timer.h" 2
struct tvec_base;
struct timer_list {
struct list_head entry;
unsigned long expires;
struct tvec_base *base;
void (*function)(unsigned long);
unsigned long data;
int slack;
# 34 "include/linux/timer.h"
};
extern struct tvec_base boot_tvec_bases;
# 65 "include/linux/timer.h"
void init_timer_key(struct timer_list *timer,
const char *name,
struct lock_class_key *key);
void init_timer_deferrable_key(struct timer_list *timer,
const char *name,
struct lock_class_key *key);
# 131 "include/linux/timer.h"
static inline __attribute__((always_inline)) void destroy_timer_on_stack(struct timer_list *timer) { }
static inline __attribute__((always_inline)) void init_timer_on_stack_key(struct timer_list *timer,
const char *name,
struct lock_class_key *key)
{
init_timer_key(timer, name, key);
}
static inline __attribute__((always_inline)) void setup_timer_key(struct timer_list * timer,
const char *name,
struct lock_class_key *key,
void (*function)(unsigned long),
unsigned long data)
{
timer->function = function;
timer->data = data;
init_timer_key(timer, name, key);
}
static inline __attribute__((always_inline)) void setup_timer_on_stack_key(struct timer_list *timer,
const char *name,
struct lock_class_key *key,
void (*function)(unsigned long),
unsigned long data)
{
timer->function = function;
timer->data = data;
init_timer_on_stack_key(timer, name, key);
}
extern void setup_deferrable_timer_on_stack_key(struct timer_list *timer,
const char *name,
struct lock_class_key *key,
void (*function)(unsigned long),
unsigned long data);
# 178 "include/linux/timer.h"
static inline __attribute__((always_inline)) int timer_pending(const struct timer_list * timer)
{
return timer->entry.next != ((void *)0);
}
extern void add_timer_on(struct timer_list *timer, int cpu);
extern int del_timer(struct timer_list * timer);
extern int mod_timer(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
extern void set_timer_slack(struct timer_list *time, int slack_hz);
# 204 "include/linux/timer.h"
extern unsigned long get_next_timer_interrupt(unsigned long now);
# 236 "include/linux/timer.h"
static inline __attribute__((always_inline)) void init_timer_stats(void)
{
}
static inline __attribute__((always_inline)) void timer_stats_timer_set_start_info(struct timer_list *timer)
{
}
static inline __attribute__((always_inline)) void timer_stats_timer_clear_start_info(struct timer_list *timer)
{
}
extern void add_timer(struct timer_list *timer);
extern int try_to_del_timer_sync(struct timer_list *timer);
extern int del_timer_sync(struct timer_list *timer);
extern void init_timers(void);
extern void run_local_timers(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);
unsigned long __round_jiffies(unsigned long j, int cpu);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
unsigned long round_jiffies_relative(unsigned long j);
unsigned long __round_jiffies_up(unsigned long j, int cpu);
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
unsigned long round_jiffies_up(unsigned long j);
unsigned long round_jiffies_up_relative(unsigned long j);
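/*
 * Hypothetical sketch, not taken from the module: arming a one-shot kernel
 * timer with the helpers above.  Passing ((void *)0) for the lockdep name
 * and key mirrors what the setup_timer() convenience macro reduces to when,
 * as appears to be the case in this build, the lockdep hooks are compiled
 * out.  All names here are invented for the example.
 */
static __attribute__((unused)) void example_timer_fn(unsigned long data)
{
        /* ... work to do roughly 100 ms after example_arm_timer() ... */
}
static struct timer_list example_timer __attribute__((unused));
static inline __attribute__((always_inline)) void example_arm_timer(void)
{
        setup_timer_key(&example_timer, ((void *)0), ((void *)0), example_timer_fn, 0);
        mod_timer(&example_timer, jiffies + msecs_to_jiffies(100));
}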
# 9 "include/linux/workqueue.h" 2
struct workqueue_struct;
struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
enum {
WORK_STRUCT_PENDING_BIT = 0,
WORK_STRUCT_DELAYED_BIT = 1,
WORK_STRUCT_CWQ_BIT = 2,
WORK_STRUCT_LINKED_BIT = 3,
WORK_STRUCT_COLOR_SHIFT = 4,
WORK_STRUCT_COLOR_BITS = 4,
WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT,
WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
WORK_STRUCT_STATIC = 0,
WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1,
WORK_NO_COLOR = WORK_NR_COLORS,
WORK_CPU_UNBOUND = 4,
WORK_CPU_NONE = 4 + 1,
WORK_CPU_LAST = WORK_CPU_NONE,
WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
WORK_STRUCT_COLOR_BITS,
WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
WORK_STRUCT_NO_CPU = WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,
WORK_BUSY_PENDING = 1 << 0,
WORK_BUSY_RUNNING = 1 << 1,
};
struct work_struct {
atomic_long_t data;
struct list_head entry;
work_func_t func;
};
struct delayed_work {
struct work_struct work;
struct timer_list timer;
};
static inline __attribute__((always_inline)) struct delayed_work *to_delayed_work(struct work_struct *work)
{
return ({ const typeof( ((struct delayed_work *)0)->work ) *__mptr = (work); (struct delayed_work *)( (char *)__mptr - __builtin_offsetof(struct delayed_work,work) );});
}
struct execute_work {
struct work_struct work;
};
# 155 "include/linux/workqueue.h"
static inline __attribute__((always_inline)) void __init_work(struct work_struct *work, int onstack) { }
static inline __attribute__((always_inline)) void destroy_work_on_stack(struct work_struct *work) { }
static inline __attribute__((always_inline)) unsigned int work_static(struct work_struct *work) { return 0; }
# 242 "include/linux/workqueue.h"
enum {
WQ_NON_REENTRANT = 1 << 0,
WQ_UNBOUND = 1 << 1,
WQ_FREEZEABLE = 1 << 2,
WQ_RESCUER = 1 << 3,
WQ_HIGHPRI = 1 << 4,
WQ_CPU_INTENSIVE = 1 << 5,
WQ_DYING = 1 << 6,
WQ_MAX_ACTIVE = 512,
WQ_MAX_UNBOUND_PER_CPU = 4,
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
};
# 281 "include/linux/workqueue.h"
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *
__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
struct lock_class_key *key, const char *lock_name);
# 316 "include/linux/workqueue.h"
extern void destroy_workqueue(struct workqueue_struct *wq);
extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *work, unsigned long delay);
extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
extern void flush_delayed_work(struct delayed_work *work);
extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);
extern int flush_work(struct work_struct *work);
extern int cancel_work_sync(struct work_struct *work);
extern void workqueue_set_max_active(struct workqueue_struct *wq,
int max_active);
extern int workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);
static inline __attribute__((always_inline)) int cancel_delayed_work(struct delayed_work *work)
{
int ret;
ret = del_timer_sync(&work->timer);
if (ret)
clear_bit(WORK_STRUCT_PENDING_BIT, ((unsigned long *)(&(&work->work)->data)));
return ret;
}
static inline __attribute__((always_inline)) int __cancel_delayed_work(struct delayed_work *work)
{
int ret;
ret = del_timer(&work->timer);
if (ret)
clear_bit(WORK_STRUCT_PENDING_BIT, ((unsigned long *)(&(&work->work)->data)));
return ret;
}
extern int cancel_delayed_work_sync(struct delayed_work *work);
static inline __attribute__((always_inline))
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
struct delayed_work *work)
{
cancel_delayed_work_sync(work);
}
static inline __attribute__((always_inline))
void cancel_rearming_delayed_work(struct delayed_work *work)
{
cancel_delayed_work_sync(work);
}
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
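/*
 * Illustrative sketch, not from the original unit: deferring work to process
 * context with the workqueue API above. epon_example_work_fn and the helpers
 * around it are hypothetical; the work item is assumed to have been
 * initialised elsewhere with the usual INIT_WORK() macro, whose expansion is
 * not reproduced here.
 */
static void epon_example_work_fn(struct work_struct *work)
{
        /* runs in process context on the shared kernel workqueue */
}

static int epon_example_kick(struct work_struct *work)
{
        return schedule_work(work);     /* non-zero if queued, 0 if already pending */
}

static void epon_example_stop(struct work_struct *work)
{
        cancel_work_sync(work);         /* also waits for a handler already running */
}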
# 27 "include/linux/kmod.h" 2
extern char modprobe_path[];
extern int __request_module(int wait, const char *name, ...)
__attribute__((format(printf, 2, 3)));
# 47 "include/linux/kmod.h"
struct key;
struct file;
enum umh_wait {
UMH_NO_WAIT = -1,
UMH_WAIT_EXEC = 0,
UMH_WAIT_PROC = 1,
};
struct subprocess_info {
struct work_struct work;
struct completion *complete;
char *path;
char **argv;
char **envp;
enum umh_wait wait;
int retval;
int (*init)(struct subprocess_info *info);
void (*cleanup)(struct subprocess_info *info);
void *data;
};
struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
char **envp, gfp_t gfp_mask);
void call_usermodehelper_setfns(struct subprocess_info *info,
int (*init)(struct subprocess_info *info),
void (*cleanup)(struct subprocess_info *info),
void *data);
int call_usermodehelper_exec(struct subprocess_info *info, enum umh_wait wait);
void call_usermodehelper_freeinfo(struct subprocess_info *info);
static inline __attribute__((always_inline)) int
call_usermodehelper_fns(char *path, char **argv, char **envp,
enum umh_wait wait,
int (*init)(struct subprocess_info *info),
void (*cleanup)(struct subprocess_info *), void *data)
{
struct subprocess_info *info;
gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? ((( gfp_t)0x20u)) : ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u));
info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
if (info == ((void *)0))
return -12;
call_usermodehelper_setfns(info, init, cleanup, data);
return call_usermodehelper_exec(info, wait);
}
static inline __attribute__((always_inline)) int
call_usermodehelper(char *path, char **argv, char **envp, enum umh_wait wait)
{
return call_usermodehelper_fns(path, argv, envp, wait,
((void *)0), ((void *)0), ((void *)0));
}
extern void usermodehelper_init(void);
extern int usermodehelper_disable(void);
extern void usermodehelper_enable(void);
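/*
 * Illustrative sketch: launching a userspace helper through the kmod
 * interface above. The path, arguments and environment are hypothetical;
 * UMH_WAIT_PROC blocks until the helper exits and returns its status.
 */
static int epon_example_run_helper(void)
{
        char *argv[] = { "/bin/true", ((void *)0) };
        char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin",
                         ((void *)0) };

        return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}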
# 14 "include/linux/module.h" 2
# 1 "include/linux/elf.h" 1
# 1 "include/linux/elf-em.h" 1
# 6 "include/linux/elf.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/elf.h" 1
# 170 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/elf.h"
typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[45];
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[33];
# 242 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/elf.h"
struct mips_abi;
extern struct mips_abi mips_abi;
extern struct mips_abi mips_abi_32;
extern struct mips_abi mips_abi_n32;
# 313 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/elf.h"
struct pt_regs;
struct task_struct;
extern void elf_dump_regs(elf_greg_t *, struct pt_regs *regs);
extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
# 345 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/elf.h"
extern const char *__elf_platform;
# 372 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/elf.h"
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
# 8 "include/linux/elf.h" 2
struct file;
# 20 "include/linux/elf.h"
typedef __u32 Elf32_Addr;
typedef __u16 Elf32_Half;
typedef __u32 Elf32_Off;
typedef __s32 Elf32_Sword;
typedef __u32 Elf32_Word;
typedef __u64 Elf64_Addr;
typedef __u16 Elf64_Half;
typedef __s16 Elf64_SHalf;
typedef __u64 Elf64_Off;
typedef __s32 Elf64_Sword;
typedef __u32 Elf64_Word;
typedef __u64 Elf64_Xword;
typedef __s64 Elf64_Sxword;
# 149 "include/linux/elf.h"
typedef struct dynamic{
Elf32_Sword d_tag;
union{
Elf32_Sword d_val;
Elf32_Addr d_ptr;
} d_un;
} Elf32_Dyn;
typedef struct {
Elf64_Sxword d_tag;
union {
Elf64_Xword d_val;
Elf64_Addr d_ptr;
} d_un;
} Elf64_Dyn;
# 172 "include/linux/elf.h"
typedef struct elf32_rel {
Elf32_Addr r_offset;
Elf32_Word r_info;
} Elf32_Rel;
typedef struct elf64_rel {
Elf64_Addr r_offset;
Elf64_Xword r_info;
} Elf64_Rel;
typedef struct elf32_rela{
Elf32_Addr r_offset;
Elf32_Word r_info;
Elf32_Sword r_addend;
} Elf32_Rela;
typedef struct elf64_rela {
Elf64_Addr r_offset;
Elf64_Xword r_info;
Elf64_Sxword r_addend;
} Elf64_Rela;
typedef struct elf32_sym{
Elf32_Word st_name;
Elf32_Addr st_value;
Elf32_Word st_size;
unsigned char st_info;
unsigned char st_other;
Elf32_Half st_shndx;
} Elf32_Sym;
typedef struct elf64_sym {
Elf64_Word st_name;
unsigned char st_info;
unsigned char st_other;
Elf64_Half st_shndx;
Elf64_Addr st_value;
Elf64_Xword st_size;
} Elf64_Sym;
typedef struct elf32_hdr{
unsigned char e_ident[16];
Elf32_Half e_type;
Elf32_Half e_machine;
Elf32_Word e_version;
Elf32_Addr e_entry;
Elf32_Off e_phoff;
Elf32_Off e_shoff;
Elf32_Word e_flags;
Elf32_Half e_ehsize;
Elf32_Half e_phentsize;
Elf32_Half e_phnum;
Elf32_Half e_shentsize;
Elf32_Half e_shnum;
Elf32_Half e_shstrndx;
} Elf32_Ehdr;
typedef struct elf64_hdr {
unsigned char e_ident[16];
Elf64_Half e_type;
Elf64_Half e_machine;
Elf64_Word e_version;
Elf64_Addr e_entry;
Elf64_Off e_phoff;
Elf64_Off e_shoff;
Elf64_Word e_flags;
Elf64_Half e_ehsize;
Elf64_Half e_phentsize;
Elf64_Half e_phnum;
Elf64_Half e_shentsize;
Elf64_Half e_shnum;
Elf64_Half e_shstrndx;
} Elf64_Ehdr;
typedef struct elf32_phdr{
Elf32_Word p_type;
Elf32_Off p_offset;
Elf32_Addr p_vaddr;
Elf32_Addr p_paddr;
Elf32_Word p_filesz;
Elf32_Word p_memsz;
Elf32_Word p_flags;
Elf32_Word p_align;
} Elf32_Phdr;
typedef struct elf64_phdr {
Elf64_Word p_type;
Elf64_Word p_flags;
Elf64_Off p_offset;
Elf64_Addr p_vaddr;
Elf64_Addr p_paddr;
Elf64_Xword p_filesz;
Elf64_Xword p_memsz;
Elf64_Xword p_align;
} Elf64_Phdr;
# 311 "include/linux/elf.h"
typedef struct elf32_shdr {
Elf32_Word sh_name;
Elf32_Word sh_type;
Elf32_Word sh_flags;
Elf32_Addr sh_addr;
Elf32_Off sh_offset;
Elf32_Word sh_size;
Elf32_Word sh_link;
Elf32_Word sh_info;
Elf32_Word sh_addralign;
Elf32_Word sh_entsize;
} Elf32_Shdr;
typedef struct elf64_shdr {
Elf64_Word sh_name;
Elf64_Word sh_type;
Elf64_Xword sh_flags;
Elf64_Addr sh_addr;
Elf64_Off sh_offset;
Elf64_Xword sh_size;
Elf64_Word sh_link;
Elf64_Word sh_info;
Elf64_Xword sh_addralign;
Elf64_Xword sh_entsize;
} Elf64_Shdr;
# 401 "include/linux/elf.h"
typedef struct elf32_note {
Elf32_Word n_namesz;
Elf32_Word n_descsz;
Elf32_Word n_type;
} Elf32_Nhdr;
typedef struct elf64_note {
Elf64_Word n_namesz;
Elf64_Word n_descsz;
Elf64_Word n_type;
} Elf64_Nhdr;
extern Elf32_Dyn _DYNAMIC [];
# 439 "include/linux/elf.h"
static inline __attribute__((always_inline)) int elf_coredump_extra_notes_size(void) { return 0; }
static inline __attribute__((always_inline)) int elf_coredump_extra_notes_write(struct file *file,
loff_t *foffset) { return 0; }
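/*
 * Illustrative sketch: a minimal sanity check on an in-memory ELF32 image
 * using the structures above. The 0x7f 'E' 'L' 'F' magic bytes come from the
 * ELF specification; epon_example_is_elf32 is a hypothetical helper.
 */
static int epon_example_is_elf32(const Elf32_Ehdr *hdr)
{
        return hdr->e_ident[0] == 0x7f &&
               hdr->e_ident[1] == 'E' &&
               hdr->e_ident[2] == 'L' &&
               hdr->e_ident[3] == 'F';
}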
# 15 "include/linux/module.h" 2
# 1 "include/linux/kobject.h" 1
# 21 "include/linux/kobject.h"
# 1 "include/linux/sysfs.h" 1
# 19 "include/linux/sysfs.h"
# 1 "include/linux/kobject_ns.h" 1
# 20 "include/linux/kobject_ns.h"
struct sock;
struct kobject;
enum kobj_ns_type {
KOBJ_NS_TYPE_NONE = 0,
KOBJ_NS_TYPE_NET,
KOBJ_NS_TYPES
};
struct kobj_ns_type_operations {
enum kobj_ns_type type;
const void *(*current_ns)(void);
const void *(*netlink_ns)(struct sock *sk);
const void *(*initial_ns)(void);
};
int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
int kobj_ns_type_registered(enum kobj_ns_type type);
const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
const void *kobj_ns_current(enum kobj_ns_type type);
const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
const void *kobj_ns_initial(enum kobj_ns_type type);
void kobj_ns_exit(enum kobj_ns_type type, const void *ns);
# 20 "include/linux/sysfs.h" 2
struct kobject;
struct module;
enum kobj_ns_type;
struct attribute {
const char *name;
mode_t mode;
};
# 56 "include/linux/sysfs.h"
struct attribute_group {
const char *name;
mode_t (*is_visible)(struct kobject *,
struct attribute *, int);
struct attribute **attrs;
};
# 85 "include/linux/sysfs.h"
struct file;
struct vm_area_struct;
struct bin_attribute {
struct attribute attr;
size_t size;
void *private;
ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *,
char *, loff_t, size_t);
ssize_t (*write)(struct file *,struct kobject *, struct bin_attribute *,
char *, loff_t, size_t);
int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
struct vm_area_struct *vma);
};
# 112 "include/linux/sysfs.h"
struct sysfs_ops {
ssize_t (*show)(struct kobject *, struct attribute *,char *);
ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
};
struct sysfs_dirent;
# 184 "include/linux/sysfs.h"
static inline __attribute__((always_inline)) int sysfs_schedule_callback(struct kobject *kobj,
void (*func)(void *), void *data, struct module *owner)
{
return -89;
}
static inline __attribute__((always_inline)) int sysfs_create_dir(struct kobject *kobj)
{
return 0;
}
static inline __attribute__((always_inline)) void sysfs_remove_dir(struct kobject *kobj)
{
}
static inline __attribute__((always_inline)) int sysfs_rename_dir(struct kobject *kobj, const char *new_name)
{
return 0;
}
static inline __attribute__((always_inline)) int sysfs_move_dir(struct kobject *kobj,
struct kobject *new_parent_kobj)
{
return 0;
}
static inline __attribute__((always_inline)) int sysfs_create_file(struct kobject *kobj,
const struct attribute *attr)
{
return 0;
}
static inline __attribute__((always_inline)) int sysfs_create_files(struct kobject *kobj,
const struct attribute **attr)
{
return 0;
}
static inline __attribute__((always_inline)) int sysfs_chmod_file(struct kobject *kobj,
const struct attribute *attr, mode_t mode)
{
return 0;
}
static inline __attribute__((always_inline)) void sysfs_remove_file(struct kobject *kobj,
const struct attribute *attr)
{
}
static inline __attribute__((always_inline)) void sysfs_remove_files(struct kobject *kobj,
const struct attribute **attr)
{
}
static inline __attribute__((always_inline)) int sysfs_create_bin_file(struct kobject *kobj,
const struct bin_attribute *attr)
{
return 0;
}
static inline __attribute__((always_inline)) void sysfs_remove_bin_file(struct kobject *kobj,
const struct bin_attribute *attr)
{
}
static inline __attribute__((always_inline)) int sysfs_create_link(struct kobject *kobj,
struct kobject *target, const char *name)
{
return 0;
}
static inline __attribute__((always_inline)) int sysfs_create_link_nowarn(struct kobject *kobj,
struct kobject *target,
const char *name)
{
return 0;
}
static inline __attribute__((always_inline)) void sysfs_remove_link(struct kobject *kobj, const char *name)
{
}
static inline __attribute__((always_inline)) int sysfs_rename_link(struct kobject *k, struct kobject *t,
const char *old_name, const char *new_name)
{
return 0;
}
static inline __attribute__((always_inline)) void sysfs_delete_link(struct kobject *k, struct kobject *t,
const char *name)
{
}
static inline __attribute__((always_inline)) int sysfs_create_group(struct kobject *kobj,
const struct attribute_group *grp)
{
return 0;
}
static inline __attribute__((always_inline)) int sysfs_update_group(struct kobject *kobj,
const struct attribute_group *grp)
{
return 0;
}
static inline __attribute__((always_inline)) void sysfs_remove_group(struct kobject *kobj,
const struct attribute_group *grp)
{
}
static inline __attribute__((always_inline)) int sysfs_add_file_to_group(struct kobject *kobj,
const struct attribute *attr, const char *group)
{
return 0;
}
static inline __attribute__((always_inline)) void sysfs_remove_file_from_group(struct kobject *kobj,
const struct attribute *attr, const char *group)
{
}
static inline __attribute__((always_inline)) void sysfs_notify(struct kobject *kobj, const char *dir,
const char *attr)
{
}
static inline __attribute__((always_inline)) void sysfs_notify_dirent(struct sysfs_dirent *sd)
{
}
static inline __attribute__((always_inline))
struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
const void *ns,
const unsigned char *name)
{
return ((void *)0);
}
static inline __attribute__((always_inline)) struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd)
{
return ((void *)0);
}
static inline __attribute__((always_inline)) void sysfs_put(struct sysfs_dirent *sd)
{
}
static inline __attribute__((always_inline)) void sysfs_exit_ns(int type, const void *tag)
{
}
static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) sysfs_init(void)
{
return 0;
}
static inline __attribute__((always_inline)) void sysfs_printk_last_file(void)
{
}
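/*
 * Illustrative sketch: exposing a plain attribute with the sysfs calls above.
 * In this configuration the declarations are the no-op stubs used when sysfs
 * is compiled out, so the call succeeds without side effects; the attribute
 * name and helper are hypothetical.
 */
static struct attribute epon_example_attr = {
        .name = "epon_example",
        .mode = 0444,           /* world-readable, read-only */
};

static int epon_example_publish(struct kobject *kobj)
{
        return sysfs_create_file(kobj, &epon_example_attr);
}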
# 22 "include/linux/kobject.h" 2
# 1 "include/linux/kref.h" 1
# 20 "include/linux/kref.h"
struct kref {
atomic_t refcount;
};
void kref_init(struct kref *kref);
void kref_get(struct kref *kref);
int kref_put(struct kref *kref, void (*release) (struct kref *kref));
# 25 "include/linux/kobject.h" 2
# 35 "include/linux/kobject.h"
extern char uevent_helper[];
extern u64 uevent_seqnum;
# 50 "include/linux/kobject.h"
enum kobject_action {
KOBJ_ADD,
KOBJ_REMOVE,
KOBJ_CHANGE,
KOBJ_MOVE,
KOBJ_ONLINE,
KOBJ_OFFLINE,
KOBJ_MAX
};
struct kobject {
const char *name;
struct list_head entry;
struct kobject *parent;
struct kset *kset;
struct kobj_type *ktype;
struct sysfs_dirent *sd;
struct kref kref;
unsigned int state_initialized:1;
unsigned int state_in_sysfs:1;
unsigned int state_add_uevent_sent:1;
unsigned int state_remove_uevent_sent:1;
unsigned int uevent_suppress:1;
};
extern int kobject_set_name(struct kobject *kobj, const char *name, ...)
__attribute__((format(printf, 2, 3)));
extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
va_list vargs);
static inline __attribute__((always_inline)) const char *kobject_name(const struct kobject *kobj)
{
return kobj->name;
}
extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype);
extern int __attribute__((warn_unused_result)) kobject_add(struct kobject *kobj,
struct kobject *parent,
const char *fmt, ...);
extern int __attribute__((warn_unused_result)) kobject_init_and_add(struct kobject *kobj,
struct kobj_type *ktype,
struct kobject *parent,
const char *fmt, ...);
extern void kobject_del(struct kobject *kobj);
extern struct kobject * __attribute__((warn_unused_result)) kobject_create(void);
extern struct kobject * __attribute__((warn_unused_result)) kobject_create_and_add(const char *name,
struct kobject *parent);
extern int __attribute__((warn_unused_result)) kobject_rename(struct kobject *, const char *new_name);
extern int __attribute__((warn_unused_result)) kobject_move(struct kobject *, struct kobject *);
extern struct kobject *kobject_get(struct kobject *kobj);
extern void kobject_put(struct kobject *kobj);
extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
struct kobj_type {
void (*release)(struct kobject *kobj);
const struct sysfs_ops *sysfs_ops;
struct attribute **default_attrs;
const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
const void *(*namespace)(struct kobject *kobj);
};
struct kobj_uevent_env {
char *envp[32];
int envp_idx;
char buf[2048];
int buflen;
};
struct kset_uevent_ops {
int (* const filter)(struct kset *kset, struct kobject *kobj);
const char *(* const name)(struct kset *kset, struct kobject *kobj);
int (* const uevent)(struct kset *kset, struct kobject *kobj,
struct kobj_uevent_env *env);
};
struct kobj_attribute {
struct attribute attr;
ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
char *buf);
ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count);
};
extern const struct sysfs_ops kobj_sysfs_ops;
struct sock;
# 159 "include/linux/kobject.h"
struct kset {
struct list_head list;
spinlock_t list_lock;
struct kobject kobj;
const struct kset_uevent_ops *uevent_ops;
};
extern void kset_init(struct kset *kset);
extern int __attribute__((warn_unused_result)) kset_register(struct kset *kset);
extern void kset_unregister(struct kset *kset);
extern struct kset * __attribute__((warn_unused_result)) kset_create_and_add(const char *name,
const struct kset_uevent_ops *u,
struct kobject *parent_kobj);
static inline __attribute__((always_inline)) struct kset *to_kset(struct kobject *kobj)
{
return kobj ? ({ const typeof( ((struct kset *)0)->kobj ) *__mptr = (kobj); (struct kset *)( (char *)__mptr - __builtin_offsetof(struct kset,kobj) );}) : ((void *)0);
}
static inline __attribute__((always_inline)) struct kset *kset_get(struct kset *k)
{
return k ? to_kset(kobject_get(&k->kobj)) : ((void *)0);
}
static inline __attribute__((always_inline)) void kset_put(struct kset *k)
{
kobject_put(&k->kobj);
}
static inline __attribute__((always_inline)) struct kobj_type *get_ktype(struct kobject *kobj)
{
return kobj->ktype;
}
extern struct kobject *kset_find_obj(struct kset *, const char *);
extern struct kobject *kernel_kobj;
extern struct kobject *mm_kobj;
extern struct kobject *hypervisor_kobj;
extern struct kobject *power_kobj;
extern struct kobject *firmware_kobj;
# 217 "include/linux/kobject.h"
static inline __attribute__((always_inline)) int kobject_uevent(struct kobject *kobj,
enum kobject_action action)
{ return 0; }
static inline __attribute__((always_inline)) int kobject_uevent_env(struct kobject *kobj,
enum kobject_action action,
char *envp[])
{ return 0; }
static inline __attribute__((always_inline)) int add_uevent_var(struct kobj_uevent_env *env,
const char *format, ...)
{ return 0; }
static inline __attribute__((always_inline)) int kobject_action_type(const char *buf, size_t count,
enum kobject_action *type)
{ return -22; }
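/*
 * Illustrative sketch: creating a kobject under /sys/kernel and dropping the
 * reference again, using the declarations above. "epon_example" is a
 * hypothetical name; kernel_kobj is the /sys/kernel parent declared above.
 */
static struct kobject *epon_example_kobj;

static int epon_example_kobj_create(void)
{
        epon_example_kobj = kobject_create_and_add("epon_example", kernel_kobj);
        return epon_example_kobj ? 0 : -12;     /* -ENOMEM, numeric as elsewhere here */
}

static void epon_example_kobj_destroy(void)
{
        kobject_put(epon_example_kobj);
}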
# 17 "include/linux/module.h" 2
# 1 "include/linux/moduleparam.h" 1
# 32 "include/linux/moduleparam.h"
struct kernel_param;
struct kernel_param_ops {
int (*set)(const char *val, const struct kernel_param *kp);
int (*get)(char *buffer, const struct kernel_param *kp);
void (*free)(void *arg);
};
struct kernel_param {
const char *name;
const struct kernel_param_ops *ops;
u16 perm;
u16 flags;
union {
void *arg;
const struct kparam_string *str;
const struct kparam_array *arr;
};
};
struct kparam_string {
unsigned int maxlen;
char *string;
};
struct kparam_array
{
unsigned int max;
unsigned int *num;
const struct kernel_param_ops *ops;
unsigned int elemsize;
void *elem;
};
# 164 "include/linux/moduleparam.h"
static inline __attribute__((always_inline)) int
__check_old_set_param(int (*oldset)(const char *, struct kernel_param *))
{
return 0;
}
# 218 "include/linux/moduleparam.h"
static inline __attribute__((always_inline)) void __kernel_param_lock(void)
{
}
static inline __attribute__((always_inline)) void __kernel_param_unlock(void)
{
}
# 264 "include/linux/moduleparam.h"
extern int parse_args(const char *name,
char *args,
const struct kernel_param *params,
unsigned num,
int (*unknown)(char *param, char *val));
static inline __attribute__((always_inline)) void destroy_params(const struct kernel_param *params,
unsigned num)
{
}
# 286 "include/linux/moduleparam.h"
extern struct kernel_param_ops param_ops_byte;
extern int param_set_byte(const char *val, const struct kernel_param *kp);
extern int param_get_byte(char *buffer, const struct kernel_param *kp);
extern struct kernel_param_ops param_ops_short;
extern int param_set_short(const char *val, const struct kernel_param *kp);
extern int param_get_short(char *buffer, const struct kernel_param *kp);
extern struct kernel_param_ops param_ops_ushort;
extern int param_set_ushort(const char *val, const struct kernel_param *kp);
extern int param_get_ushort(char *buffer, const struct kernel_param *kp);
extern struct kernel_param_ops param_ops_int;
extern int param_set_int(const char *val, const struct kernel_param *kp);
extern int param_get_int(char *buffer, const struct kernel_param *kp);
extern struct kernel_param_ops param_ops_uint;
extern int param_set_uint(const char *val, const struct kernel_param *kp);
extern int param_get_uint(char *buffer, const struct kernel_param *kp);
extern struct kernel_param_ops param_ops_long;
extern int param_set_long(const char *val, const struct kernel_param *kp);
extern int param_get_long(char *buffer, const struct kernel_param *kp);
extern struct kernel_param_ops param_ops_ulong;
extern int param_set_ulong(const char *val, const struct kernel_param *kp);
extern int param_get_ulong(char *buffer, const struct kernel_param *kp);
extern struct kernel_param_ops param_ops_charp;
extern int param_set_charp(const char *val, const struct kernel_param *kp);
extern int param_get_charp(char *buffer, const struct kernel_param *kp);
extern struct kernel_param_ops param_ops_bool;
extern int param_set_bool(const char *val, const struct kernel_param *kp);
extern int param_get_bool(char *buffer, const struct kernel_param *kp);
# 338 "include/linux/moduleparam.h"
extern struct kernel_param_ops param_ops_invbool;
extern int param_set_invbool(const char *val, const struct kernel_param *kp);
extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
# 380 "include/linux/moduleparam.h"
extern struct kernel_param_ops param_array_ops;
extern struct kernel_param_ops param_ops_string;
extern int param_set_copystring(const char *val, const struct kernel_param *);
extern int param_get_string(char *buffer, const struct kernel_param *kp);
struct module;
# 397 "include/linux/moduleparam.h"
static inline __attribute__((always_inline)) int module_param_sysfs_setup(struct module *mod,
const struct kernel_param *kparam,
unsigned int num_params)
{
return 0;
}
static inline __attribute__((always_inline)) void module_param_sysfs_remove(struct module *mod)
{ }
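/*
 * Illustrative sketch: a kernel_param_ops built from the integer helpers
 * above, functionally equivalent to the param_ops_int that an unexpanded
 * module_param(name, int, perm) would select. Both names are hypothetical
 * and nothing here registers the parameter with the module loader.
 */
static int epon_example_debug;  /* storage a struct kernel_param would reference via ->arg */

static struct kernel_param_ops epon_example_debug_ops = {
        .set = param_set_int,   /* parses the string written to the parameter */
        .get = param_get_int,   /* formats the current value for readback */
};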
# 18 "include/linux/module.h" 2
# 1 "include/linux/tracepoint.h" 1
# 19 "include/linux/tracepoint.h"
# 1 "include/linux/rcupdate.h" 1
# 42 "include/linux/rcupdate.h"
# 1 "include/linux/completion.h" 1
# 25 "include/linux/completion.h"
struct completion {
unsigned int done;
wait_queue_head_t wait;
};
# 73 "include/linux/completion.h"
static inline __attribute__((always_inline)) void init_completion(struct completion *x)
{
x->done = 0;
do { static struct lock_class_key __key; __init_waitqueue_head((&x->wait), &__key); } while (0);
}
extern void wait_for_completion(struct completion *);
extern int wait_for_completion_interruptible(struct completion *x);
extern int wait_for_completion_killable(struct completion *x);
extern unsigned long wait_for_completion_timeout(struct completion *x,
unsigned long timeout);
extern unsigned long wait_for_completion_interruptible_timeout(
struct completion *x, unsigned long timeout);
extern unsigned long wait_for_completion_killable_timeout(
struct completion *x, unsigned long timeout);
extern int try_wait_for_completion(struct completion *x);
extern int completion_done(struct completion *x);
extern void complete(struct completion *);
extern void complete_all(struct completion *);
# 43 "include/linux/rcupdate.h" 2
# 54 "include/linux/rcupdate.h"
struct rcu_head {
struct rcu_head *next;
void (*func)(struct rcu_head *head);
};
extern void rcu_barrier(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);
extern void synchronize_sched_expedited(void);
extern int sched_expedited_torture_stats(char *page);
extern void rcu_init(void);
# 1 "include/linux/rcutree.h" 1
# 33 "include/linux/rcutree.h"
struct notifier_block;
extern void rcu_sched_qs(int cpu);
extern void rcu_bh_qs(int cpu);
extern void rcu_note_context_switch(int cpu);
extern int rcu_needs_cpu(int cpu);
# 55 "include/linux/rcutree.h"
static inline __attribute__((always_inline)) void __rcu_read_lock(void)
{
do { } while (0);
}
static inline __attribute__((always_inline)) void __rcu_read_unlock(void)
{
do { } while (0);
}
static inline __attribute__((always_inline)) void exit_rcu(void)
{
}
static inline __attribute__((always_inline)) int rcu_preempt_depth(void)
{
return 0;
}
static inline __attribute__((always_inline)) void __rcu_read_lock_bh(void)
{
local_bh_disable();
}
static inline __attribute__((always_inline)) void __rcu_read_unlock_bh(void)
{
local_bh_enable();
}
extern void call_rcu_sched(struct rcu_head *head,
void (*func)(struct rcu_head *rcu));
extern void synchronize_rcu_bh(void);
extern void synchronize_sched(void);
extern void synchronize_rcu_expedited(void);
static inline __attribute__((always_inline)) void synchronize_rcu_bh_expedited(void)
{
synchronize_sched_expedited();
}
extern void rcu_check_callbacks(int cpu, int user);
extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);
extern long rcu_batches_completed_sched(void);
extern void rcu_force_quiescent_state(void);
extern void rcu_bh_force_quiescent_state(void);
extern void rcu_sched_force_quiescent_state(void);
static inline __attribute__((always_inline)) void rcu_enter_nohz(void)
{
}
static inline __attribute__((always_inline)) void rcu_exit_nohz(void)
{
}
static inline __attribute__((always_inline)) int rcu_blocking_is_gp(void)
{
return cpumask_weight(cpu_online_mask) == 1;
}
extern void rcu_scheduler_starting(void);
extern int rcu_scheduler_active ;
# 71 "include/linux/rcupdate.h" 2
# 93 "include/linux/rcupdate.h"
static inline __attribute__((always_inline)) void init_rcu_head_on_stack(struct rcu_head *head)
{
}
static inline __attribute__((always_inline)) void destroy_rcu_head_on_stack(struct rcu_head *head)
{
}
# 186 "include/linux/rcupdate.h"
static inline __attribute__((always_inline)) int rcu_read_lock_held(void)
{
return 1;
}
static inline __attribute__((always_inline)) int rcu_read_lock_bh_held(void)
{
return 1;
}
static inline __attribute__((always_inline)) int rcu_read_lock_sched_held(void)
{
return 1;
}
# 322 "include/linux/rcupdate.h"
static inline __attribute__((always_inline)) void rcu_read_lock(void)
{
__rcu_read_lock();
(void)0;
do { } while (0);
}
# 344 "include/linux/rcupdate.h"
static inline __attribute__((always_inline)) void rcu_read_unlock(void)
{
do { } while (0);
(void)0;
__rcu_read_unlock();
}
# 362 "include/linux/rcupdate.h"
static inline __attribute__((always_inline)) void rcu_read_lock_bh(void)
{
__rcu_read_lock_bh();
(void)0;
do { } while (0);
}
static inline __attribute__((always_inline)) void rcu_read_unlock_bh(void)
{
do { } while (0);
(void)0;
__rcu_read_unlock_bh();
}
# 390 "include/linux/rcupdate.h"
static inline __attribute__((always_inline)) void rcu_read_lock_sched(void)
{
do { } while (0);
(void)0;
do { } while (0);
}
static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void rcu_read_lock_sched_notrace(void)
{
do { } while (0);
(void)0;
}
static inline __attribute__((always_inline)) void rcu_read_unlock_sched(void)
{
do { } while (0);
(void)0;
do { } while (0);
}
static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void rcu_read_unlock_sched_notrace(void)
{
(void)0;
do { } while (0);
}
# 490 "include/linux/rcupdate.h"
struct rcu_synchronize {
struct rcu_head head;
struct completion completion;
};
extern void wakeme_after_rcu(struct rcu_head *head);
# 508 "include/linux/rcupdate.h"
extern void call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *head));
# 529 "include/linux/rcupdate.h"
extern void call_rcu_bh(struct rcu_head *head,
void (*func)(struct rcu_head *head));
# 560 "include/linux/rcupdate.h"
static inline __attribute__((always_inline)) void debug_rcu_head_queue(struct rcu_head *head)
{
}
static inline __attribute__((always_inline)) void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
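/*
 * Illustrative sketch: an RCU read side plus deferred reclamation with the
 * declarations above. struct epon_example_cfg and its helpers are
 * hypothetical; rcu_dereference()/rcu_assign_pointer() and the actual free
 * live in the unexpanded headers and are only mentioned in comments.
 */
struct epon_example_cfg {
        int value;
        struct rcu_head rcu;
};

static int epon_example_cfg_read(struct epon_example_cfg *cfg)
{
        int v;

        rcu_read_lock();        /* readers take no lock, just a critical section */
        v = cfg->value;         /* a real reader would rcu_dereference() the pointer first */
        rcu_read_unlock();
        return v;
}

static void epon_example_cfg_free(struct rcu_head *head)
{
        /* container_of(head, struct epon_example_cfg, rcu) would be freed here */
}

static void epon_example_cfg_retire(struct epon_example_cfg *cfg)
{
        call_rcu(&cfg->rcu, epon_example_cfg_free);     /* reclaim after a grace period */
}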
# 20 "include/linux/tracepoint.h" 2
struct module;
struct tracepoint;
struct tracepoint_func {
void *func;
void *data;
};
struct tracepoint {
const char *name;
int state;
void (*regfunc)(void);
void (*unregfunc)(void);
struct tracepoint_func *funcs;
} __attribute__((aligned(32)));
# 46 "include/linux/tracepoint.h"
extern int tracepoint_probe_register(const char *name, void *probe, void *data);
extern int
tracepoint_probe_unregister(const char *name, void *probe, void *data);
extern int tracepoint_probe_register_noupdate(const char *name, void *probe,
void *data);
extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
void *data);
extern void tracepoint_probe_update_all(void);
struct tracepoint_iter {
struct module *module;
struct tracepoint *tracepoint;
};
extern void tracepoint_iter_start(struct tracepoint_iter *iter);
extern void tracepoint_iter_next(struct tracepoint_iter *iter);
extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
struct tracepoint *begin, struct tracepoint *end);
static inline __attribute__((always_inline)) void tracepoint_synchronize_unregister(void)
{
synchronize_sched();
}
static inline __attribute__((always_inline)) void tracepoint_update_probe_range(struct tracepoint *begin,
struct tracepoint *end)
{ }
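/*
 * Illustrative sketch: attaching a probe through the registration API above.
 * Both the tracepoint name "epon_example_event" and the probe are
 * hypothetical; a real probe must match the argument list declared by the
 * tracepoint it attaches to.
 */
static void epon_example_probe(void *data, void *arg)
{
        /* invoked each time the traced event fires */
}

static int epon_example_trace_attach(void)
{
        return tracepoint_probe_register("epon_example_event",
                                         (void *)epon_example_probe, ((void *)0));
}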
# 19 "include/linux/module.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/module.h" 1
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/uaccess.h" 1
# 207 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/uaccess.h"
struct __large_struct { unsigned long buf[100]; };
# 221 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/uaccess.h"
extern void __get_user_unknown(void);
# 387 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/uaccess.h"
extern void __put_user_unknown(void);
# 486 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/uaccess.h"
extern void __get_user_unaligned_unknown(void);
# 650 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/uaccess.h"
extern void __put_user_unaligned_unknown(void);
# 673 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/uaccess.h"
extern size_t __copy_user(void *__to, const void *__from, size_t __n);
# 721 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/uaccess.h"
extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
# 935 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/uaccess.h"
static inline __attribute__((always_inline)) __kernel_size_t
__clear_user(void *addr, __kernel_size_t size)
{
__kernel_size_t res;
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, $0\n\t"
"move\t$6, %2\n\t"
".set\tnoat\n\t" "la" "\t$1, " "__bzero" "\n\t" "jalr\t$1\n\t" ".set\tat\n\t"
"move\t%0, $6"
: "=r" (res)
: "r" (addr), "r" (size)
: "$4", "$5", "$6", "$8", "$9", "$31");
return res;
}
# 984 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/uaccess.h"
static inline __attribute__((always_inline)) long
__strncpy_from_user(char *__to, const char *__from, long __len)
{
long res;
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
"move\t$6, %3\n\t"
".set\tnoat\n\t" "la" "\t$1, " "__strncpy_from_user_nocheck_asm" "\n\t" "jalr\t$1\n\t" ".set\tat\n\t"
"move\t%0, $2"
: "=r" (res)
: "r" (__to), "r" (__from), "r" (__len)
: "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");
return res;
}
# 1021 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/uaccess.h"
static inline __attribute__((always_inline)) long
strncpy_from_user(char *__to, const char *__from, long __len)
{
long res;
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
"move\t$6, %3\n\t"
".set\tnoat\n\t" "la" "\t$1, " "__strncpy_from_user_asm" "\n\t" "jalr\t$1\n\t" ".set\tat\n\t"
"move\t%0, $2"
: "=r" (res)
: "r" (__to), "r" (__from), "r" (__len)
: "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");
return res;
}
static inline __attribute__((always_inline)) long __strlen_user(const char *s)
{
long res;
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
".set\tnoat\n\t" "la" "\t$1, " "__strlen_user_nocheck_asm" "\n\t" "jalr\t$1\n\t" ".set\tat\n\t"
"move\t%0, $2"
: "=r" (res)
: "r" (s)
: "$2", "$4", "$8", "$31");
return res;
}
# 1071 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/uaccess.h"
static inline __attribute__((always_inline)) long strlen_user(const char *s)
{
long res;
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
".set\tnoat\n\t" "la" "\t$1, " "__strlen_user_asm" "\n\t" "jalr\t$1\n\t" ".set\tat\n\t"
"move\t%0, $2"
: "=r" (res)
: "r" (s)
: "$2", "$4", "$8", "$31");
return res;
}
static inline __attribute__((always_inline)) long __strnlen_user(const char *s, long n)
{
long res;
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
".set\tnoat\n\t" "la" "\t$1, " "__strnlen_user_nocheck_asm" "\n\t" "jalr\t$1\n\t" ".set\tat\n\t"
"move\t%0, $2"
: "=r" (res)
: "r" (s), "r" (n)
: "$2", "$4", "$5", "$8", "$31");
return res;
}
# 1119 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/uaccess.h"
static inline __attribute__((always_inline)) long strnlen_user(const char *s, long n)
{
long res;
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
".set\tnoat\n\t" "la" "\t$1, " "__strnlen_user_asm" "\n\t" "jalr\t$1\n\t" ".set\tat\n\t"
"move\t%0, $2"
: "=r" (res)
: "r" (s), "r" (n)
: "$2", "$4", "$5", "$8", "$31");
return res;
}
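/*
 * Illustrative sketch: pulling a NUL-terminated string out of userspace with
 * the checked helper above. The function name is hypothetical; the caller
 * supplies a kernel buffer of at least maxlen bytes.
 */
static long epon_example_get_string(char *dst, const char *usrc, long maxlen)
{
        long copied = strncpy_from_user(dst, usrc, maxlen - 1);

        if (copied >= 0)
                dst[copied] = '\0';     /* strncpy_from_user may stop without terminating */
        return copied;                  /* negative on an unhandled fault */
}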
struct exception_table_entry
{
unsigned long insn;
unsigned long nextinsn;
};
extern int fixup_exception(struct pt_regs *regs);
# 6 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/module.h" 2
struct mod_arch_specific {
struct list_head dbe_list;
const struct exception_table_entry *dbe_start;
const struct exception_table_entry *dbe_end;
};
typedef uint8_t Elf64_Byte;
typedef struct {
Elf64_Addr r_offset;
Elf64_Word r_sym;
Elf64_Byte r_ssym;
Elf64_Byte r_type3;
Elf64_Byte r_type2;
Elf64_Byte r_type;
} Elf64_Mips_Rel;
typedef struct {
Elf64_Addr r_offset;
Elf64_Word r_sym;
Elf64_Byte r_ssym;
Elf64_Byte r_type3;
Elf64_Byte r_type2;
Elf64_Byte r_type;
Elf64_Sxword r_addend;
} Elf64_Mips_Rela;
# 67 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/module.h"
const struct exception_table_entry*search_module_dbetables(unsigned long addr);
# 22 "include/linux/module.h" 2
# 1 "include/trace/events/module.h" 1
# 1 "include/linux/tracepoint.h" 1
# 8 "include/trace/events/module.h" 2
struct module;
static inline __attribute__((always_inline)) void trace_module_load(struct module *mod) { } static inline __attribute__((always_inline)) int register_trace_module_load(void (*probe)(void *__data, struct module *mod), void *data) { return -89; } static inline __attribute__((always_inline)) int unregister_trace_module_load(void (*probe)(void *__data, struct module *mod), void *data) { return -89; } static inline __attribute__((always_inline)) void check_trace_callback_type_module_load(void (*cb)(void *__data, struct module *mod)) { }
# 35 "include/trace/events/module.h"
;
static inline __attribute__((always_inline)) void trace_module_free(struct module *mod) { } static inline __attribute__((always_inline)) int register_trace_module_free(void (*probe)(void *__data, struct module *mod), void *data) { return -89; } static inline __attribute__((always_inline)) int unregister_trace_module_free(void (*probe)(void *__data, struct module *mod), void *data) { return -89; } static inline __attribute__((always_inline)) void check_trace_callback_type_module_free(void (*cb)(void *__data, struct module *mod)) { }
# 52 "include/trace/events/module.h"
;
# 77 "include/trace/events/module.h"
;
static inline __attribute__((always_inline)) void trace_module_get(struct module *mod, unsigned long ip) { } static inline __attribute__((always_inline)) int register_trace_module_get(void (*probe)(void *__data, struct module *mod, unsigned long ip), void *data) { return -89; } static inline __attribute__((always_inline)) int unregister_trace_module_get(void (*probe)(void *__data, struct module *mod, unsigned long ip), void *data) { return -89; } static inline __attribute__((always_inline)) void check_trace_callback_type_module_get(void (*cb)(void *__data, struct module *mod, unsigned long ip)) { }
;
static inline __attribute__((always_inline)) void trace_module_put(struct module *mod, unsigned long ip) { } static inline __attribute__((always_inline)) int register_trace_module_put(void (*probe)(void *__data, struct module *mod, unsigned long ip), void *data) { return -89; } static inline __attribute__((always_inline)) int unregister_trace_module_put(void (*probe)(void *__data, struct module *mod, unsigned long ip), void *data) { return -89; } static inline __attribute__((always_inline)) void check_trace_callback_type_module_put(void (*cb)(void *__data, struct module *mod, unsigned long ip)) { }
;
static inline __attribute__((always_inline)) void trace_module_request(char *name, int wait, unsigned long ip) { } static inline __attribute__((always_inline)) int register_trace_module_request(void (*probe)(void *__data, char *name, int wait, unsigned long ip), void *data) { return -89; } static inline __attribute__((always_inline)) int unregister_trace_module_request(void (*probe)(void *__data, char *name, int wait, unsigned long ip), void *data) { return -89; } static inline __attribute__((always_inline)) void check_trace_callback_type_module_request(void (*cb)(void *__data, char *name, int wait, unsigned long ip)) { }
# 114 "include/trace/events/module.h"
;
# 1 "include/trace/define_trace.h" 1
# 122 "include/trace/events/module.h" 2
# 24 "include/linux/module.h" 2
# 37 "include/linux/module.h"
struct kernel_symbol
{
unsigned long value;
const char *name;
};
struct modversion_info
{
unsigned long crc;
char name[(64 - sizeof(unsigned long))];
};
struct module;
struct module_attribute {
struct attribute attr;
ssize_t (*show)(struct module_attribute *, struct module *, char *);
ssize_t (*store)(struct module_attribute *, struct module *,
const char *, size_t count);
void (*setup)(struct module *, const char *);
int (*test)(struct module *);
void (*free)(struct module *);
};
struct module_kobject
{
struct kobject kobj;
struct module *mod;
struct kobject *drivers_dir;
struct module_param_attrs *mp;
};
extern int init_module(void);
extern void cleanup_module(void);
struct exception_table_entry;
const struct exception_table_entry *
search_extable(const struct exception_table_entry *first,
const struct exception_table_entry *last,
unsigned long value);
void sort_extable(struct exception_table_entry *start,
struct exception_table_entry *finish);
void sort_main_extable(void);
void trim_init_extable(struct module *m);
extern struct module __this_module;
# 172 "include/linux/module.h"
const struct exception_table_entry *search_exception_tables(unsigned long add);
struct notifier_block;
extern int modules_disabled;
void *__symbol_get(const char *symbol);
void *__symbol_get_gpl(const char *symbol);
struct module_use {
struct list_head source_list;
struct list_head target_list;
struct module *source, *target;
};
# 237 "include/linux/module.h"
enum module_state
{
MODULE_STATE_LIVE,
MODULE_STATE_COMING,
MODULE_STATE_GOING,
};
struct module
{
enum module_state state;
struct list_head list;
char name[(64 - sizeof(unsigned long))];
struct module_kobject mkobj;
struct module_attribute *modinfo_attrs;
const char *version;
const char *srcversion;
struct kobject *holders_dir;
const struct kernel_symbol *syms;
const unsigned long *crcs;
unsigned int num_syms;
struct kernel_param *kp;
unsigned int num_kp;
unsigned int num_gpl_syms;
const struct kernel_symbol *gpl_syms;
const unsigned long *gpl_crcs;
# 288 "include/linux/module.h"
const struct kernel_symbol *gpl_future_syms;
const unsigned long *gpl_future_crcs;
unsigned int num_gpl_future_syms;
unsigned int num_exentries;
struct exception_table_entry *extable;
int (*init)(void);
void *module_init;
void *module_core;
unsigned int init_size, core_size;
unsigned int init_text_size, core_text_size;
struct mod_arch_specific arch;
unsigned int taints;
# 329 "include/linux/module.h"
Elf32_Sym *symtab, *core_symtab;
unsigned int num_symtab, core_num_syms;
char *strtab, *core_strtab;
struct module_sect_attrs *sect_attrs;
struct module_notes_attrs *notes_attrs;
void *percpu;
unsigned int percpu_size;
char *args;
# 369 "include/linux/module.h"
struct list_head source_list;
struct list_head target_list;
struct task_struct *waiter;
void (*exit)(void);
struct module_ref {
unsigned int incs;
unsigned int decs;
} *refptr;
ctor_fn_t *ctors;
unsigned int num_ctors;
};
extern struct mutex module_mutex;
static inline __attribute__((always_inline)) int module_is_live(struct module *mod)
{
return mod->state != MODULE_STATE_GOING;
}
struct module *__module_text_address(unsigned long addr);
struct module *__module_address(unsigned long addr);
int is_module_address(unsigned long addr);
int is_module_percpu_address(unsigned long addr);
int is_module_text_address(unsigned long addr);
static inline __attribute__((always_inline)) int within_module_core(unsigned long addr, struct module *mod)
{
return (unsigned long)mod->module_core <= addr &&
addr < (unsigned long)mod->module_core + mod->core_size;
}
static inline __attribute__((always_inline)) int within_module_init(unsigned long addr, struct module *mod)
{
return (unsigned long)mod->module_init <= addr &&
addr < (unsigned long)mod->module_init + mod->init_size;
}
struct module *find_module(const char *name);
struct symsearch {
const struct kernel_symbol *start, *stop;
const unsigned long *crcs;
enum {
NOT_GPL_ONLY,
GPL_ONLY,
WILL_BE_GPL_ONLY,
} licence;
int unused;
};
const struct kernel_symbol *find_symbol(const char *name,
struct module **owner,
const unsigned long **crc,
int gplok,
int warn);
int each_symbol(int (*fn)(const struct symsearch *arr, struct module *owner,
unsigned int symnum, void *data), void *data);
int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *name, char *module_name, int *exported);
unsigned long module_kallsyms_lookup_name(const char *name);
int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
struct module *, unsigned long),
void *data);
extern void __module_put_and_exit(struct module *mod, long code)
__attribute__((noreturn));
unsigned int module_refcount(struct module *mod);
void __symbol_put(const char *symbol);
void symbol_put_addr(void *addr);
static inline __attribute__((always_inline)) void __module_get(struct module *module)
{
if (module) {
do { } while (0);
do { do { const void *__vpp_verify = (typeof(&(((module->refptr->incs)))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((module->refptr->incs)))) { case 1: do { *({ do { const void *__vpp_verify = (typeof((&((((module->refptr->incs)))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((module->refptr->incs)))))) *)(&((((module->refptr->incs))))))); (typeof((typeof(*(&((((module->refptr->incs)))))) *)(&((((module->refptr->incs))))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((1)); } while (0);break; case 2: do { *({ do { const void *__vpp_verify = (typeof((&((((module->refptr->incs)))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((module->refptr->incs)))))) *)(&((((module->refptr->incs))))))); (typeof((typeof(*(&((((module->refptr->incs)))))) *)(&((((module->refptr->incs))))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((1)); } while (0);break; case 4: do { *({ do { const void *__vpp_verify = (typeof((&((((module->refptr->incs)))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((module->refptr->incs)))))) *)(&((((module->refptr->incs))))))); (typeof((typeof(*(&((((module->refptr->incs)))))) *)(&((((module->refptr->incs))))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((1)); } while (0);break; case 8: do { *({ do { const void *__vpp_verify = (typeof((&((((module->refptr->incs)))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((module->refptr->incs)))))) *)(&((((module->refptr->incs))))))); (typeof((typeof(*(&((((module->refptr->incs)))))) *)(&((((module->refptr->incs))))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
trace_module_get(module, ({ __label__ __here; __here: (unsigned long)&&__here; }));
do { } while (0);
}
}
static inline __attribute__((always_inline)) int try_module_get(struct module *module)
{
int ret = 1;
if (module) {
do { } while (0);
if (__builtin_expect(!!(module_is_live(module)), 1)) {
do { do { const void *__vpp_verify = (typeof(&(((module->refptr->incs)))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((module->refptr->incs)))) { case 1: do { *({ do { const void *__vpp_verify = (typeof((&((((module->refptr->incs)))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((module->refptr->incs)))))) *)(&((((module->refptr->incs))))))); (typeof((typeof(*(&((((module->refptr->incs)))))) *)(&((((module->refptr->incs))))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((1)); } while (0);break; case 2: do { *({ do { const void *__vpp_verify = (typeof((&((((module->refptr->incs)))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((module->refptr->incs)))))) *)(&((((module->refptr->incs))))))); (typeof((typeof(*(&((((module->refptr->incs)))))) *)(&((((module->refptr->incs))))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((1)); } while (0);break; case 4: do { *({ do { const void *__vpp_verify = (typeof((&((((module->refptr->incs)))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((module->refptr->incs)))))) *)(&((((module->refptr->incs))))))); (typeof((typeof(*(&((((module->refptr->incs)))))) *)(&((((module->refptr->incs))))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((1)); } while (0);break; case 8: do { *({ do { const void *__vpp_verify = (typeof((&((((module->refptr->incs)))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((module->refptr->incs)))))) *)(&((((module->refptr->incs))))))); (typeof((typeof(*(&((((module->refptr->incs)))))) *)(&((((module->refptr->incs))))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
trace_module_get(module, ({ __label__ __here; __here: (unsigned long)&&__here; }));
} else
ret = 0;
do { } while (0);
}
return ret;
}
extern void module_put(struct module *module);
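/*
 * Illustrative sketch: pinning another module while one of its resources is
 * in use, with the reference helpers above. The owner pointer is hypothetical
 * and would normally come from a registered ops structure; a null owner
 * (built-in code) is accepted by try_module_get().
 */
static int epon_example_pin(struct module *owner)
{
        if (!try_module_get(owner))
                return -19;     /* -ENODEV: the module is already on its way out */
        return 0;
}

static void epon_example_unpin(struct module *owner)
{
        module_put(owner);
}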
# 517 "include/linux/module.h"
int use_module(struct module *a, struct module *b);
# 529 "include/linux/module.h"
const char *module_address_lookup(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset,
char **modname,
char *namebuf);
int lookup_module_symbol_name(unsigned long addr, char *symname);
int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
const struct exception_table_entry *search_module_extables(unsigned long addr);
int register_module_notifier(struct notifier_block * nb);
int unregister_module_notifier(struct notifier_block * nb);
extern void print_modules(void);
extern void module_update_tracepoints(void);
extern int module_get_iter_tracepoints(struct tracepoint_iter *iter);
# 695 "include/linux/module.h"
static inline __attribute__((always_inline)) void module_bug_finalize(const Elf32_Ehdr *hdr,
const Elf32_Shdr *sechdrs,
struct module *mod)
{
}
static inline __attribute__((always_inline)) void module_bug_cleanup(struct module *mod) {}
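/*
 * Illustrative sketch: the minimal init/exit pair a module built against the
 * interfaces above provides. In unexpanded sources module_init()/module_exit()
 * and the __init/__exit section annotations mark these functions up; those
 * macros are not reproduced here and the names are hypothetical.
 */
static int epon_example_init(void)
{
        return 0;       /* a non-zero return aborts the module load */
}

static void epon_example_exit(void)
{
        /* undo everything epon_example_init() set up */
}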
# 32 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/linux/netdevice.h" 1
# 28 "include/linux/netdevice.h"
# 1 "include/linux/if.h" 1
# 23 "include/linux/if.h"
# 1 "include/linux/socket.h" 1
# 11 "include/linux/socket.h"
struct __kernel_sockaddr_storage {
unsigned short ss_family;
char __data[128 - sizeof(unsigned short)];
} __attribute__ ((aligned((__alignof__ (struct sockaddr *)))));
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/socket.h" 1
# 12 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/socket.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/sockios.h" 1
# 13 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/sockios.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/ioctl.h" 1
# 25 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/ioctl.h"
# 1 "include/asm-generic/ioctl.h" 1
# 73 "include/asm-generic/ioctl.h"
extern unsigned int __invalid_size_argument_for_IOC;
# 26 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/ioctl.h" 2
# 14 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/sockios.h" 2
# 13 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/socket.h" 2
# 104 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/socket.h"
enum sock_type {
SOCK_DGRAM = 1,
SOCK_STREAM = 2,
SOCK_RAW = 3,
SOCK_RDM = 4,
SOCK_SEQPACKET = 5,
SOCK_DCCP = 6,
SOCK_PACKET = 10,
};
# 22 "include/linux/socket.h" 2
# 1 "include/linux/sockios.h" 1
# 23 "include/linux/socket.h" 2
# 1 "include/linux/uio.h" 1
# 16 "include/linux/uio.h"
struct iovec
{
void *iov_base;
__kernel_size_t iov_len;
};
# 31 "include/linux/uio.h"
struct kvec {
void *iov_base;
size_t iov_len;
};
# 43 "include/linux/uio.h"
static inline __attribute__((always_inline)) size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
unsigned long seg;
size_t ret = 0;
for (seg = 0; seg < nr_segs; seg++)
ret += iov[seg].iov_len;
return ret;
}
unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
# 24 "include/linux/socket.h" 2
struct pid;
struct cred;
struct seq_file;
extern void socket_seq_show(struct seq_file *seq);
typedef unsigned short sa_family_t;
struct sockaddr {
sa_family_t sa_family;
char sa_data[14];
};
struct linger {
int l_onoff;
int l_linger;
};
# 64 "include/linux/socket.h"
struct msghdr {
void * msg_name;
int msg_namelen;
struct iovec * msg_iov;
__kernel_size_t msg_iovlen;
void * msg_control;
__kernel_size_t msg_controllen;
unsigned msg_flags;
};
struct mmsghdr {
struct msghdr msg_hdr;
unsigned msg_len;
};
struct cmsghdr {
__kernel_size_t cmsg_len;
int cmsg_level;
int cmsg_type;
};
# 128 "include/linux/socket.h"
static inline __attribute__((always_inline)) struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
struct cmsghdr *__cmsg)
{
struct cmsghdr * __ptr;
__ptr = (struct cmsghdr*)(((unsigned char *) __cmsg) + ( ((__cmsg->cmsg_len)+sizeof(long)-1) & ~(sizeof(long)-1) ));
if ((unsigned long)((char*)(__ptr+1) - (char *) __ctl) > __size)
return (struct cmsghdr *)0;
return __ptr;
}
static inline __attribute__((always_inline)) struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg)
{
return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg);
}
struct ucred {
__u32 pid;
__u32 uid;
__u32 gid;
};
# 315 "include/linux/socket.h"
extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred);
extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
int offset, int len);
extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
struct iovec *iov,
int offset,
unsigned int len, __wsum *csump);
extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode);
extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
int offset, int len);
extern int move_addr_to_user(struct sockaddr *kaddr, int klen, void *uaddr, int *ulen);
extern int move_addr_to_kernel(void *uaddr, int ulen, struct sockaddr *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
struct timespec;
extern int __sys_recvmmsg(int fd, struct mmsghdr *mmsg, unsigned int vlen,
unsigned int flags, struct timespec *timeout);
# 24 "include/linux/if.h" 2
# 1 "include/linux/hdlc/ioctl.h" 1
# 37 "include/linux/hdlc/ioctl.h"
typedef struct {
unsigned int clock_rate;
unsigned int clock_type;
unsigned short loopback;
} sync_serial_settings;
typedef struct {
unsigned int clock_rate;
unsigned int clock_type;
unsigned short loopback;
unsigned int slot_map;
} te1_settings;
typedef struct {
unsigned short encoding;
unsigned short parity;
} raw_hdlc_proto;
typedef struct {
unsigned int t391;
unsigned int t392;
unsigned int n391;
unsigned int n392;
unsigned int n393;
unsigned short lmi;
unsigned short dce;
} fr_proto;
typedef struct {
unsigned int dlci;
} fr_proto_pvc;
typedef struct {
unsigned int dlci;
char master[16];
}fr_proto_pvc_info;
typedef struct {
unsigned int interval;
unsigned int timeout;
} cisco_proto;
# 29 "include/linux/if.h" 2
# 112 "include/linux/if.h"
enum {
IF_OPER_UNKNOWN,
IF_OPER_NOTPRESENT,
IF_OPER_DOWN,
IF_OPER_LOWERLAYERDOWN,
IF_OPER_TESTING,
IF_OPER_DORMANT,
IF_OPER_UP,
};
enum {
IF_LINK_MODE_DEFAULT,
IF_LINK_MODE_DORMANT,
};
# 138 "include/linux/if.h"
struct ifmap {
unsigned long mem_start;
unsigned long mem_end;
unsigned short base_addr;
unsigned char irq;
unsigned char dma;
unsigned char port;
};
struct if_settings {
unsigned int type;
unsigned int size;
union {
raw_hdlc_proto *raw_hdlc;
cisco_proto *cisco;
fr_proto *fr;
fr_proto_pvc *fr_pvc;
fr_proto_pvc_info *fr_pvc_info;
sync_serial_settings *sync;
te1_settings *te1;
} ifs_ifsu;
};
# 172 "include/linux/if.h"
struct ifreq {
union
{
char ifrn_name[16];
} ifr_ifrn;
union {
struct sockaddr ifru_addr;
struct sockaddr ifru_dstaddr;
struct sockaddr ifru_broadaddr;
struct sockaddr ifru_netmask;
struct sockaddr ifru_hwaddr;
short ifru_flags;
int ifru_ivalue;
int ifru_mtu;
struct ifmap ifru_map;
char ifru_slave[16];
char ifru_newname[16];
void * ifru_data;
struct if_settings ifru_settings;
} ifr_ifru;
};
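/*
 * The familiar ifr_name/ifr_addr/ifr_flags/ifr_mtu spellings are macros over
 * the two unions above (e.g. ifr_name -> ifr_ifrn.ifrn_name); they have been
 * expanded away by the preprocessor in this dump.  struct ifreq is the payload
 * of the SIOCGIFxxx/SIOCSIFxxx ioctls, and struct ifconf below batches several
 * of them for SIOCGIFCONF.
 */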
# 221 "include/linux/if.h"
struct ifconf {
int ifc_len;
union {
char *ifcu_buf;
struct ifreq *ifcu_req;
} ifc_ifcu;
};
# 29 "include/linux/netdevice.h" 2
# 1 "include/linux/if_ether.h" 1
# 122 "include/linux/if_ether.h"
struct ethhdr {
unsigned char h_dest[6];
unsigned char h_source[6];
__be16 h_proto;
} __attribute__((packed));
# 1 "include/linux/skbuff.h" 1
# 18 "include/linux/skbuff.h"
# 1 "include/linux/kmemcheck.h" 1
# 1 "include/linux/mm_types.h" 1
# 1 "include/linux/auxvec.h" 1
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/auxvec.h" 1
# 5 "include/linux/auxvec.h" 2
# 5 "include/linux/mm_types.h" 2
# 1 "include/linux/prio_tree.h" 1
# 14 "include/linux/prio_tree.h"
struct raw_prio_tree_node {
struct prio_tree_node *left;
struct prio_tree_node *right;
struct prio_tree_node *parent;
};
struct prio_tree_node {
struct prio_tree_node *left;
struct prio_tree_node *right;
struct prio_tree_node *parent;
unsigned long start;
unsigned long last;
};
struct prio_tree_root {
struct prio_tree_node *prio_tree_node;
unsigned short index_bits;
unsigned short raw;
};
struct prio_tree_iter {
struct prio_tree_node *cur;
unsigned long mask;
unsigned long value;
int size_level;
struct prio_tree_root *root;
unsigned long r_index;
unsigned long h_index;
};
static inline __attribute__((always_inline)) void prio_tree_iter_init(struct prio_tree_iter *iter,
struct prio_tree_root *root, unsigned long r_index, unsigned long h_index)
{
iter->root = root;
iter->r_index = r_index;
iter->h_index = h_index;
iter->cur = ((void *)0);
}
# 84 "include/linux/prio_tree.h"
static inline __attribute__((always_inline)) int prio_tree_empty(const struct prio_tree_root *root)
{
return root->prio_tree_node == ((void *)0);
}
static inline __attribute__((always_inline)) int prio_tree_root(const struct prio_tree_node *node)
{
return node->parent == node;
}
static inline __attribute__((always_inline)) int prio_tree_left_empty(const struct prio_tree_node *node)
{
return node->left == node;
}
static inline __attribute__((always_inline)) int prio_tree_right_empty(const struct prio_tree_node *node)
{
return node->right == node;
}
struct prio_tree_node *prio_tree_replace(struct prio_tree_root *root,
struct prio_tree_node *old, struct prio_tree_node *node);
struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root,
struct prio_tree_node *node);
void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node);
struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter);
# 10 "include/linux/mm_types.h" 2
# 1 "include/linux/rbtree.h" 1
# 100 "include/linux/rbtree.h"
struct rb_node
{
unsigned long rb_parent_color;
struct rb_node *rb_right;
struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));
struct rb_root
{
struct rb_node *rb_node;
};
# 123 "include/linux/rbtree.h"
static inline __attribute__((always_inline)) void rb_set_parent(struct rb_node *rb, struct rb_node *p)
{
rb->rb_parent_color = (rb->rb_parent_color & 3) | (unsigned long)p;
}
static inline __attribute__((always_inline)) void rb_set_color(struct rb_node *rb, int color)
{
rb->rb_parent_color = (rb->rb_parent_color & ~1) | color;
}
# 139 "include/linux/rbtree.h"
extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);
typedef void (*rb_augment_f)(struct rb_node *node, void *data);
extern void rb_augment_insert(struct rb_node *node,
rb_augment_f func, void *data);
extern struct rb_node *rb_augment_erase_begin(struct rb_node *node);
extern void rb_augment_erase_end(struct rb_node *node,
rb_augment_f func, void *data);
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
static inline __attribute__((always_inline)) void rb_link_node(struct rb_node * node, struct rb_node * parent,
struct rb_node ** rb_link)
{
node->rb_parent_color = (unsigned long )parent;
node->rb_left = node->rb_right = ((void *)0);
*rb_link = node;
}
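/*
 * The kernel rbtree is used by open-coding an ordered descent and then calling
 * rb_link_node() + rb_insert_color().  The sketch below is illustrative only:
 * the epon_doc_* names are hypothetical, the node keys on an unsigned long, and
 * a plain cast stands in for container_of() (the rb_node is the first member;
 * the macro itself is already expanded away in this preprocessed file).
 */
struct epon_doc_rb_item {
 struct rb_node node;
 unsigned long key;
};
static inline __attribute__((always_inline)) void epon_doc_rb_insert(struct rb_root *root, struct epon_doc_rb_item *item)
{
 struct rb_node **link = &root->rb_node, *parent = ((void *)0);
 while (*link) {
  struct epon_doc_rb_item *cur = (struct epon_doc_rb_item *)*link;
  parent = *link;
  link = item->key < cur->key ? &(*link)->rb_left : &(*link)->rb_right;
 }
 rb_link_node(&item->node, parent, link);
 rb_insert_color(&item->node, root); /* rebalance to restore the red-black invariants */
}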
# 11 "include/linux/mm_types.h" 2
# 1 "include/linux/page-debug-flags.h" 1
# 14 "include/linux/page-debug-flags.h"
enum page_debug_flags {
PAGE_DEBUG_FLAG_POISON,
};
# 15 "include/linux/mm_types.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mmu.h" 1
typedef struct {
unsigned long asid[4];
void *vdso;
} mm_context_t;
# 17 "include/linux/mm_types.h" 2
struct address_space;
# 34 "include/linux/mm_types.h"
struct page {
unsigned long flags;
atomic_t _count;
union {
atomic_t _mapcount;
struct {
u16 inuse;
u16 objects;
};
};
union {
struct {
unsigned long private;
struct address_space *mapping;
};
spinlock_t ptl;
struct kmem_cache *slab;
struct page *first_page;
};
union {
unsigned long index;
void *freelist;
};
struct list_head lru;
# 103 "include/linux/mm_types.h"
};
struct vm_region {
struct rb_node vm_rb;
unsigned long vm_flags;
unsigned long vm_start;
unsigned long vm_end;
unsigned long vm_top;
unsigned long vm_pgoff;
struct file *vm_file;
int vm_usage;
int vm_icache_flushed : 1;
};
struct vm_area_struct {
struct mm_struct * vm_mm;
unsigned long vm_start;
unsigned long vm_end;
struct vm_area_struct *vm_next, *vm_prev;
pgprot_t vm_page_prot;
unsigned long vm_flags;
struct rb_node vm_rb;
union {
struct {
struct list_head list;
void *parent;
struct vm_area_struct *head;
} vm_set;
struct raw_prio_tree_node prio_tree_node;
} shared;
struct list_head anon_vma_chain;
struct anon_vma *anon_vma;
const struct vm_operations_struct *vm_ops;
unsigned long vm_pgoff;
struct file * vm_file;
void * vm_private_data;
unsigned long vm_truncate_count;
};
struct core_thread {
struct task_struct *task;
struct core_thread *next;
};
struct core_state {
atomic_t nr_threads;
struct core_thread dumper;
struct completion startup;
};
enum {
MM_FILEPAGES,
MM_ANONPAGES,
MM_SWAPENTS,
NR_MM_COUNTERS
};
struct mm_rss_stat {
atomic_long_t count[NR_MM_COUNTERS];
};
struct task_rss_stat {
int events;
int count[NR_MM_COUNTERS];
};
struct mm_struct {
struct vm_area_struct * mmap;
struct rb_root mm_rb;
struct vm_area_struct * mmap_cache;
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
unsigned long mmap_base;
unsigned long task_size;
unsigned long cached_hole_size;
unsigned long free_area_cache;
pgd_t * pgd;
atomic_t mm_users;
atomic_t mm_count;
int map_count;
struct rw_semaphore mmap_sem;
spinlock_t page_table_lock;
struct list_head mmlist;
unsigned long hiwater_rss;
unsigned long hiwater_vm;
unsigned long total_vm, locked_vm, shared_vm, exec_vm;
unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;
unsigned long saved_auxv[(2*(0 + 19 + 1))];
struct mm_rss_stat rss_stat;
struct linux_binfmt *binfmt;
cpumask_t cpu_vm_mask;
mm_context_t context;
# 280 "include/linux/mm_types.h"
unsigned int faultstamp;
unsigned int token_priority;
unsigned int last_interval;
unsigned long flags;
struct core_state *core_state;
spinlock_t ioctx_lock;
struct hlist_head ioctx_list;
# 302 "include/linux/mm_types.h"
struct task_struct *owner;
struct file *exe_file;
unsigned long num_exe_file_vmas;
};
# 5 "include/linux/kmemcheck.h" 2
# 92 "include/linux/kmemcheck.h"
static inline __attribute__((always_inline)) void
kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
}
static inline __attribute__((always_inline)) void
kmemcheck_free_shadow(struct page *page, int order)
{
}
static inline __attribute__((always_inline)) void
kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
size_t size)
{
}
static inline __attribute__((always_inline)) void kmemcheck_slab_free(struct kmem_cache *s, void *object,
size_t size)
{
}
static inline __attribute__((always_inline)) void kmemcheck_pagealloc_alloc(struct page *p,
unsigned int order, gfp_t gfpflags)
{
}
static inline __attribute__((always_inline)) int kmemcheck_page_is_tracked(struct page *p)
{
return false;
}
static inline __attribute__((always_inline)) void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
}
static inline __attribute__((always_inline)) void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
}
static inline __attribute__((always_inline)) void kmemcheck_mark_initialized(void *address, unsigned int n)
{
}
static inline __attribute__((always_inline)) void kmemcheck_mark_freed(void *address, unsigned int n)
{
}
static inline __attribute__((always_inline)) void kmemcheck_mark_unallocated_pages(struct page *p,
unsigned int n)
{
}
static inline __attribute__((always_inline)) void kmemcheck_mark_uninitialized_pages(struct page *p,
unsigned int n)
{
}
static inline __attribute__((always_inline)) void kmemcheck_mark_initialized_pages(struct page *p,
unsigned int n)
{
}
static inline __attribute__((always_inline)) int kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
{
return true;
}
# 19 "include/linux/skbuff.h" 2
# 1 "include/linux/net.h" 1
# 46 "include/linux/net.h"
typedef enum {
SS_FREE = 0,
SS_UNCONNECTED,
SS_CONNECTING,
SS_CONNECTED,
SS_DISCONNECTING
} socket_state;
# 1 "include/linux/random.h" 1
# 11 "include/linux/random.h"
# 1 "include/linux/ioctl.h" 1
# 12 "include/linux/random.h" 2
# 1 "include/linux/irqnr.h" 1
# 26 "include/linux/irqnr.h"
extern int nr_irqs;
extern struct irq_desc *irq_to_desc(unsigned int irq);
# 13 "include/linux/random.h" 2
# 37 "include/linux/random.h"
struct rand_pool_info {
int entropy_count;
int buf_size;
__u32 buf[0];
};
struct rnd_state {
__u32 s1, s2, s3;
};
extern void rand_initialize_irq(int irq);
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
extern void add_interrupt_randomness(int irq);
extern void get_random_bytes(void *buf, int nbytes);
void generate_random_uuid(unsigned char uuid_out[16]);
extern __u32 secure_ip_id(__be32 daddr);
extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
__be16 dport);
extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport);
extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
__be16 sport, __be16 dport);
extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport);
unsigned int get_random_int(void);
unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
u32 random32(void);
void srandom32(u32 seed);
u32 prandom32(struct rnd_state *);
static inline __attribute__((always_inline)) u32 __seed(u32 x, u32 m)
{
return (x < m) ? x + m : x;
}
static inline __attribute__((always_inline)) void prandom32_seed(struct rnd_state *state, u64 seed)
{
u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
state->s1 = __seed(i, 1);
state->s2 = __seed(i, 7);
state->s3 = __seed(i, 15);
}
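/*
 * random32()/prandom32() implement a fast, non-cryptographic Tausworthe PRNG;
 * get_random_bytes() above is the entropy-pool interface to use when
 * unpredictability matters.  __seed() appears to exist so that each state word
 * stays above the small minimum the Tausworthe recurrence needs, keeping the
 * generator out of its degenerate near-zero states.
 */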
# 59 "include/linux/net.h" 2
# 1 "include/linux/fcntl.h" 1
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/fcntl.h" 1
# 61 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/fcntl.h"
struct flock {
short l_type;
short l_whence;
off_t l_start;
off_t l_len;
long l_sysid;
__kernel_pid_t l_pid;
long pad[4];
};
# 1 "include/asm-generic/fcntl.h" 1
# 123 "include/asm-generic/fcntl.h"
struct f_owner_ex {
int type;
pid_t pid;
};
# 185 "include/asm-generic/fcntl.h"
struct flock64 {
short l_type;
short l_whence;
__kernel_loff_t l_start;
__kernel_loff_t l_len;
__kernel_pid_t l_pid;
};
# 76 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/fcntl.h" 2
# 5 "include/linux/fcntl.h" 2
# 61 "include/linux/net.h" 2
struct poll_table_struct;
struct pipe_inode_info;
struct inode;
struct net;
# 114 "include/linux/net.h"
enum sock_shutdown_cmd {
SHUT_RD = 0,
SHUT_WR = 1,
SHUT_RDWR = 2,
};
struct socket_wq {
wait_queue_head_t wait;
struct fasync_struct *fasync_list;
struct rcu_head rcu;
} __attribute__((__aligned__((1 << 5))));
# 136 "include/linux/net.h"
struct socket {
 socket_state state;  /* SS_UNCONNECTED, SS_CONNECTED, ... (socket_state above) */
 short type;  /* SOCK_STREAM, SOCK_DGRAM, ... */
 unsigned long flags;  /* SOCK_ASYNC_NOSPACE and friends, bit flags */
 struct socket_wq *wq;  /* wait queue and fasync list, freed via RCU */
 struct file *file;  /* back pointer to the VFS file when mapped to an fd */
 struct sock *sk;  /* protocol-specific network-layer state */
 const struct proto_ops *ops;  /* per-address-family operations table */
};
struct vm_area_struct;
struct page;
struct kiocb;
struct sockaddr;
struct msghdr;
struct module;
struct proto_ops {
int family;
struct module *owner;
int (*release) (struct socket *sock);
int (*bind) (struct socket *sock,
struct sockaddr *myaddr,
int sockaddr_len);
int (*connect) (struct socket *sock,
struct sockaddr *vaddr,
int sockaddr_len, int flags);
int (*socketpair)(struct socket *sock1,
struct socket *sock2);
int (*accept) (struct socket *sock,
struct socket *newsock, int flags);
int (*getname) (struct socket *sock,
struct sockaddr *addr,
int *sockaddr_len, int peer);
unsigned int (*poll) (struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int (*ioctl) (struct socket *sock, unsigned int cmd,
unsigned long arg);
int (*listen) (struct socket *sock, int len);
int (*shutdown) (struct socket *sock, int flags);
int (*setsockopt)(struct socket *sock, int level,
int optname, char *optval, unsigned int optlen);
int (*getsockopt)(struct socket *sock, int level,
int optname, char *optval, int *optlen);
int (*sendmsg) (struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len);
int (*recvmsg) (struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len,
int flags);
int (*mmap) (struct file *file, struct socket *sock,
struct vm_area_struct * vma);
ssize_t (*sendpage) (struct socket *sock, struct page *page,
int offset, size_t size, int flags);
ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
};
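/*
 * struct proto_ops is the per-address-family jump table behind the socket
 * system calls: socket->ops is installed by the matching net_proto_family
 * ->create() hook (registered with sock_register() below), and sys_bind(),
 * sys_connect(), sys_sendmsg() and friends dispatch through it.
 */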
struct net_proto_family {
int family;
int (*create)(struct net *net, struct socket *sock,
int protocol, int kern);
struct module *owner;
};
struct iovec;
struct kvec;
enum {
SOCK_WAKE_IO,
SOCK_WAKE_WAITD,
SOCK_WAKE_SPACE,
SOCK_WAKE_URG,
};
extern int sock_wake_async(struct socket *sk, int how, int band);
extern int sock_register(const struct net_proto_family *fam);
extern void sock_unregister(int family);
extern int sock_create(int family, int type, int proto,
struct socket **res);
extern int sock_create_kern(int family, int type, int proto,
struct socket **res);
extern int sock_create_lite(int family, int type, int proto,
struct socket **res);
extern void sock_release(struct socket *sock);
extern int sock_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len);
extern int sock_recvmsg(struct socket *sock, struct msghdr *msg,
size_t size, int flags);
extern int sock_map_fd(struct socket *sock, int flags);
extern struct socket *sockfd_lookup(int fd, int *err);
extern int net_ratelimit(void);
extern int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
struct kvec *vec, size_t num, size_t len);
extern int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
struct kvec *vec, size_t num,
size_t len, int flags);
extern int kernel_bind(struct socket *sock, struct sockaddr *addr,
int addrlen);
extern int kernel_listen(struct socket *sock, int backlog);
extern int kernel_accept(struct socket *sock, struct socket **newsock,
int flags);
extern int kernel_connect(struct socket *sock, struct sockaddr *addr,
int addrlen, int flags);
extern int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
int *addrlen);
extern int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
int *addrlen);
extern int kernel_getsockopt(struct socket *sock, int level, int optname,
char *optval, int *optlen);
extern int kernel_setsockopt(struct socket *sock, int level, int optname,
char *optval, unsigned int optlen);
extern int kernel_sendpage(struct socket *sock, struct page *page, int offset,
size_t size, int flags);
extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
extern int kernel_sock_shutdown(struct socket *sock,
enum sock_shutdown_cmd how);
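/*
 * The kernel_*() wrappers above let kernel code drive a socket without any
 * user-space involvement.  The helper below is an illustrative sketch only
 * (the epon_doc_ name is hypothetical): it sends a single datagram through a
 * kvec.  Protocol constants appear as literals because the PF_INET and
 * IPPROTO_UDP macros are already expanded away in this preprocessed file.
 */
static inline __attribute__((always_inline)) int epon_doc_send_dgram(struct sockaddr *daddr, int daddr_len, void *buf, size_t len)
{
 struct socket *sock;
 struct msghdr msg;
 struct kvec vec;
 int err;
 err = sock_create_kern(2 /* PF_INET */, SOCK_DGRAM, 17 /* IPPROTO_UDP */, &sock);
 if (err < 0)
  return err;
 msg.msg_name = daddr;
 msg.msg_namelen = daddr_len;
 msg.msg_iov = (struct iovec *)0;
 msg.msg_iovlen = 0;
 msg.msg_control = (void *)0;
 msg.msg_controllen = 0;
 msg.msg_flags = 0;
 vec.iov_base = buf;
 vec.iov_len = len;
 err = kernel_sendmsg(sock, &msg, &vec, 1, len); /* points msg_iov at the kvec internally */
 sock_release(sock);
 return err;
}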
# 289 "include/linux/net.h"
# 1 "include/linux/sysctl.h" 1
# 29 "include/linux/sysctl.h"
struct completion;
struct __sysctl_args {
int *name;
int nlen;
void *oldval;
size_t *oldlenp;
void *newval;
size_t newlen;
unsigned long __unused[4];
};
enum
{
CTL_KERN=1,
CTL_VM=2,
CTL_NET=3,
CTL_PROC=4,
CTL_FS=5,
CTL_DEBUG=6,
CTL_DEV=7,
CTL_BUS=8,
CTL_ABI=9,
CTL_CPU=10,
CTL_ARLAN=254,
CTL_S390DBF=5677,
CTL_SUNRPC=7249,
CTL_PM=9899,
CTL_FRV=9898,
};
enum
{
CTL_BUS_ISA=1
};
enum
{
INOTIFY_MAX_USER_INSTANCES=1,
INOTIFY_MAX_USER_WATCHES=2,
INOTIFY_MAX_QUEUED_EVENTS=3
};
enum
{
KERN_OSTYPE=1,
KERN_OSRELEASE=2,
KERN_OSREV=3,
KERN_VERSION=4,
KERN_SECUREMASK=5,
KERN_PROF=6,
KERN_NODENAME=7,
KERN_DOMAINNAME=8,
KERN_PANIC=15,
KERN_REALROOTDEV=16,
KERN_SPARC_REBOOT=21,
KERN_CTLALTDEL=22,
KERN_PRINTK=23,
KERN_NAMETRANS=24,
KERN_PPC_HTABRECLAIM=25,
KERN_PPC_ZEROPAGED=26,
KERN_PPC_POWERSAVE_NAP=27,
KERN_MODPROBE=28,
KERN_SG_BIG_BUFF=29,
KERN_ACCT=30,
KERN_PPC_L2CR=31,
KERN_RTSIGNR=32,
KERN_RTSIGMAX=33,
KERN_SHMMAX=34,
KERN_MSGMAX=35,
KERN_MSGMNB=36,
KERN_MSGPOOL=37,
KERN_SYSRQ=38,
KERN_MAX_THREADS=39,
KERN_RANDOM=40,
KERN_SHMALL=41,
KERN_MSGMNI=42,
KERN_SEM=43,
KERN_SPARC_STOP_A=44,
KERN_SHMMNI=45,
KERN_OVERFLOWUID=46,
KERN_OVERFLOWGID=47,
KERN_SHMPATH=48,
KERN_HOTPLUG=49,
KERN_IEEE_EMULATION_WARNINGS=50,
KERN_S390_USER_DEBUG_LOGGING=51,
KERN_CORE_USES_PID=52,
KERN_TAINTED=53,
KERN_CADPID=54,
KERN_PIDMAX=55,
KERN_CORE_PATTERN=56,
KERN_PANIC_ON_OOPS=57,
KERN_HPPA_PWRSW=58,
KERN_HPPA_UNALIGNED=59,
KERN_PRINTK_RATELIMIT=60,
KERN_PRINTK_RATELIMIT_BURST=61,
KERN_PTY=62,
KERN_NGROUPS_MAX=63,
KERN_SPARC_SCONS_PWROFF=64,
KERN_HZ_TIMER=65,
KERN_UNKNOWN_NMI_PANIC=66,
KERN_BOOTLOADER_TYPE=67,
KERN_RANDOMIZE=68,
KERN_SETUID_DUMPABLE=69,
KERN_SPIN_RETRY=70,
KERN_ACPI_VIDEO_FLAGS=71,
KERN_IA64_UNALIGNED=72,
KERN_COMPAT_LOG=73,
KERN_MAX_LOCK_DEPTH=74,
KERN_NMI_WATCHDOG=75,
KERN_PANIC_ON_NMI=76,
};
enum
{
VM_UNUSED1=1,
VM_UNUSED2=2,
VM_UNUSED3=3,
VM_UNUSED4=4,
VM_OVERCOMMIT_MEMORY=5,
VM_UNUSED5=6,
VM_UNUSED7=7,
VM_UNUSED8=8,
VM_UNUSED9=9,
VM_PAGE_CLUSTER=10,
VM_DIRTY_BACKGROUND=11,
VM_DIRTY_RATIO=12,
VM_DIRTY_WB_CS=13,
VM_DIRTY_EXPIRE_CS=14,
VM_NR_PDFLUSH_THREADS=15,
VM_OVERCOMMIT_RATIO=16,
VM_PAGEBUF=17,
VM_HUGETLB_PAGES=18,
VM_SWAPPINESS=19,
VM_LOWMEM_RESERVE_RATIO=20,
VM_MIN_FREE_KBYTES=21,
VM_MAX_MAP_COUNT=22,
VM_LAPTOP_MODE=23,
VM_BLOCK_DUMP=24,
VM_HUGETLB_GROUP=25,
VM_VFS_CACHE_PRESSURE=26,
VM_LEGACY_VA_LAYOUT=27,
VM_SWAP_TOKEN_TIMEOUT=28,
VM_DROP_PAGECACHE=29,
VM_PERCPU_PAGELIST_FRACTION=30,
VM_ZONE_RECLAIM_MODE=31,
VM_MIN_UNMAPPED=32,
VM_PANIC_ON_OOM=33,
VM_VDSO_ENABLED=34,
VM_MIN_SLAB=35,
};
enum
{
NET_CORE=1,
NET_ETHER=2,
NET_802=3,
NET_UNIX=4,
NET_IPV4=5,
NET_IPX=6,
NET_ATALK=7,
NET_NETROM=8,
NET_AX25=9,
NET_BRIDGE=10,
NET_ROSE=11,
NET_IPV6=12,
NET_X25=13,
NET_TR=14,
NET_DECNET=15,
NET_ECONET=16,
NET_SCTP=17,
NET_LLC=18,
NET_NETFILTER=19,
NET_DCCP=20,
NET_IRDA=412,
};
enum
{
RANDOM_POOLSIZE=1,
RANDOM_ENTROPY_COUNT=2,
RANDOM_READ_THRESH=3,
RANDOM_WRITE_THRESH=4,
RANDOM_BOOT_ID=5,
RANDOM_UUID=6
};
enum
{
PTY_MAX=1,
PTY_NR=2
};
enum
{
BUS_ISA_MEM_BASE=1,
BUS_ISA_PORT_BASE=2,
BUS_ISA_PORT_SHIFT=3
};
enum
{
NET_CORE_WMEM_MAX=1,
NET_CORE_RMEM_MAX=2,
NET_CORE_WMEM_DEFAULT=3,
NET_CORE_RMEM_DEFAULT=4,
NET_CORE_MAX_BACKLOG=6,
NET_CORE_FASTROUTE=7,
NET_CORE_MSG_COST=8,
NET_CORE_MSG_BURST=9,
NET_CORE_OPTMEM_MAX=10,
NET_CORE_HOT_LIST_LENGTH=11,
NET_CORE_DIVERT_VERSION=12,
NET_CORE_NO_CONG_THRESH=13,
NET_CORE_NO_CONG=14,
NET_CORE_LO_CONG=15,
NET_CORE_MOD_CONG=16,
NET_CORE_DEV_WEIGHT=17,
NET_CORE_SOMAXCONN=18,
NET_CORE_BUDGET=19,
NET_CORE_AEVENT_ETIME=20,
NET_CORE_AEVENT_RSEQTH=21,
NET_CORE_WARNINGS=22,
NET_CORE_QUEUE_MASK=23,
};
enum
{
NET_UNIX_DESTROY_DELAY=1,
NET_UNIX_DELETE_DELAY=2,
NET_UNIX_MAX_DGRAM_QLEN=3,
};
enum
{
NET_NF_CONNTRACK_MAX=1,
NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2,
NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3,
NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4,
NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5,
NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6,
NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7,
NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8,
NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9,
NET_NF_CONNTRACK_UDP_TIMEOUT=10,
NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11,
NET_NF_CONNTRACK_ICMP_TIMEOUT=12,
NET_NF_CONNTRACK_GENERIC_TIMEOUT=13,
NET_NF_CONNTRACK_BUCKETS=14,
NET_NF_CONNTRACK_LOG_INVALID=15,
NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16,
NET_NF_CONNTRACK_TCP_LOOSE=17,
NET_NF_CONNTRACK_TCP_BE_LIBERAL=18,
NET_NF_CONNTRACK_TCP_MAX_RETRANS=19,
NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20,
NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21,
NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22,
NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23,
NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24,
NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25,
NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26,
NET_NF_CONNTRACK_COUNT=27,
NET_NF_CONNTRACK_ICMPV6_TIMEOUT=28,
NET_NF_CONNTRACK_FRAG6_TIMEOUT=29,
NET_NF_CONNTRACK_FRAG6_LOW_THRESH=30,
NET_NF_CONNTRACK_FRAG6_HIGH_THRESH=31,
NET_NF_CONNTRACK_CHECKSUM=32,
NET_NF_CONNTRACK_FTP_ENABLE=33,
NET_NF_CONNTRACK_SIP_ENABLE=34,
NET_NF_CONNTRACK_H323_ENABLE=35,
NET_NF_CONNTRACK_RTSP_ENABLE=36,
NET_NF_CONNTRACK_L2TP_ENABLE=37,
NET_NF_CONNTRACK_IPSEC_ENABLE=38,
NET_NF_CONNTRACK_PPTP_ENABLE=39,
NET_NF_CONNTRACK_PORTSCAN_ENABLE=40,
NET_NF_CONNTRACK_FTP_PORT=41,
NET_NF_CONNTRACK_ESP_TIMEOUT=42,
};
enum
{
NET_IPV4_FORWARD=8,
NET_IPV4_DYNADDR=9,
NET_IPV4_CONF=16,
NET_IPV4_NEIGH=17,
NET_IPV4_ROUTE=18,
NET_IPV4_FIB_HASH=19,
NET_IPV4_NETFILTER=20,
NET_IPV4_TCP_TIMESTAMPS=33,
NET_IPV4_TCP_WINDOW_SCALING=34,
NET_IPV4_TCP_SACK=35,
NET_IPV4_TCP_RETRANS_COLLAPSE=36,
NET_IPV4_DEFAULT_TTL=37,
NET_IPV4_AUTOCONFIG=38,
NET_IPV4_NO_PMTU_DISC=39,
NET_IPV4_TCP_SYN_RETRIES=40,
NET_IPV4_IPFRAG_HIGH_THRESH=41,
NET_IPV4_IPFRAG_LOW_THRESH=42,
NET_IPV4_IPFRAG_TIME=43,
NET_IPV4_TCP_MAX_KA_PROBES=44,
NET_IPV4_TCP_KEEPALIVE_TIME=45,
NET_IPV4_TCP_KEEPALIVE_PROBES=46,
NET_IPV4_TCP_RETRIES1=47,
NET_IPV4_TCP_RETRIES2=48,
NET_IPV4_TCP_FIN_TIMEOUT=49,
NET_IPV4_IP_MASQ_DEBUG=50,
NET_TCP_SYNCOOKIES=51,
NET_TCP_STDURG=52,
NET_TCP_RFC1337=53,
NET_TCP_SYN_TAILDROP=54,
NET_TCP_MAX_SYN_BACKLOG=55,
NET_IPV4_LOCAL_PORT_RANGE=56,
NET_IPV4_ICMP_ECHO_IGNORE_ALL=57,
NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS=58,
NET_IPV4_ICMP_SOURCEQUENCH_RATE=59,
NET_IPV4_ICMP_DESTUNREACH_RATE=60,
NET_IPV4_ICMP_TIMEEXCEED_RATE=61,
NET_IPV4_ICMP_PARAMPROB_RATE=62,
NET_IPV4_ICMP_ECHOREPLY_RATE=63,
NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES=64,
NET_IPV4_IGMP_MAX_MEMBERSHIPS=65,
NET_TCP_TW_RECYCLE=66,
NET_IPV4_ALWAYS_DEFRAG=67,
NET_IPV4_TCP_KEEPALIVE_INTVL=68,
NET_IPV4_INET_PEER_THRESHOLD=69,
NET_IPV4_INET_PEER_MINTTL=70,
NET_IPV4_INET_PEER_MAXTTL=71,
NET_IPV4_INET_PEER_GC_MINTIME=72,
NET_IPV4_INET_PEER_GC_MAXTIME=73,
NET_TCP_ORPHAN_RETRIES=74,
NET_TCP_ABORT_ON_OVERFLOW=75,
NET_TCP_SYNACK_RETRIES=76,
NET_TCP_MAX_ORPHANS=77,
NET_TCP_MAX_TW_BUCKETS=78,
NET_TCP_FACK=79,
NET_TCP_REORDERING=80,
NET_TCP_ECN=81,
NET_TCP_DSACK=82,
NET_TCP_MEM=83,
NET_TCP_WMEM=84,
NET_TCP_RMEM=85,
NET_TCP_APP_WIN=86,
NET_TCP_ADV_WIN_SCALE=87,
NET_IPV4_NONLOCAL_BIND=88,
NET_IPV4_ICMP_RATELIMIT=89,
NET_IPV4_ICMP_RATEMASK=90,
NET_TCP_TW_REUSE=91,
NET_TCP_FRTO=92,
NET_TCP_LOW_LATENCY=93,
NET_IPV4_IPFRAG_SECRET_INTERVAL=94,
NET_IPV4_IGMP_MAX_MSF=96,
NET_TCP_NO_METRICS_SAVE=97,
NET_TCP_DEFAULT_WIN_SCALE=105,
NET_TCP_MODERATE_RCVBUF=106,
NET_TCP_TSO_WIN_DIVISOR=107,
NET_TCP_BIC_BETA=108,
NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR=109,
NET_TCP_CONG_CONTROL=110,
NET_TCP_ABC=111,
NET_IPV4_IPFRAG_MAX_DIST=112,
NET_TCP_MTU_PROBING=113,
NET_TCP_BASE_MSS=114,
NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS=115,
NET_TCP_DMA_COPYBREAK=116,
NET_TCP_SLOW_START_AFTER_IDLE=117,
NET_CIPSOV4_CACHE_ENABLE=118,
NET_CIPSOV4_CACHE_BUCKET_SIZE=119,
NET_CIPSOV4_RBM_OPTFMT=120,
NET_CIPSOV4_RBM_STRICTVALID=121,
NET_TCP_AVAIL_CONG_CONTROL=122,
NET_TCP_ALLOWED_CONG_CONTROL=123,
NET_TCP_MAX_SSTHRESH=124,
NET_TCP_FRTO_RESPONSE=125,
};
enum {
NET_IPV4_ROUTE_FLUSH=1,
NET_IPV4_ROUTE_MIN_DELAY=2,
NET_IPV4_ROUTE_MAX_DELAY=3,
NET_IPV4_ROUTE_GC_THRESH=4,
NET_IPV4_ROUTE_MAX_SIZE=5,
NET_IPV4_ROUTE_GC_MIN_INTERVAL=6,
NET_IPV4_ROUTE_GC_TIMEOUT=7,
NET_IPV4_ROUTE_GC_INTERVAL=8,
NET_IPV4_ROUTE_REDIRECT_LOAD=9,
NET_IPV4_ROUTE_REDIRECT_NUMBER=10,
NET_IPV4_ROUTE_REDIRECT_SILENCE=11,
NET_IPV4_ROUTE_ERROR_COST=12,
NET_IPV4_ROUTE_ERROR_BURST=13,
NET_IPV4_ROUTE_GC_ELASTICITY=14,
NET_IPV4_ROUTE_MTU_EXPIRES=15,
NET_IPV4_ROUTE_MIN_PMTU=16,
NET_IPV4_ROUTE_MIN_ADVMSS=17,
NET_IPV4_ROUTE_SECRET_INTERVAL=18,
NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS=19,
};
enum
{
NET_PROTO_CONF_ALL=-2,
NET_PROTO_CONF_DEFAULT=-3
};
enum
{
NET_IPV4_CONF_FORWARDING=1,
NET_IPV4_CONF_MC_FORWARDING=2,
NET_IPV4_CONF_PROXY_ARP=3,
NET_IPV4_CONF_ACCEPT_REDIRECTS=4,
NET_IPV4_CONF_SECURE_REDIRECTS=5,
NET_IPV4_CONF_SEND_REDIRECTS=6,
NET_IPV4_CONF_SHARED_MEDIA=7,
NET_IPV4_CONF_RP_FILTER=8,
NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE=9,
NET_IPV4_CONF_BOOTP_RELAY=10,
NET_IPV4_CONF_LOG_MARTIANS=11,
NET_IPV4_CONF_TAG=12,
NET_IPV4_CONF_ARPFILTER=13,
NET_IPV4_CONF_MEDIUM_ID=14,
NET_IPV4_CONF_NOXFRM=15,
NET_IPV4_CONF_NOPOLICY=16,
NET_IPV4_CONF_FORCE_IGMP_VERSION=17,
NET_IPV4_CONF_ARP_ANNOUNCE=18,
NET_IPV4_CONF_ARP_IGNORE=19,
NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
NET_IPV4_CONF_ARP_ACCEPT=21,
NET_IPV4_CONF_ARP_NOTIFY=22,
NET_IPV4_CONF_DISABLE_IPV4=23,
};
enum
{
NET_IPV4_NF_CONNTRACK_MAX=1,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9,
NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT=10,
NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11,
NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT=12,
NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT=13,
NET_IPV4_NF_CONNTRACK_BUCKETS=14,
NET_IPV4_NF_CONNTRACK_LOG_INVALID=15,
NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16,
NET_IPV4_NF_CONNTRACK_TCP_LOOSE=17,
NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL=18,
NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS=19,
NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20,
NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21,
NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22,
NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23,
NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24,
NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25,
NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26,
NET_IPV4_NF_CONNTRACK_COUNT=27,
NET_IPV4_NF_CONNTRACK_CHECKSUM=28,
};
enum {
NET_IPV6_CONF=16,
NET_IPV6_NEIGH=17,
NET_IPV6_ROUTE=18,
NET_IPV6_ICMP=19,
NET_IPV6_BINDV6ONLY=20,
NET_IPV6_IP6FRAG_HIGH_THRESH=21,
NET_IPV6_IP6FRAG_LOW_THRESH=22,
NET_IPV6_IP6FRAG_TIME=23,
NET_IPV6_IP6FRAG_SECRET_INTERVAL=24,
NET_IPV6_MLD_MAX_MSF=25,
};
enum {
NET_IPV6_ROUTE_FLUSH=1,
NET_IPV6_ROUTE_GC_THRESH=2,
NET_IPV6_ROUTE_MAX_SIZE=3,
NET_IPV6_ROUTE_GC_MIN_INTERVAL=4,
NET_IPV6_ROUTE_GC_TIMEOUT=5,
NET_IPV6_ROUTE_GC_INTERVAL=6,
NET_IPV6_ROUTE_GC_ELASTICITY=7,
NET_IPV6_ROUTE_MTU_EXPIRES=8,
NET_IPV6_ROUTE_MIN_ADVMSS=9,
NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS=10
};
enum {
NET_IPV6_FORWARDING=1,
NET_IPV6_HOP_LIMIT=2,
NET_IPV6_MTU=3,
NET_IPV6_ACCEPT_RA=4,
NET_IPV6_ACCEPT_REDIRECTS=5,
NET_IPV6_AUTOCONF=6,
NET_IPV6_DAD_TRANSMITS=7,
NET_IPV6_RTR_SOLICITS=8,
NET_IPV6_RTR_SOLICIT_INTERVAL=9,
NET_IPV6_RTR_SOLICIT_DELAY=10,
NET_IPV6_USE_TEMPADDR=11,
NET_IPV6_TEMP_VALID_LFT=12,
NET_IPV6_TEMP_PREFERED_LFT=13,
NET_IPV6_REGEN_MAX_RETRY=14,
NET_IPV6_MAX_DESYNC_FACTOR=15,
NET_IPV6_MAX_ADDRESSES=16,
NET_IPV6_FORCE_MLD_VERSION=17,
NET_IPV6_ACCEPT_RA_DEFRTR=18,
NET_IPV6_ACCEPT_RA_PINFO=19,
NET_IPV6_ACCEPT_RA_RTR_PREF=20,
NET_IPV6_RTR_PROBE_INTERVAL=21,
NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN=22,
NET_IPV6_PROXY_NDP=23,
NET_IPV6_ACCEPT_SOURCE_ROUTE=25,
__NET_IPV6_MAX
};
enum {
NET_IPV6_ICMP_RATELIMIT=1
};
enum {
NET_NEIGH_MCAST_SOLICIT=1,
NET_NEIGH_UCAST_SOLICIT=2,
NET_NEIGH_APP_SOLICIT=3,
NET_NEIGH_RETRANS_TIME=4,
NET_NEIGH_REACHABLE_TIME=5,
NET_NEIGH_DELAY_PROBE_TIME=6,
NET_NEIGH_GC_STALE_TIME=7,
NET_NEIGH_UNRES_QLEN=8,
NET_NEIGH_PROXY_QLEN=9,
NET_NEIGH_ANYCAST_DELAY=10,
NET_NEIGH_PROXY_DELAY=11,
NET_NEIGH_LOCKTIME=12,
NET_NEIGH_GC_INTERVAL=13,
NET_NEIGH_GC_THRESH1=14,
NET_NEIGH_GC_THRESH2=15,
NET_NEIGH_GC_THRESH3=16,
NET_NEIGH_RETRANS_TIME_MS=17,
NET_NEIGH_REACHABLE_TIME_MS=18,
NET_NEIGH_DEFAULT_ROUTE=19,
};
enum {
NET_DCCP_DEFAULT=1,
};
enum {
NET_IPX_PPROP_BROADCASTING=1,
NET_IPX_FORWARDING=2
};
enum {
NET_LLC2=1,
NET_LLC_STATION=2,
};
enum {
NET_LLC2_TIMEOUT=1,
};
enum {
NET_LLC_STATION_ACK_TIMEOUT=1,
};
enum {
NET_LLC2_ACK_TIMEOUT=1,
NET_LLC2_P_TIMEOUT=2,
NET_LLC2_REJ_TIMEOUT=3,
NET_LLC2_BUSY_TIMEOUT=4,
};
enum {
NET_ATALK_AARP_EXPIRY_TIME=1,
NET_ATALK_AARP_TICK_TIME=2,
NET_ATALK_AARP_RETRANSMIT_LIMIT=3,
NET_ATALK_AARP_RESOLVE_TIME=4
};
enum {
NET_NETROM_DEFAULT_PATH_QUALITY=1,
NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER=2,
NET_NETROM_NETWORK_TTL_INITIALISER=3,
NET_NETROM_TRANSPORT_TIMEOUT=4,
NET_NETROM_TRANSPORT_MAXIMUM_TRIES=5,
NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY=6,
NET_NETROM_TRANSPORT_BUSY_DELAY=7,
NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE=8,
NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT=9,
NET_NETROM_ROUTING_CONTROL=10,
NET_NETROM_LINK_FAILS_COUNT=11,
NET_NETROM_RESET=12
};
enum {
NET_AX25_IP_DEFAULT_MODE=1,
NET_AX25_DEFAULT_MODE=2,
NET_AX25_BACKOFF_TYPE=3,
NET_AX25_CONNECT_MODE=4,
NET_AX25_STANDARD_WINDOW=5,
NET_AX25_EXTENDED_WINDOW=6,
NET_AX25_T1_TIMEOUT=7,
NET_AX25_T2_TIMEOUT=8,
NET_AX25_T3_TIMEOUT=9,
NET_AX25_IDLE_TIMEOUT=10,
NET_AX25_N2=11,
NET_AX25_PACLEN=12,
NET_AX25_PROTOCOL=13,
NET_AX25_DAMA_SLAVE_TIMEOUT=14
};
enum {
NET_ROSE_RESTART_REQUEST_TIMEOUT=1,
NET_ROSE_CALL_REQUEST_TIMEOUT=2,
NET_ROSE_RESET_REQUEST_TIMEOUT=3,
NET_ROSE_CLEAR_REQUEST_TIMEOUT=4,
NET_ROSE_ACK_HOLD_BACK_TIMEOUT=5,
NET_ROSE_ROUTING_CONTROL=6,
NET_ROSE_LINK_FAIL_TIMEOUT=7,
NET_ROSE_MAX_VCS=8,
NET_ROSE_WINDOW_SIZE=9,
NET_ROSE_NO_ACTIVITY_TIMEOUT=10
};
enum {
NET_X25_RESTART_REQUEST_TIMEOUT=1,
NET_X25_CALL_REQUEST_TIMEOUT=2,
NET_X25_RESET_REQUEST_TIMEOUT=3,
NET_X25_CLEAR_REQUEST_TIMEOUT=4,
NET_X25_ACK_HOLD_BACK_TIMEOUT=5,
NET_X25_FORWARD=6
};
enum
{
NET_TR_RIF_TIMEOUT=1
};
enum {
NET_DECNET_NODE_TYPE = 1,
NET_DECNET_NODE_ADDRESS = 2,
NET_DECNET_NODE_NAME = 3,
NET_DECNET_DEFAULT_DEVICE = 4,
NET_DECNET_TIME_WAIT = 5,
NET_DECNET_DN_COUNT = 6,
NET_DECNET_DI_COUNT = 7,
NET_DECNET_DR_COUNT = 8,
NET_DECNET_DST_GC_INTERVAL = 9,
NET_DECNET_CONF = 10,
NET_DECNET_NO_FC_MAX_CWND = 11,
NET_DECNET_MEM = 12,
NET_DECNET_RMEM = 13,
NET_DECNET_WMEM = 14,
NET_DECNET_DEBUG_LEVEL = 255
};
enum {
NET_DECNET_CONF_LOOPBACK = -2,
NET_DECNET_CONF_DDCMP = -3,
NET_DECNET_CONF_PPP = -4,
NET_DECNET_CONF_X25 = -5,
NET_DECNET_CONF_GRE = -6,
NET_DECNET_CONF_ETHER = -7
};
enum {
NET_DECNET_CONF_DEV_PRIORITY = 1,
NET_DECNET_CONF_DEV_T1 = 2,
NET_DECNET_CONF_DEV_T2 = 3,
NET_DECNET_CONF_DEV_T3 = 4,
NET_DECNET_CONF_DEV_FORWARDING = 5,
NET_DECNET_CONF_DEV_BLKSIZE = 6,
NET_DECNET_CONF_DEV_STATE = 7
};
enum {
NET_SCTP_RTO_INITIAL = 1,
NET_SCTP_RTO_MIN = 2,
NET_SCTP_RTO_MAX = 3,
NET_SCTP_RTO_ALPHA = 4,
NET_SCTP_RTO_BETA = 5,
NET_SCTP_VALID_COOKIE_LIFE = 6,
NET_SCTP_ASSOCIATION_MAX_RETRANS = 7,
NET_SCTP_PATH_MAX_RETRANS = 8,
NET_SCTP_MAX_INIT_RETRANSMITS = 9,
NET_SCTP_HB_INTERVAL = 10,
NET_SCTP_PRESERVE_ENABLE = 11,
NET_SCTP_MAX_BURST = 12,
NET_SCTP_ADDIP_ENABLE = 13,
NET_SCTP_PRSCTP_ENABLE = 14,
NET_SCTP_SNDBUF_POLICY = 15,
NET_SCTP_SACK_TIMEOUT = 16,
NET_SCTP_RCVBUF_POLICY = 17,
};
enum {
NET_BRIDGE_NF_CALL_ARPTABLES = 1,
NET_BRIDGE_NF_CALL_IPTABLES = 2,
NET_BRIDGE_NF_CALL_IP6TABLES = 3,
NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4,
NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5,
};
enum {
NET_IRDA_DISCOVERY=1,
NET_IRDA_DEVNAME=2,
NET_IRDA_DEBUG=3,
NET_IRDA_FAST_POLL=4,
NET_IRDA_DISCOVERY_SLOTS=5,
NET_IRDA_DISCOVERY_TIMEOUT=6,
NET_IRDA_SLOT_TIMEOUT=7,
NET_IRDA_MAX_BAUD_RATE=8,
NET_IRDA_MIN_TX_TURN_TIME=9,
NET_IRDA_MAX_TX_DATA_SIZE=10,
NET_IRDA_MAX_TX_WINDOW=11,
NET_IRDA_MAX_NOREPLY_TIME=12,
NET_IRDA_WARN_NOREPLY_TIME=13,
NET_IRDA_LAP_KEEPALIVE_TIME=14,
};
enum
{
FS_NRINODE=1,
FS_STATINODE=2,
FS_MAXINODE=3,
FS_NRDQUOT=4,
FS_MAXDQUOT=5,
FS_NRFILE=6,
FS_MAXFILE=7,
FS_DENTRY=8,
FS_NRSUPER=9,
FS_MAXSUPER=10,
FS_OVERFLOWUID=11,
FS_OVERFLOWGID=12,
FS_LEASES=13,
FS_DIR_NOTIFY=14,
FS_LEASE_TIME=15,
FS_DQSTATS=16,
FS_XFS=17,
FS_AIO_NR=18,
FS_AIO_MAX_NR=19,
FS_INOTIFY=20,
FS_OCFS2=988,
};
enum {
FS_DQ_LOOKUPS = 1,
FS_DQ_DROPS = 2,
FS_DQ_READS = 3,
FS_DQ_WRITES = 4,
FS_DQ_CACHE_HITS = 5,
FS_DQ_ALLOCATED = 6,
FS_DQ_FREE = 7,
FS_DQ_SYNCS = 8,
FS_DQ_WARNINGS = 9,
};
enum {
DEV_CDROM=1,
DEV_HWMON=2,
DEV_PARPORT=3,
DEV_RAID=4,
DEV_MAC_HID=5,
DEV_SCSI=6,
DEV_IPMI=7,
};
enum {
DEV_CDROM_INFO=1,
DEV_CDROM_AUTOCLOSE=2,
DEV_CDROM_AUTOEJECT=3,
DEV_CDROM_DEBUG=4,
DEV_CDROM_LOCK=5,
DEV_CDROM_CHECK_MEDIA=6
};
enum {
DEV_PARPORT_DEFAULT=-3
};
enum {
DEV_RAID_SPEED_LIMIT_MIN=1,
DEV_RAID_SPEED_LIMIT_MAX=2
};
enum {
DEV_PARPORT_DEFAULT_TIMESLICE=1,
DEV_PARPORT_DEFAULT_SPINTIME=2
};
enum {
DEV_PARPORT_SPINTIME=1,
DEV_PARPORT_BASE_ADDR=2,
DEV_PARPORT_IRQ=3,
DEV_PARPORT_DMA=4,
DEV_PARPORT_MODES=5,
DEV_PARPORT_DEVICES=6,
DEV_PARPORT_AUTOPROBE=16
};
enum {
DEV_PARPORT_DEVICES_ACTIVE=-3,
};
enum {
DEV_PARPORT_DEVICE_TIMESLICE=1,
};
enum {
DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES=1,
DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES=2,
DEV_MAC_HID_MOUSE_BUTTON_EMULATION=3,
DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE=4,
DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE=5,
DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES=6
};
enum {
DEV_SCSI_LOGGING_LEVEL=1,
};
enum {
DEV_IPMI_POWEROFF_POWERCYCLE=1,
};
enum
{
ABI_DEFHANDLER_COFF=1,
ABI_DEFHANDLER_ELF=2,
ABI_DEFHANDLER_LCALL7=3,
ABI_DEFHANDLER_LIBCSO=4,
ABI_TRACE=5,
ABI_FAKE_UTSNAME=6,
};
struct ctl_table;
struct nsproxy;
struct ctl_table_root;
struct ctl_table_set {
struct list_head list;
struct ctl_table_set *parent;
int (*is_seen)(struct ctl_table_set *);
};
extern void setup_sysctl_set(struct ctl_table_set *p,
struct ctl_table_set *parent,
int (*is_seen)(struct ctl_table_set *));
struct ctl_table_header;
extern void sysctl_head_get(struct ctl_table_header *);
extern void sysctl_head_put(struct ctl_table_header *);
extern int sysctl_is_seen(struct ctl_table_header *);
extern struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *);
extern struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev);
extern struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces,
struct ctl_table_header *prev);
extern void sysctl_head_finish(struct ctl_table_header *prev);
extern int sysctl_perm(struct ctl_table_root *root,
struct ctl_table *table, int op);
typedef struct ctl_table ctl_table;
typedef int proc_handler (struct ctl_table *ctl, int write,
void *buffer, size_t *lenp, loff_t *ppos);
extern int proc_dostring(struct ctl_table *, int,
void *, size_t *, loff_t *);
extern int proc_dointvec(struct ctl_table *, int,
void *, size_t *, loff_t *);
extern int proc_dointvec_minmax(struct ctl_table *, int,
void *, size_t *, loff_t *);
extern int proc_dointvec_jiffies(struct ctl_table *, int,
void *, size_t *, loff_t *);
extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
void *, size_t *, loff_t *);
extern int proc_dointvec_ms_jiffies(struct ctl_table *, int,
void *, size_t *, loff_t *);
extern int proc_doulongvec_minmax(struct ctl_table *, int,
void *, size_t *, loff_t *);
extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
void *, size_t *, loff_t *);
extern int proc_do_large_bitmap(struct ctl_table *, int,
void *, size_t *, loff_t *);
# 1035 "include/linux/sysctl.h"
struct ctl_table
{
const char *procname;
void *data;
int maxlen;
mode_t mode;
struct ctl_table *child;
struct ctl_table *parent;
proc_handler *proc_handler;
void *extra1;
void *extra2;
};
struct ctl_table_root {
struct list_head root_list;
struct ctl_table_set default_set;
struct ctl_table_set *(*lookup)(struct ctl_table_root *root,
struct nsproxy *namespaces);
int (*permissions)(struct ctl_table_root *root,
struct nsproxy *namespaces, struct ctl_table *table);
};
struct ctl_table_header
{
struct ctl_table *ctl_table;
struct list_head ctl_entry;
int used;
int count;
struct completion *unregistering;
struct ctl_table *ctl_table_arg;
struct ctl_table_root *root;
struct ctl_table_set *set;
struct ctl_table *attached_by;
struct ctl_table *attached_to;
struct ctl_table_header *parent;
};
struct ctl_path {
const char *procname;
};
void register_sysctl_root(struct ctl_table_root *root);
struct ctl_table_header *__register_sysctl_paths(
struct ctl_table_root *root, struct nsproxy *namespaces,
const struct ctl_path *path, struct ctl_table *table);
struct ctl_table_header *register_sysctl_table(struct ctl_table * table);
struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
struct ctl_table *table);
void unregister_sysctl_table(struct ctl_table_header * table);
int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table);
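/*
 * A sysctl is exported by filling an array of struct ctl_table entries
 * (procname/data/maxlen/mode plus a proc_handler such as proc_dointvec),
 * terminating it with an empty sentinel entry, and handing it to
 * register_sysctl_paths() together with a struct ctl_path chain naming the
 * /proc/sys directory; unregister_sysctl_table() removes it again using the
 * ctl_table_header returned at registration time.
 */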
# 290 "include/linux/net.h" 2
# 1 "include/linux/ratelimit.h" 1
# 10 "include/linux/ratelimit.h"
struct ratelimit_state {
spinlock_t lock;
int interval;
int burst;
int printed;
int missed;
unsigned long begin;
};
# 28 "include/linux/ratelimit.h"
static inline __attribute__((always_inline)) void ratelimit_state_init(struct ratelimit_state *rs,
int interval, int burst)
{
do { spinlock_check(&rs->lock); do { *(&(&rs->lock)->rlock) = (raw_spinlock_t) { .raw_lock = { .lock = 0 }, }; } while (0); } while (0);
rs->interval = interval;
rs->burst = burst;
rs->printed = 0;
rs->missed = 0;
rs->begin = 0;
}
extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
# 291 "include/linux/net.h" 2
extern struct ratelimit_state net_ratelimit_state;
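/*
 * net_ratelimit() (declared further up) consults net_ratelimit_state through
 * ___ratelimit(): it allows at most 'burst' messages per 'interval' jiffies and
 * counts the rest in 'missed', so noisy paths are typically written as
 * "if (net_ratelimit()) printk(...);".
 */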
# 27 "include/linux/skbuff.h" 2
# 1 "include/linux/textsearch.h" 1
# 1 "include/linux/err.h" 1
# 22 "include/linux/err.h"
static inline __attribute__((always_inline)) void * __attribute__((warn_unused_result)) ERR_PTR(long error)
{
return (void *) error;
}
static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) PTR_ERR(const void *ptr)
{
return (long) ptr;
}
static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) IS_ERR(const void *ptr)
{
return __builtin_expect(!!(((unsigned long)ptr) >= (unsigned long)-4095), 0);
}
static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) IS_ERR_OR_NULL(const void *ptr)
{
return !ptr || __builtin_expect(!!(((unsigned long)ptr) >= (unsigned long)-4095), 0);
}
# 49 "include/linux/err.h"
static inline __attribute__((always_inline)) void * __attribute__((warn_unused_result)) ERR_CAST(const void *ptr)
{
return (void *) ptr;
}
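/*
 * ERR_PTR()/PTR_ERR()/IS_ERR() encode a small negative errno in the top 4095
 * values of the pointer range, so a function can return either a valid pointer
 * or an error through the same return value; callers test IS_ERR(p) and, when
 * it is set, recover the errno with PTR_ERR(p).
 */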
# 9 "include/linux/textsearch.h" 2
# 1 "include/linux/slab.h" 1
# 98 "include/linux/slab.h"
void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) kmem_cache_init(void);
int slab_is_available(void);
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
unsigned int kmem_cache_size(struct kmem_cache *);
const char *kmem_cache_name(struct kmem_cache *);
int kern_ptr_validate(const void *ptr, unsigned long size);
int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
# 142 "include/linux/slab.h"
void * __attribute__((warn_unused_result)) __krealloc(const void *, size_t, gfp_t);
void * __attribute__((warn_unused_result)) krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
# 168 "include/linux/slab.h"
# 1 "include/linux/slub_def.h" 1
# 13 "include/linux/slub_def.h"
# 1 "include/linux/kmemleak.h" 1
# 59 "include/linux/kmemleak.h"
static inline __attribute__((always_inline)) void kmemleak_init(void)
{
}
static inline __attribute__((always_inline)) void kmemleak_alloc(const void *ptr, size_t size, int min_count,
gfp_t gfp)
{
}
static inline __attribute__((always_inline)) void kmemleak_alloc_recursive(const void *ptr, size_t size,
int min_count, unsigned long flags,
gfp_t gfp)
{
}
static inline __attribute__((always_inline)) void kmemleak_free(const void *ptr)
{
}
static inline __attribute__((always_inline)) void kmemleak_free_part(const void *ptr, size_t size)
{
}
static inline __attribute__((always_inline)) void kmemleak_free_recursive(const void *ptr, unsigned long flags)
{
}
static inline __attribute__((always_inline)) void kmemleak_not_leak(const void *ptr)
{
}
static inline __attribute__((always_inline)) void kmemleak_ignore(const void *ptr)
{
}
static inline __attribute__((always_inline)) void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
}
static inline __attribute__((always_inline)) void kmemleak_erase(void **ptr)
{
}
static inline __attribute__((always_inline)) void kmemleak_no_scan(const void *ptr)
{
}
# 14 "include/linux/slub_def.h" 2
# 1 "include/trace/events/kmem.h" 1
# 1 "include/linux/tracepoint.h" 1
# 9 "include/trace/events/kmem.h" 2
# 1 "include/trace/events/gfpflags.h" 1
# 10 "include/trace/events/kmem.h" 2
# 43 "include/trace/events/kmem.h"
;
static inline __attribute__((always_inline)) void trace_kmalloc(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags) { } static inline __attribute__((always_inline)) int register_trace_kmalloc(void (*probe)(void *__data, unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags), void *data) { return -89; } static inline __attribute__((always_inline)) int unregister_trace_kmalloc(void (*probe)(void *__data, unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags), void *data) { return -89; } static inline __attribute__((always_inline)) void check_trace_callback_type_kmalloc(void (*cb)(void *__data, unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags)) { }
;
static inline __attribute__((always_inline)) void trace_kmem_cache_alloc(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags) { } static inline __attribute__((always_inline)) int register_trace_kmem_cache_alloc(void (*probe)(void *__data, unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags), void *data) { return -89; } static inline __attribute__((always_inline)) int unregister_trace_kmem_cache_alloc(void (*probe)(void *__data, unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags), void *data) { return -89; } static inline __attribute__((always_inline)) void check_trace_callback_type_kmem_cache_alloc(void (*cb)(void *__data, unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags)) { }
;
# 97 "include/trace/events/kmem.h"
;
static inline __attribute__((always_inline)) void trace_kmalloc_node(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node) { } static inline __attribute__((always_inline)) int register_trace_kmalloc_node(void (*probe)(void *__data, unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node), void *data) { return -89; } static inline __attribute__((always_inline)) int unregister_trace_kmalloc_node(void (*probe)(void *__data, unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node), void *data) { return -89; } static inline __attribute__((always_inline)) void check_trace_callback_type_kmalloc_node(void (*cb)(void *__data, unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node)) { }
;
static inline __attribute__((always_inline)) void trace_kmem_cache_alloc_node(unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node) { } static inline __attribute__((always_inline)) int register_trace_kmem_cache_alloc_node(void (*probe)(void *__data, unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node), void *data) { return -89; } static inline __attribute__((always_inline)) int unregister_trace_kmem_cache_alloc_node(void (*probe)(void *__data, unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node), void *data) { return -89; } static inline __attribute__((always_inline)) void check_trace_callback_type_kmem_cache_alloc_node(void (*cb)(void *__data, unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node)) { }
;
# 134 "include/trace/events/kmem.h"
;
static inline __attribute__((always_inline)) void trace_kfree(unsigned long call_site, const void *ptr) { } static inline __attribute__((always_inline)) int register_trace_kfree(void (*probe)(void *__data, unsigned long call_site, const void *ptr), void *data) { return -89; } static inline __attribute__((always_inline)) int unregister_trace_kfree(void (*probe)(void *__data, unsigned long call_site, const void *ptr), void *data) { return -89; } static inline __attribute__((always_inline)) void check_trace_callback_type_kfree(void (*cb)(void *__data, unsigned long call_site, const void *ptr)) { }
;
static inline __attribute__((always_inline)) void trace_kmem_cache_free(unsigned long call_site, const void *ptr) { } static inline __attribute__((always_inline)) int register_trace_kmem_cache_free(void (*probe)(void *__data, unsigned long call_site, const void *ptr), void *data) { return -89; } static inline __attribute__((always_inline)) int unregister_trace_kmem_cache_free(void (*probe)(void *__data, unsigned long call_site, const void *ptr), void *data) { return -89; } static inline __attribute__((always_inline)) void check_trace_callback_type_kmem_cache_free(void (*cb)(void *__data, unsigned long call_site, const void *ptr)) { }
;
static inline __attribute__((always_inline)) void trace_mm_page_free_direct(struct page *page, unsigned int order) { } static inline __attribute__((always_inline)) int register_trace_mm_page_free_direct(void (*probe)(void *__data, struct page *page, unsigned int order), void *data) { return -89; } static inline __attribute__((always_inline)) int unregister_trace_mm_page_free_direct(void (*probe)(void *__data, struct page *page, unsigned int order), void *data) { return -89; } static inline __attribute__((always_inline)) void check_trace_callback_type_mm_page_free_direct(void (*cb)(void *__data, struct page *page, unsigned int order)) { }
# 170 "include/trace/events/kmem.h"
;
static inline __attribute__((always_inline)) void trace_mm_pagevec_free(struct page *page, int cold) { } static inline __attribute__((always_inline)) int register_trace_mm_pagevec_free(void (*probe)(void *__data, struct page *page, int cold), void *data) { return -89; } static inline __attribute__((always_inline)) int unregister_trace_mm_pagevec_free(void (*probe)(void *__data, struct page *page, int cold), void *data) { return -89; } static inline __attribute__((always_inline)) void check_trace_callback_type_mm_pagevec_free(void (*cb)(void *__data, struct page *page, int cold)) { }
# 192 "include/trace/events/kmem.h"
;
static inline __attribute__((always_inline)) void trace_mm_page_alloc(struct page *page, unsigned int order, gfp_t gfp_flags, int migratetype) { } static inline __attribute__((always_inline)) int register_trace_mm_page_alloc(void (*probe)(void *__data, struct page *page, unsigned int order, gfp_t gfp_flags, int migratetype), void *data) { return -89; } static inline __attribute__((always_inline)) int unregister_trace_mm_page_alloc(void (*probe)(void *__data, struct page *page, unsigned int order, gfp_t gfp_flags, int migratetype), void *data) { return -89; } static inline __attribute__((always_inline)) void check_trace_callback_type_mm_page_alloc(void (*cb)(void *__data, struct page *page, unsigned int order, gfp_t gfp_flags, int migratetype)) { }
# 221 "include/trace/events/kmem.h"
;
# 247 "include/trace/events/kmem.h"
;
static inline __attribute__((always_inline)) void trace_mm_page_alloc_zone_locked(struct page *page, unsigned int order, int migratetype) { } static inline __attribute__((always_inline)) int register_trace_mm_page_alloc_zone_locked(void (*probe)(void *__data, struct page *page, unsigned int order, int migratetype), void *data) { return -89; } static inline __attribute__((always_inline)) int unregister_trace_mm_page_alloc_zone_locked(void (*probe)(void *__data, struct page *page, unsigned int order, int migratetype), void *data) { return -89; } static inline __attribute__((always_inline)) void check_trace_callback_type_mm_page_alloc_zone_locked(void (*cb)(void *__data, struct page *page, unsigned int order, int migratetype)) { }
;
static inline __attribute__((always_inline)) void trace_mm_page_pcpu_drain(struct page *page, unsigned int order, int migratetype) { } static inline __attribute__((always_inline)) int register_trace_mm_page_pcpu_drain(void (*probe)(void *__data, struct page *page, unsigned int order, int migratetype), void *data) { return -89; } static inline __attribute__((always_inline)) int unregister_trace_mm_page_pcpu_drain(void (*probe)(void *__data, struct page *page, unsigned int order, int migratetype), void *data) { return -89; } static inline __attribute__((always_inline)) void check_trace_callback_type_mm_page_pcpu_drain(void (*cb)(void *__data, struct page *page, unsigned int order, int migratetype)) { }
# 265 "include/trace/events/kmem.h"
;
static inline __attribute__((always_inline)) void trace_mm_page_alloc_extfrag(struct page *page, int alloc_order, int fallback_order, int alloc_migratetype, int fallback_migratetype) { } static inline __attribute__((always_inline)) int register_trace_mm_page_alloc_extfrag(void (*probe)(void *__data, struct page *page, int alloc_order, int fallback_order, int alloc_migratetype, int fallback_migratetype), void *data) { return -89; } static inline __attribute__((always_inline)) int unregister_trace_mm_page_alloc_extfrag(void (*probe)(void *__data, struct page *page, int alloc_order, int fallback_order, int alloc_migratetype, int fallback_migratetype), void *data) { return -89; } static inline __attribute__((always_inline)) void check_trace_callback_type_mm_page_alloc_extfrag(void (*cb)(void *__data, struct page *page, int alloc_order, int fallback_order, int alloc_migratetype, int fallback_migratetype)) { }
# 303 "include/trace/events/kmem.h"
;
# 1 "include/trace/define_trace.h" 1
# 308 "include/trace/events/kmem.h" 2
# 16 "include/linux/slub_def.h" 2
enum stat_item {
ALLOC_FASTPATH,
ALLOC_SLOWPATH,
FREE_FASTPATH,
FREE_SLOWPATH,
FREE_FROZEN,
FREE_ADD_PARTIAL,
FREE_REMOVE_PARTIAL,
ALLOC_FROM_PARTIAL,
ALLOC_SLAB,
ALLOC_REFILL,
FREE_SLAB,
CPUSLAB_FLUSH,
DEACTIVATE_FULL,
DEACTIVATE_EMPTY,
DEACTIVATE_TO_HEAD,
DEACTIVATE_TO_TAIL,
DEACTIVATE_REMOTE_FREES,
ORDER_FALLBACK,
NR_SLUB_STAT_ITEMS };
struct kmem_cache_cpu {
void **freelist;
struct page *page;
int node;
};
struct kmem_cache_node {
spinlock_t list_lock;
unsigned long nr_partial;
struct list_head partial;
};
struct kmem_cache_order_objects {
unsigned long x;
};
struct kmem_cache {
struct kmem_cache_cpu *cpu_slab;
unsigned long flags;
int size;
int objsize;
int offset;
struct kmem_cache_order_objects oo;
struct kmem_cache_order_objects max;
struct kmem_cache_order_objects min;
gfp_t allocflags;
int refcount;
void (*ctor)(void *);
int inuse;
int align;
unsigned long min_partial;
const char *name;
struct list_head list;
# 102 "include/linux/slub_def.h"
struct kmem_cache_node local_node;
};
# 154 "include/linux/slub_def.h"
extern struct kmem_cache kmalloc_caches[(12 + 2)];
static inline __attribute__((always_inline)) __attribute__((always_inline)) int kmalloc_index(size_t size)
{
if (!size)
return 0;
if (size <= 128)
return ( __builtin_constant_p(128) ? ( (128) < 1 ? ____ilog2_NaN() : (128) & (1ULL << 63) ? 63 : (128) & (1ULL << 62) ? 62 : (128) & (1ULL << 61) ? 61 : (128) & (1ULL << 60) ? 60 : (128) & (1ULL << 59) ? 59 : (128) & (1ULL << 58) ? 58 : (128) & (1ULL << 57) ? 57 : (128) & (1ULL << 56) ? 56 : (128) & (1ULL << 55) ? 55 : (128) & (1ULL << 54) ? 54 : (128) & (1ULL << 53) ? 53 : (128) & (1ULL << 52) ? 52 : (128) & (1ULL << 51) ? 51 : (128) & (1ULL << 50) ? 50 : (128) & (1ULL << 49) ? 49 : (128) & (1ULL << 48) ? 48 : (128) & (1ULL << 47) ? 47 : (128) & (1ULL << 46) ? 46 : (128) & (1ULL << 45) ? 45 : (128) & (1ULL << 44) ? 44 : (128) & (1ULL << 43) ? 43 : (128) & (1ULL << 42) ? 42 : (128) & (1ULL << 41) ? 41 : (128) & (1ULL << 40) ? 40 : (128) & (1ULL << 39) ? 39 : (128) & (1ULL << 38) ? 38 : (128) & (1ULL << 37) ? 37 : (128) & (1ULL << 36) ? 36 : (128) & (1ULL << 35) ? 35 : (128) & (1ULL << 34) ? 34 : (128) & (1ULL << 33) ? 33 : (128) & (1ULL << 32) ? 32 : (128) & (1ULL << 31) ? 31 : (128) & (1ULL << 30) ? 30 : (128) & (1ULL << 29) ? 29 : (128) & (1ULL << 28) ? 28 : (128) & (1ULL << 27) ? 27 : (128) & (1ULL << 26) ? 26 : (128) & (1ULL << 25) ? 25 : (128) & (1ULL << 24) ? 24 : (128) & (1ULL << 23) ? 23 : (128) & (1ULL << 22) ? 22 : (128) & (1ULL << 21) ? 21 : (128) & (1ULL << 20) ? 20 : (128) & (1ULL << 19) ? 19 : (128) & (1ULL << 18) ? 18 : (128) & (1ULL << 17) ? 17 : (128) & (1ULL << 16) ? 16 : (128) & (1ULL << 15) ? 15 : (128) & (1ULL << 14) ? 14 : (128) & (1ULL << 13) ? 13 : (128) & (1ULL << 12) ? 12 : (128) & (1ULL << 11) ? 11 : (128) & (1ULL << 10) ? 10 : (128) & (1ULL << 9) ? 9 : (128) & (1ULL << 8) ? 8 : (128) & (1ULL << 7) ? 7 : (128) & (1ULL << 6) ? 6 : (128) & (1ULL << 5) ? 5 : (128) & (1ULL << 4) ? 4 : (128) & (1ULL << 3) ? 3 : (128) & (1ULL << 2) ? 2 : (128) & (1ULL << 1) ? 1 : (128) & (1ULL << 0) ? 0 : ____ilog2_NaN() ) : (sizeof(128) <= 4) ? __ilog2_u32(128) : __ilog2_u64(128) );
if (128 <= 32 && size > 64 && size <= 96)
return 1;
if (128 <= 64 && size > 128 && size <= 192)
return 2;
if (size <= 8) return 3;
if (size <= 16) return 4;
if (size <= 32) return 5;
if (size <= 64) return 6;
if (size <= 128) return 7;
if (size <= 256) return 8;
if (size <= 512) return 9;
if (size <= 1024) return 10;
if (size <= 2 * 1024) return 11;
if (size <= 4 * 1024) return 12;
if (size <= 8 * 1024) return 13;
if (size <= 16 * 1024) return 14;
if (size <= 32 * 1024) return 15;
if (size <= 64 * 1024) return 16;
if (size <= 128 * 1024) return 17;
if (size <= 256 * 1024) return 18;
if (size <= 512 * 1024) return 19;
if (size <= 1024 * 1024) return 20;
if (size <= 2 * 1024 * 1024) return 21;
return -1;
# 204 "include/linux/slub_def.h"
}
static inline __attribute__((always_inline)) __attribute__((always_inline)) struct kmem_cache *kmalloc_slab(size_t size)
{
int index = kmalloc_index(size);
if (index == 0)
return ((void *)0);
return &kmalloc_caches[index];
}
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
static inline __attribute__((always_inline)) __attribute__((always_inline)) void *
kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
{
return kmem_cache_alloc(s, gfpflags);
}
static inline __attribute__((always_inline)) __attribute__((always_inline)) void *kmalloc_large(size_t size, gfp_t flags)
{
unsigned int order = get_order(size);
void *ret = (void *) __get_free_pages(flags | (( gfp_t)0x4000u), order);
kmemleak_alloc(ret, size, 1, flags);
trace_kmalloc(({ __label__ __here; __here: (unsigned long)&&__here; }), ret, size, ((1UL) << 12) << order, flags);
return ret;
}
static inline __attribute__((always_inline)) __attribute__((always_inline)) void *kmalloc(size_t size, gfp_t flags)
{
void *ret;
if (__builtin_constant_p(size)) {
if (size > (2 * ((1UL) << 12)))
return kmalloc_large(size, flags);
if (!(flags & ( gfp_t)0)) {
struct kmem_cache *s = kmalloc_slab(size);
if (!s)
return ((void *)16);
ret = kmem_cache_alloc_notrace(s, flags);
trace_kmalloc(({ __label__ __here; __here: (unsigned long)&&__here; }), ret, size, s->size, flags);
return ret;
}
}
return __kmalloc(size, flags);
}
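/*
 * Editorial note, not part of the original translation unit: the SLUB
 * kmalloc() fast path above resolves compile-time-constant sizes directly.
 * With the constants visible in this dump (KMALLOC_MIN_SIZE expanded to 128,
 * PAGE_SIZE to (1UL) << 12), a hypothetical call behaves roughly like:
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *		size == 0        -> kmalloc_index() == 0 -> returns (void *)16
 *		                    (the expanded ZERO_SIZE_PTR), not NULL
 *		size <= 128      -> kmalloc_caches[7] (128-byte objects)
 *		129 .. 8192      -> next power-of-two cache, indices 8..13
 *		size > 2 pages   -> kmalloc_large() -> __get_free_pages()
 *
 * The "(flags & ( gfp_t)0)" test is SLUB_DMA with no ZONE_DMA configured,
 * so it never diverts constant-size requests to the __kmalloc() slow path;
 * only non-constant sizes take that path.
 */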
# 169 "include/linux/slab.h" 2
# 226 "include/linux/slab.h"
static inline __attribute__((always_inline)) void *kcalloc(size_t n, size_t size, gfp_t flags)
{
if (size != 0 && n > (~0UL) / size)
return ((void *)0);
return __kmalloc(n * size, flags | (( gfp_t)0x8000u));
}
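/*
 * Editorial note (illustrative; the identifiers below are hypothetical):
 * kcalloc() guards the n * size multiplication against overflow -- if
 * size != 0 and n > ULONG_MAX / size the product would wrap, so it returns
 * NULL instead -- and then delegates to __kmalloc() with __GFP_ZERO (the raw
 * 0x8000u above) so the array comes back zero-filled:
 *
 *	u32 *slots = kcalloc(nr_slots, sizeof(*slots), GFP_KERNEL);
 *	if (!slots)
 *		return -ENOMEM;
 */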
# 244 "include/linux/slab.h"
static inline __attribute__((always_inline)) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
return kmalloc(size, flags);
}
static inline __attribute__((always_inline)) void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
return __kmalloc(size, flags);
}
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
static inline __attribute__((always_inline)) void *kmem_cache_alloc_node(struct kmem_cache *cachep,
gfp_t flags, int node)
{
return kmem_cache_alloc(cachep, flags);
}
# 273 "include/linux/slab.h"
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
# 311 "include/linux/slab.h"
static inline __attribute__((always_inline)) void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
return kmem_cache_alloc(k, flags | (( gfp_t)0x8000u));
}
static inline __attribute__((always_inline)) void *kzalloc(size_t size, gfp_t flags)
{
return kmalloc(size, flags | (( gfp_t)0x8000u));
}
static inline __attribute__((always_inline)) void *kzalloc_node(size_t size, gfp_t flags, int node)
{
return kmalloc_node(size, flags | (( gfp_t)0x8000u), node);
}
void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) kmem_cache_init_late(void);
# 10 "include/linux/textsearch.h" 2
struct ts_config;
# 21 "include/linux/textsearch.h"
struct ts_state
{
unsigned int offset;
char cb[40];
};
# 37 "include/linux/textsearch.h"
struct ts_ops
{
const char *name;
struct ts_config * (*init)(const void *, unsigned int, gfp_t, int);
unsigned int (*find)(struct ts_config *,
struct ts_state *);
void (*destroy)(struct ts_config *);
void * (*get_pattern)(struct ts_config *);
unsigned int (*get_pattern_len)(struct ts_config *);
struct module *owner;
struct list_head list;
};
# 57 "include/linux/textsearch.h"
struct ts_config
{
struct ts_ops *ops;
int flags;
# 74 "include/linux/textsearch.h"
unsigned int (*get_next_block)(unsigned int consumed,
const u8 **dst,
struct ts_config *conf,
struct ts_state *state);
# 87 "include/linux/textsearch.h"
void (*finish)(struct ts_config *conf,
struct ts_state *state);
};
# 103 "include/linux/textsearch.h"
static inline __attribute__((always_inline)) unsigned int textsearch_next(struct ts_config *conf,
struct ts_state *state)
{
unsigned int ret = conf->ops->find(conf, state);
if (conf->finish)
conf->finish(conf, state);
return ret;
}
# 122 "include/linux/textsearch.h"
static inline __attribute__((always_inline)) unsigned int textsearch_find(struct ts_config *conf,
struct ts_state *state)
{
state->offset = 0;
return textsearch_next(conf, state);
}
static inline __attribute__((always_inline)) void *textsearch_get_pattern(struct ts_config *conf)
{
return conf->ops->get_pattern(conf);
}
static inline __attribute__((always_inline)) unsigned int textsearch_get_pattern_len(struct ts_config *conf)
{
return conf->ops->get_pattern_len(conf);
}
extern int textsearch_register(struct ts_ops *);
extern int textsearch_unregister(struct ts_ops *);
extern struct ts_config *textsearch_prepare(const char *, const void *,
unsigned int, gfp_t, int);
extern void textsearch_destroy(struct ts_config *conf);
extern unsigned int textsearch_find_continuous(struct ts_config *,
struct ts_state *,
const void *, unsigned int);
static inline __attribute__((always_inline)) struct ts_config *alloc_ts_config(size_t payload,
gfp_t gfp_mask)
{
struct ts_config *conf;
conf = kzalloc((((sizeof(*conf)) + 8 -1) & ~(8 -1)) + payload, gfp_mask);
if (conf == ((void *)0))
return ERR_PTR(-12);
return conf;
}
static inline __attribute__((always_inline)) void *ts_config_priv(struct ts_config *conf)
{
return ((u8 *) conf + (((sizeof(struct ts_config)) + 8 -1) & ~(8 -1)));
}
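/*
 * Editorial note (usage sketch only; "pattern", "pat_len", "data" and "len"
 * are hypothetical): the textsearch API above is driven as prepare / find /
 * destroy, e.g. with the "kmp" algorithm:
 *
 *	struct ts_config *conf;
 *	struct ts_state st;
 *
 *	conf = textsearch_prepare("kmp", pattern, pat_len, GFP_KERNEL, TS_AUTOLOAD);
 *	if (!IS_ERR(conf)) {
 *		unsigned int pos = textsearch_find_continuous(conf, &st, data, len);
 *		... pos == UINT_MAX means no match ...
 *		textsearch_destroy(conf);
 *	}
 *
 * alloc_ts_config() rounds sizeof(*conf) up to an 8-byte boundary
 * ((x + 8 - 1) & ~(8 - 1)), which is why ts_config_priv() can hand back a
 * naturally aligned per-algorithm private area placed right after the struct.
 */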
# 28 "include/linux/skbuff.h" 2
# 1 "include/net/checksum.h" 1
# 26 "include/net/checksum.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/checksum.h" 1
# 14 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/checksum.h"
# 1 "include/linux/in6.h" 1
# 30 "include/linux/in6.h"
struct in6_addr {
union {
__u8 u6_addr8[16];
__be16 u6_addr16[8];
__be32 u6_addr32[4];
} in6_u;
};
extern const struct in6_addr in6addr_any;
extern const struct in6_addr in6addr_loopback;
extern const struct in6_addr in6addr_linklocal_allnodes;
extern const struct in6_addr in6addr_linklocal_allrouters;
struct sockaddr_in6 {
unsigned short int sin6_family;
__be16 sin6_port;
__be32 sin6_flowinfo;
struct in6_addr sin6_addr;
__u32 sin6_scope_id;
};
struct ipv6_mreq {
struct in6_addr ipv6mr_multiaddr;
int ipv6mr_ifindex;
};
struct in6_flowlabel_req {
struct in6_addr flr_dst;
__be32 flr_label;
__u8 flr_action;
__u8 flr_share;
__u16 flr_flags;
__u16 flr_expires;
__u16 flr_linger;
__u32 __flr_pad;
};
# 15 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/checksum.h" 2
# 30 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/checksum.h"
__wsum csum_partial(const void *buff, int len, __wsum sum);
__wsum __csum_partial_copy_user(const void *src, void *dst,
int len, __wsum sum, int *err_ptr);
static inline __attribute__((always_inline))
__wsum csum_partial_copy_from_user(const void *src, void *dst, int len,
__wsum sum, int *err_ptr)
{
might_fault();
return __csum_partial_copy_user(( void *)src, dst,
len, sum, err_ptr);
}
static inline __attribute__((always_inline))
__wsum csum_and_copy_to_user(const void *src, void *dst, int len,
__wsum sum, int *err_ptr)
{
might_fault();
if (__builtin_expect(!!(({ unsigned long __addr = (unsigned long) ((dst)); unsigned long __size = (len); unsigned long __mask = (__current_thread_info->addr_limit).seg; unsigned long __ok; (void)0; __ok = (signed long)(__mask & (__addr | (__addr + __size) | ((__builtin_constant_p(__size) && (signed long) (__size) > 0) ? 0 : (__size)))); __ok == 0; })), 1))
return __csum_partial_copy_user(src, ( void *)dst,
len, sum, err_ptr);
if (len)
*err_ptr = -14;
return ( __wsum)-1;
}
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum);
static inline __attribute__((always_inline)) __sum16 csum_fold(__wsum sum)
{
__asm__(
" .set push # csum_fold\n"
" .set noat \n"
" sll $1, %0, 16 \n"
" addu %0, $1 \n"
" sltu $1, %0, $1 \n"
" srl %0, %0, 16 \n"
" addu %0, $1 \n"
" xori %0, 0xffff \n"
" .set pop"
: "=r" (sum)
: "0" (sum));
return ( __sum16)sum;
}
# 101 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/checksum.h"
static inline __attribute__((always_inline)) __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
const unsigned int *word = iph;
const unsigned int *stop = word + ihl;
unsigned int csum;
int carry;
csum = word[0];
csum += word[1];
carry = (csum < word[1]);
csum += carry;
csum += word[2];
carry = (csum < word[2]);
csum += carry;
csum += word[3];
carry = (csum < word[3]);
csum += carry;
word += 4;
do {
csum += *word;
carry = (csum < *word);
csum += carry;
word++;
} while (word != stop);
return csum_fold(csum);
}
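/*
 * Editorial note: csum_fold() adds the two 16-bit halves of the 32-bit
 * partial sum with an end-around carry and returns the ones' complement of
 * the low half, e.g. 0x00012345 -> 0x0001 + 0x2345 = 0x2346 -> ~0x2346 =
 * 0xdcb9.  ip_fast_csum() feeds it the IP header summed one 32-bit word at a
 * time (the first four words unrolled, the rest in the loop), folding the
 * carry of every addition back in via the sltu/add pair; ihl is the header
 * length in 32-bit words, so it is at least 5.  For a received header whose
 * checksum field is correct the folded result is 0, which is how the IPv4
 * input path typically tests it.
 */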
static inline __attribute__((always_inline)) __wsum csum_tcpudp_nofold(__be32 saddr,
__be32 daddr, unsigned short len, unsigned short proto,
__wsum sum)
{
__asm__(
" .set push # csum_tcpudp_nofold\n"
" .set noat \n"
" addu %0, %2 \n"
" sltu $1, %0, %2 \n"
" addu %0, $1 \n"
" addu %0, %3 \n"
" sltu $1, %0, %3 \n"
" addu %0, $1 \n"
" addu %0, %4 \n"
" sltu $1, %0, %4 \n"
" addu %0, $1 \n"
# 160 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/checksum.h"
" .set pop"
: "=r" (sum)
: "0" (( unsigned long)daddr),
"r" (( unsigned long)saddr),
"r" (proto + len),
"r" (( unsigned long)sum));
return sum;
}
static inline __attribute__((always_inline)) __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
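/*
 * Editorial note (sketch; "uh", "saddr", "daddr" and "len" are hypothetical):
 * csum_tcpudp_nofold() accumulates the TCP/UDP pseudo-header -- source and
 * destination address plus proto + len -- into the running 32-bit sum with
 * end-around carries, and csum_tcpudp_magic() folds that to the final 16-bit
 * checksum.  The little-endian correction for the proto/len word was
 * preprocessed away (the jump to line 160 above), so this dump appears to be
 * a big-endian build.  Transmit code typically combines it with a payload
 * sum along these lines:
 *
 *	__wsum psum = csum_partial(uh, len, 0);
 *	uh->check  = csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, psum);
 */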
static inline __attribute__((always_inline)) __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
static __inline__ __attribute__((always_inline)) __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
__asm__(
" .set push # csum_ipv6_magic\n"
" .set noreorder \n"
" .set noat \n"
" addu %0, %5 # proto (long in network byte order)\n"
" sltu $1, %0, %5 \n"
" addu %0, $1 \n"
" addu %0, %6 # csum\n"
" sltu $1, %0, %6 \n"
" lw %1, 0(%2) # four words source address\n"
" addu %0, $1 \n"
" addu %0, %1 \n"
" sltu $1, %0, %1 \n"
" lw %1, 4(%2) \n"
" addu %0, $1 \n"
" addu %0, %1 \n"
" sltu $1, %0, %1 \n"
" lw %1, 8(%2) \n"
" addu %0, $1 \n"
" addu %0, %1 \n"
" sltu $1, %0, %1 \n"
" lw %1, 12(%2) \n"
" addu %0, $1 \n"
" addu %0, %1 \n"
" sltu $1, %0, %1 \n"
" lw %1, 0(%3) \n"
" addu %0, $1 \n"
" addu %0, %1 \n"
" sltu $1, %0, %1 \n"
" lw %1, 4(%3) \n"
" addu %0, $1 \n"
" addu %0, %1 \n"
" sltu $1, %0, %1 \n"
" lw %1, 8(%3) \n"
" addu %0, $1 \n"
" addu %0, %1 \n"
" sltu $1, %0, %1 \n"
" lw %1, 12(%3) \n"
" addu %0, $1 \n"
" addu %0, %1 \n"
" sltu $1, %0, %1 \n"
" addu %0, $1 # Add final carry\n"
" .set pop"
: "=r" (sum), "=r" (proto)
: "r" (saddr), "r" (daddr),
"0" ((( __be32)(__u32)(len))), "1" ((( __be32)(__u32)(proto))), "r" (sum));
return csum_fold(sum);
}
# 27 "include/net/checksum.h" 2
static inline __attribute__((always_inline))
__wsum csum_and_copy_from_user (const void *src, void *dst,
int len, __wsum sum, int *err_ptr)
{
if (__builtin_expect(!!(({ unsigned long __addr = (unsigned long) ((src)); unsigned long __size = (len); unsigned long __mask = (__current_thread_info->addr_limit).seg; unsigned long __ok; (void)0; __ok = (signed long)(__mask & (__addr | (__addr + __size) | ((__builtin_constant_p(__size) && (signed long) (__size) > 0) ? 0 : (__size)))); __ok == 0; })), 1))
return csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
if (len)
*err_ptr = -14;
return sum;
}
# 60 "include/net/checksum.h"
static inline __attribute__((always_inline)) __wsum csum_add(__wsum csum, __wsum addend)
{
u32 res = ( u32)csum;
res += ( u32)addend;
return ( __wsum)(res + (res < ( u32)addend));
}
static inline __attribute__((always_inline)) __wsum csum_sub(__wsum csum, __wsum addend)
{
return csum_add(csum, ~addend);
}
static inline __attribute__((always_inline)) __wsum
csum_block_add(__wsum csum, __wsum csum2, int offset)
{
u32 sum = ( u32)csum2;
if (offset&1)
sum = ((sum&0xFF00FF)<<8)+((sum>>8)&0xFF00FF);
return csum_add(csum, ( __wsum)sum);
}
static inline __attribute__((always_inline)) __wsum
csum_block_sub(__wsum csum, __wsum csum2, int offset)
{
u32 sum = ( u32)csum2;
if (offset&1)
sum = ((sum&0xFF00FF)<<8)+((sum>>8)&0xFF00FF);
return csum_sub(csum, ( __wsum)sum);
}
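/*
 * Editorial note: the Internet checksum is independent of where a block sits
 * in the buffer only in whole 16-bit units.  When a partial sum (csum2) was
 * computed over a block that is merged at an odd byte offset, its bytes land
 * in the opposite lanes of the outer sum, so csum_block_add() and
 * csum_block_sub() first swap the bytes within each 16-bit half of the
 * 32-bit value -- ((sum & 0xFF00FF) << 8) + ((sum >> 8) & 0xFF00FF) turns
 * 0xAABBCCDD into 0xBBAADDCC -- before combining it with csum_add() or
 * csum_sub().
 */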
static inline __attribute__((always_inline)) __wsum csum_unfold(__sum16 n)
{
return ( __wsum)n;
}
static inline __attribute__((always_inline)) void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
__be32 diff[] = { ~from, to };
*sum = csum_fold(csum_partial(diff, sizeof(diff), ~csum_unfold(*sum)));
}
static inline __attribute__((always_inline)) void csum_replace2(__sum16 *sum, __be16 from, __be16 to)
{
csum_replace4(sum, ( __be32)from, ( __be32)to);
}
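/*
 * Editorial note (usage sketch; "iph", "tcph", "old_addr" and "new_addr" are
 * hypothetical): csum_replace4() performs an RFC 1624-style incremental
 * update -- effectively new_csum = ~(~old_csum + ~from + to) -- by running
 * csum_partial() over the pair { ~from, to } seeded with the unfolded
 * complement of the old checksum and folding the result.  Address-rewriting
 * code (NAT-style) uses the pair of helpers roughly like this:
 *
 *	csum_replace4(&iph->check, old_addr, new_addr);
 *	inet_proto_csum_replace4(&tcph->check, skb, old_addr, new_addr, 1);
 *
 * with pseudohdr == 1 because the addresses also feed the TCP pseudo-header.
 */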
struct sk_buff;
extern void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
__be32 from, __be32 to, int pseudohdr);
static inline __attribute__((always_inline)) void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
__be16 from, __be16 to,
int pseudohdr)
{
inet_proto_csum_replace4(sum, skb, ( __be32)from,
( __be32)to, pseudohdr);
}
# 29 "include/linux/skbuff.h" 2
# 1 "include/linux/dmaengine.h" 1
# 24 "include/linux/dmaengine.h"
# 1 "include/linux/device.h" 1
# 16 "include/linux/device.h"
# 1 "include/linux/ioport.h" 1
# 18 "include/linux/ioport.h"
struct resource {
resource_size_t start;
resource_size_t end;
const char *name;
unsigned long flags;
struct resource *parent, *sibling, *child;
};
struct resource_list {
struct resource_list *next;
struct resource *res;
struct pci_dev *dev;
};
# 113 "include/linux/ioport.h"
extern struct resource ioport_resource;
extern struct resource iomem_resource;
extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
extern int request_resource(struct resource *root, struct resource *new);
extern int release_resource(struct resource *new);
void release_child_resources(struct resource *new);
extern void reserve_region_with_split(struct resource *root,
resource_size_t start, resource_size_t end,
const char *name);
extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
extern int insert_resource(struct resource *parent, struct resource *new);
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
extern int allocate_resource(struct resource *root, struct resource *new,
resource_size_t size, resource_size_t min,
resource_size_t max, resource_size_t align,
resource_size_t (*alignf)(void *,
const struct resource *,
resource_size_t,
resource_size_t),
void *alignf_data);
int adjust_resource(struct resource *res, resource_size_t start,
resource_size_t size);
resource_size_t resource_alignment(struct resource *res);
static inline __attribute__((always_inline)) resource_size_t resource_size(const struct resource *res)
{
return res->end - res->start + 1;
}
static inline __attribute__((always_inline)) unsigned long resource_type(const struct resource *res)
{
return res->flags & 0x00001f00;
}
# 155 "include/linux/ioport.h"
extern struct resource * __request_region(struct resource *,
resource_size_t start,
resource_size_t n,
const char *name, int flags);
extern int __check_region(struct resource *, resource_size_t, resource_size_t);
extern void __release_region(struct resource *, resource_size_t,
resource_size_t);
static inline __attribute__((always_inline)) int __attribute__((deprecated)) check_region(resource_size_t s,
resource_size_t n)
{
return __check_region(&ioport_resource, s, n);
}
struct device;
extern struct resource * __devm_request_region(struct device *dev,
struct resource *parent, resource_size_t start,
resource_size_t n, const char *name);
extern void __devm_release_region(struct device *dev, struct resource *parent,
resource_size_t start, resource_size_t n);
extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
extern int iomem_is_exclusive(u64 addr);
extern int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
void *arg, int (*func)(unsigned long, unsigned long, void *));
# 17 "include/linux/device.h" 2
# 1 "include/linux/klist.h" 1
# 19 "include/linux/klist.h"
struct klist_node;
struct klist {
spinlock_t k_lock;
struct list_head k_list;
void (*get)(struct klist_node *);
void (*put)(struct klist_node *);
} __attribute__ ((aligned (4)));
# 36 "include/linux/klist.h"
extern void klist_init(struct klist *k, void (*get)(struct klist_node *),
void (*put)(struct klist_node *));
struct klist_node {
void *n_klist;
struct list_head n_node;
struct kref n_ref;
};
extern void klist_add_tail(struct klist_node *n, struct klist *k);
extern void klist_add_head(struct klist_node *n, struct klist *k);
extern void klist_add_after(struct klist_node *n, struct klist_node *pos);
extern void klist_add_before(struct klist_node *n, struct klist_node *pos);
extern void klist_del(struct klist_node *n);
extern void klist_remove(struct klist_node *n);
extern int klist_node_attached(struct klist_node *n);
struct klist_iter {
struct klist *i_klist;
struct klist_node *i_cur;
};
extern void klist_iter_init(struct klist *k, struct klist_iter *i);
extern void klist_iter_init_node(struct klist *k, struct klist_iter *i,
struct klist_node *n);
extern void klist_iter_exit(struct klist_iter *i);
extern struct klist_node *klist_next(struct klist_iter *i);
# 19 "include/linux/device.h" 2
# 1 "include/linux/pm.h" 1
# 34 "include/linux/pm.h"
extern void (*pm_idle)(void);
extern void (*pm_power_off)(void);
extern void (*pm_power_off_prepare)(void);
struct device;
typedef struct pm_message {
int event;
} pm_message_t;
# 198 "include/linux/pm.h"
struct dev_pm_ops {
int (*prepare)(struct device *dev);
void (*complete)(struct device *dev);
int (*suspend)(struct device *dev);
int (*resume)(struct device *dev);
int (*freeze)(struct device *dev);
int (*thaw)(struct device *dev);
int (*poweroff)(struct device *dev);
int (*restore)(struct device *dev);
int (*suspend_noirq)(struct device *dev);
int (*resume_noirq)(struct device *dev);
int (*freeze_noirq)(struct device *dev);
int (*thaw_noirq)(struct device *dev);
int (*poweroff_noirq)(struct device *dev);
int (*restore_noirq)(struct device *dev);
int (*runtime_suspend)(struct device *dev);
int (*runtime_resume)(struct device *dev);
int (*runtime_idle)(struct device *dev);
};
# 392 "include/linux/pm.h"
enum dpm_state {
DPM_INVALID,
DPM_ON,
DPM_PREPARING,
DPM_RESUMING,
DPM_SUSPENDING,
DPM_OFF,
DPM_OFF_IRQ,
};
# 425 "include/linux/pm.h"
enum rpm_status {
RPM_ACTIVE = 0,
RPM_RESUMING,
RPM_SUSPENDED,
RPM_SUSPENDING,
};
# 444 "include/linux/pm.h"
enum rpm_request {
RPM_REQ_NONE = 0,
RPM_REQ_IDLE,
RPM_REQ_SUSPEND,
RPM_REQ_RESUME,
};
struct dev_pm_info {
pm_message_t power_state;
unsigned int can_wakeup:1;
unsigned int should_wakeup:1;
unsigned async_suspend:1;
enum dpm_state status;
# 484 "include/linux/pm.h"
};
extern void update_pm_runtime_accounting(struct device *dev);
# 572 "include/linux/pm.h"
static inline __attribute__((always_inline)) int dpm_suspend_start(pm_message_t state)
{
return 0;
}
static inline __attribute__((always_inline)) void device_pm_wait_for_dev(struct device *a, struct device *b) {}
static inline __attribute__((always_inline)) void pm_wakeup_event(struct device *dev, unsigned int msec) {}
static inline __attribute__((always_inline)) void pm_stay_awake(struct device *dev) {}
static inline __attribute__((always_inline)) void pm_relax(void) {}
enum dpm_order {
DPM_ORDER_NONE,
DPM_ORDER_DEV_AFTER_PARENT,
DPM_ORDER_PARENT_BEFORE_DEV,
DPM_ORDER_DEV_LAST,
};
extern unsigned int pm_flags;
# 25 "include/linux/device.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/device.h" 1
# 1 "include/asm-generic/device.h" 1
# 9 "include/asm-generic/device.h"
struct dev_archdata {
};
struct pdev_archdata {
};
# 6 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/device.h" 2
# 27 "include/linux/device.h" 2
struct device;
struct device_private;
struct device_driver;
struct driver_private;
struct class;
struct class_private;
struct bus_type;
struct bus_type_private;
struct device_node;
struct bus_attribute {
struct attribute attr;
ssize_t (*show)(struct bus_type *bus, char *buf);
ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
};
extern int __attribute__((warn_unused_result)) bus_create_file(struct bus_type *,
struct bus_attribute *);
extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
struct bus_type {
const char *name;
struct bus_attribute *bus_attrs;
struct device_attribute *dev_attrs;
struct driver_attribute *drv_attrs;
int (*match)(struct device *dev, struct device_driver *drv);
int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
int (*probe)(struct device *dev);
int (*remove)(struct device *dev);
void (*shutdown)(struct device *dev);
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
const struct dev_pm_ops *pm;
struct bus_type_private *p;
};
extern int __attribute__((warn_unused_result)) bus_register(struct bus_type *bus);
extern void bus_unregister(struct bus_type *bus);
extern int __attribute__((warn_unused_result)) bus_rescan_devices(struct bus_type *bus);
int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
int (*fn)(struct device *dev, void *data));
struct device *bus_find_device(struct bus_type *bus, struct device *start,
void *data,
int (*match)(struct device *dev, void *data));
struct device *bus_find_device_by_name(struct bus_type *bus,
struct device *start,
const char *name);
int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
void *data, int (*fn)(struct device_driver *, void *));
void bus_sort_breadthfirst(struct bus_type *bus,
int (*compare)(const struct device *a,
const struct device *b));
struct notifier_block;
extern int bus_register_notifier(struct bus_type *bus,
struct notifier_block *nb);
extern int bus_unregister_notifier(struct bus_type *bus,
struct notifier_block *nb);
# 120 "include/linux/device.h"
extern struct kset *bus_get_kset(struct bus_type *bus);
extern struct klist *bus_get_device_klist(struct bus_type *bus);
struct device_driver {
const char *name;
struct bus_type *bus;
struct module *owner;
const char *mod_name;
int suppress_bind_attrs;
int (*probe) (struct device *dev);
int (*remove) (struct device *dev);
void (*shutdown) (struct device *dev);
int (*suspend) (struct device *dev, pm_message_t state);
int (*resume) (struct device *dev);
const struct attribute_group **groups;
const struct dev_pm_ops *pm;
struct driver_private *p;
};
extern int __attribute__((warn_unused_result)) driver_register(struct device_driver *drv);
extern void driver_unregister(struct device_driver *drv);
extern struct device_driver *get_driver(struct device_driver *drv);
extern void put_driver(struct device_driver *drv);
extern struct device_driver *driver_find(const char *name,
struct bus_type *bus);
extern int driver_probe_done(void);
extern void wait_for_device_probe(void);
struct driver_attribute {
struct attribute attr;
ssize_t (*show)(struct device_driver *driver, char *buf);
ssize_t (*store)(struct device_driver *driver, const char *buf,
size_t count);
};
extern int __attribute__((warn_unused_result)) driver_create_file(struct device_driver *driver,
const struct driver_attribute *attr);
extern void driver_remove_file(struct device_driver *driver,
const struct driver_attribute *attr);
extern int __attribute__((warn_unused_result)) driver_add_kobj(struct device_driver *drv,
struct kobject *kobj,
const char *fmt, ...);
extern int __attribute__((warn_unused_result)) driver_for_each_device(struct device_driver *drv,
struct device *start,
void *data,
int (*fn)(struct device *dev,
void *));
struct device *driver_find_device(struct device_driver *drv,
struct device *start, void *data,
int (*match)(struct device *dev, void *data));
struct class {
const char *name;
struct module *owner;
struct class_attribute *class_attrs;
struct device_attribute *dev_attrs;
struct kobject *dev_kobj;
int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
char *(*devnode)(struct device *dev, mode_t *mode);
void (*class_release)(struct class *class);
void (*dev_release)(struct device *dev);
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
const struct kobj_ns_type_operations *ns_type;
const void *(*namespace)(struct device *dev);
const struct dev_pm_ops *pm;
struct class_private *p;
};
struct class_dev_iter {
struct klist_iter ki;
const struct device_type *type;
};
extern struct kobject *sysfs_dev_block_kobj;
extern struct kobject *sysfs_dev_char_kobj;
extern int __attribute__((warn_unused_result)) __class_register(struct class *class,
struct lock_class_key *key);
extern void class_unregister(struct class *class);
# 238 "include/linux/device.h"
struct class_compat;
struct class_compat *class_compat_register(const char *name);
void class_compat_unregister(struct class_compat *cls);
int class_compat_create_link(struct class_compat *cls, struct device *dev,
struct device *device_link);
void class_compat_remove_link(struct class_compat *cls, struct device *dev,
struct device *device_link);
extern void class_dev_iter_init(struct class_dev_iter *iter,
struct class *class,
struct device *start,
const struct device_type *type);
extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
extern void class_dev_iter_exit(struct class_dev_iter *iter);
extern int class_for_each_device(struct class *class, struct device *start,
void *data,
int (*fn)(struct device *dev, void *data));
extern struct device *class_find_device(struct class *class,
struct device *start, void *data,
int (*match)(struct device *, void *));
struct class_attribute {
struct attribute attr;
ssize_t (*show)(struct class *class, struct class_attribute *attr,
char *buf);
ssize_t (*store)(struct class *class, struct class_attribute *attr,
const char *buf, size_t count);
};
extern int __attribute__((warn_unused_result)) class_create_file(struct class *class,
const struct class_attribute *attr);
extern void class_remove_file(struct class *class,
const struct class_attribute *attr);
struct class_attribute_string {
struct class_attribute attr;
char *str;
};
# 290 "include/linux/device.h"
extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
char *buf);
struct class_interface {
struct list_head node;
struct class *class;
int (*add_dev) (struct device *, struct class_interface *);
void (*remove_dev) (struct device *, struct class_interface *);
};
extern int __attribute__((warn_unused_result)) class_interface_register(struct class_interface *);
extern void class_interface_unregister(struct class_interface *);
extern struct class * __attribute__((warn_unused_result)) __class_create(struct module *owner,
const char *name,
struct lock_class_key *key);
extern void class_destroy(struct class *cls);
# 326 "include/linux/device.h"
struct device_type {
const char *name;
const struct attribute_group **groups;
int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
char *(*devnode)(struct device *dev, mode_t *mode);
void (*release)(struct device *dev);
const struct dev_pm_ops *pm;
};
struct device_attribute {
struct attribute attr;
ssize_t (*show)(struct device *dev, struct device_attribute *attr,
char *buf);
ssize_t (*store)(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
};
extern int __attribute__((warn_unused_result)) device_create_file(struct device *device,
const struct device_attribute *entry);
extern void device_remove_file(struct device *dev,
const struct device_attribute *attr);
extern int __attribute__((warn_unused_result)) device_create_bin_file(struct device *dev,
const struct bin_attribute *attr);
extern void device_remove_bin_file(struct device *dev,
const struct bin_attribute *attr);
extern int device_schedule_callback_owner(struct device *dev,
void (*func)(struct device *dev), struct module *owner);
typedef void (*dr_release_t)(struct device *dev, void *res);
typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp);
extern void devres_free(void *res);
extern void devres_add(struct device *dev, void *res);
extern void *devres_find(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data);
extern void *devres_get(struct device *dev, void *new_res,
dr_match_t match, void *match_data);
extern void *devres_remove(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data);
extern int devres_destroy(struct device *dev, dr_release_t release,
dr_match_t match, void *match_data);
extern void * __attribute__((warn_unused_result)) devres_open_group(struct device *dev, void *id,
gfp_t gfp);
extern void devres_close_group(struct device *dev, void *id);
extern void devres_remove_group(struct device *dev, void *id);
extern int devres_release_group(struct device *dev, void *id);
extern void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp);
extern void devm_kfree(struct device *dev, void *p);
struct device_dma_parameters {
unsigned int max_segment_size;
unsigned long segment_boundary_mask;
};
struct device {
struct device *parent;
struct device_private *p;
struct kobject kobj;
const char *init_name;
struct device_type *type;
struct mutex mutex;
struct bus_type *bus;
struct device_driver *driver;
void *platform_data;
struct dev_pm_info power;
u64 *dma_mask;
u64 coherent_dma_mask;
struct device_dma_parameters *dma_parms;
struct list_head dma_pools;
struct dma_coherent_mem *dma_mem;
struct dev_archdata archdata;
dev_t devt;
spinlock_t devres_lock;
struct list_head devres_head;
struct klist_node knode_class;
struct class *class;
const struct attribute_group **groups;
void (*release)(struct device *dev);
};
# 1 "include/linux/pm_wakeup.h" 1
# 66 "include/linux/pm_wakeup.h"
static inline __attribute__((always_inline)) void device_init_wakeup(struct device *dev, int val)
{
dev->power.can_wakeup = val;
}
static inline __attribute__((always_inline)) void device_set_wakeup_capable(struct device *dev, int capable)
{
dev->power.can_wakeup = capable;
}
static inline __attribute__((always_inline)) int device_can_wakeup(struct device *dev)
{
return dev->power.can_wakeup;
}
static inline __attribute__((always_inline)) void device_set_wakeup_enable(struct device *dev, int enable)
{
}
static inline __attribute__((always_inline)) int device_may_wakeup(struct device *dev)
{
return false;
}
# 462 "include/linux/device.h" 2
static inline __attribute__((always_inline)) const char *dev_name(const struct device *dev)
{
if (dev->init_name)
return dev->init_name;
return kobject_name(&dev->kobj);
}
extern int dev_set_name(struct device *dev, const char *name, ...)
__attribute__((format(printf, 2, 3)));
# 485 "include/linux/device.h"
static inline __attribute__((always_inline)) int dev_to_node(struct device *dev)
{
return -1;
}
static inline __attribute__((always_inline)) void set_dev_node(struct device *dev, int node)
{
}
static inline __attribute__((always_inline)) unsigned int dev_get_uevent_suppress(const struct device *dev)
{
return dev->kobj.uevent_suppress;
}
static inline __attribute__((always_inline)) void dev_set_uevent_suppress(struct device *dev, int val)
{
dev->kobj.uevent_suppress = val;
}
static inline __attribute__((always_inline)) int device_is_registered(struct device *dev)
{
return dev->kobj.state_in_sysfs;
}
static inline __attribute__((always_inline)) void device_enable_async_suspend(struct device *dev)
{
if (dev->power.status == DPM_ON)
dev->power.async_suspend = true;
}
static inline __attribute__((always_inline)) void device_disable_async_suspend(struct device *dev)
{
if (dev->power.status == DPM_ON)
dev->power.async_suspend = false;
}
static inline __attribute__((always_inline)) int device_async_suspend_enabled(struct device *dev)
{
return !!dev->power.async_suspend;
}
static inline __attribute__((always_inline)) void device_lock(struct device *dev)
{
mutex_lock(&dev->mutex);
}
static inline __attribute__((always_inline)) int device_trylock(struct device *dev)
{
return mutex_trylock(&dev->mutex);
}
static inline __attribute__((always_inline)) void device_unlock(struct device *dev)
{
mutex_unlock(&dev->mutex);
}
void driver_init(void);
extern int __attribute__((warn_unused_result)) device_register(struct device *dev);
extern void device_unregister(struct device *dev);
extern void device_initialize(struct device *dev);
extern int __attribute__((warn_unused_result)) device_add(struct device *dev);
extern void device_del(struct device *dev);
extern int device_for_each_child(struct device *dev, void *data,
int (*fn)(struct device *dev, void *data));
extern struct device *device_find_child(struct device *dev, void *data,
int (*match)(struct device *dev, void *data));
extern int device_rename(struct device *dev, const char *new_name);
extern int device_move(struct device *dev, struct device *new_parent,
enum dpm_order dpm_order);
extern const char *device_get_devnode(struct device *dev,
mode_t *mode, const char **tmp);
extern void *dev_get_drvdata(const struct device *dev);
extern void dev_set_drvdata(struct device *dev, void *data);
extern struct device *__root_device_register(const char *name,
struct module *owner);
static inline __attribute__((always_inline)) struct device *root_device_register(const char *name)
{
return __root_device_register(name, (&__this_module));
}
extern void root_device_unregister(struct device *root);
static inline __attribute__((always_inline)) void *dev_get_platdata(const struct device *dev)
{
return dev->platform_data;
}
extern int __attribute__((warn_unused_result)) device_bind_driver(struct device *dev);
extern void device_release_driver(struct device *dev);
extern int __attribute__((warn_unused_result)) device_attach(struct device *dev);
extern int __attribute__((warn_unused_result)) driver_attach(struct device_driver *drv);
extern int __attribute__((warn_unused_result)) device_reprobe(struct device *dev);
extern struct device *device_create_vargs(struct class *cls,
struct device *parent,
dev_t devt,
void *drvdata,
const char *fmt,
va_list vargs);
extern struct device *device_create(struct class *cls, struct device *parent,
dev_t devt, void *drvdata,
const char *fmt, ...)
__attribute__((format(printf, 5, 6)));
extern void device_destroy(struct class *cls, dev_t devt);
extern int (*platform_notify)(struct device *dev);
extern int (*platform_notify_remove)(struct device *dev);
extern struct device *get_device(struct device *dev);
extern void put_device(struct device *dev);
extern void wait_for_device_probe(void);
static inline __attribute__((always_inline)) int devtmpfs_create_node(struct device *dev) { return 0; }
static inline __attribute__((always_inline)) int devtmpfs_delete_node(struct device *dev) { return 0; }
static inline __attribute__((always_inline)) int devtmpfs_mount(const char *mountpoint) { return 0; }
extern void device_shutdown(void);
extern void sysdev_shutdown(void);
extern const char *dev_driver_string(const struct device *dev);
extern int dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
__attribute__ ((format (printf, 3, 4)));
extern int dev_emerg(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern int dev_alert(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern int dev_crit(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern int dev_err(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern int dev_warn(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern int dev_notice(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
extern int _dev_info(const struct device *dev, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
# 25 "include/linux/dmaengine.h" 2
# 1 "include/linux/dma-mapping.h" 1
# 1 "include/linux/dma-attrs.h" 1
# 1 "include/linux/bug.h" 1
enum bug_trap_type {
BUG_TRAP_TYPE_NONE = 0,
BUG_TRAP_TYPE_WARN = 1,
BUG_TRAP_TYPE_BUG = 2,
};
struct pt_regs;
# 31 "include/linux/bug.h"
static inline __attribute__((always_inline)) enum bug_trap_type report_bug(unsigned long bug_addr,
struct pt_regs *regs)
{
return BUG_TRAP_TYPE_BUG;
}
# 7 "include/linux/dma-attrs.h" 2
enum dma_attr {
DMA_ATTR_WRITE_BARRIER,
DMA_ATTR_WEAK_ORDERING,
DMA_ATTR_MAX,
};
struct dma_attrs {
unsigned long flags[(((DMA_ATTR_MAX) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))];
};
static inline __attribute__((always_inline)) void init_dma_attrs(struct dma_attrs *attrs)
{
bitmap_zero(attrs->flags, (((DMA_ATTR_MAX) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))));
}
# 66 "include/linux/dma-attrs.h"
static inline __attribute__((always_inline)) void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
{
}
static inline __attribute__((always_inline)) int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
{
return 0;
}
# 7 "include/linux/dma-mapping.h" 2
# 1 "include/linux/scatterlist.h" 1
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/scatterlist.h" 1
# 1 "include/asm-generic/scatterlist.h" 1
struct scatterlist {
unsigned long page_link;
unsigned int offset;
unsigned int length;
dma_addr_t dma_address;
};
# 5 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/scatterlist.h" 2
# 6 "include/linux/scatterlist.h" 2
# 1 "include/linux/mm.h" 1
# 13 "include/linux/mm.h"
# 1 "include/linux/debug_locks.h" 1
struct task_struct;
extern int debug_locks;
extern int debug_locks_silent;
static inline __attribute__((always_inline)) int __debug_locks_off(void)
{
return ({ ((void)(sizeof(struct { int:-!!(sizeof(*(&debug_locks)) & ~0xc); }))); ((__typeof__(*(&debug_locks))) __xchg((unsigned long)(0), (&debug_locks), sizeof(*(&debug_locks)))); });
}
extern int debug_locks_off(void);
# 48 "include/linux/debug_locks.h"
struct task_struct;
# 57 "include/linux/debug_locks.h"
static inline __attribute__((always_inline)) void debug_show_all_locks(void)
{
}
static inline __attribute__((always_inline)) void __debug_show_held_locks(struct task_struct *task)
{
}
static inline __attribute__((always_inline)) void debug_show_held_locks(struct task_struct *task)
{
}
static inline __attribute__((always_inline)) void
debug_check_no_locks_freed(const void *from, unsigned long len)
{
}
static inline __attribute__((always_inline)) void
debug_check_no_locks_held(struct task_struct *task)
{
}
# 14 "include/linux/mm.h" 2
# 1 "include/linux/range.h" 1
struct range {
u64 start;
u64 end;
};
int add_range(struct range *range, int az, int nr_range,
u64 start, u64 end);
int add_range_with_merge(struct range *range, int az, int nr_range,
u64 start, u64 end);
void subtract_range(struct range *range, int az, u64 start, u64 end);
int clean_sort_range(struct range *range, int az);
void sort_range(struct range *range, int nr_range);
static inline __attribute__((always_inline)) resource_size_t cap_resource(u64 val)
{
if (val > ((resource_size_t)~0))
return ((resource_size_t)~0);
return val;
}
# 16 "include/linux/mm.h" 2
struct mempolicy;
struct anon_vma;
struct file_ra_state;
struct user_struct;
struct writeback_control;
extern unsigned long max_mapnr;
extern unsigned long num_physpages;
extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;
extern int sysctl_legacy_va_layout;
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable.h" 1
# 12 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable-32.h" 1
# 17 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable-32.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/fixmap.h" 1
# 47 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/fixmap.h"
enum fixed_addresses {
FIX_CMAP_BEGIN,
FIX_CMAP_END = FIX_CMAP_BEGIN + (8 * 4 * 2),
# 60 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/fixmap.h"
__end_of_fixed_addresses
};
# 85 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/fixmap.h"
extern void __this_fixmap_does_not_exist(void);
static inline __attribute__((always_inline)) unsigned long fix_to_virt(const unsigned int idx)
{
# 103 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/fixmap.h"
if (idx >= __end_of_fixed_addresses)
__this_fixmap_does_not_exist();
return (((unsigned long)(long)(int)0xfffe0000) - ((idx) << 12));
}
static inline __attribute__((always_inline)) unsigned long virt_to_fix(const unsigned long vaddr)
{
__BUG_ON((unsigned long)(vaddr >= ((unsigned long)(long)(int)0xfffe0000) || vaddr < (((unsigned long)(long)(int)0xfffe0000) - (__end_of_fixed_addresses << 12))));
return ((((unsigned long)(long)(int)0xfffe0000) - ((vaddr)&(~((1 << 12) - 1)))) >> 12);
}
extern void fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base);
# 18 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable-32.h" 2
# 1 "include/asm-generic/pgtable-nopmd.h" 1
# 1 "include/asm-generic/pgtable-nopud.h" 1
# 13 "include/asm-generic/pgtable-nopud.h"
typedef struct { pgd_t pgd; } pud_t;
# 25 "include/asm-generic/pgtable-nopud.h"
static inline __attribute__((always_inline)) int pgd_none(pgd_t pgd) { return 0; }
static inline __attribute__((always_inline)) int pgd_bad(pgd_t pgd) { return 0; }
static inline __attribute__((always_inline)) int pgd_present(pgd_t pgd) { return 1; }
static inline __attribute__((always_inline)) void pgd_clear(pgd_t *pgd) { }
# 38 "include/asm-generic/pgtable-nopud.h"
static inline __attribute__((always_inline)) pud_t * pud_offset(pgd_t * pgd, unsigned long address)
{
return (pud_t *)pgd;
}
# 7 "include/asm-generic/pgtable-nopmd.h" 2
struct mm_struct;
# 17 "include/asm-generic/pgtable-nopmd.h"
typedef struct { pud_t pud; } pmd_t;
# 29 "include/asm-generic/pgtable-nopmd.h"
static inline __attribute__((always_inline)) int pud_none(pud_t pud) { return 0; }
static inline __attribute__((always_inline)) int pud_bad(pud_t pud) { return 0; }
static inline __attribute__((always_inline)) int pud_present(pud_t pud) { return 1; }
static inline __attribute__((always_inline)) void pud_clear(pud_t *pud) { }
# 43 "include/asm-generic/pgtable-nopmd.h"
static inline __attribute__((always_inline)) pmd_t * pmd_offset(pud_t * pud, unsigned long address)
{
return (pmd_t *)pud;
}
# 59 "include/asm-generic/pgtable-nopmd.h"
static inline __attribute__((always_inline)) void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
}
# 20 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable-32.h" 2
extern void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask);
# 34 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable-32.h"
extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask);
# 86 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable-32.h"
extern void load_pgd(unsigned long pg_dir);
extern pte_t invalid_pte_table[((1UL) << 12)/sizeof(pte_t)];
static inline __attribute__((always_inline)) int pmd_none(pmd_t pmd)
{
return ((((((pmd).pud).pgd).pgd))) == (unsigned long) invalid_pte_table;
}
static inline __attribute__((always_inline)) int pmd_present(pmd_t pmd)
{
return ((((((pmd).pud).pgd).pgd))) != (unsigned long) invalid_pte_table;
}
static inline __attribute__((always_inline)) void pmd_clear(pmd_t *pmdp)
{
((((((*pmdp).pud).pgd).pgd))) = ((unsigned long) invalid_pte_table);
}
# 13 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable.h" 2
# 21 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable.h"
struct mm_struct;
struct vm_area_struct;
# 67 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable.h"
extern unsigned long _page_cachable_default;
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;
static inline __attribute__((always_inline)) int is_zero_pfn(unsigned long pfn)
{
extern unsigned long zero_pfn;
unsigned long offset_from_zero_pfn = pfn - zero_pfn;
return offset_from_zero_pfn <= (zero_page_mask >> 12);
}
extern void paging_init(void);
# 146 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable.h"
static inline __attribute__((always_inline)) void set_pte(pte_t *ptep, pte_t pteval)
{
*ptep = pteval;
if (((pteval).pte) & (1 << ((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1))) {
pte_t *buddy = ((pte_t *)((unsigned long)(ptep) ^ sizeof(pte_t)));
if ((!(((*buddy).pte) & ~(1 << ((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1)))))
((*buddy).pte) = ((*buddy).pte) | (1 << ((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1));
}
}
static inline __attribute__((always_inline)) void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
if (((*((pte_t *)((unsigned long)(ptep) ^ sizeof(pte_t)))).pte) & (1 << ((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1)))
set_pte(ptep, ((pte_t) { ((1 << ((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1))) } ));
else
set_pte(ptep, ((pte_t) { (0) } ));
}
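/*
 * Editorial note: on MIPS each TLB entry maps an even/odd pair of pages that
 * share a single global (G) bit, so the two PTEs of a pair must agree on it.
 * The large bit expressions above are _PAGE_GLOBAL after macro expansion:
 * set_pte() propagates the global bit to the "buddy" PTE (the other half of
 * the pair, reached by XOR-ing the pointer with sizeof(pte_t)) when that
 * buddy is otherwise empty, and pte_clear() writes a PTE containing only the
 * global bit -- rather than zero -- when the buddy is global, keeping the
 * pair consistent.
 */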
# 197 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable.h"
extern pgd_t swapper_pg_dir[];
# 260 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable.h"
static inline __attribute__((always_inline)) int pte_write(pte_t pte) { return ((pte).pte) & (1 << ((0 ? (0) : (0) + 1) + 1)); }
static inline __attribute__((always_inline)) int pte_dirty(pte_t pte) { return ((pte).pte) & (1 << ((((0 ? (0) : (0) + 1) + 1) + 1) + 1)); }
static inline __attribute__((always_inline)) int pte_young(pte_t pte) { return ((pte).pte) & (1 << (((0 ? (0) : (0) + 1) + 1) + 1)); }
static inline __attribute__((always_inline)) int pte_file(pte_t pte) { return ((pte).pte) & ((1 << ((((0 ? (0) : (0) + 1) + 1) + 1) + 1))); }
static inline __attribute__((always_inline)) pte_t pte_wrprotect(pte_t pte)
{
((pte).pte) &= ~((1 << ((0 ? (0) : (0) + 1) + 1)) | ((1 << ((((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1) + 1) + 1))));
return pte;
}
static inline __attribute__((always_inline)) pte_t pte_mkclean(pte_t pte)
{
((pte).pte) &= ~((1 << ((((0 ? (0) : (0) + 1) + 1) + 1) + 1))|((1 << ((((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1) + 1) + 1))));
return pte;
}
static inline __attribute__((always_inline)) pte_t pte_mkold(pte_t pte)
{
((pte).pte) &= ~((1 << (((0 ? (0) : (0) + 1) + 1) + 1))|((1 << (((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1) + 1))));
return pte;
}
static inline __attribute__((always_inline)) pte_t pte_mkwrite(pte_t pte)
{
((pte).pte) |= (1 << ((0 ? (0) : (0) + 1) + 1));
if (((pte).pte) & (1 << ((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))
((pte).pte) |= ((1 << ((((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1) + 1) + 1)));
return pte;
}
static inline __attribute__((always_inline)) pte_t pte_mkdirty(pte_t pte)
{
((pte).pte) |= (1 << ((((0 ? (0) : (0) + 1) + 1) + 1) + 1));
if (((pte).pte) & (1 << ((0 ? (0) : (0) + 1) + 1)))
((pte).pte) |= ((1 << ((((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1) + 1) + 1)));
return pte;
}
static inline __attribute__((always_inline)) pte_t pte_mkyoung(pte_t pte)
{
((pte).pte) |= (1 << (((0 ? (0) : (0) + 1) + 1) + 1));
if (0) {
if (!(((pte).pte) & ({if (!0) BUG(); 1 << (0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))); })))
((pte).pte) |= ((1 << (((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1) + 1)));
} else {
if (((pte).pte) & ({if (0) BUG(); 1 << (0 ? (0) : (0) + 1); }))
((pte).pte) |= ((1 << (((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1) + 1)));
}
return pte;
}
static inline __attribute__((always_inline)) int pte_huge(pte_t pte) { return ((pte).pte) & ({BUG(); 1; }); }
static inline __attribute__((always_inline)) pte_t pte_mkhuge(pte_t pte)
{
((pte).pte) |= ({BUG(); 1; });
return pte;
}
static inline __attribute__((always_inline)) int pte_special(pte_t pte) { return 0; }
static inline __attribute__((always_inline)) pte_t pte_mkspecial(pte_t pte) { return pte; }
# 333 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable.h"
static inline __attribute__((always_inline)) pgprot_t pgprot_noncached(pgprot_t _prot)
{
unsigned long prot = ((_prot).pgprot);
prot = (prot & ~(7 << (((((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1) + 1) + 1) + 1))) | (2<<(((((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1) + 1) + 1) + 1));
return ((pgprot_t) { (prot) } );
}
# 358 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable.h"
static inline __attribute__((always_inline)) pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return ((pte_t) { ((((pte).pte) & ((~((1 << ((12 - 12 + (((((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1) + 1) + 1) + 1) + 3))) - 1)) | (1 << (((0 ? (0) : (0) + 1) + 1) + 1)) | (1 << ((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) | (7 << (((((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1) + 1) + 1) + 1)))) | ((newprot).pgprot)) } );
}
extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
pte_t pte);
extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
pte_t pte);
static inline __attribute__((always_inline)) void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
pte_t pte = *ptep;
__update_tlb(vma, address, pte);
__update_cache(vma, address, pte);
}
# 397 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable.h"
# 1 "include/asm-generic/pgtable.h" 1
# 94 "include/asm-generic/pgtable.h"
struct mm_struct;
static inline __attribute__((always_inline)) void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
pte_t old_pte = *ptep;
set_pte(ptep, pte_wrprotect(old_pte));
}
# 170 "include/asm-generic/pgtable.h"
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);
static inline __attribute__((always_inline)) int pgd_none_or_clear_bad(pgd_t *pgd)
{
if (pgd_none(*pgd))
return 1;
if (__builtin_expect(!!(pgd_bad(*pgd)), 0)) {
pgd_clear_bad(pgd);
return 1;
}
return 0;
}
static inline __attribute__((always_inline)) int pud_none_or_clear_bad(pud_t *pud)
{
if (pud_none(*pud))
return 1;
if (__builtin_expect(!!(pud_bad(*pud)), 0)) {
pud_clear_bad(pud);
return 1;
}
return 0;
}
static inline __attribute__((always_inline)) int pmd_none_or_clear_bad(pmd_t *pmd)
{
if (pmd_none(*pmd))
return 1;
if (__builtin_expect(!!((((((((*pmd).pud).pgd).pgd))) & ~(~((1 << 12) - 1)))), 0)) {
pmd_clear_bad(pmd);
return 1;
}
return 0;
}
static inline __attribute__((always_inline)) pte_t __ptep_modify_prot_start(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep)
{
return ({ pte_t __pte = *(ptep); pte_clear((mm), (addr), (ptep)); __pte; });
}
static inline __attribute__((always_inline)) void __ptep_modify_prot_commit(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, pte_t pte)
{
set_pte(ptep, pte);
}
# 245 "include/asm-generic/pgtable.h"
static inline __attribute__((always_inline)) pte_t ptep_modify_prot_start(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep)
{
return __ptep_modify_prot_start(mm, addr, ptep);
}
static inline __attribute__((always_inline)) void ptep_modify_prot_commit(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, pte_t pte)
{
__ptep_modify_prot_commit(mm, addr, ptep, pte);
}
# 309 "include/asm-generic/pgtable.h"
static inline __attribute__((always_inline)) int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
unsigned long pfn, unsigned long size)
{
return 0;
}
# 322 "include/asm-generic/pgtable.h"
static inline __attribute__((always_inline)) int track_pfn_vma_copy(struct vm_area_struct *vma)
{
return 0;
}
# 335 "include/asm-generic/pgtable.h"
static inline __attribute__((always_inline)) void untrack_pfn_vma(struct vm_area_struct *vma,
unsigned long pfn, unsigned long size)
{
}
# 398 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/pgtable.h" 2
# 41 "include/linux/mm.h" 2
# 57 "include/linux/mm.h"
extern struct kmem_cache *vm_area_cachep;
# 142 "include/linux/mm.h"
extern pgprot_t protection_map[16];
# 156 "include/linux/mm.h"
static inline __attribute__((always_inline)) int is_linear_pfn_mapping(struct vm_area_struct *vma)
{
return (vma->vm_flags & 0x40000000);
}
static inline __attribute__((always_inline)) int is_pfn_mapping(struct vm_area_struct *vma)
{
return (vma->vm_flags & 0x00000400);
}
# 175 "include/linux/mm.h"
struct vm_fault {
unsigned int flags;
unsigned long pgoff;
void *virtual_address;
struct page *page;
};
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
int (*access)(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
# 231 "include/linux/mm.h"
};
struct mmu_gather;
struct inode;
# 243 "include/linux/mm.h"
# 1 "include/linux/page-flags.h" 1
# 75 "include/linux/page-flags.h"
enum pageflags {
PG_locked,
PG_error,
PG_referenced,
PG_uptodate,
PG_dirty,
PG_lru,
PG_active,
PG_slab,
PG_owner_priv_1,
PG_arch_1,
PG_reserved,
PG_private,
PG_private_2,
PG_writeback,
PG_head,
PG_tail,
PG_swapcache,
PG_mappedtodisk,
PG_reclaim,
PG_buddy,
PG_swapbacked,
PG_unevictable,
PG_mlocked,
__NR_PAGEFLAGS,
PG_checked = PG_owner_priv_1,
PG_fscache = PG_private_2,
PG_pinned = PG_owner_priv_1,
PG_savepinned = PG_dirty,
PG_slob_free = PG_private,
PG_slub_frozen = PG_active,
};
# 198 "include/linux/page-flags.h"
struct page;
static inline __attribute__((always_inline)) int PageLocked(struct page *page) { return test_bit(PG_locked, &page->flags); } static inline __attribute__((always_inline)) int TestSetPageLocked(struct page *page) { return test_and_set_bit(PG_locked, &page->flags); }
static inline __attribute__((always_inline)) int PageError(struct page *page) { return test_bit(PG_error, &page->flags); } static inline __attribute__((always_inline)) void SetPageError(struct page *page) { set_bit(PG_error, &page->flags); } static inline __attribute__((always_inline)) void ClearPageError(struct page *page) { clear_bit(PG_error, &page->flags); }
static inline __attribute__((always_inline)) int PageReferenced(struct page *page) { return test_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) void SetPageReferenced(struct page *page) { set_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReferenced(struct page *page) { clear_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageReferenced(struct page *page) { return test_and_clear_bit(PG_referenced, &page->flags); }
static inline __attribute__((always_inline)) int PageDirty(struct page *page) { return test_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void SetPageDirty(struct page *page) { set_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void ClearPageDirty(struct page *page) { clear_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) int TestSetPageDirty(struct page *page) { return test_and_set_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageDirty(struct page *page) { return test_and_clear_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageDirty(struct page *page) { __clear_bit(PG_dirty, &page->flags); }
static inline __attribute__((always_inline)) int PageLRU(struct page *page) { return test_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void SetPageLRU(struct page *page) { set_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void ClearPageLRU(struct page *page) { clear_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageLRU(struct page *page) { __clear_bit(PG_lru, &page->flags); }
static inline __attribute__((always_inline)) int PageActive(struct page *page) { return test_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void SetPageActive(struct page *page) { set_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void ClearPageActive(struct page *page) { clear_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageActive(struct page *page) { __clear_bit(PG_active, &page->flags); }
static inline __attribute__((always_inline)) int TestClearPageActive(struct page *page) { return test_and_clear_bit(PG_active, &page->flags); }
static inline __attribute__((always_inline)) int PageSlab(struct page *page) { return test_bit(PG_slab, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlab(struct page *page) { __set_bit(PG_slab, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlab(struct page *page) { __clear_bit(PG_slab, &page->flags); }
static inline __attribute__((always_inline)) int PageChecked(struct page *page) { return test_bit(PG_checked, &page->flags); } static inline __attribute__((always_inline)) void SetPageChecked(struct page *page) { set_bit(PG_checked, &page->flags); } static inline __attribute__((always_inline)) void ClearPageChecked(struct page *page) { clear_bit(PG_checked, &page->flags); }
static inline __attribute__((always_inline)) int PagePinned(struct page *page) { return test_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) void SetPagePinned(struct page *page) { set_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePinned(struct page *page) { clear_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) int TestSetPagePinned(struct page *page) { return test_and_set_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) int TestClearPagePinned(struct page *page) { return test_and_clear_bit(PG_pinned, &page->flags); }
static inline __attribute__((always_inline)) int PageSavePinned(struct page *page) { return test_bit(PG_savepinned, &page->flags); } static inline __attribute__((always_inline)) void SetPageSavePinned(struct page *page) { set_bit(PG_savepinned, &page->flags); } static inline __attribute__((always_inline)) void ClearPageSavePinned(struct page *page) { clear_bit(PG_savepinned, &page->flags); };
static inline __attribute__((always_inline)) int PageReserved(struct page *page) { return test_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void SetPageReserved(struct page *page) { set_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReserved(struct page *page) { clear_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageReserved(struct page *page) { __clear_bit(PG_reserved, &page->flags); }
static inline __attribute__((always_inline)) int PageSwapBacked(struct page *page) { return test_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void SetPageSwapBacked(struct page *page) { set_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void ClearPageSwapBacked(struct page *page) { clear_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSwapBacked(struct page *page) { __clear_bit(PG_swapbacked, &page->flags); }
static inline __attribute__((always_inline)) int PageSlobFree(struct page *page) { return test_bit(PG_slob_free, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlobFree(struct page *page) { __set_bit(PG_slob_free, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlobFree(struct page *page) { __clear_bit(PG_slob_free, &page->flags); }
static inline __attribute__((always_inline)) int PageSlubFrozen(struct page *page) { return test_bit(PG_slub_frozen, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlubFrozen(struct page *page) { __set_bit(PG_slub_frozen, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlubFrozen(struct page *page) { __clear_bit(PG_slub_frozen, &page->flags); }
static inline __attribute__((always_inline)) int PagePrivate(struct page *page) { return test_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void SetPagePrivate(struct page *page) { set_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePrivate(struct page *page) { clear_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void __SetPagePrivate(struct page *page) { __set_bit(PG_private, &page->flags); }
static inline __attribute__((always_inline)) void __ClearPagePrivate(struct page *page) { __clear_bit(PG_private, &page->flags); }
static inline __attribute__((always_inline)) int PagePrivate2(struct page *page) { return test_bit(PG_private_2, &page->flags); } static inline __attribute__((always_inline)) void SetPagePrivate2(struct page *page) { set_bit(PG_private_2, &page->flags); } static inline __attribute__((always_inline)) void ClearPagePrivate2(struct page *page) { clear_bit(PG_private_2, &page->flags); } static inline __attribute__((always_inline)) int TestSetPagePrivate2(struct page *page) { return test_and_set_bit(PG_private_2, &page->flags); } static inline __attribute__((always_inline)) int TestClearPagePrivate2(struct page *page) { return test_and_clear_bit(PG_private_2, &page->flags); }
static inline __attribute__((always_inline)) int PageOwnerPriv1(struct page *page) { return test_bit(PG_owner_priv_1, &page->flags); } static inline __attribute__((always_inline)) void SetPageOwnerPriv1(struct page *page) { set_bit(PG_owner_priv_1, &page->flags); } static inline __attribute__((always_inline)) void ClearPageOwnerPriv1(struct page *page) { clear_bit(PG_owner_priv_1, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageOwnerPriv1(struct page *page) { return test_and_clear_bit(PG_owner_priv_1, &page->flags); }
static inline __attribute__((always_inline)) int PageWriteback(struct page *page) { return test_bit(PG_writeback, &page->flags); } static inline __attribute__((always_inline)) int TestSetPageWriteback(struct page *page) { return test_and_set_bit(PG_writeback, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageWriteback(struct page *page) { return test_and_clear_bit(PG_writeback, &page->flags); }
static inline __attribute__((always_inline)) int PageBuddy(struct page *page) { return test_bit(PG_buddy, &page->flags); } static inline __attribute__((always_inline)) void __SetPageBuddy(struct page *page) { __set_bit(PG_buddy, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageBuddy(struct page *page) { __clear_bit(PG_buddy, &page->flags); }
static inline __attribute__((always_inline)) int PageMappedToDisk(struct page *page) { return test_bit(PG_mappedtodisk, &page->flags); } static inline __attribute__((always_inline)) void SetPageMappedToDisk(struct page *page) { set_bit(PG_mappedtodisk, &page->flags); } static inline __attribute__((always_inline)) void ClearPageMappedToDisk(struct page *page) { clear_bit(PG_mappedtodisk, &page->flags); }
static inline __attribute__((always_inline)) int PageReclaim(struct page *page) { return test_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void SetPageReclaim(struct page *page) { set_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReclaim(struct page *page) { clear_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageReclaim(struct page *page) { return test_and_clear_bit(PG_reclaim, &page->flags); }
static inline __attribute__((always_inline)) int PageReadahead(struct page *page) { return test_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void SetPageReadahead(struct page *page) { set_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void ClearPageReadahead(struct page *page) { clear_bit(PG_reclaim, &page->flags); }
# 247 "include/linux/page-flags.h"
static inline __attribute__((always_inline)) int PageHighMem(struct page *page) { return 0; }
static inline __attribute__((always_inline)) int PageSwapCache(struct page *page) { return 0; }
static inline __attribute__((always_inline)) void SetPageSwapCache(struct page *page) { } static inline __attribute__((always_inline)) void ClearPageSwapCache(struct page *page) { }
static inline __attribute__((always_inline)) int PageUnevictable(struct page *page) { return test_bit(PG_unevictable, &page->flags); } static inline __attribute__((always_inline)) void SetPageUnevictable(struct page *page) { set_bit(PG_unevictable, &page->flags); } static inline __attribute__((always_inline)) void ClearPageUnevictable(struct page *page) { clear_bit(PG_unevictable, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageUnevictable(struct page *page) { __clear_bit(PG_unevictable, &page->flags); }
static inline __attribute__((always_inline)) int TestClearPageUnevictable(struct page *page) { return test_and_clear_bit(PG_unevictable, &page->flags); }
static inline __attribute__((always_inline)) int PageMlocked(struct page *page) { return test_bit(PG_mlocked, &page->flags); } static inline __attribute__((always_inline)) void SetPageMlocked(struct page *page) { set_bit(PG_mlocked, &page->flags); } static inline __attribute__((always_inline)) void ClearPageMlocked(struct page *page) { clear_bit(PG_mlocked, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageMlocked(struct page *page) { __clear_bit(PG_mlocked, &page->flags); }
static inline __attribute__((always_inline)) int TestSetPageMlocked(struct page *page) { return test_and_set_bit(PG_mlocked, &page->flags); } static inline __attribute__((always_inline)) int TestClearPageMlocked(struct page *page) { return test_and_clear_bit(PG_mlocked, &page->flags); } static inline __attribute__((always_inline)) int __TestClearPageMlocked(struct page *page) { return __test_and_clear_bit(PG_mlocked, &page->flags); }
# 271 "include/linux/page-flags.h"
static inline __attribute__((always_inline)) int PageUncached(struct page *page) { return 0; }
static inline __attribute__((always_inline)) int PageHWPoison(struct page *page) { return 0; }
u64 stable_page_flags(struct page *page);
static inline __attribute__((always_inline)) int PageUptodate(struct page *page)
{
int ret = test_bit(PG_uptodate, &(page)->flags);
# 297 "include/linux/page-flags.h"
if (ret)
__asm__ __volatile__("": : :"memory");
return ret;
}
static inline __attribute__((always_inline)) void __SetPageUptodate(struct page *page)
{
__asm__ __volatile__("": : :"memory");
__set_bit(PG_uptodate, &(page)->flags);
}
static inline __attribute__((always_inline)) void SetPageUptodate(struct page *page)
{
# 323 "include/linux/page-flags.h"
__asm__ __volatile__("": : :"memory");
set_bit(PG_uptodate, &(page)->flags);
}
static inline __attribute__((always_inline)) void ClearPageUptodate(struct page *page) { clear_bit(PG_uptodate, &page->flags); }
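/*
 * The barriers in the uptodate helpers (expanded here to plain compiler
 * barriers, which suggests a non-SMP build) order the page contents against
 * PG_uptodate: a writer must make the data visible before setting the bit,
 * and a reader that observes the bit set may then safely read the data.
 */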
extern void cancel_dirty_page(struct page *page, unsigned int account_size);
int test_clear_page_writeback(struct page *page);
int test_set_page_writeback(struct page *page);
static inline __attribute__((always_inline)) void set_page_writeback(struct page *page)
{
test_set_page_writeback(page);
}
# 347 "include/linux/page-flags.h"
static inline __attribute__((always_inline)) int PageHead(struct page *page) { return test_bit(PG_head, &page->flags); } static inline __attribute__((always_inline)) void __SetPageHead(struct page *page) { __set_bit(PG_head, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageHead(struct page *page) { __clear_bit(PG_head, &page->flags); }
static inline __attribute__((always_inline)) int PageTail(struct page *page) { return test_bit(PG_tail, &page->flags); } static inline __attribute__((always_inline)) void __SetPageTail(struct page *page) { __set_bit(PG_tail, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageTail(struct page *page) { __clear_bit(PG_tail, &page->flags); }
static inline __attribute__((always_inline)) int PageCompound(struct page *page)
{
return page->flags & ((1L << PG_head) | (1L << PG_tail));
}
# 427 "include/linux/page-flags.h"
static inline __attribute__((always_inline)) int page_has_private(struct page *page)
{
return !!(page->flags & (1 << PG_private | 1 << PG_private_2));
}
# 244 "include/linux/mm.h" 2
# 261 "include/linux/mm.h"
static inline __attribute__((always_inline)) int put_page_testzero(struct page *page)
{
do { (void)((*(volatile int *)&(&page->_count)->counter) == 0); } while (0);
return (atomic_sub_return(1, (&page->_count)) == 0);
}
static inline __attribute__((always_inline)) int get_page_unless_zero(struct page *page)
{
return atomic_add_unless((&page->_count), 1, 0);
}
extern int page_is_ram(unsigned long pfn);
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);
static inline __attribute__((always_inline)) int is_vmalloc_addr(const void *x)
{
unsigned long addr = (unsigned long)x;
return addr >= (0xc0000000UL) && addr < ((((unsigned long)(long)(int)0xfffe0000) - (__end_of_fixed_addresses << 12))-2*((1UL) << 12));
}
extern int is_vmalloc_or_module_addr(const void *x);
static inline __attribute__((always_inline)) struct page *compound_head(struct page *page)
{
if (__builtin_expect(!!(PageTail(page)), 0))
return page->first_page;
return page;
}
static inline __attribute__((always_inline)) int page_count(struct page *page)
{
return (*(volatile int *)&(&compound_head(page)->_count)->counter);
}
static inline __attribute__((always_inline)) void get_page(struct page *page)
{
page = compound_head(page);
do { (void)((*(volatile int *)&(&page->_count)->counter) == 0); } while (0);
atomic_add(1, (&page->_count));
}
static inline __attribute__((always_inline)) struct page *virt_to_head_page(const void *x)
{
struct page *page = (mem_map + ((((virt_to_phys(x)) >> 12)) - ((((0UL)) + ((1UL) << 12)-1) >> 12)));
return compound_head(page);
}
static inline __attribute__((always_inline)) void init_page_count(struct page *page)
{
((&page->_count)->counter = (1));
}
void put_page(struct page *page);
void put_pages_list(struct list_head *pages);
void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);
typedef void compound_page_dtor(struct page *);
static inline __attribute__((always_inline)) void set_compound_page_dtor(struct page *page,
compound_page_dtor *dtor)
{
page[1].lru.next = (void *)dtor;
}
static inline __attribute__((always_inline)) compound_page_dtor *get_compound_page_dtor(struct page *page)
{
return (compound_page_dtor *)page[1].lru.next;
}
static inline __attribute__((always_inline)) int compound_order(struct page *page)
{
if (!PageHead(page))
return 0;
return (unsigned long)page[1].lru.prev;
}
static inline __attribute__((always_inline)) void set_compound_order(struct page *page, unsigned long order)
{
page[1].lru.prev = (void *)order;
}
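/*
 * Compound (multi-order) pages keep their metadata in the second page of the
 * group: the destructor is parked in page[1].lru.next and the allocation
 * order in page[1].lru.prev, which is why the helpers above simply cast those
 * list pointers.  compound_head() follows page->first_page from any tail page
 * back to the head page.
 */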
# 522 "include/linux/mm.h"
static inline __attribute__((always_inline)) enum zone_type page_zonenum(struct page *page)
{
return (page->flags >> (((((sizeof(unsigned long)*8) - 0) - 0) - 1) * (1 != 0))) & ((1UL << 1) - 1);
}
# 535 "include/linux/mm.h"
static inline __attribute__((always_inline)) int page_zone_id(struct page *page)
{
return (page->flags >> ((((((sizeof(unsigned long)*8) - 0) - 0) < ((((sizeof(unsigned long)*8) - 0) - 0) - 1))? (((sizeof(unsigned long)*8) - 0) - 0) : ((((sizeof(unsigned long)*8) - 0) - 0) - 1)) * ((0 + 1) != 0))) & ((1UL << (0 + 1)) - 1);
}
static inline __attribute__((always_inline)) int zone_to_nid(struct zone *zone)
{
return 0;
}
static inline __attribute__((always_inline)) int page_to_nid(struct page *page)
{
return (page->flags >> ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0))) & ((1UL << 0) - 1);
}
static inline __attribute__((always_inline)) struct zone *page_zone(struct page *page)
{
return &(&contig_page_data)->node_zones[page_zonenum(page)];
}
# 570 "include/linux/mm.h"
static inline __attribute__((always_inline)) void set_page_zone(struct page *page, enum zone_type zone)
{
page->flags &= ~(((1UL << 1) - 1) << (((((sizeof(unsigned long)*8) - 0) - 0) - 1) * (1 != 0)));
page->flags |= (zone & ((1UL << 1) - 1)) << (((((sizeof(unsigned long)*8) - 0) - 0) - 1) * (1 != 0));
}
static inline __attribute__((always_inline)) void set_page_node(struct page *page, unsigned long node)
{
page->flags &= ~(((1UL << 0) - 1) << ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0)));
page->flags |= (node & ((1UL << 0) - 1)) << ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0));
}
static inline __attribute__((always_inline)) void set_page_section(struct page *page, unsigned long section)
{
page->flags &= ~(((1UL << 0) - 1) << (((sizeof(unsigned long)*8) - 0) * (0 != 0)));
page->flags |= (section & ((1UL << 0) - 1)) << (((sizeof(unsigned long)*8) - 0) * (0 != 0));
}
static inline __attribute__((always_inline)) void set_page_links(struct page *page, enum zone_type zone,
unsigned long node, unsigned long pfn)
{
set_page_zone(page, zone);
set_page_node(page, node);
set_page_section(page, ((pfn) >> 0));
}
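/*
 * The upper bits of page->flags also encode the page's zone (and, on NUMA or
 * sparsemem builds, its node and section).  With a single node and a single
 * section in this configuration the node/section fields are zero bits wide,
 * so set_page_node()/set_page_section() compile to no-ops and only the zone
 * field is real.
 */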
# 1 "include/linux/vmstat.h" 1
# 1 "include/linux/mm.h" 1
# 7 "include/linux/vmstat.h" 2
# 31 "include/linux/vmstat.h"
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGALLOC_NORMAL , PGALLOC_MOVABLE,
PGFREE, PGACTIVATE, PGDEACTIVATE,
PGFAULT, PGMAJFAULT,
PGREFILL_NORMAL , PGREFILL_MOVABLE,
PGSTEAL_NORMAL , PGSTEAL_MOVABLE,
PGSCAN_KSWAPD_NORMAL , PGSCAN_KSWAPD_MOVABLE,
PGSCAN_DIRECT_NORMAL , PGSCAN_DIRECT_MOVABLE,
PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
KSWAPD_SKIP_CONGESTION_WAIT,
PAGEOUTRUN, ALLOCSTALL, PGROTATED,
UNEVICTABLE_PGCULLED,
UNEVICTABLE_PGSCANNED,
UNEVICTABLE_PGRESCUED,
UNEVICTABLE_PGMLOCKED,
UNEVICTABLE_PGMUNLOCKED,
UNEVICTABLE_PGCLEARED,
UNEVICTABLE_PGSTRANDED,
UNEVICTABLE_MLOCKFREED,
NR_VM_EVENT_ITEMS
};
extern int sysctl_stat_interval;
# 77 "include/linux/vmstat.h"
struct vm_event_state {
unsigned long event[NR_VM_EVENT_ITEMS];
};
extern __attribute__((section(".data..percpu" ""))) __typeof__(struct vm_event_state) vm_event_states;
static inline __attribute__((always_inline)) void __count_vm_event(enum vm_event_item item)
{
do { do { const void *__vpp_verify = (typeof(&(((vm_event_states.event[item])))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((vm_event_states.event[item])))) { case 1: do { *({ do { const void *__vpp_verify = (typeof((&((((vm_event_states.event[item])))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))); (typeof((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((1)); } while (0);break; case 2: do { *({ do { const void *__vpp_verify = (typeof((&((((vm_event_states.event[item])))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))); (typeof((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((1)); } while (0);break; case 4: do { *({ do { const void *__vpp_verify = (typeof((&((((vm_event_states.event[item])))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))); (typeof((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((1)); } while (0);break; case 8: do { *({ do { const void *__vpp_verify = (typeof((&((((vm_event_states.event[item])))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))); (typeof((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}
static inline __attribute__((always_inline)) void count_vm_event(enum vm_event_item item)
{
do { do { const void *__vpp_verify = (typeof(&(((vm_event_states.event[item])))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((vm_event_states.event[item])))) { case 1: do { do { } while (0); *({ do { const void *__vpp_verify = (typeof((&((((vm_event_states.event[item])))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))); (typeof((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((1)); do { } while (0); } while (0);break; case 2: do { do { } while (0); *({ do { const void *__vpp_verify = (typeof((&((((vm_event_states.event[item])))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))); (typeof((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((1)); do { } while (0); } while (0);break; case 4: do { do { } while (0); *({ do { const void *__vpp_verify = (typeof((&((((vm_event_states.event[item])))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))); (typeof((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((1)); do { } while (0); } while (0);break; case 8: do { do { } while (0); *({ do { const void *__vpp_verify = (typeof((&((((vm_event_states.event[item])))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))); (typeof((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((1)); do { } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}
static inline __attribute__((always_inline)) void __count_vm_events(enum vm_event_item item, long delta)
{
do { do { const void *__vpp_verify = (typeof(&((vm_event_states.event[item]))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((vm_event_states.event[item]))) { case 1: do { *({ do { const void *__vpp_verify = (typeof((&(((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))); (typeof((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((delta)); } while (0);break; case 2: do { *({ do { const void *__vpp_verify = (typeof((&(((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))); (typeof((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((delta)); } while (0);break; case 4: do { *({ do { const void *__vpp_verify = (typeof((&(((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))); (typeof((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((delta)); } while (0);break; case 8: do { *({ do { const void *__vpp_verify = (typeof((&(((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))); (typeof((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((delta)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}
static inline __attribute__((always_inline)) void count_vm_events(enum vm_event_item item, long delta)
{
do { do { const void *__vpp_verify = (typeof(&((vm_event_states.event[item]))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((vm_event_states.event[item]))) { case 1: do { do { } while (0); *({ do { const void *__vpp_verify = (typeof((&(((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))); (typeof((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((delta)); do { } while (0); } while (0);break; case 2: do { do { } while (0); *({ do { const void *__vpp_verify = (typeof((&(((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))); (typeof((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((delta)); do { } while (0); } while (0);break; case 4: do { do { } while (0); *({ do { const void *__vpp_verify = (typeof((&(((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))); (typeof((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((delta)); do { } while (0); } while (0);break; case 8: do { do { } while (0); *({ do { const void *__vpp_verify = (typeof((&(((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))); (typeof((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }) += ((delta)); do { } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0);
}
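/*
 * The do/while blocks above are the expanded forms of the this_cpu_add()
 * family of per-CPU accessors: each event counter is bumped in the current
 * CPU's copy of vm_event_states without a global lock.  Typical use is a
 * single call such as count_vm_event(PGFAULT) from the fault path, or
 * count_vm_events(PGPGOUT, nr_pages) when several pages are accounted at
 * once.
 */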
extern void all_vm_events(unsigned long *);
static inline __attribute__((always_inline)) void vm_events_fold_cpu(int cpu)
{
}
# 143 "include/linux/vmstat.h"
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
static inline __attribute__((always_inline)) void zone_page_state_add(long x, struct zone *zone,
enum zone_stat_item item)
{
atomic_long_add(x, &zone->vm_stat[item]);
atomic_long_add(x, &vm_stat[item]);
}
static inline __attribute__((always_inline)) unsigned long global_page_state(enum zone_stat_item item)
{
long x = atomic_long_read(&vm_stat[item]);
if (x < 0)
x = 0;
return x;
}
static inline __attribute__((always_inline)) unsigned long zone_page_state(struct zone *zone,
enum zone_stat_item item)
{
long x = atomic_long_read(&zone->vm_stat[item]);
if (x < 0)
x = 0;
return x;
}
static inline __attribute__((always_inline)) unsigned long zone_page_state_snapshot(struct zone *zone,
enum zone_stat_item item)
{
long x = atomic_long_read(&zone->vm_stat[item]);
int cpu;
for (((cpu)) = -1; ((cpu)) = cpumask_next(((cpu)), (cpu_online_mask)), ((cpu)) < nr_cpu_ids;)
x += ({ do { const void *__vpp_verify = (typeof(((zone->pageset))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((zone->pageset))) *)((zone->pageset)))); (typeof((typeof(*((zone->pageset))) *)((zone->pageset)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->vm_stat_diff[item];
if (x < 0)
x = 0;
return x;
}
extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);
# 235 "include/linux/vmstat.h"
static inline __attribute__((always_inline)) void zap_zone_vm_stats(struct zone *zone)
{
memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}
extern void inc_zone_state(struct zone *, enum zone_stat_item);
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);
void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);
extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
void refresh_cpu_vm_stats(int);
# 600 "include/linux/mm.h" 2
static inline __attribute__((always_inline)) __attribute__((always_inline)) void *lowmem_page_address(struct page *page)
{
return ((void *)((unsigned long)(((phys_addr_t)(((unsigned long)((page) - mem_map) + ((((0UL)) + ((1UL) << 12)-1) >> 12))) << 12)) + ((0x80000000UL) + (0UL)) - (0UL)));
}
# 651 "include/linux/mm.h"
extern struct address_space swapper_space;
static inline __attribute__((always_inline)) struct address_space *page_mapping(struct page *page)
{
struct address_space *mapping = page->mapping;
do { (void)(PageSlab(page)); } while (0);
if (__builtin_expect(!!(PageSwapCache(page)), 0))
mapping = &swapper_space;
else if (__builtin_expect(!!((unsigned long)mapping & 1), 0))
mapping = ((void *)0);
return mapping;
}
static inline __attribute__((always_inline)) void *page_rmapping(struct page *page)
{
return (void *)((unsigned long)page->mapping & ~(1 | 2));
}
static inline __attribute__((always_inline)) int PageAnon(struct page *page)
{
return ((unsigned long)page->mapping & 1) != 0;
}
static inline __attribute__((always_inline)) unsigned long page_index(struct page *page)
{
if (__builtin_expect(!!(PageSwapCache(page)), 0))
return ((page)->private);
return page->index;
}
static inline __attribute__((always_inline)) void reset_page_mapcount(struct page *page)
{
((&(page)->_mapcount)->counter = (-1));
}
static inline __attribute__((always_inline)) int page_mapcount(struct page *page)
{
return (*(volatile int *)&(&(page)->_mapcount)->counter) + 1;
}
static inline __attribute__((always_inline)) int page_mapped(struct page *page)
{
return (*(volatile int *)&(&(page)->_mapcount)->counter) >= 0;
}
# 731 "include/linux/mm.h"
extern void pagefault_out_of_memory(void);
extern void show_free_areas(void);
int shmem_lock(struct file *file, int lock, struct user_struct *user);
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
int shmem_zero_setup(struct vm_area_struct *);
# 749 "include/linux/mm.h"
extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);
struct zap_details {
struct vm_area_struct *nonlinear_vma;
struct address_space *check_mapping;
unsigned long first_index;
unsigned long last_index;
spinlock_t *i_mmap_lock;
unsigned long truncate_count;
};
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *);
unsigned long unmap_vmas(struct mmu_gather **tlb,
struct vm_area_struct *start_vma, unsigned long start_addr,
unsigned long end_addr, unsigned long *nr_accounted,
struct zap_details *);
# 788 "include/linux/mm.h"
struct mm_walk {
int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
int (*hugetlb_entry)(pte_t *, unsigned long,
unsigned long, unsigned long, struct mm_walk *);
struct mm_struct *mm;
void *private;
};
int walk_page_range(unsigned long addr, unsigned long end,
struct mm_walk *walk);
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
static inline __attribute__((always_inline)) void unmap_shared_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen)
{
unmap_mapping_range(mapping, holebegin, holelen, 0);
}
extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
extern int vmtruncate(struct inode *inode, loff_t offset);
extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags);
# 845 "include/linux/mm.h"
extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int nr_pages, int write, int force,
struct page **pages, struct vm_area_struct **vmas);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
struct page *get_dump_page(unsigned long addr);
extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned long offset);
int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);
static inline __attribute__((always_inline)) int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
{
return vma && (vma->vm_end == addr) && (vma->vm_flags & 0x00000100);
}
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len);
extern unsigned long do_mremap(unsigned long addr,
unsigned long old_len, unsigned long new_len,
unsigned long flags, unsigned long new_addr);
extern int mprotect_fixup(struct vm_area_struct *vma,
struct vm_area_struct **pprev, unsigned long start,
unsigned long end, unsigned long newflags);
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
# 896 "include/linux/mm.h"
static inline __attribute__((always_inline)) void set_mm_counter(struct mm_struct *mm, int member, long value)
{
atomic_long_set(&mm->rss_stat.count[member], value);
}
unsigned long get_mm_counter(struct mm_struct *mm, int member);
static inline __attribute__((always_inline)) void add_mm_counter(struct mm_struct *mm, int member, long value)
{
atomic_long_add(value, &mm->rss_stat.count[member]);
}
static inline __attribute__((always_inline)) void inc_mm_counter(struct mm_struct *mm, int member)
{
atomic_long_inc(&mm->rss_stat.count[member]);
}
static inline __attribute__((always_inline)) void dec_mm_counter(struct mm_struct *mm, int member)
{
atomic_long_dec(&mm->rss_stat.count[member]);
}
# 950 "include/linux/mm.h"
static inline __attribute__((always_inline)) unsigned long get_mm_rss(struct mm_struct *mm)
{
return get_mm_counter(mm, MM_FILEPAGES) +
get_mm_counter(mm, MM_ANONPAGES);
}
static inline __attribute__((always_inline)) unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
return ({ typeof(mm->hiwater_rss) _max1 = (mm->hiwater_rss); typeof(get_mm_rss(mm)) _max2 = (get_mm_rss(mm)); (void) (&_max1 == &_max2); _max1 > _max2 ? _max1 : _max2; });
}
static inline __attribute__((always_inline)) unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
return ({ typeof(mm->hiwater_vm) _max1 = (mm->hiwater_vm); typeof(mm->total_vm) _max2 = (mm->total_vm); (void) (&_max1 == &_max2); _max1 > _max2 ? _max1 : _max2; });
}
static inline __attribute__((always_inline)) void update_hiwater_rss(struct mm_struct *mm)
{
unsigned long _rss = get_mm_rss(mm);
if ((mm)->hiwater_rss < _rss)
(mm)->hiwater_rss = _rss;
}
static inline __attribute__((always_inline)) void update_hiwater_vm(struct mm_struct *mm)
{
if (mm->hiwater_vm < mm->total_vm)
mm->hiwater_vm = mm->total_vm;
}
static inline __attribute__((always_inline)) void setmax_mm_hiwater_rss(unsigned long *maxrss,
struct mm_struct *mm)
{
unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
if (*maxrss < hiwater_rss)
*maxrss = hiwater_rss;
}
void sync_mm_rss(struct task_struct *task, struct mm_struct *mm);
# 1012 "include/linux/mm.h"
struct shrinker {
int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask);
int seeks;
struct list_head list;
long nr;
};
extern void register_shrinker(struct shrinker *);
extern void unregister_shrinker(struct shrinker *);
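/*
 * A shrinker lets a cache give memory back under pressure: ->shrink() is
 * called with nr_to_scan == 0 to query how many objects the cache holds, and
 * with a positive nr_to_scan to release that many, returning the remaining
 * count.  Caches opt in and out with register_shrinker()/unregister_shrinker().
 */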
int vma_wants_writenotify(struct vm_area_struct *vma);
extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
static inline __attribute__((always_inline)) int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
unsigned long address)
{
return 0;
}
static inline __attribute__((always_inline)) int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
unsigned long address)
{
return 0;
}
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
static inline __attribute__((always_inline)) pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
return (__builtin_expect(!!(pgd_none(*pgd)), 0) && __pud_alloc(mm, pgd, address))?
((void *)0): pud_offset(pgd, address);
}
static inline __attribute__((always_inline)) pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
return (__builtin_expect(!!(pud_none(*pud)), 0) && __pmd_alloc(mm, pud, address))?
((void *)0): pmd_offset(pud, address);
}
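/*
 * On this configuration the pud and pmd levels are folded into the pgd
 * (two-level page tables), so __pud_alloc()/__pmd_alloc() are compiled to
 * return 0 and pud_alloc()/pmd_alloc() reduce to pud_offset()/pmd_offset()
 * on the existing pgd entry; only __pte_alloc() can actually allocate.
 */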
# 1091 "include/linux/mm.h"
static inline __attribute__((always_inline)) void pgtable_page_ctor(struct page *page)
{
do { do { spinlock_check(&((page)->ptl)); do { *(&(&((page)->ptl))->rlock) = (raw_spinlock_t) { .raw_lock = { .lock = 0 }, }; } while (0); } while (0); } while (0);
inc_zone_page_state(page, NR_PAGETABLE);
}
static inline __attribute__((always_inline)) void pgtable_page_dtor(struct page *page)
{
((page)->mapping = ((void *)0));
dec_zone_page_state(page, NR_PAGETABLE);
}
# 1129 "include/linux/mm.h"
extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
unsigned long zone_start_pfn, unsigned long *zholes_size);
# 1160 "include/linux/mm.h"
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
extern void add_active_range(unsigned int nid, unsigned long start_pfn,
unsigned long end_pfn);
extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
unsigned long end_pfn);
extern void remove_all_active_ranges(void);
void sort_node_map(void);
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
unsigned long end_pfn);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn);
extern unsigned long find_min_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
unsigned long max_low_pfn);
int add_from_early_node_map(struct range *range, int az,
int nr_range, int nid);
void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
u64 goal, u64 limit);
typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
extern void sparse_memory_present_with_active_regions(int nid);
# 1193 "include/linux/mm.h"
extern int __attribute__ ((__section__(".meminit.text"))) __attribute__((__cold__)) early_pfn_to_nid(unsigned long pfn);
extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long,
unsigned long, enum memmap_context);
extern void setup_per_zone_wmarks(void);
extern void calculate_zone_inactive_ratio(struct zone *zone);
extern void mem_init(void);
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) mmap_init(void);
extern void show_mem(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern int after_bootmem;
extern void setup_per_cpu_pageset(void);
extern void zone_pcp_update(struct zone *zone);
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
struct prio_tree_iter *iter);
static inline __attribute__((always_inline)) void vma_nonlinear_insert(struct vm_area_struct *vma,
struct list_head *list)
{
vma->shared.vm_set.parent = ((void *)0);
list_add_tail(&vma->shared.vm_set.list, list);
}
extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
unsigned long end, unsigned long pgoff, struct vm_area_struct *insert);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
struct vm_area_struct *prev, unsigned long addr, unsigned long end,
unsigned long vm_flags, struct anon_vma *, struct file *, unsigned long,
struct mempolicy *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
struct vm_area_struct *, unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
unsigned long addr, unsigned long len, unsigned long pgoff);
extern void exit_mmap(struct mm_struct *);
extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);
extern void added_exe_file_vma(struct mm_struct *mm);
extern void removed_exe_file_vma(struct mm_struct *mm);
# 1272 "include/linux/mm.h"
extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
extern int install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
unsigned long flags, struct page **pages);
extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long pgoff);
extern unsigned long mmap_region(struct file *file, unsigned long addr,
unsigned long len, unsigned long flags,
unsigned int vm_flags, unsigned long pgoff);
static inline __attribute__((always_inline)) unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long offset)
{
unsigned long ret = -22;
if ((offset + ((((len)) + ((typeof((len)))((((1UL) << 12))) - 1)) & ~((typeof((len)))((((1UL) << 12))) - 1))) < offset)
goto out;
if (!(offset & ~(~((1 << 12) - 1))))
ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> 12);
out:
return ret;
}
extern int do_munmap(struct mm_struct *, unsigned long, size_t);
extern unsigned long do_brk(unsigned long, unsigned long);
extern unsigned long page_unuse(struct page *);
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
loff_t lstart, loff_t lend);
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
int write_one_page(struct page *page, int wait);
void task_dirty_inc(struct task_struct *tsk);
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
unsigned long offset, unsigned long nr_to_read);
void page_cache_sync_readahead(struct address_space *mapping,
struct file_ra_state *ra,
struct file *filp,
unsigned long offset,
unsigned long size);
void page_cache_async_readahead(struct address_space *mapping,
struct file_ra_state *ra,
struct file *filp,
struct page *pg,
unsigned long offset,
unsigned long size);
unsigned long max_sane_readahead(unsigned long nr);
unsigned long ra_submit(struct file_ra_state *ra,
struct address_space *mapping,
struct file *filp);
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
extern int expand_stack_downwards(struct vm_area_struct *vma,
unsigned long address);
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev);
static inline __attribute__((always_inline)) struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
struct vm_area_struct * vma = find_vma(mm,start_addr);
if (vma && end_addr <= vma->vm_start)
vma = ((void *)0);
return vma;
}
static inline __attribute__((always_inline)) unsigned long vma_pages(struct vm_area_struct *vma)
{
return (vma->vm_end - vma->vm_start) >> 12;
}
pgprot_t vm_get_page_prot(unsigned long vm_flags);
struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
struct page *follow_page(struct vm_area_struct *, unsigned long address,
unsigned int foll_flags);
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
# 1425 "include/linux/mm.h"
static inline __attribute__((always_inline)) void
kernel_map_pages(struct page *page, int numpages, int enable) {}
static inline __attribute__((always_inline)) void enable_debug_pagealloc(void)
{
}
extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
int in_gate_area_no_task(unsigned long addr);
int drop_caches_sysctl_handler(struct ctl_table *, int,
void *, size_t *, loff_t *);
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
unsigned long lru_pages);
extern int randomize_va_space;
const char * arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);
void sparse_mem_maps_populate_node(struct page **map_map,
unsigned long pnum_begin,
unsigned long pnum_end,
unsigned long map_count,
int nodeid);
struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
void *vmemmap_alloc_block_buf(unsigned long size, int node);
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(struct page *start_page,
unsigned long pages, int node);
int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
void vmemmap_populate_print_last(void);
enum mf_flags {
MF_COUNT_INCREASED = 1 << 0,
};
extern void memory_failure(unsigned long pfn, int trapno);
extern int __memory_failure(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t mce_bad_pages;
extern int soft_offline_page(struct page *page, int flags);
static inline __attribute__((always_inline)) int is_hwpoison_address(unsigned long addr)
{
return 0;
}
extern void dump_page(struct page *page);
# 7 "include/linux/scatterlist.h" 2
struct sg_table {
struct scatterlist *sgl;
unsigned int nents;
unsigned int orig_nents;
};
# 55 "include/linux/scatterlist.h"
static inline __attribute__((always_inline)) void sg_assign_page(struct scatterlist *sg, struct page *page)
{
unsigned long page_link = sg->page_link & 0x3;
__BUG_ON((unsigned long)((unsigned long) page & 0x03));
sg->page_link = page_link | (unsigned long) page;
}
# 85 "include/linux/scatterlist.h"
static inline __attribute__((always_inline)) void sg_set_page(struct scatterlist *sg, struct page *page,
unsigned int len, unsigned int offset)
{
sg_assign_page(sg, page);
sg->offset = offset;
sg->length = len;
}
static inline __attribute__((always_inline)) struct page *sg_page(struct scatterlist *sg)
{
return (struct page *)((sg)->page_link & ~0x3);
}
# 109 "include/linux/scatterlist.h"
static inline __attribute__((always_inline)) void sg_set_buf(struct scatterlist *sg, const void *buf,
unsigned int buflen)
{
sg_set_page(sg, (mem_map + ((((virt_to_phys(buf)) >> 12)) - ((((0UL)) + ((1UL) << 12)-1) >> 12))), buflen, ((unsigned long)(buf) & ~(~((1 << 12) - 1))));
}
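/*
 * A scatterlist entry records a (page, offset, length) triple; sg_set_buf()
 * converts a lowmem kernel-virtual buffer into that form.  A minimal sketch
 * for one contiguous buffer, assuming hypothetical buf/len variables:
 *
 *   struct scatterlist sg;
 *   sg_init_one(&sg, buf, len);     zero the entry, set the buffer, mark end
 *   ...                             hand &sg (nents = 1) to dma_map_sg() or
 *                                   to a driver that consumes sglists
 */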
# 131 "include/linux/scatterlist.h"
static inline __attribute__((always_inline)) void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
struct scatterlist *sgl)
{
BUG();
prv[prv_nents - 1].offset = 0;
prv[prv_nents - 1].length = 0;
prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
}
# 160 "include/linux/scatterlist.h"
static inline __attribute__((always_inline)) void sg_mark_end(struct scatterlist *sg)
{
sg->page_link |= 0x02;
sg->page_link &= ~0x01;
}
# 182 "include/linux/scatterlist.h"
static inline __attribute__((always_inline)) dma_addr_t sg_phys(struct scatterlist *sg)
{
return ((dma_addr_t)((unsigned long)((sg_page(sg)) - mem_map) + ((((0UL)) + ((1UL) << 12)-1) >> 12)) << 12) + sg->offset;
}
# 197 "include/linux/scatterlist.h"
static inline __attribute__((always_inline)) void *sg_virt(struct scatterlist *sg)
{
return lowmem_page_address(sg_page(sg)) + sg->offset;
}
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *);
void sg_free_table(struct sg_table *);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);
# 248 "include/linux/scatterlist.h"
struct sg_mapping_iter {
struct page *page;
void *addr;
size_t length;
size_t consumed;
struct scatterlist *__sg;
unsigned int __nents;
unsigned int __offset;
unsigned int __flags;
};
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
unsigned int nents, unsigned int flags);
int sg_miter_next(struct sg_mapping_iter *miter);
void sg_miter_stop(struct sg_mapping_iter *miter);
# 8 "include/linux/dma-mapping.h" 2
enum dma_data_direction {
DMA_BIDIRECTIONAL = 0,
DMA_TO_DEVICE = 1,
DMA_FROM_DEVICE = 2,
DMA_NONE = 3,
};
struct dma_map_ops {
void* (*alloc_coherent)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
void (*free_coherent)(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
dma_addr_t (*map_page)(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs);
void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs);
int (*map_sg)(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs);
void (*unmap_sg)(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
struct dma_attrs *attrs);
void (*sync_single_for_cpu)(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir);
void (*sync_single_for_device)(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir);
void (*sync_sg_for_cpu)(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir);
void (*sync_sg_for_device)(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir);
int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
int (*dma_supported)(struct device *dev, u64 mask);
int (*set_dma_mask)(struct device *dev, u64 mask);
int is_phys;
};
typedef u64 DMA_nnBIT_MASK __attribute__((deprecated));
# 80 "include/linux/dma-mapping.h"
static inline __attribute__((always_inline)) int valid_dma_direction(int dma_direction)
{
return ((dma_direction == DMA_BIDIRECTIONAL) ||
(dma_direction == DMA_TO_DEVICE) ||
(dma_direction == DMA_FROM_DEVICE));
}
static inline __attribute__((always_inline)) int is_device_dma_capable(struct device *dev)
{
return dev->dma_mask != ((void *)0) && *dev->dma_mask != 0x0ULL;
}
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/dma-mapping.h" 1
# 1 "include/asm-generic/dma-coherent.h" 1
# 9 "include/asm-generic/dma-coherent.h"
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dma_addr_t device_addr, size_t size, int flags);
extern void
dma_release_declared_memory(struct device *dev);
extern void *
dma_mark_declared_memory_occupied(struct device *dev,
dma_addr_t device_addr, size_t size);
# 7 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/dma-mapping.h" 2
void *dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
void dma_free_noncoherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction);
extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction);
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction);
extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction);
static inline __attribute__((always_inline)) void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size, enum dma_data_direction direction)
{
dma_unmap_single(dev, dma_address, size, direction);
}
extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nhwentries, enum dma_data_direction direction);
extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction);
extern void dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction direction);
extern void dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t dma_handle, unsigned long offset, size_t size,
enum dma_data_direction direction);
extern void dma_sync_single_range_for_device(struct device *dev,
dma_addr_t dma_handle, unsigned long offset, size_t size,
enum dma_data_direction direction);
extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction direction);
extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction direction);
extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
extern int dma_supported(struct device *dev, u64 mask);
static inline __attribute__((always_inline)) int
dma_set_mask(struct device *dev, u64 mask)
{
if(!dev->dma_mask || !dma_supported(dev, mask))
return -5;
*dev->dma_mask = mask;
return 0;
}
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
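/*
 * Typical streaming-DMA use of the API declared above (illustrative sketch
 * only, not code from this driver): map a CPU buffer, check for a mapping
 * failure, let the hardware run, then unmap.
 *
 *     dma_addr_t h = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *     if (dma_mapping_error(dev, h))
 *             return -ENOMEM;
 *     ... start the transfer and wait for completion ...
 *     dma_unmap_single(dev, h, len, DMA_TO_DEVICE);
 */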
# 94 "include/linux/dma-mapping.h" 2
static inline __attribute__((always_inline)) u64 dma_get_mask(struct device *dev)
{
if (dev && dev->dma_mask && *dev->dma_mask)
return *dev->dma_mask;
return (((32) == 64) ? ~0ULL : ((1ULL<<(32))-1));
}
static inline __attribute__((always_inline)) int dma_set_coherent_mask(struct device *dev, u64 mask)
{
if (!dma_supported(dev, mask))
return -5;
dev->coherent_dma_mask = mask;
return 0;
}
extern u64 dma_get_required_mask(struct device *dev);
static inline __attribute__((always_inline)) unsigned int dma_get_max_seg_size(struct device *dev)
{
return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536;
}
static inline __attribute__((always_inline)) unsigned int dma_set_max_seg_size(struct device *dev,
unsigned int size)
{
if (dev->dma_parms) {
dev->dma_parms->max_segment_size = size;
return 0;
} else
return -5;
}
static inline __attribute__((always_inline)) unsigned long dma_get_seg_boundary(struct device *dev)
{
return dev->dma_parms ?
dev->dma_parms->segment_boundary_mask : 0xffffffff;
}
static inline __attribute__((always_inline)) int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
if (dev->dma_parms) {
dev->dma_parms->segment_boundary_mask = mask;
return 0;
} else
return -5;
}
static inline __attribute__((always_inline)) int dma_get_cache_alignment(void)
{
return 128;
return 1;
}
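/*
 * dma_get_cache_alignment() reports the minimum alignment (128 bytes here)
 * that DMA buffers should honour so they never share a cache line with
 * unrelated data.  The unreachable "return 1;" appears to be the generic
 * fallback of the original #ifdef, left behind by preprocessing.
 */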
# 189 "include/linux/dma-mapping.h"
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle);
extern int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dma_addr_t device_addr, size_t size,
int flags);
extern void dmam_release_declared_memory(struct device *dev);
# 216 "include/linux/dma-mapping.h"
struct dma_attrs;
# 27 "include/linux/dmaengine.h" 2
typedef s32 dma_cookie_t;
# 46 "include/linux/dmaengine.h"
enum dma_status {
DMA_SUCCESS,
DMA_IN_PROGRESS,
DMA_PAUSED,
DMA_ERROR,
};
enum dma_transaction_type {
DMA_MEMCPY,
DMA_XOR,
DMA_PQ,
DMA_XOR_VAL,
DMA_PQ_VAL,
DMA_MEMSET,
DMA_INTERRUPT,
DMA_PRIVATE,
DMA_ASYNC_TX,
DMA_SLAVE,
};
# 98 "include/linux/dmaengine.h"
enum dma_ctrl_flags {
DMA_PREP_INTERRUPT = (1 << 0),
DMA_CTRL_ACK = (1 << 1),
DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2),
DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3),
DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4),
DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5),
DMA_PREP_PQ_DISABLE_P = (1 << 6),
DMA_PREP_PQ_DISABLE_Q = (1 << 7),
DMA_PREP_CONTINUE = (1 << 8),
DMA_PREP_FENCE = (1 << 9),
};
# 123 "include/linux/dmaengine.h"
enum dma_ctrl_cmd {
DMA_TERMINATE_ALL,
DMA_PAUSE,
DMA_RESUME,
DMA_SLAVE_CONFIG,
};
enum sum_check_bits {
SUM_CHECK_P = 0,
SUM_CHECK_Q = 1,
};
enum sum_check_flags {
SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
};
typedef struct { unsigned long bits[((((DMA_SLAVE + 1)) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; } dma_cap_mask_t;
struct dma_chan_percpu {
unsigned long memcpy_count;
unsigned long bytes_transferred;
};
# 179 "include/linux/dmaengine.h"
struct dma_chan {
struct dma_device *device;
dma_cookie_t cookie;
int chan_id;
struct dma_chan_dev *dev;
struct list_head device_node;
struct dma_chan_percpu *local;
int client_count;
int table_count;
void *private;
};
# 201 "include/linux/dmaengine.h"
struct dma_chan_dev {
struct dma_chan *chan;
struct device device;
int dev_id;
atomic_t *idr_ref;
};
enum dma_slave_buswidth {
DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
};
# 263 "include/linux/dmaengine.h"
struct dma_slave_config {
enum dma_data_direction direction;
dma_addr_t src_addr;
dma_addr_t dst_addr;
enum dma_slave_buswidth src_addr_width;
enum dma_slave_buswidth dst_addr_width;
u32 src_maxburst;
u32 dst_maxburst;
};
static inline __attribute__((always_inline)) const char *dma_chan_name(struct dma_chan *chan)
{
return dev_name(&chan->dev->device);
}
void dma_chan_cleanup(struct kref *kref);
# 291 "include/linux/dmaengine.h"
typedef int (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
typedef void (*dma_async_tx_callback)(void *dma_async_param);
# 311 "include/linux/dmaengine.h"
struct dma_async_tx_descriptor {
dma_cookie_t cookie;
enum dma_ctrl_flags flags;
dma_addr_t phys;
struct dma_chan *chan;
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback;
void *callback_param;
struct dma_async_tx_descriptor *next;
struct dma_async_tx_descriptor *parent;
spinlock_t lock;
};
# 353 "include/linux/dmaengine.h"
static inline __attribute__((always_inline)) void txd_lock(struct dma_async_tx_descriptor *txd)
{
spin_lock_bh(&txd->lock);
}
static inline __attribute__((always_inline)) void txd_unlock(struct dma_async_tx_descriptor *txd)
{
spin_unlock_bh(&txd->lock);
}
static inline __attribute__((always_inline)) void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
{
txd->next = next;
next->parent = txd;
}
static inline __attribute__((always_inline)) void txd_clear_parent(struct dma_async_tx_descriptor *txd)
{
txd->parent = ((void *)0);
}
static inline __attribute__((always_inline)) void txd_clear_next(struct dma_async_tx_descriptor *txd)
{
txd->next = ((void *)0);
}
static inline __attribute__((always_inline)) struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
{
return txd->parent;
}
static inline __attribute__((always_inline)) struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
{
return txd->next;
}
# 393 "include/linux/dmaengine.h"
struct dma_tx_state {
dma_cookie_t last;
dma_cookie_t used;
u32 residue;
};
# 433 "include/linux/dmaengine.h"
struct dma_device {
unsigned int chancnt;
unsigned int privatecnt;
struct list_head channels;
struct list_head global_node;
dma_cap_mask_t cap_mask;
unsigned short max_xor;
unsigned short max_pq;
u8 copy_align;
u8 xor_align;
u8 pq_align;
u8 fill_align;
int dev_id;
struct device *dev;
int (*device_alloc_chan_resources)(struct dma_chan *chan);
void (*device_free_chan_resources)(struct dma_chan *chan);
struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
size_t len, enum sum_check_flags *result, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf,
size_t len, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
unsigned int src_cnt, const unsigned char *scf, size_t len,
enum sum_check_flags *pqres, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags);
int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg);
enum dma_status (*device_tx_status)(struct dma_chan *chan,
dma_cookie_t cookie,
struct dma_tx_state *txstate);
void (*device_issue_pending)(struct dma_chan *chan);
};
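/*
 * struct dma_device is what a DMA-engine driver registers with
 * dma_async_device_register(): its channel list, a capability mask, and one
 * device_prep_dma_*() callback per supported transaction type.  The prep
 * callbacks hand back reusable descriptors; device_issue_pending() kicks the
 * hardware and device_tx_status() reports per-cookie completion.
 */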
static inline __attribute__((always_inline)) int dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
{
size_t mask;
if (!align)
return true;
mask = (1 << align) - 1;
if (mask & (off1 | off2 | len))
return false;
return true;
}
static inline __attribute__((always_inline)) int is_dma_copy_aligned(struct dma_device *dev, size_t off1,
size_t off2, size_t len)
{
return dmaengine_check_align(dev->copy_align, off1, off2, len);
}
static inline __attribute__((always_inline)) int is_dma_xor_aligned(struct dma_device *dev, size_t off1,
size_t off2, size_t len)
{
return dmaengine_check_align(dev->xor_align, off1, off2, len);
}
static inline __attribute__((always_inline)) int is_dma_pq_aligned(struct dma_device *dev, size_t off1,
size_t off2, size_t len)
{
return dmaengine_check_align(dev->pq_align, off1, off2, len);
}
static inline __attribute__((always_inline)) int is_dma_fill_aligned(struct dma_device *dev, size_t off1,
size_t off2, size_t len)
{
return dmaengine_check_align(dev->fill_align, off1, off2, len);
}
static inline __attribute__((always_inline)) void
dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
{
dma->max_pq = maxpq;
if (has_pq_continue)
dma->max_pq |= (1 << 15);
}
static inline __attribute__((always_inline)) int dmaf_continue(enum dma_ctrl_flags flags)
{
return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
}
static inline __attribute__((always_inline)) int dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
{
enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
return (flags & mask) == mask;
}
static inline __attribute__((always_inline)) int dma_dev_has_pq_continue(struct dma_device *dma)
{
return (dma->max_pq & (1 << 15)) == (1 << 15);
}
static inline __attribute__((always_inline)) unsigned short dma_dev_to_maxpq(struct dma_device *dma)
{
return dma->max_pq & ~(1 << 15);
}
# 569 "include/linux/dmaengine.h"
static inline __attribute__((always_inline)) int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
{
if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
return dma_dev_to_maxpq(dma);
else if (dmaf_p_disabled_continue(flags))
return dma_dev_to_maxpq(dma) - 1;
else if (dmaf_continue(flags))
return dma_dev_to_maxpq(dma) - 3;
BUG();
}
static inline __attribute__((always_inline)) void dmaengine_get(void)
{
}
static inline __attribute__((always_inline)) void dmaengine_put(void)
{
}
static inline __attribute__((always_inline)) void net_dmaengine_get(void)
{
}
static inline __attribute__((always_inline)) void net_dmaengine_put(void)
{
}
# 615 "include/linux/dmaengine.h"
static inline __attribute__((always_inline)) void async_dmaengine_get(void)
{
}
static inline __attribute__((always_inline)) void async_dmaengine_put(void)
{
}
static inline __attribute__((always_inline)) struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
return ((void *)0);
}
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
struct dma_chan *chan);
static inline __attribute__((always_inline)) void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
tx->flags |= DMA_CTRL_ACK;
}
static inline __attribute__((always_inline)) void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
{
tx->flags &= ~DMA_CTRL_ACK;
}
static inline __attribute__((always_inline)) int async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
}
static inline __attribute__((always_inline)) int __first_dma_cap(const dma_cap_mask_t *srcp)
{
return ({ int __min1 = ((DMA_SLAVE + 1)); int __min2 = (find_next_bit((srcp->bits), ((DMA_SLAVE + 1)), 0)); __min1 < __min2 ? __min1: __min2; })
;
}
static inline __attribute__((always_inline)) int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
{
return ({ int __min1 = ((DMA_SLAVE + 1)); int __min2 = (find_next_bit(srcp->bits, (DMA_SLAVE + 1), n+1)); __min1 < __min2 ? __min1: __min2; })
;
}
static inline __attribute__((always_inline)) void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
set_bit(tx_type, dstp->bits);
}
static inline __attribute__((always_inline)) void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
clear_bit(tx_type, dstp->bits);
}
static inline __attribute__((always_inline)) void __dma_cap_zero(dma_cap_mask_t *dstp)
{
bitmap_zero(dstp->bits, (DMA_SLAVE + 1));
}
static inline __attribute__((always_inline)) int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
return test_bit(tx_type, srcp->bits);
}
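/*
 * Channel allocation sketch (illustrative only; drivers normally use the
 * dma_cap_zero()/dma_cap_set()/dma_request_channel() wrappers, which expand
 * to the __-prefixed helpers above):
 *
 *     dma_cap_mask_t mask;
 *     struct dma_chan *chan;
 *
 *     __dma_cap_zero(&mask);
 *     __dma_cap_set(DMA_MEMCPY, &mask);
 *     chan = __dma_request_channel(&mask, NULL, NULL);
 *     ...
 *     dma_release_channel(chan);
 */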
# 706 "include/linux/dmaengine.h"
static inline __attribute__((always_inline)) void dma_async_issue_pending(struct dma_chan *chan)
{
chan->device->device_issue_pending(chan);
}
# 724 "include/linux/dmaengine.h"
static inline __attribute__((always_inline)) enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
struct dma_tx_state state;
enum dma_status status;
status = chan->device->device_tx_status(chan, cookie, &state);
if (last)
*last = state.last;
if (used)
*used = state.used;
return status;
}
# 750 "include/linux/dmaengine.h"
static inline __attribute__((always_inline)) enum dma_status dma_async_is_complete(dma_cookie_t cookie,
dma_cookie_t last_complete, dma_cookie_t last_used)
{
if (last_complete <= last_used) {
if ((cookie <= last_complete) || (cookie > last_used))
return DMA_SUCCESS;
} else {
if ((cookie <= last_complete) && (cookie > last_used))
return DMA_SUCCESS;
}
return DMA_IN_PROGRESS;
}
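/*
 * Cookies are signed, monotonically increasing transaction ids, so the
 * two-branch comparison above is a wrap-safe test of whether "cookie" lies
 * inside the window of still-outstanding descriptors (last_complete,
 * last_used]; anything outside that window has already completed.
 */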
static inline __attribute__((always_inline)) void
dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
{
if (st) {
st->last = last;
st->used = used;
st->residue = residue;
}
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
static inline __attribute__((always_inline)) enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
return DMA_SUCCESS;
}
static inline __attribute__((always_inline)) void dma_issue_pending_all(void)
{
do { } while (0);
}
int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
void dma_release_channel(struct dma_chan *chan);
struct dma_page_list {
char *base_address;
int nr_pages;
struct page **pages;
};
struct dma_pinned_list {
int nr_iovecs;
struct dma_page_list page_list[0];
};
struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
struct dma_pinned_list *pinned_list, struct page *page,
unsigned int offset, size_t len);
# 31 "include/linux/skbuff.h" 2
# 1 "include/linux/hrtimer.h" 1
# 27 "include/linux/hrtimer.h"
struct hrtimer_clock_base;
struct hrtimer_cpu_base;
enum hrtimer_mode {
HRTIMER_MODE_ABS = 0x0,
HRTIMER_MODE_REL = 0x1,
HRTIMER_MODE_PINNED = 0x02,
HRTIMER_MODE_ABS_PINNED = 0x02,
HRTIMER_MODE_REL_PINNED = 0x03,
};
enum hrtimer_restart {
HRTIMER_NORESTART,
HRTIMER_RESTART,
};
# 103 "include/linux/hrtimer.h"
struct hrtimer {
struct rb_node node;
ktime_t _expires;
ktime_t _softexpires;
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
unsigned long state;
};
# 124 "include/linux/hrtimer.h"
struct hrtimer_sleeper {
struct hrtimer timer;
struct task_struct *task;
};
# 141 "include/linux/hrtimer.h"
struct hrtimer_clock_base {
struct hrtimer_cpu_base *cpu_base;
clockid_t index;
struct rb_root active;
struct rb_node *first;
ktime_t resolution;
ktime_t (*get_time)(void);
ktime_t softirq_time;
};
# 171 "include/linux/hrtimer.h"
struct hrtimer_cpu_base {
raw_spinlock_t lock;
struct hrtimer_clock_base clock_base[2];
# 183 "include/linux/hrtimer.h"
};
static inline __attribute__((always_inline)) void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
timer->_expires = time;
timer->_softexpires = time;
}
static inline __attribute__((always_inline)) void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
{
timer->_softexpires = time;
timer->_expires = ktime_add_safe(time, delta);
}
static inline __attribute__((always_inline)) void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
{
timer->_softexpires = time;
timer->_expires = ktime_add_safe(time, ns_to_ktime(delta));
}
static inline __attribute__((always_inline)) void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
{
timer->_expires.tv64 = tv64;
timer->_softexpires.tv64 = tv64;
}
static inline __attribute__((always_inline)) void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
{
timer->_expires = ktime_add_safe(timer->_expires, time);
timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
}
static inline __attribute__((always_inline)) void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
{
timer->_expires = ktime_add_ns(timer->_expires, ns);
timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
}
static inline __attribute__((always_inline)) ktime_t hrtimer_get_expires(const struct hrtimer *timer)
{
return timer->_expires;
}
static inline __attribute__((always_inline)) ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
{
return timer->_softexpires;
}
static inline __attribute__((always_inline)) s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
{
return timer->_expires.tv64;
}
static inline __attribute__((always_inline)) s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
{
return timer->_softexpires.tv64;
}
static inline __attribute__((always_inline)) s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
{
return ktime_to_ns(timer->_expires);
}
static inline __attribute__((always_inline)) ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
{
return ktime_sub(timer->_expires, timer->base->get_time());
}
# 293 "include/linux/hrtimer.h"
static inline __attribute__((always_inline)) void clock_was_set(void) { }
static inline __attribute__((always_inline)) void hrtimer_peek_ahead_timers(void) { }
static inline __attribute__((always_inline)) void hres_timers_resume(void) { }
static inline __attribute__((always_inline)) ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
{
return timer->base->softirq_time;
}
static inline __attribute__((always_inline)) int hrtimer_is_hres_active(struct hrtimer *timer)
{
return 0;
}
extern ktime_t ktime_get(void);
extern ktime_t ktime_get_real(void);
extern __attribute__((section(".data..percpu" ""))) __typeof__(struct tick_device) tick_cpu_device;
extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
enum hrtimer_mode mode);
static inline __attribute__((always_inline)) void hrtimer_init_on_stack(struct hrtimer *timer,
clockid_t which_clock,
enum hrtimer_mode mode)
{
hrtimer_init(timer, which_clock, mode);
}
static inline __attribute__((always_inline)) void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
const enum hrtimer_mode mode);
extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
unsigned long range_ns, const enum hrtimer_mode mode);
extern int
__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
unsigned long delta_ns,
const enum hrtimer_mode mode, int wakeup);
extern int hrtimer_cancel(struct hrtimer *timer);
extern int hrtimer_try_to_cancel(struct hrtimer *timer);
static inline __attribute__((always_inline)) int hrtimer_start_expires(struct hrtimer *timer,
enum hrtimer_mode mode)
{
unsigned long delta;
ktime_t soft, hard;
soft = hrtimer_get_softexpires(timer);
hard = hrtimer_get_expires(timer);
delta = ktime_to_ns(ktime_sub(hard, soft));
return hrtimer_start_range_ns(timer, soft, delta, mode);
}
static inline __attribute__((always_inline)) int hrtimer_restart(struct hrtimer *timer)
{
return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
extern ktime_t hrtimer_get_next_event(void);
static inline __attribute__((always_inline)) int hrtimer_active(const struct hrtimer *timer)
{
return timer->state != 0x00;
}
static inline __attribute__((always_inline)) int hrtimer_is_queued(struct hrtimer *timer)
{
return timer->state & 0x01;
}
static inline __attribute__((always_inline)) int hrtimer_callback_running(struct hrtimer *timer)
{
return timer->state & 0x02;
}
extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
static inline __attribute__((always_inline)) u64 hrtimer_forward_now(struct hrtimer *timer,
ktime_t interval)
{
return hrtimer_forward(timer, timer->base->get_time(), interval);
}
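/*
 * hrtimer usage sketch (illustrative only; my_cb is a placeholder name, not
 * code from this module): initialise the timer once, arm it with a ktime_t
 * expiry, and have the callback either stop or re-arm itself by forwarding
 * the expiry before returning HRTIMER_RESTART.
 *
 *     static enum hrtimer_restart my_cb(struct hrtimer *t)
 *     {
 *             hrtimer_forward_now(t, ktime_set(0, 100 * NSEC_PER_USEC));
 *             return HRTIMER_RESTART;
 *     }
 *
 *     hrtimer_init(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *     t.function = my_cb;
 *     hrtimer_start(&t, ktime_set(0, 100 * NSEC_PER_USEC), HRTIMER_MODE_REL);
 */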
extern long hrtimer_nanosleep(struct timespec *rqtp,
struct timespec *rmtp,
const enum hrtimer_mode mode,
const clockid_t clockid);
extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
struct task_struct *tsk);
extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
const enum hrtimer_mode mode);
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
unsigned long delta, const enum hrtimer_mode mode, int clock);
extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
extern void hrtimer_run_queues(void);
extern void hrtimer_run_pending(void);
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) hrtimers_init(void);
extern u64 ktime_divns(const ktime_t kt, s64 div);
extern void sysrq_timer_list_show(void);
# 32 "include/linux/skbuff.h" 2
# 109 "include/linux/skbuff.h"
struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct nf_conntrack {
atomic_t use;
__u16 lVlanId;
__u16 tci;
__u8 tci_valid;
};
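/*
 * Mainline's struct nf_conntrack carries only the "use" refcount; the
 * lVlanId/tci/tci_valid members look like vendor additions used to carry
 * VLAN information alongside the conntrack reference.
 */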
# 153 "include/linux/skbuff.h"
typedef struct IGMP_HWNATEntry_s
{
struct list_head list;
struct rcu_head rcu;
int proto;
int index;
unsigned long mask;
unsigned char wifinum;
unsigned char grp_addr[16];
unsigned char src_addr[16];
struct timer_list age_timer;
}IGMP_HWNATEntry_t;
struct sk_buff_head {
struct sk_buff *next;
struct sk_buff *prev;
__u32 qlen;
spinlock_t lock;
};
struct sk_buff;
# 188 "include/linux/skbuff.h"
typedef struct skb_frag_struct skb_frag_t;
struct skb_frag_struct {
struct page *page;
__u32 page_offset;
__u32 size;
};
# 221 "include/linux/skbuff.h"
struct skb_shared_hwtstamps {
ktime_t hwtstamp;
ktime_t syststamp;
};
# 238 "include/linux/skbuff.h"
union skb_shared_tx {
struct {
__u8 hardware:1,
software:1,
in_progress:1,
prevent_sk_orphan:1;
};
__u8 flags;
};
struct skb_shared_info {
unsigned short nr_frags;
unsigned short gso_size;
unsigned short gso_segs;
unsigned short gso_type;
__be32 ip6_frag_id;
union skb_shared_tx tx_flags;
struct sk_buff *frag_list;
struct skb_shared_hwtstamps hwtstamps;
atomic_t dataref;
void * destructor_arg;
skb_frag_t frags[(65536/((1UL) << 12) + 2)];
};
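/*
 * skb_shared_info lives directly after the packet data, at
 * skb_end_pointer(skb), and is shared by all clones of an skb: dataref
 * counts the users of the data buffer, frags[] holds the paged fragments of
 * a nonlinear skb, and frag_list chains further sk_buffs (e.g. for
 * reassembly or GSO).
 */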
# 289 "include/linux/skbuff.h"
enum {
SKB_FCLONE_UNAVAILABLE,
SKB_FCLONE_ORIG,
SKB_FCLONE_CLONE,
};
enum {
SKB_GSO_TCPV4 = 1 << 0,
SKB_GSO_UDP = 1 << 1,
SKB_GSO_DODGY = 1 << 2,
SKB_GSO_TCP_ECN = 1 << 3,
SKB_GSO_TCPV6 = 1 << 4,
SKB_GSO_FCOE = 1 << 5,
};
# 317 "include/linux/skbuff.h"
typedef unsigned char *sk_buff_data_t;
# 420 "include/linux/skbuff.h"
struct sk_buff {
struct sk_buff *next;
struct sk_buff *prev;
ktime_t tstamp;
struct sock *sk;
struct net_device *dev;
char cb[48] __attribute__((aligned(8)));
unsigned long _skb_refdst;
struct sec_path *sp;
unsigned int len,
data_len;
__u16 mac_len,
hdr_len;
union {
__wsum csum;
struct {
__u16 csum_start;
__u16 csum_offset;
};
};
__u32 priority;
;
__u8 local_df:1,
cloned:1,
ip_summed:2,
nohdr:1,
nfctinfo:3;
__u8 pkt_type:3,
fclone:2,
ipvs_property:1,
peeked:1,
nf_trace:1;
;
__be16 protocol;
void (*destructor)(struct sk_buff *skb);
int (*skb_recycling_callback)(struct sk_buff *skb);
__u8 skb_recycling_ind;
char foe[8];
__u8 bridge_flag;
__u8 qosEnque;
struct nf_conntrack *nfct;
struct sk_buff *nfct_reasm;
# 499 "include/linux/skbuff.h"
int skb_iif;
__u16 tc_index;
__u16 tc_verd;
__u32 rxhash;
;
__u16 queue_mapping:16;
__u8 ndisc_nodetype:2,
deliver_no_wcard:1;
;
# 536 "include/linux/skbuff.h"
__u32 mark;
__u32 dropcount;
__u16 vlan_tci;
__u32 portbind_mark;
struct net_device *orig_dev;
# 568 "include/linux/skbuff.h"
__u16 vlan_tags[2];
# 591 "include/linux/skbuff.h"
__u32 vlan_tag_flag;
__u32 xpon_igmp_flag;
__u16 pon_vlan_tpid[4];
__u16 pon_vlan_tci[4];
__u8 pon_tag_num;
# 624 "include/linux/skbuff.h"
__u32 pon_vlan_flag;
struct net_device * original_dev;
# 638 "include/linux/skbuff.h"
__u32 pon_mac_filter_flag;
__u8 ppe_info_flag;
__u16 ppe_magic;
__u8 ppe_ai;
__u16 ppe_foe_entry;
# 698 "include/linux/skbuff.h"
__u8 ipsec_pt_flag;
# 714 "include/linux/skbuff.h"
union{
struct{
__u16 gem_type : 1 ;
__u16 gem_port :12 ;
__u16 : 3 ;
};
struct{
__u8 epon_queue ;
__u8 epon_pbit ;
};
__u16 xpon_raw_info ;
};
__u8 pon_mark ;
__u8 v_if ;
# 738 "include/linux/skbuff.h"
__u16 lan_vlan_tci;
__u8 lan_vlan_tci_valid;
sk_buff_data_t transport_header;
sk_buff_data_t network_header;
sk_buff_data_t mac_header;
sk_buff_data_t tail;
sk_buff_data_t end;
unsigned char *head,
*data;
unsigned int truesize;
atomic_t users;
};
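/*
 * Core sk_buff layout: head/data/tail/end delimit the linear buffer,
 * len/data_len track total vs. paged bytes, and the mac/network/transport
 * header pointers are set as the packet moves up or down the stack.  The
 * pon_*, xpon_*, gem_*/epon_* and vlan_tags[] members are not part of
 * mainline 2.6.36; they appear to be additions of this vendor xPON tree.
 */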
# 774 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) struct dst_entry *skb_dst(const struct sk_buff *skb)
{
 ({ int __ret_warn_on = !!((skb->_skb_refdst & 1UL) && !rcu_read_lock_held() && !rcu_read_lock_bh_held()); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_slowpath_null("include/linux/skbuff.h", 781); __builtin_expect(!!(__ret_warn_on), 0); });
return (struct dst_entry *)(skb->_skb_refdst & ~(1UL));
}
# 793 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
skb->_skb_refdst = (unsigned long)dst;
}
# 806 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
({ int __ret_warn_on = !!(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_slowpath_null("include/linux/skbuff.h", 808); __builtin_expect(!!(__ret_warn_on), 0); });
skb->_skb_refdst = (unsigned long)dst | 1UL;
}
static inline __attribute__((always_inline)) int skb_dst_is_noref(const struct sk_buff *skb)
{
return (skb->_skb_refdst & 1UL) && skb_dst(skb);
}
static inline __attribute__((always_inline)) struct rtable *skb_rtable(const struct sk_buff *skb)
{
return (struct rtable *)skb_dst(skb);
}
extern void kfree_skb(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
gfp_t priority, int fclone, int node);
static inline __attribute__((always_inline)) struct sk_buff *alloc_skb(unsigned int size,
gfp_t priority)
{
return __alloc_skb(size, priority, 0, -1);
}
static inline __attribute__((always_inline)) struct sk_buff *alloc_skb_fclone(unsigned int size,
gfp_t priority)
{
return __alloc_skb(size, priority, 1, -1);
}
extern int skb_recycle_check(struct sk_buff *skb, int skb_size);
extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
gfp_t gfp_mask);
extern int pskb_expand_head(struct sk_buff *skb,
int nhead, int ntail,
gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
int newheadroom, int newtailroom,
gfp_t priority);
extern int skb_to_sgvec(struct sk_buff *skb,
struct scatterlist *sg, int offset,
int len);
extern int skb_cow_data(struct sk_buff *skb, int tailbits,
struct sk_buff **trailer);
extern int skb_pad(struct sk_buff *skb, int pad);
extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
int getfrag(void *from, char *to, int offset,
int len,int odd, struct sk_buff *skb),
void *from, int length);
struct skb_seq_state {
__u32 lower_offset;
__u32 upper_offset;
__u32 frag_idx;
__u32 stepped_offset;
struct sk_buff *root_skb;
struct sk_buff *cur_skb;
__u8 *frag_data;
};
extern void skb_prepare_seq_read(struct sk_buff *skb,
unsigned int from, unsigned int to,
struct skb_seq_state *st);
extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
struct skb_seq_state *st);
extern void skb_abort_seq_read(struct skb_seq_state *st);
extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
unsigned int to, struct ts_config *config,
struct ts_state *state);
static inline __attribute__((always_inline)) unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
return skb->end;
}
static inline __attribute__((always_inline)) struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
return &((struct skb_shared_info *)(skb_end_pointer(skb)))->hwtstamps;
}
static inline __attribute__((always_inline)) union skb_shared_tx *skb_tx(struct sk_buff *skb)
{
return &((struct skb_shared_info *)(skb_end_pointer(skb)))->tx_flags;
}
static inline __attribute__((always_inline)) int skb_queue_empty(const struct sk_buff_head *list)
{
return list->next == (struct sk_buff *)list;
}
# 943 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) int skb_queue_is_last(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
return (skb->next == (struct sk_buff *) list);
}
# 956 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) int skb_queue_is_first(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
return (skb->prev == (struct sk_buff *) list);
}
# 970 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
__BUG_ON((unsigned long)(skb_queue_is_last(list, skb)));
return skb->next;
}
# 988 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
const struct sk_buff *skb)
{
__BUG_ON((unsigned long)(skb_queue_is_first(list, skb)));
return skb->prev;
}
# 1005 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) struct sk_buff *skb_get(struct sk_buff *skb)
{
atomic_add(1, (&skb->users));
return skb;
}
# 1024 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) int skb_cloned(const struct sk_buff *skb)
{
return skb->cloned &&
((*(volatile int *)&(&((struct skb_shared_info *)(skb_end_pointer(skb)))->dataref)->counter) & ((1 << 16) - 1)) != 1;
}
# 1037 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) int skb_header_cloned(const struct sk_buff *skb)
{
int dataref;
if (!skb->cloned)
return 0;
dataref = (*(volatile int *)&(&((struct skb_shared_info *)(skb_end_pointer(skb)))->dataref)->counter);
dataref = (dataref & ((1 << 16) - 1)) - (dataref >> 16);
return dataref != 1;
}
# 1057 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) void skb_header_release(struct sk_buff *skb)
{
__BUG_ON((unsigned long)(skb->nohdr));
skb->nohdr = 1;
atomic_add(1 << 16, &((struct skb_shared_info *)(skb_end_pointer(skb)))->dataref);
}
# 1071 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) int skb_shared(const struct sk_buff *skb)
{
return (*(volatile int *)&(&skb->users)->counter) != 1;
}
# 1089 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) struct sk_buff *skb_share_check(struct sk_buff *skb,
gfp_t pri)
{
do { if (pri & (( gfp_t)0x10u)) do { do { } while (0); } while (0); } while (0);
if (skb_shared(skb)) {
struct sk_buff *nskb = skb_clone(skb, pri);
kfree_skb(skb);
skb = nskb;
}
return skb;
}
# 1121 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) struct sk_buff *skb_unshare(struct sk_buff *skb,
gfp_t pri)
{
do { if (pri & (( gfp_t)0x10u)) do { do { } while (0); } while (0); } while (0);
if (skb_cloned(skb)) {
struct sk_buff *nskb = skb_copy(skb, pri);
kfree_skb(skb);
skb = nskb;
}
return skb;
}
# 1146 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
struct sk_buff *list = ((struct sk_buff *)list_)->next;
if (list == (struct sk_buff *)list_)
list = ((void *)0);
return list;
}
# 1167 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
struct sk_buff *list = ((struct sk_buff *)list_)->prev;
if (list == (struct sk_buff *)list_)
list = ((void *)0);
return list;
}
static inline __attribute__((always_inline)) __u32 skb_queue_len(const struct sk_buff_head *list_)
{
return list_->qlen;
}
# 1196 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) void __skb_queue_head_init(struct sk_buff_head *list)
{
list->prev = list->next = (struct sk_buff *)list;
list->qlen = 0;
}
# 1210 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) void skb_queue_head_init(struct sk_buff_head *list)
{
do { spinlock_check(&list->lock); do { *(&(&list->lock)->rlock) = (raw_spinlock_t) { .raw_lock = { .lock = 0 }, }; } while (0); } while (0);
__skb_queue_head_init(list);
}
static inline __attribute__((always_inline)) void skb_queue_head_init_class(struct sk_buff_head *list,
struct lock_class_key *class)
{
skb_queue_head_init(list);
do { (void)(class); } while (0);
}
extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline __attribute__((always_inline)) void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
{
newsk->next = next;
newsk->prev = prev;
next->prev = prev->next = newsk;
list->qlen++;
}
static inline __attribute__((always_inline)) void __skb_queue_splice(const struct sk_buff_head *list,
struct sk_buff *prev,
struct sk_buff *next)
{
struct sk_buff *first = list->next;
struct sk_buff *last = list->prev;
first->prev = prev;
prev->next = first;
last->next = next;
next->prev = last;
}
static inline __attribute__((always_inline)) void skb_queue_splice(const struct sk_buff_head *list,
struct sk_buff_head *head)
{
if (!skb_queue_empty(list)) {
__skb_queue_splice(list, (struct sk_buff *) head, head->next);
head->qlen += list->qlen;
}
}
# 1275 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) void skb_queue_splice_init(struct sk_buff_head *list,
struct sk_buff_head *head)
{
if (!skb_queue_empty(list)) {
__skb_queue_splice(list, (struct sk_buff *) head, head->next);
head->qlen += list->qlen;
__skb_queue_head_init(list);
}
}
static inline __attribute__((always_inline)) void skb_queue_splice_tail(const struct sk_buff_head *list,
struct sk_buff_head *head)
{
if (!skb_queue_empty(list)) {
__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
head->qlen += list->qlen;
}
}
# 1307 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) void skb_queue_splice_tail_init(struct sk_buff_head *list,
struct sk_buff_head *head)
{
if (!skb_queue_empty(list)) {
__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
head->qlen += list->qlen;
__skb_queue_head_init(list);
}
}
# 1328 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) void __skb_queue_after(struct sk_buff_head *list,
struct sk_buff *prev,
struct sk_buff *newsk)
{
__skb_insert(newsk, prev, prev->next, list);
}
extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
struct sk_buff_head *list);
static inline __attribute__((always_inline)) void __skb_queue_before(struct sk_buff_head *list,
struct sk_buff *next,
struct sk_buff *newsk)
{
__skb_insert(newsk, next->prev, next, list);
}
# 1355 "include/linux/skbuff.h"
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline __attribute__((always_inline)) void __skb_queue_head(struct sk_buff_head *list,
struct sk_buff *newsk)
{
__skb_queue_after(list, (struct sk_buff *)list, newsk);
}
# 1372 "include/linux/skbuff.h"
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline __attribute__((always_inline)) void __skb_queue_tail(struct sk_buff_head *list,
struct sk_buff *newsk)
{
__skb_queue_before(list, (struct sk_buff *)list, newsk);
}
extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline __attribute__((always_inline)) void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
struct sk_buff *next, *prev;
list->qlen--;
next = skb->next;
prev = skb->prev;
skb->next = skb->prev = ((void *)0);
next->prev = prev;
prev->next = next;
}
# 1404 "include/linux/skbuff.h"
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline __attribute__((always_inline)) struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
struct sk_buff *skb = skb_peek(list);
if (skb)
__skb_unlink(skb, list);
return skb;
}
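/*
 * sk_buff_head is a doubly linked list whose head structure acts as the
 * sentinel node, plus a length and a spinlock.  The skb_queue_*() entry
 * points declared here take list->lock themselves; the __skb_*() variants
 * above and below do not, and rely on the caller to hold the lock or
 * otherwise serialise access.
 */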
# 1421 "include/linux/skbuff.h"
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline __attribute__((always_inline)) struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
struct sk_buff *skb = skb_peek_tail(list);
if (skb)
__skb_unlink(skb, list);
return skb;
}
static inline __attribute__((always_inline)) int skb_is_nonlinear(const struct sk_buff *skb)
{
return skb->data_len;
}
static inline __attribute__((always_inline)) unsigned int skb_headlen(const struct sk_buff *skb)
{
return skb->len - skb->data_len;
}
static inline __attribute__((always_inline)) int skb_pagelen(const struct sk_buff *skb)
{
int i, len = 0;
for (i = (int)((struct skb_shared_info *)(skb_end_pointer(skb)))->nr_frags - 1; i >= 0; i--)
len += ((struct skb_shared_info *)(skb_end_pointer(skb)))->frags[i].size;
return len + skb_headlen(skb);
}
static inline __attribute__((always_inline)) void skb_fill_page_desc(struct sk_buff *skb, int i,
struct page *page, int off, int size)
{
skb_frag_t *frag = &((struct skb_shared_info *)(skb_end_pointer(skb)))->frags[i];
frag->page = page;
frag->page_offset = off;
frag->size = size;
((struct skb_shared_info *)(skb_end_pointer(skb)))->nr_frags = i + 1;
}
extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
int off, int size);
# 1485 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
return skb->tail;
}
static inline __attribute__((always_inline)) void skb_reset_tail_pointer(struct sk_buff *skb)
{
skb->tail = skb->data;
}
static inline __attribute__((always_inline)) void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
skb->tail = skb->data + offset;
}
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline __attribute__((always_inline)) unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
unsigned char *tmp = skb_tail_pointer(skb);
__BUG_ON((unsigned long)(skb_is_nonlinear(skb)));
skb->tail += len;
skb->len += len;
return tmp;
}
extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline __attribute__((always_inline)) unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
skb->data -= len;
skb->len += len;
return skb->data;
}
extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline __attribute__((always_inline)) unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
skb->len -= len;
__BUG_ON((unsigned long)(skb->len < skb->data_len));
return skb->data += len;
}
static inline __attribute__((always_inline)) unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
return __builtin_expect(!!(len > skb->len), 0) ? ((void *)0) : __skb_pull(skb, len);
}
extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
static inline __attribute__((always_inline)) unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
if (len > skb_headlen(skb) &&
!__pskb_pull_tail(skb, len - skb_headlen(skb)))
return ((void *)0);
skb->len -= len;
return skb->data += len;
}
static inline __attribute__((always_inline)) unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
return __builtin_expect(!!(len > skb->len), 0) ? ((void *)0) : __pskb_pull(skb, len);
}
static inline __attribute__((always_inline)) int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
if (__builtin_expect(!!(len <= skb_headlen(skb)), 1))
return 1;
if (__builtin_expect(!!(len > skb->len), 0))
return 0;
return __pskb_pull_tail(skb, len - skb_headlen(skb)) != ((void *)0);
}
static inline __attribute__((always_inline)) unsigned int skb_headroom(const struct sk_buff *skb)
{
return skb->data - skb->head;
}
static inline __attribute__((always_inline)) int skb_tailroom(const struct sk_buff *skb)
{
return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}
# 1591 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) void skb_reserve(struct sk_buff *skb, int len)
{
skb->data += len;
skb->tail += len;
}
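/*
 * Buffer geometry sketch (illustrative only; "frame" and "len" are
 * placeholders): a fresh skb has all of its space as tailroom.
 * skb_reserve() moves data/tail forward to create headroom, skb_put()
 * appends payload at the tail, and skb_push()/skb_pull() grow or shrink the
 * packet at the head (e.g. when adding or stripping an Ethernet header).
 *
 *     skb = alloc_skb(len + 2, GFP_ATOMIC);
 *     skb_reserve(skb, 2);               // align the IP header
 *     memcpy(skb_put(skb, len), frame, len);
 */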
# 1654 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) unsigned char *skb_transport_header(const struct sk_buff *skb)
{
return skb->transport_header;
}
static inline __attribute__((always_inline)) void skb_reset_transport_header(struct sk_buff *skb)
{
skb->transport_header = skb->data;
}
static inline __attribute__((always_inline)) void skb_set_transport_header(struct sk_buff *skb,
const int offset)
{
skb->transport_header = skb->data + offset;
}
static inline __attribute__((always_inline)) unsigned char *skb_network_header(const struct sk_buff *skb)
{
return skb->network_header;
}
static inline __attribute__((always_inline)) void skb_reset_network_header(struct sk_buff *skb)
{
skb->network_header = skb->data;
}
static inline __attribute__((always_inline)) void skb_set_network_header(struct sk_buff *skb, const int offset)
{
skb->network_header = skb->data + offset;
}
static inline __attribute__((always_inline)) unsigned char *skb_mac_header(const struct sk_buff *skb)
{
return skb->mac_header;
}
static inline __attribute__((always_inline)) int skb_mac_header_was_set(const struct sk_buff *skb)
{
return skb->mac_header != ((void *)0);
}
static inline __attribute__((always_inline)) void skb_reset_mac_header(struct sk_buff *skb)
{
skb->mac_header = skb->data;
}
static inline __attribute__((always_inline)) void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
skb->mac_header = skb->data + offset;
}
static inline __attribute__((always_inline)) int skb_transport_offset(const struct sk_buff *skb)
{
return skb_transport_header(skb) - skb->data;
}
static inline __attribute__((always_inline)) u32 skb_network_header_len(const struct sk_buff *skb)
{
return skb->transport_header - skb->network_header;
}
static inline __attribute__((always_inline)) int skb_network_offset(const struct sk_buff *skb)
{
return skb_network_header(skb) - skb->data;
}
static inline __attribute__((always_inline)) int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
return pskb_may_pull(skb, skb_network_offset(skb) + len);
}
# 1778 "include/linux/skbuff.h"
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
static inline __attribute__((always_inline)) void __skb_trim(struct sk_buff *skb, unsigned int len)
{
if (__builtin_expect(!!(skb->data_len), 0)) {
({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_slowpath_null("include/linux/skbuff.h", 1783); __builtin_expect(!!(__ret_warn_on), 0); });
return;
}
skb->len = len;
skb_set_tail_pointer(skb, len);
}
extern void skb_trim(struct sk_buff *skb, unsigned int len);
static inline __attribute__((always_inline)) int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
if (skb->data_len)
return ___pskb_trim(skb, len);
__skb_trim(skb, len);
return 0;
}
static inline __attribute__((always_inline)) int pskb_trim(struct sk_buff *skb, unsigned int len)
{
return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}
# 1814 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
int err = pskb_trim(skb, len);
__BUG_ON((unsigned long)(err));
}
# 1828 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) void skb_orphan(struct sk_buff *skb)
{
if (skb->destructor)
skb->destructor(skb);
skb->destructor = ((void *)0);
skb->sk = ((void *)0);
}
# 1844 "include/linux/skbuff.h"
extern void skb_queue_purge(struct sk_buff_head *list);
static inline __attribute__((always_inline)) void __skb_queue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(list)) != ((void *)0))
kfree_skb(skb);
}
# 1864 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) struct sk_buff *__dev_alloc_skb(unsigned int length,
gfp_t gfp_mask)
{
struct sk_buff *skb = alloc_skb(length + 32, gfp_mask);
if (__builtin_expect(!!(skb), 1))
skb_reserve(skb, 32);
return skb;
}
extern struct sk_buff *dev_alloc_skb(unsigned int length);
extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
unsigned int length, gfp_t gfp_mask);
# 1891 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) struct sk_buff *netdev_alloc_skb(struct net_device *dev,
unsigned int length)
{
return __netdev_alloc_skb(dev, length, ((( gfp_t)0x20u)));
}
static inline __attribute__((always_inline)) struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
unsigned int length)
{
struct sk_buff *skb = netdev_alloc_skb(dev, length + 2);
if (2 && skb)
skb_reserve(skb, 2);
return skb;
}
extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
# 1917 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) struct page *netdev_alloc_page(struct net_device *dev)
{
return __netdev_alloc_page(dev, ((( gfp_t)0x20u)));
}
static inline __attribute__((always_inline)) void netdev_free_page(struct net_device *dev, struct page *page)
{
__free_pages((page), 0);
}
# 1935 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) int skb_clone_writable(struct sk_buff *skb, unsigned int len)
{
return !skb_header_cloned(skb) &&
skb_headroom(skb) + len <= skb->hdr_len;
}
static inline __attribute__((always_inline)) int __skb_cow(struct sk_buff *skb, unsigned int headroom,
int cloned)
{
int delta = 0;
if (headroom < 32)
headroom = 32;
if (headroom > skb_headroom(skb))
delta = headroom - skb_headroom(skb);
if (delta || cloned)
return pskb_expand_head(skb, ((((delta)) + ((typeof((delta)))((32)) - 1)) & ~((typeof((delta)))((32)) - 1)), 0,
((( gfp_t)0x20u)));
return 0;
}
# 1969 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
return __skb_cow(skb, headroom, skb_cloned(skb));
}
# 1984 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
# 2000 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) int skb_padto(struct sk_buff *skb, unsigned int len)
{
unsigned int size = skb->len;
if (__builtin_expect(!!(size >= len), 1))
return 0;
return skb_pad(skb, len - size);
}
static inline __attribute__((always_inline)) int skb_add_data(struct sk_buff *skb,
char *from, int copy)
{
const int off = skb->len;
if (skb->ip_summed == 0) {
int err = 0;
__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
copy, 0, &err);
if (!err) {
skb->csum = csum_block_add(skb->csum, csum, off);
return 0;
}
} else if (!({ void *__cu_to; const void *__cu_from; long __cu_len; __cu_to = (skb_put(skb, copy)); __cu_from = (from); __cu_len = (copy); if (__builtin_expect(!!(({ unsigned long __addr = (unsigned long) ((__cu_from)); unsigned long __size = (__cu_len); unsigned long __mask = (__current_thread_info->addr_limit).seg; unsigned long __ok; (void)0; __ok = (signed long)(__mask & (__addr | (__addr + __size) | ((__builtin_constant_p(__size) && (signed long) (__size) > 0) ? 0 : (__size)))); __ok == 0; })), 1)) { might_fault(); __cu_len = ({ register void *__cu_to_r __asm__("$4"); register const void *__cu_from_r __asm__("$5"); register long __cu_len_r __asm__("$6"); __cu_to_r = (__cu_to); __cu_from_r = (__cu_from); __cu_len_r = (__cu_len); __asm__ __volatile__( ".set\tnoreorder\n\t" ".set\tnoat\n\t" "la" "\t$1, " "__copy_user" "\n\t" "jalr\t$1\n\t" ".set\tat\n\t" ".set\tnoat\n\t" "addu" "\t$1, %1, %2\n\t" ".set\tat\n\t" ".set\treorder" : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) : : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", "$0", "memory"); __cu_len_r; }); } __cu_len; }))
return 0;
__skb_trim(skb, off);
return -14;
}
static inline __attribute__((always_inline)) int skb_can_coalesce(struct sk_buff *skb, int i,
struct page *page, int off)
{
if (i) {
struct skb_frag_struct *frag = &((struct skb_shared_info *)(skb_end_pointer(skb)))->frags[i - 1];
return page == frag->page &&
off == frag->page_offset + frag->size;
}
return 0;
}
static inline __attribute__((always_inline)) int __skb_linearize(struct sk_buff *skb)
{
return __pskb_pull_tail(skb, skb->data_len) ? 0 : -12;
}
# 2052 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) int skb_linearize(struct sk_buff *skb)
{
return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}
# 2064 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) int skb_linearize_cow(struct sk_buff *skb)
{
return skb_is_nonlinear(skb) || skb_cloned(skb) ?
__skb_linearize(skb) : 0;
}
# 2081 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) void skb_postpull_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
{
if (skb->ip_summed == 2)
skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
# 2099 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
if (__builtin_expect(!!(len >= skb->len), 1))
return 0;
if (skb->ip_summed == 2)
skb->ip_summed = 0;
return __pskb_trim(skb, len);
}
# 2133 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) int skb_has_frags(const struct sk_buff *skb)
{
return ((struct skb_shared_info *)(skb_end_pointer(skb)))->frag_list != ((void *)0);
}
static inline __attribute__((always_inline)) void skb_frag_list_init(struct sk_buff *skb)
{
((struct skb_shared_info *)(skb_end_pointer(skb)))->frag_list = ((void *)0);
}
static inline __attribute__((always_inline)) void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
frag->next = ((struct skb_shared_info *)(skb_end_pointer(skb)))->frag_list;
((struct skb_shared_info *)(skb_end_pointer(skb)))->frag_list = frag;
}
extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
int *peeked, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
int noblock, int *err);
extern unsigned int datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
extern int skb_copy_datagram_iovec(const struct sk_buff *from,
int offset, struct iovec *to,
int size);
extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
int hlen,
struct iovec *iov);
extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
int offset,
const struct iovec *from,
int from_offset,
int len);
extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
int offset,
const struct iovec *to,
int to_offset,
int size);
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void skb_free_datagram_locked(struct sock *sk,
struct sk_buff *skb);
extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
unsigned int flags);
extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
int len, __wsum csum);
extern int skb_copy_bits(const struct sk_buff *skb, int offset,
void *to, int len);
extern int skb_store_bits(struct sk_buff *skb, int offset,
const void *from, int len);
extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
int offset, u8 *to, int len,
__wsum csum);
extern int skb_splice_bits(struct sk_buff *skb,
unsigned int offset,
struct pipe_inode_info *pipe,
unsigned int len,
unsigned int flags);
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb,
struct sk_buff *skb1, const u32 len);
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
int shiftlen);
extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
static inline __attribute__((always_inline)) void *skb_header_pointer(const struct sk_buff *skb, int offset,
int len, void *buffer)
{
int hlen = skb_headlen(skb);
if (hlen - offset >= len)
return skb->data + offset;
if (skb_copy_bits(skb, offset, buffer, len) < 0)
return ((void *)0);
return buffer;
}
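/*
 * Thin memcpy() wrappers over the linear data area: copy to or from
 * skb->data, optionally at a byte offset.  Callers must ensure the range
 * fits inside the linear head.
 */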
static inline __attribute__((always_inline)) void skb_copy_from_linear_data(const struct sk_buff *skb,
void *to,
const unsigned int len)
{
memcpy(to, skb->data, len);
}
static inline __attribute__((always_inline)) void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
const int offset, void *to,
const unsigned int len)
{
memcpy(to, skb->data + offset, len);
}
static inline __attribute__((always_inline)) void skb_copy_to_linear_data(struct sk_buff *skb,
const void *from,
const unsigned int len)
{
memcpy(skb->data, from, len);
}
static inline __attribute__((always_inline)) void skb_copy_to_linear_data_offset(struct sk_buff *skb,
const int offset,
const void *from,
const unsigned int len)
{
memcpy(skb->data + offset, from, len);
}
extern void skb_init(void);
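/*
 * Receive-timestamp helpers: skb_get_ktime()/skb_get_timestamp*() read
 * skb->tstamp in various formats, __net_timestamp() stamps the skb with
 * the current wall-clock time, net_timedelta() gives the age of a stamp
 * and net_invalid_timestamp() is the all-zero sentinel.
 */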
static inline __attribute__((always_inline)) ktime_t skb_get_ktime(const struct sk_buff *skb)
{
return skb->tstamp;
}
# 2260 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) void skb_get_timestamp(const struct sk_buff *skb,
struct timeval *stamp)
{
*stamp = ktime_to_timeval(skb->tstamp);
}
static inline __attribute__((always_inline)) void skb_get_timestampns(const struct sk_buff *skb,
struct timespec *stamp)
{
*stamp = ktime_to_timespec(skb->tstamp);
}
static inline __attribute__((always_inline)) void __net_timestamp(struct sk_buff *skb)
{
skb->tstamp = ktime_get_real();
}
static inline __attribute__((always_inline)) ktime_t net_timedelta(ktime_t t)
{
return ktime_sub(ktime_get_real(), t);
}
static inline __attribute__((always_inline)) ktime_t net_invalid_timestamp(void)
{
return ktime_set(0, 0);
}
extern void skb_timestamping_init(void);
# 2296 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}
static inline __attribute__((always_inline)) int skb_defer_rx_timestamp(struct sk_buff *skb)
{
return false;
}
# 2314 "include/linux/skbuff.h"
void skb_complete_tx_timestamp(struct sk_buff *skb,
struct skb_shared_hwtstamps *hwtstamps);
# 2328 "include/linux/skbuff.h"
extern void skb_tstamp_tx(struct sk_buff *orig_skb,
struct skb_shared_hwtstamps *hwtstamps);
static inline __attribute__((always_inline)) void sw_tx_timestamp(struct sk_buff *skb)
{
union skb_shared_tx *shtx = skb_tx(skb);
if (shtx->software && !shtx->in_progress)
skb_tstamp_tx(skb, ((void *)0));
}
# 2347 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) void skb_tx_timestamp(struct sk_buff *skb)
{
skb_clone_tx_timestamp(skb);
sw_tx_timestamp(skb);
}
extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
static inline __attribute__((always_inline)) int skb_csum_unnecessary(const struct sk_buff *skb)
{
return skb->ip_summed & 1;
}
# 2377 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) __sum16 skb_checksum_complete(struct sk_buff *skb)
{
return skb_csum_unnecessary(skb) ?
0 : __skb_checksum_complete(skb);
}
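/*
 * Conntrack reference helpers: nf_conntrack_put()/get() adjust the
 * nf_conntrack use count, destroying the entry on the final put, while
 * the *_reasm variants take or drop a reference on the reassembled skb.
 */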
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline __attribute__((always_inline)) void nf_conntrack_put(struct nf_conntrack *nfct)
{
if (nfct && (atomic_sub_return(1, (&nfct->use)) == 0))
nf_conntrack_destroy(nfct);
}
static inline __attribute__((always_inline)) void nf_conntrack_get(struct nf_conntrack *nfct)
{
if (nfct)
atomic_add(1, (&nfct->use));
}
static inline __attribute__((always_inline)) void nf_conntrack_get_reasm(struct sk_buff *skb)
{
if (skb)
atomic_add(1, (&skb->users));
}
static inline __attribute__((always_inline)) void nf_conntrack_put_reasm(struct sk_buff *skb)
{
if (skb)
kfree_skb(skb);
}
# 2418 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) void nf_reset(struct sk_buff *skb)
{
nf_conntrack_put(skb->nfct);
skb->nfct = ((void *)0);
nf_conntrack_put_reasm(skb->nfct_reasm);
skb->nfct_reasm = ((void *)0);
}
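/*
 * __nf_copy() transfers netfilter state from @src to @dst, taking fresh
 * conntrack references, and also copies the non-mainline sk_buff fields
 * carried in this tree: QoS/portbind marks, the dual VLAN tags, the PON
 * VLAN TPID/TCI arrays and flags, the PPE-related fields and the saved
 * original/LAN device metadata.
 */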
static inline __attribute__((always_inline)) void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
dst->nfct = src->nfct;
nf_conntrack_get(src->nfct);
dst->nfctinfo = src->nfctinfo;
dst->nfct_reasm = src->nfct_reasm;
nf_conntrack_get_reasm(src->nfct_reasm);
# 2452 "include/linux/skbuff.h"
dst->qosEnque = src->qosEnque;
dst->portbind_mark = src->portbind_mark;
dst->orig_dev = src->orig_dev;
# 2469 "include/linux/skbuff.h"
dst->vlan_tags[0] = src->vlan_tags[0];
dst->vlan_tags[1] = src->vlan_tags[1];
dst->vlan_tag_flag = src->vlan_tag_flag;
dst->pon_vlan_tpid[0] = src->pon_vlan_tpid[0];
dst->pon_vlan_tpid[1] = src->pon_vlan_tpid[1];
dst->pon_vlan_tpid[2] = src->pon_vlan_tpid[2];
dst->pon_vlan_tpid[3] = src->pon_vlan_tpid[3];
dst->pon_vlan_tci[0] = src->pon_vlan_tci[0];
dst->pon_vlan_tci[1] = src->pon_vlan_tci[1];
dst->pon_vlan_tci[2] = src->pon_vlan_tci[2];
dst->pon_vlan_tci[3] = src->pon_vlan_tci[3];
dst->pon_tag_num = src->pon_tag_num;
dst->pon_vlan_flag = src->pon_vlan_flag;
dst->original_dev = src->original_dev;
dst->pon_mac_filter_flag = src->pon_mac_filter_flag;
dst->ppe_info_flag = src->ppe_info_flag;
dst->ppe_magic = src->ppe_magic;
dst->ppe_ai = src->ppe_ai;
dst->ppe_foe_entry = src->ppe_foe_entry;
dst->xpon_raw_info = src->xpon_raw_info;
dst->pon_mark = src->pon_mark;
dst->v_if = src->v_if;
# 2514 "include/linux/skbuff.h"
dst->lan_vlan_tci = src->lan_vlan_tci;
dst->lan_vlan_tci_valid = src->lan_vlan_tci_valid;
}
static inline __attribute__((always_inline)) void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
nf_conntrack_put(dst->nfct);
nf_conntrack_put_reasm(dst->nfct_reasm);
__nf_copy(dst, src);
}
# 2542 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }
static inline __attribute__((always_inline)) void skb_init_secmark(struct sk_buff *skb)
{ }
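/*
 * queue_mapping helpers.  skb_record_rx_queue() stores the RX queue
 * number biased by one so that zero can mean "not recorded";
 * skb_get_rx_queue() removes the bias and skb_rx_queue_recorded() tests
 * the zero sentinel.
 */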
static inline __attribute__((always_inline)) void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
skb->queue_mapping = queue_mapping;
}
static inline __attribute__((always_inline)) u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
return skb->queue_mapping;
}
static inline __attribute__((always_inline)) void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
to->queue_mapping = from->queue_mapping;
}
static inline __attribute__((always_inline)) void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
skb->queue_mapping = rx_queue + 1;
}
static inline __attribute__((always_inline)) u16 skb_get_rx_queue(const struct sk_buff *skb)
{
return skb->queue_mapping - 1;
}
static inline __attribute__((always_inline)) int skb_rx_queue_recorded(const struct sk_buff *skb)
{
return (skb->queue_mapping != 0);
}
extern u16 skb_tx_hash(const struct net_device *dev,
const struct sk_buff *skb);
static inline __attribute__((always_inline)) struct sec_path *skb_sec_path(struct sk_buff *skb)
{
return skb->sp;
}
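/*
 * GSO/LRO helpers: skb_is_gso() tests gso_size in the shared info,
 * skb_is_gso_v6() additionally checks the TCPv6 type bit, and
 * skb_warn_if_lro() flags non-linear skbs that carry a gso_size but no
 * gso_type (i.e. LRO-aggregated frames) before they are forwarded.
 */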
static inline __attribute__((always_inline)) int skb_is_gso(const struct sk_buff *skb)
{
return ((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_size;
}
static inline __attribute__((always_inline)) int skb_is_gso_v6(const struct sk_buff *skb)
{
return ((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_type & SKB_GSO_TCPV6;
}
extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
static inline __attribute__((always_inline)) int skb_warn_if_lro(const struct sk_buff *skb)
{
struct skb_shared_info *shinfo = ((struct skb_shared_info *)(skb_end_pointer(skb)));
if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
__builtin_expect(!!(shinfo->gso_type == 0), 0)) {
__skb_warn_lro_forwarding(skb);
return true;
}
return false;
}
static inline __attribute__((always_inline)) void skb_forward_csum(struct sk_buff *skb)
{
if (skb->ip_summed == 2)
skb->ip_summed = 0;
}
int skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
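/*
 * The skbmgr_* allocators below are not part of mainline Linux and
 * appear to come from the vendor SDK: they hand out recycled 2 KB or
 * 4 KB skbs, and the *_dev_alloc_* wrappers reserve 32 bytes of headroom
 * in the style of dev_alloc_skb().
 */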
struct sk_buff *skbmgr_alloc_skb2k(void);
int skbmgr_recycling_callback(struct sk_buff *skb);
static inline __attribute__((always_inline)) struct sk_buff *skbmgr_dev_alloc_skb2k(void)
{
struct sk_buff *skb = skbmgr_alloc_skb2k();
if (__builtin_expect(!!(skb), 1))
skb_reserve(skb, 32);
return skb;
}
struct sk_buff *skbmgr_alloc_skb4k(void);
int skbmgr_4k_recycling_callback(struct sk_buff *skb);
static inline __attribute__((always_inline)) struct sk_buff *skbmgr_dev_alloc_skb4k(void)
{
struct sk_buff *skb = skbmgr_alloc_skb4k();
if (__builtin_expect(!!(skb), 1))
skb_reserve(skb, 32);
return skb;
}
# 2664 "include/linux/skbuff.h"
static inline __attribute__((always_inline)) struct sk_buff *skbmgr_dev_alloc_skb2k_tc_critical(void)
{
struct sk_buff *skb = alloc_skb(((2048) - (((sizeof(struct skb_shared_info)) + ((1 << 5) - 1)) & ~((1 << 5) - 1))), ((( gfp_t)0x20u))|(( gfp_t)0x200u) | (( gfp_t)0x1000000u));
if (__builtin_expect(!!(skb), 1))
skb_reserve(skb, 32);
return skb;
}
# 130 "include/linux/if_ether.h" 2
static inline __attribute__((always_inline)) struct ethhdr *eth_hdr(const struct sk_buff *skb)
{
return (struct ethhdr *)skb_mac_header(skb);
}
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
extern struct ctl_table ether_table[];
extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
# 30 "include/linux/netdevice.h" 2
# 1 "include/linux/if_packet.h" 1
struct sockaddr_pkt {
unsigned short spkt_family;
unsigned char spkt_device[14];
__be16 spkt_protocol;
};
struct sockaddr_ll {
unsigned short sll_family;
__be16 sll_protocol;
int sll_ifindex;
unsigned short sll_hatype;
unsigned char sll_pkttype;
unsigned char sll_halen;
unsigned char sll_addr[8];
};
# 56 "include/linux/if_packet.h"
struct tpacket_stats {
unsigned int tp_packets;
unsigned int tp_drops;
};
struct tpacket_auxdata {
__u32 tp_status;
__u32 tp_len;
__u32 tp_snaplen;
__u16 tp_mac;
__u16 tp_net;
__u16 tp_vlan_tci;
};
# 83 "include/linux/if_packet.h"
struct tpacket_hdr {
unsigned long tp_status;
unsigned int tp_len;
unsigned int tp_snaplen;
unsigned short tp_mac;
unsigned short tp_net;
unsigned int tp_sec;
unsigned int tp_usec;
};
struct tpacket2_hdr {
__u32 tp_status;
__u32 tp_len;
__u32 tp_snaplen;
__u16 tp_mac;
__u16 tp_net;
__u32 tp_sec;
__u32 tp_nsec;
__u16 tp_vlan_tci;
};
enum tpacket_versions {
TPACKET_V1,
TPACKET_V2,
};
# 128 "include/linux/if_packet.h"
struct tpacket_req {
unsigned int tp_block_size;
unsigned int tp_block_nr;
unsigned int tp_frame_size;
unsigned int tp_frame_nr;
};
struct packet_mreq {
int mr_ifindex;
unsigned short mr_type;
unsigned short mr_alen;
unsigned char mr_address[8];
};
# 31 "include/linux/netdevice.h" 2
# 1 "include/linux/if_link.h" 1
# 1 "include/linux/netlink.h" 1
# 30 "include/linux/netlink.h"
struct sockaddr_nl {
sa_family_t nl_family;
unsigned short nl_pad;
__u32 nl_pid;
__u32 nl_groups;
};
struct nlmsghdr {
__u32 nlmsg_len;
__u16 nlmsg_type;
__u16 nlmsg_flags;
__u32 nlmsg_seq;
__u32 nlmsg_pid;
};
# 93 "include/linux/netlink.h"
struct nlmsgerr {
int error;
struct nlmsghdr msg;
};
struct nl_pktinfo {
__u32 group;
};
enum {
NETLINK_UNCONNECTED = 0,
NETLINK_CONNECTED,
};
# 124 "include/linux/netlink.h"
struct nlattr {
__u16 nla_len;
__u16 nla_type;
};
# 149 "include/linux/netlink.h"
# 1 "include/linux/capability.h" 1
# 18 "include/linux/capability.h"
struct task_struct;
# 40 "include/linux/capability.h"
typedef struct __user_cap_header_struct {
__u32 version;
int pid;
} *cap_user_header_t;
typedef struct __user_cap_data_struct {
__u32 effective;
__u32 permitted;
__u32 inheritable;
} *cap_user_data_t;
# 69 "include/linux/capability.h"
struct vfs_cap_data {
__le32 magic_etc;
struct {
__le32 permitted;
__le32 inheritable;
} data[2];
};
# 92 "include/linux/capability.h"
extern int file_caps_enabled;
typedef struct kernel_cap_struct {
__u32 cap[2];
} kernel_cap_t;
struct cpu_vfs_cap_data {
__u32 magic_etc;
kernel_cap_t permitted;
kernel_cap_t inheritable;
};
# 439 "include/linux/capability.h"
static inline __attribute__((always_inline)) kernel_cap_t cap_combine(const kernel_cap_t a,
const kernel_cap_t b)
{
kernel_cap_t dest;
do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] | b.cap[__capi]; } } while (0);
return dest;
}
static inline __attribute__((always_inline)) kernel_cap_t cap_intersect(const kernel_cap_t a,
const kernel_cap_t b)
{
kernel_cap_t dest;
do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] & b.cap[__capi]; } } while (0);
return dest;
}
static inline __attribute__((always_inline)) kernel_cap_t cap_drop(const kernel_cap_t a,
const kernel_cap_t drop)
{
kernel_cap_t dest;
do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] &~ drop.cap[__capi]; } } while (0);
return dest;
}
static inline __attribute__((always_inline)) kernel_cap_t cap_invert(const kernel_cap_t c)
{
kernel_cap_t dest;
do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = ~ c.cap[__capi]; } } while (0);
return dest;
}
static inline __attribute__((always_inline)) int cap_isclear(const kernel_cap_t a)
{
unsigned __capi;
for (__capi = 0; __capi < 2; ++__capi) {
if (a.cap[__capi] != 0)
return 0;
}
return 1;
}
# 487 "include/linux/capability.h"
static inline __attribute__((always_inline)) int cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
{
kernel_cap_t dest;
dest = cap_drop(a, set);
return cap_isclear(dest);
}
static inline __attribute__((always_inline)) int cap_is_fs_cap(int cap)
{
const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } });
return !!((1 << ((cap) & 31)) & __cap_fs_set.cap[((cap) >> 5)]);
}
static inline __attribute__((always_inline)) kernel_cap_t cap_drop_fs_set(const kernel_cap_t a)
{
const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } });
return cap_drop(a, __cap_fs_set);
}
static inline __attribute__((always_inline)) kernel_cap_t cap_raise_fs_set(const kernel_cap_t a,
const kernel_cap_t permitted)
{
const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } });
return cap_combine(a,
cap_intersect(permitted, __cap_fs_set));
}
static inline __attribute__((always_inline)) kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
{
const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((24) & 31)), ((1 << ((32) & 31))) } });
return cap_drop(a, __cap_fs_set);
}
static inline __attribute__((always_inline)) kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
const kernel_cap_t permitted)
{
const kernel_cap_t __cap_nfsd_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((24) & 31)), ((1 << ((32) & 31))) } });
return cap_combine(a,
cap_intersect(permitted, __cap_nfsd_set));
}
extern const kernel_cap_t __cap_empty_set;
extern const kernel_cap_t __cap_full_set;
extern const kernel_cap_t __cap_init_eff_set;
# 560 "include/linux/capability.h"
extern int capable(int cap);
struct dentry;
extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
# 150 "include/linux/netlink.h" 2
struct net;
static inline __attribute__((always_inline)) struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
{
return (struct nlmsghdr *)skb->data;
}
struct netlink_skb_parms {
struct ucred creds;
__u32 pid;
__u32 dst_group;
kernel_cap_t eff_cap;
__u32 loginuid;
__u32 sessionid;
__u32 sid;
};
extern void netlink_table_grab(void);
extern void netlink_table_ungrab(void);
extern struct sock *netlink_kernel_create(struct net *net,
int unit,unsigned int groups,
void (*input)(struct sk_buff *skb),
struct mutex *cb_mutex,
struct module *module);
extern void netlink_kernel_release(struct sock *sk);
extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group);
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid,
__u32 group, gfp_t allocation);
extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
__u32 pid, __u32 group, gfp_t allocation,
int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
void *filter_data);
extern int netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code);
extern int netlink_register_notifier(struct notifier_block *nb);
extern int netlink_unregister_notifier(struct notifier_block *nb);
struct sock *netlink_getsockbyfilp(struct file *filp);
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
long *timeo, struct sock *ssk);
void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
int netlink_sendskb(struct sock *sk, struct sk_buff *skb);
# 221 "include/linux/netlink.h"
struct netlink_callback {
struct sk_buff *skb;
const struct nlmsghdr *nlh;
int (*dump)(struct sk_buff * skb,
struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
int family;
long args[6];
};
struct netlink_notify {
struct net *net;
int pid;
int protocol;
};
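/*
 * __nlmsg_put() appends an aligned netlink message with @len payload
 * bytes to @skb, fills in the header fields and zeroes any alignment
 * padding after the payload, returning the new nlmsghdr.
 */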
static __inline__ __attribute__((always_inline)) struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
{
struct nlmsghdr *nlh;
int size = ((len)+( ((((int) ( ((sizeof(struct nlmsghdr))+4 -1) & ~(4 -1) )))+4 -1) & ~(4 -1) ));
nlh = (struct nlmsghdr*)skb_put(skb, ( ((size)+4 -1) & ~(4 -1) ));
nlh->nlmsg_type = type;
nlh->nlmsg_len = size;
nlh->nlmsg_flags = flags;
nlh->nlmsg_pid = pid;
nlh->nlmsg_seq = seq;
if (!__builtin_constant_p(size) || ( ((size)+4 -1) & ~(4 -1) ) - size != 0)
memset(((void*)(((char*)nlh) + ((0)+( ((((int) ( ((sizeof(struct nlmsghdr))+4 -1) & ~(4 -1) )))+4 -1) & ~(4 -1) )))) + len, 0, ( ((size)+4 -1) & ~(4 -1) ) - size);
return nlh;
}
# 262 "include/linux/netlink.h"
extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
int (*dump)(struct sk_buff *skb, struct netlink_callback*),
int (*done)(struct netlink_callback*));
extern void netlink_set_nonroot(int protocol, unsigned flag);
# 6 "include/linux/if_link.h" 2
struct rtnl_link_stats {
__u32 rx_packets;
__u32 tx_packets;
__u32 rx_bytes;
__u32 tx_bytes;
__u32 rx_errors;
__u32 tx_errors;
__u32 rx_dropped;
__u32 tx_dropped;
__u32 multicast;
__u32 collisions;
__u32 rx_length_errors;
__u32 rx_over_errors;
__u32 rx_crc_errors;
__u32 rx_frame_errors;
__u32 rx_fifo_errors;
__u32 rx_missed_errors;
__u32 tx_aborted_errors;
__u32 tx_carrier_errors;
__u32 tx_fifo_errors;
__u32 tx_heartbeat_errors;
__u32 tx_window_errors;
__u32 rx_compressed;
__u32 tx_compressed;
};
struct rtnl_link_stats64 {
__u64 rx_packets;
__u64 tx_packets;
__u64 rx_bytes;
__u64 tx_bytes;
__u64 rx_errors;
__u64 tx_errors;
__u64 rx_dropped;
__u64 tx_dropped;
__u64 multicast;
__u64 collisions;
__u64 rx_length_errors;
__u64 rx_over_errors;
__u64 rx_crc_errors;
__u64 rx_frame_errors;
__u64 rx_fifo_errors;
__u64 rx_missed_errors;
__u64 tx_aborted_errors;
__u64 tx_carrier_errors;
__u64 tx_fifo_errors;
__u64 tx_heartbeat_errors;
__u64 tx_window_errors;
__u64 rx_compressed;
__u64 tx_compressed;
};
struct rtnl_link_ifmap {
__u64 mem_start;
__u64 mem_end;
__u64 base_addr;
__u16 irq;
__u8 dma;
__u8 port;
};
enum {
IFLA_UNSPEC,
IFLA_ADDRESS,
IFLA_BROADCAST,
IFLA_IFNAME,
IFLA_MTU,
IFLA_LINK,
IFLA_QDISC,
IFLA_STATS,
IFLA_COST,
IFLA_PRIORITY,
IFLA_MASTER,
IFLA_WIRELESS,
IFLA_PROTINFO,
IFLA_TXQLEN,
IFLA_MAP,
IFLA_WEIGHT,
IFLA_OPERSTATE,
IFLA_LINKMODE,
IFLA_LINKINFO,
IFLA_NET_NS_PID,
IFLA_IFALIAS,
IFLA_NUM_VF,
IFLA_VFINFO_LIST,
IFLA_STATS64,
IFLA_VF_PORTS,
IFLA_PORT_SELF,
__IFLA_MAX
};
# 161 "include/linux/if_link.h"
enum {
IFLA_INET6_UNSPEC,
IFLA_INET6_FLAGS,
IFLA_INET6_CONF,
IFLA_INET6_STATS,
IFLA_INET6_MCAST,
IFLA_INET6_CACHEINFO,
IFLA_INET6_ICMP6STATS,
__IFLA_INET6_MAX
};
struct ifla_cacheinfo {
__u32 max_reasm_len;
__u32 tstamp;
__u32 reachable_time;
__u32 retrans_time;
};
enum {
IFLA_INFO_UNSPEC,
IFLA_INFO_KIND,
IFLA_INFO_DATA,
IFLA_INFO_XSTATS,
__IFLA_INFO_MAX,
};
enum {
IFLA_VLAN_UNSPEC,
IFLA_VLAN_ID,
IFLA_VLAN_FLAGS,
IFLA_VLAN_EGRESS_QOS,
IFLA_VLAN_INGRESS_QOS,
__IFLA_VLAN_MAX,
};
struct ifla_vlan_flags {
__u32 flags;
__u32 mask;
};
enum {
IFLA_VLAN_QOS_UNSPEC,
IFLA_VLAN_QOS_MAPPING,
__IFLA_VLAN_QOS_MAX
};
struct ifla_vlan_qos_mapping {
__u32 from;
__u32 to;
};
enum {
IFLA_MACVLAN_UNSPEC,
IFLA_MACVLAN_MODE,
__IFLA_MACVLAN_MAX,
};
enum macvlan_mode {
MACVLAN_MODE_PRIVATE = 1,
MACVLAN_MODE_VEPA = 2,
MACVLAN_MODE_BRIDGE = 4,
};
enum {
IFLA_VF_INFO_UNSPEC,
IFLA_VF_INFO,
__IFLA_VF_INFO_MAX,
};
enum {
IFLA_VF_UNSPEC,
IFLA_VF_MAC,
IFLA_VF_VLAN,
IFLA_VF_TX_RATE,
__IFLA_VF_MAX,
};
struct ifla_vf_mac {
__u32 vf;
__u8 mac[32];
};
struct ifla_vf_vlan {
__u32 vf;
__u32 vlan;
__u32 qos;
};
struct ifla_vf_tx_rate {
__u32 vf;
__u32 rate;
};
struct ifla_vf_info {
__u32 vf;
__u8 mac[32];
__u32 vlan;
__u32 qos;
__u32 tx_rate;
};
# 296 "include/linux/if_link.h"
enum {
IFLA_VF_PORT_UNSPEC,
IFLA_VF_PORT,
__IFLA_VF_PORT_MAX,
};
enum {
IFLA_PORT_UNSPEC,
IFLA_PORT_VF,
IFLA_PORT_PROFILE,
IFLA_PORT_VSI_TYPE,
IFLA_PORT_INSTANCE_UUID,
IFLA_PORT_HOST_UUID,
IFLA_PORT_REQUEST,
IFLA_PORT_RESPONSE,
__IFLA_PORT_MAX,
};
enum {
PORT_REQUEST_PREASSOCIATE = 0,
PORT_REQUEST_PREASSOCIATE_RR,
PORT_REQUEST_ASSOCIATE,
PORT_REQUEST_DISASSOCIATE,
};
enum {
PORT_VDP_RESPONSE_SUCCESS = 0,
PORT_VDP_RESPONSE_INVALID_FORMAT,
PORT_VDP_RESPONSE_INSUFFICIENT_RESOURCES,
PORT_VDP_RESPONSE_UNUSED_VTID,
PORT_VDP_RESPONSE_VTID_VIOLATION,
PORT_VDP_RESPONSE_VTID_VERSION_VIOALTION,
PORT_VDP_RESPONSE_OUT_OF_SYNC,
PORT_PROFILE_RESPONSE_SUCCESS = 0x100,
PORT_PROFILE_RESPONSE_INPROGRESS,
PORT_PROFILE_RESPONSE_INVALID,
PORT_PROFILE_RESPONSE_BADSTATE,
PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES,
PORT_PROFILE_RESPONSE_ERROR,
};
struct ifla_port_vsi {
__u8 vsi_mgr_id;
__u8 vsi_type_id[3];
__u8 vsi_type_version;
__u8 pad[3];
};
# 32 "include/linux/netdevice.h" 2
# 1 "include/linux/pm_qos_params.h" 1
# 1 "include/linux/plist.h" 1
# 80 "include/linux/plist.h"
struct plist_head {
struct list_head prio_list;
struct list_head node_list;
};
struct plist_node {
int prio;
struct plist_head plist;
};
# 144 "include/linux/plist.h"
static inline __attribute__((always_inline)) void
plist_head_init(struct plist_head *head, spinlock_t *lock)
{
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
}
static inline __attribute__((always_inline)) void
plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
{
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list);
}
static inline __attribute__((always_inline)) void plist_node_init(struct plist_node *node, int prio)
{
node->prio = prio;
plist_head_init(&node->plist, ((void *)0));
}
extern void plist_add(struct plist_node *node, struct plist_head *head);
extern void plist_del(struct plist_node *node, struct plist_head *head);
# 229 "include/linux/plist.h"
static inline __attribute__((always_inline)) int plist_head_empty(const struct plist_head *head)
{
return list_empty(&head->node_list);
}
static inline __attribute__((always_inline)) int plist_node_empty(const struct plist_node *node)
{
return plist_head_empty(&node->plist);
}
# 285 "include/linux/plist.h"
static inline __attribute__((always_inline)) struct plist_node *plist_first(const struct plist_head *head)
{
return ({ const typeof( ((struct plist_node *)0)->plist.node_list ) *__mptr = (head->node_list.next); (struct plist_node *)( (char *)__mptr - __builtin_offsetof(struct plist_node,plist.node_list) );})
;
}
static inline __attribute__((always_inline)) struct plist_node *plist_last(const struct plist_head *head)
{
return ({ const typeof( ((struct plist_node *)0)->plist.node_list ) *__mptr = (head->node_list.prev); (struct plist_node *)( (char *)__mptr - __builtin_offsetof(struct plist_node,plist.node_list) );})
;
}
# 8 "include/linux/pm_qos_params.h" 2
# 1 "include/linux/miscdevice.h" 1
# 1 "include/linux/major.h" 1
# 5 "include/linux/miscdevice.h" 2
# 45 "include/linux/miscdevice.h"
struct device;
struct miscdevice {
int minor;
const char *name;
const struct file_operations *fops;
struct list_head list;
struct device *parent;
struct device *this_device;
const char *nodename;
mode_t mode;
};
extern int misc_register(struct miscdevice * misc);
extern int misc_deregister(struct miscdevice *misc);
# 10 "include/linux/pm_qos_params.h" 2
# 19 "include/linux/pm_qos_params.h"
struct pm_qos_request_list {
struct plist_node list;
int pm_qos_class;
};
void pm_qos_add_request(struct pm_qos_request_list *l, int pm_qos_class, s32 value);
void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
s32 new_value);
void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req);
int pm_qos_request(int pm_qos_class);
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_request_active(struct pm_qos_request_list *req);
# 35 "include/linux/netdevice.h" 2
# 1 "include/linux/delay.h" 1
# 12 "include/linux/delay.h"
extern unsigned long loops_per_jiffy;
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/delay.h" 1
# 16 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/delay.h"
extern void __delay(unsigned int loops);
extern void __ndelay(unsigned int ns);
extern void __udelay(unsigned int us);
# 15 "include/linux/delay.h" 2
# 44 "include/linux/delay.h"
extern unsigned long lpj_fine;
void calibrate_delay(void);
void msleep(unsigned int msecs);
unsigned long msleep_interruptible(unsigned int msecs);
void usleep_range(unsigned long min, unsigned long max);
static inline __attribute__((always_inline)) void ssleep(unsigned int seconds)
{
msleep(seconds * 1000);
}
# 37 "include/linux/netdevice.h" 2
# 1 "include/linux/rculist.h" 1
# 18 "include/linux/rculist.h"
static inline __attribute__((always_inline)) void __list_add_rcu(struct list_head *new,
struct list_head *prev, struct list_head *next)
{
new->next = next;
new->prev = prev;
({ if (!__builtin_constant_p(new) || ((new) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (prev->next) = (new); });
next->prev = new;
}
# 43 "include/linux/rculist.h"
static inline __attribute__((always_inline)) void list_add_rcu(struct list_head *new, struct list_head *head)
{
__list_add_rcu(new, head, head->next);
}
# 64 "include/linux/rculist.h"
static inline __attribute__((always_inline)) void list_add_tail_rcu(struct list_head *new,
struct list_head *head)
{
__list_add_rcu(new, head->prev, head);
}
# 94 "include/linux/rculist.h"
static inline __attribute__((always_inline)) void list_del_rcu(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
entry->prev = ((void *) 0x00200200 + 0);
}
# 120 "include/linux/rculist.h"
static inline __attribute__((always_inline)) void hlist_del_init_rcu(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
__hlist_del(n);
n->pprev = ((void *)0);
}
}
# 136 "include/linux/rculist.h"
static inline __attribute__((always_inline)) void list_replace_rcu(struct list_head *old,
struct list_head *new)
{
new->next = old->next;
new->prev = old->prev;
({ if (!__builtin_constant_p(new) || ((new) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (new->prev->next) = (new); });
new->next->prev = new;
old->prev = ((void *) 0x00200200 + 0);
}
# 163 "include/linux/rculist.h"
static inline __attribute__((always_inline)) void list_splice_init_rcu(struct list_head *list,
struct list_head *head,
void (*sync)(void))
{
struct list_head *first = list->next;
struct list_head *last = list->prev;
struct list_head *at = head->next;
if (list_empty(head))
return;
INIT_LIST_HEAD(list);
# 185 "include/linux/rculist.h"
sync();
# 195 "include/linux/rculist.h"
last->next = at;
({ if (!__builtin_constant_p(first) || ((first) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (head->next) = (first); });
first->prev = head;
at->prev = last;
}
# 297 "include/linux/rculist.h"
static inline __attribute__((always_inline)) void hlist_del_rcu(struct hlist_node *n)
{
__hlist_del(n);
n->pprev = ((void *) 0x00200200 + 0);
}
# 310 "include/linux/rculist.h"
static inline __attribute__((always_inline)) void hlist_replace_rcu(struct hlist_node *old,
struct hlist_node *new)
{
struct hlist_node *next = old->next;
new->next = next;
new->pprev = old->pprev;
({ if (!__builtin_constant_p(new) || ((new) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (*new->pprev) = (new); });
if (next)
new->next->pprev = &new->next;
old->pprev = ((void *) 0x00200200 + 0);
}
# 342 "include/linux/rculist.h"
static inline __attribute__((always_inline)) void hlist_add_head_rcu(struct hlist_node *n,
struct hlist_head *h)
{
struct hlist_node *first = h->first;
n->next = first;
n->pprev = &h->first;
({ if (!__builtin_constant_p(n) || ((n) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (h->first) = (n); });
if (first)
first->pprev = &n->next;
}
# 372 "include/linux/rculist.h"
static inline __attribute__((always_inline)) void hlist_add_before_rcu(struct hlist_node *n,
struct hlist_node *next)
{
n->pprev = next->pprev;
n->next = next;
({ if (!__builtin_constant_p(n) || ((n) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (*(n->pprev)) = (n); });
next->pprev = &n->next;
}
# 399 "include/linux/rculist.h"
static inline __attribute__((always_inline)) void hlist_add_after_rcu(struct hlist_node *prev,
struct hlist_node *n)
{
n->next = prev->next;
n->pprev = &prev->next;
({ if (!__builtin_constant_p(n) || ((n) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (prev->next) = (n); });
if (n->next)
n->next->pprev = &n->next;
}
# 45 "include/linux/netdevice.h" 2
# 1 "include/linux/ethtool.h" 1
# 19 "include/linux/ethtool.h"
struct ethtool_cmd {
__u32 cmd;
__u32 supported;
__u32 advertising;
__u16 speed;
__u8 duplex;
__u8 port;
__u8 phy_address;
__u8 transceiver;
__u8 autoneg;
__u8 mdio_support;
__u32 maxtxpkt;
__u32 maxrxpkt;
__u16 speed_hi;
__u8 eth_tp_mdix;
__u8 reserved2;
__u32 lp_advertising;
__u32 reserved[2];
};
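/*
 * ethtool_cmd_speed_set()/ethtool_cmd_speed() split and recombine the
 * link speed across the 16-bit speed and speed_hi fields so that speeds
 * above 65535 Mb/s still fit in the legacy ethtool_cmd layout.
 */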
static inline __attribute__((always_inline)) void ethtool_cmd_speed_set(struct ethtool_cmd *ep,
__u32 speed)
{
ep->speed = (__u16)speed;
ep->speed_hi = (__u16)(speed >> 16);
}
static inline __attribute__((always_inline)) __u32 ethtool_cmd_speed(struct ethtool_cmd *ep)
{
return (ep->speed_hi << 16) | ep->speed;
}
struct ethtool_drvinfo {
__u32 cmd;
char driver[32];
char version[32];
char fw_version[32];
char bus_info[32];
char reserved1[32];
char reserved2[12];
__u32 n_priv_flags;
__u32 n_stats;
__u32 testinfo_len;
__u32 eedump_len;
__u32 regdump_len;
};
struct ethtool_wolinfo {
__u32 cmd;
__u32 supported;
__u32 wolopts;
__u8 sopass[6];
};
struct ethtool_value {
__u32 cmd;
__u32 data;
};
struct ethtool_regs {
__u32 cmd;
__u32 version;
__u32 len;
__u8 data[0];
};
struct ethtool_eeprom {
__u32 cmd;
__u32 magic;
__u32 offset;
__u32 len;
__u8 data[0];
};
struct ethtool_coalesce {
__u32 cmd;
__u32 rx_coalesce_usecs;
__u32 rx_max_coalesced_frames;
__u32 rx_coalesce_usecs_irq;
__u32 rx_max_coalesced_frames_irq;
__u32 tx_coalesce_usecs;
__u32 tx_max_coalesced_frames;
__u32 tx_coalesce_usecs_irq;
__u32 tx_max_coalesced_frames_irq;
__u32 stats_block_coalesce_usecs;
# 172 "include/linux/ethtool.h"
__u32 use_adaptive_rx_coalesce;
__u32 use_adaptive_tx_coalesce;
__u32 pkt_rate_low;
__u32 rx_coalesce_usecs_low;
__u32 rx_max_coalesced_frames_low;
__u32 tx_coalesce_usecs_low;
__u32 tx_max_coalesced_frames_low;
# 194 "include/linux/ethtool.h"
__u32 pkt_rate_high;
__u32 rx_coalesce_usecs_high;
__u32 rx_max_coalesced_frames_high;
__u32 tx_coalesce_usecs_high;
__u32 tx_max_coalesced_frames_high;
__u32 rate_sample_interval;
};
struct ethtool_ringparam {
__u32 cmd;
__u32 rx_max_pending;
__u32 rx_mini_max_pending;
__u32 rx_jumbo_max_pending;
__u32 tx_max_pending;
__u32 rx_pending;
__u32 rx_mini_pending;
__u32 rx_jumbo_pending;
__u32 tx_pending;
};
struct ethtool_pauseparam {
__u32 cmd;
# 242 "include/linux/ethtool.h"
__u32 autoneg;
__u32 rx_pause;
__u32 tx_pause;
};
enum ethtool_stringset {
ETH_SS_TEST = 0,
ETH_SS_STATS,
ETH_SS_PRIV_FLAGS,
ETH_SS_NTUPLE_FILTERS,
};
struct ethtool_gstrings {
__u32 cmd;
__u32 string_set;
__u32 len;
__u8 data[0];
};
struct ethtool_sset_info {
__u32 cmd;
__u32 reserved;
__u64 sset_mask;
__u32 data[0];
};
enum ethtool_test_flags {
ETH_TEST_FL_OFFLINE = (1 << 0),
ETH_TEST_FL_FAILED = (1 << 1),
};
struct ethtool_test {
__u32 cmd;
__u32 flags;
__u32 reserved;
__u32 len;
__u64 data[0];
};
struct ethtool_stats {
__u32 cmd;
__u32 n_stats;
__u64 data[0];
};
struct ethtool_perm_addr {
__u32 cmd;
__u32 size;
__u8 data[0];
};
# 310 "include/linux/ethtool.h"
enum ethtool_flags {
ETH_FLAG_LRO = (1 << 15),
ETH_FLAG_NTUPLE = (1 << 27),
ETH_FLAG_RXHASH = (1 << 28),
};
struct ethtool_tcpip4_spec {
__be32 ip4src;
__be32 ip4dst;
__be16 psrc;
__be16 pdst;
__u8 tos;
};
struct ethtool_ah_espip4_spec {
__be32 ip4src;
__be32 ip4dst;
__be32 spi;
__u8 tos;
};
struct ethtool_rawip4_spec {
__be32 ip4src;
__be32 ip4dst;
__u8 hdata[64];
};
struct ethtool_ether_spec {
__be16 ether_type;
__u8 frame_size;
__u8 eframe[16];
};
struct ethtool_usrip4_spec {
__be32 ip4src;
__be32 ip4dst;
__be32 l4_4_bytes;
__u8 tos;
__u8 ip_ver;
__u8 proto;
};
struct ethtool_rx_flow_spec {
__u32 flow_type;
union {
struct ethtool_tcpip4_spec tcp_ip4_spec;
struct ethtool_tcpip4_spec udp_ip4_spec;
struct ethtool_tcpip4_spec sctp_ip4_spec;
struct ethtool_ah_espip4_spec ah_ip4_spec;
struct ethtool_ah_espip4_spec esp_ip4_spec;
struct ethtool_rawip4_spec raw_ip4_spec;
struct ethtool_ether_spec ether_spec;
struct ethtool_usrip4_spec usr_ip4_spec;
__u8 hdata[64];
} h_u, m_u;
__u64 ring_cookie;
__u32 location;
};
struct ethtool_rxnfc {
__u32 cmd;
__u32 flow_type;
__u64 data;
struct ethtool_rx_flow_spec fs;
__u32 rule_cnt;
__u32 rule_locs[0];
};
struct ethtool_rxfh_indir {
__u32 cmd;
__u32 size;
__u32 ring_index[0];
};
struct ethtool_rx_ntuple_flow_spec {
__u32 flow_type;
union {
struct ethtool_tcpip4_spec tcp_ip4_spec;
struct ethtool_tcpip4_spec udp_ip4_spec;
struct ethtool_tcpip4_spec sctp_ip4_spec;
struct ethtool_ah_espip4_spec ah_ip4_spec;
struct ethtool_ah_espip4_spec esp_ip4_spec;
struct ethtool_rawip4_spec raw_ip4_spec;
struct ethtool_ether_spec ether_spec;
struct ethtool_usrip4_spec usr_ip4_spec;
__u8 hdata[64];
} h_u, m_u;
__u16 vlan_tag;
__u16 vlan_tag_mask;
__u64 data;
__u64 data_mask;
__s32 action;
};
struct ethtool_rx_ntuple {
__u32 cmd;
struct ethtool_rx_ntuple_flow_spec fs;
};
enum ethtool_flash_op_type {
ETHTOOL_FLASH_ALL_REGIONS = 0,
};
struct ethtool_flash {
__u32 cmd;
__u32 region;
char data[128];
};
struct ethtool_rx_ntuple_flow_spec_container {
struct ethtool_rx_ntuple_flow_spec fs;
struct list_head list;
};
struct ethtool_rx_ntuple_list {
struct list_head list;
unsigned int count;
};
struct net_device;
u32 ethtool_op_get_link(struct net_device *dev);
u32 ethtool_op_get_rx_csum(struct net_device *dev);
u32 ethtool_op_get_tx_csum(struct net_device *dev);
int ethtool_op_set_tx_csum(struct net_device *dev, u32 data);
int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data);
int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data);
u32 ethtool_op_get_sg(struct net_device *dev);
int ethtool_op_set_sg(struct net_device *dev, u32 data);
u32 ethtool_op_get_tso(struct net_device *dev);
int ethtool_op_set_tso(struct net_device *dev, u32 data);
u32 ethtool_op_get_ufo(struct net_device *dev);
int ethtool_op_set_ufo(struct net_device *dev, u32 data);
u32 ethtool_op_get_flags(struct net_device *dev);
int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported);
void ethtool_ntuple_flush(struct net_device *dev);
# 533 "include/linux/ethtool.h"
struct ethtool_ops {
int (*get_settings)(struct net_device *, struct ethtool_cmd *);
int (*set_settings)(struct net_device *, struct ethtool_cmd *);
void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
int (*get_regs_len)(struct net_device *);
void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
void (*get_wol)(struct net_device *, struct ethtool_wolinfo *);
int (*set_wol)(struct net_device *, struct ethtool_wolinfo *);
u32 (*get_msglevel)(struct net_device *);
void (*set_msglevel)(struct net_device *, u32);
int (*nway_reset)(struct net_device *);
u32 (*get_link)(struct net_device *);
int (*get_eeprom_len)(struct net_device *);
int (*get_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
int (*set_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
void (*get_ringparam)(struct net_device *,
struct ethtool_ringparam *);
int (*set_ringparam)(struct net_device *,
struct ethtool_ringparam *);
void (*get_pauseparam)(struct net_device *,
struct ethtool_pauseparam*);
int (*set_pauseparam)(struct net_device *,
struct ethtool_pauseparam*);
u32 (*get_rx_csum)(struct net_device *);
int (*set_rx_csum)(struct net_device *, u32);
u32 (*get_tx_csum)(struct net_device *);
int (*set_tx_csum)(struct net_device *, u32);
u32 (*get_sg)(struct net_device *);
int (*set_sg)(struct net_device *, u32);
u32 (*get_tso)(struct net_device *);
int (*set_tso)(struct net_device *, u32);
void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
void (*get_strings)(struct net_device *, u32 stringset, u8 *);
int (*phys_id)(struct net_device *, u32);
void (*get_ethtool_stats)(struct net_device *,
struct ethtool_stats *, u64 *);
int (*begin)(struct net_device *);
void (*complete)(struct net_device *);
u32 (*get_ufo)(struct net_device *);
int (*set_ufo)(struct net_device *, u32);
u32 (*get_flags)(struct net_device *);
int (*set_flags)(struct net_device *, u32);
u32 (*get_priv_flags)(struct net_device *);
int (*set_priv_flags)(struct net_device *, u32);
int (*get_sset_count)(struct net_device *, int);
int (*get_rxnfc)(struct net_device *,
struct ethtool_rxnfc *, void *);
int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
int (*flash_device)(struct net_device *, struct ethtool_flash *);
int (*reset)(struct net_device *, u32 *);
int (*set_rx_ntuple)(struct net_device *,
struct ethtool_rx_ntuple *);
int (*get_rx_ntuple)(struct net_device *, u32 stringset, void *);
int (*get_rxfh_indir)(struct net_device *,
struct ethtool_rxfh_indir *);
int (*set_rxfh_indir)(struct net_device *,
const struct ethtool_rxfh_indir *);
};
# 798 "include/linux/ethtool.h"
enum ethtool_reset_flags {
ETH_RESET_MGMT = 1 << 0,
ETH_RESET_IRQ = 1 << 1,
ETH_RESET_DMA = 1 << 2,
ETH_RESET_FILTER = 1 << 3,
ETH_RESET_OFFLOAD = 1 << 4,
ETH_RESET_MAC = 1 << 5,
ETH_RESET_PHY = 1 << 6,
ETH_RESET_RAM = 1 << 7,
ETH_RESET_DEDICATED = 0x0000ffff,
ETH_RESET_ALL = 0xffffffff,
};
# 49 "include/linux/netdevice.h" 2
# 1 "include/net/net_namespace.h" 1
# 11 "include/net/net_namespace.h"
# 1 "include/net/netns/core.h" 1
struct ctl_table_header;
struct prot_inuse;
struct netns_core {
struct ctl_table_header *sysctl_hdr;
int sysctl_somaxconn;
struct prot_inuse *inuse;
};
# 12 "include/net/net_namespace.h" 2
# 1 "include/net/netns/mib.h" 1
# 1 "include/net/snmp.h" 1
# 23 "include/net/snmp.h"
# 1 "include/linux/snmp.h" 1
# 18 "include/linux/snmp.h"
enum
{
IPSTATS_MIB_NUM = 0,
IPSTATS_MIB_INPKTS,
IPSTATS_MIB_INHDRERRORS,
IPSTATS_MIB_INTOOBIGERRORS,
IPSTATS_MIB_INNOROUTES,
IPSTATS_MIB_INADDRERRORS,
IPSTATS_MIB_INUNKNOWNPROTOS,
IPSTATS_MIB_INTRUNCATEDPKTS,
IPSTATS_MIB_INDISCARDS,
IPSTATS_MIB_INDELIVERS,
IPSTATS_MIB_OUTFORWDATAGRAMS,
IPSTATS_MIB_OUTPKTS,
IPSTATS_MIB_OUTDISCARDS,
IPSTATS_MIB_OUTNOROUTES,
IPSTATS_MIB_REASMTIMEOUT,
IPSTATS_MIB_REASMREQDS,
IPSTATS_MIB_REASMOKS,
IPSTATS_MIB_REASMFAILS,
IPSTATS_MIB_FRAGOKS,
IPSTATS_MIB_FRAGFAILS,
IPSTATS_MIB_FRAGCREATES,
IPSTATS_MIB_INMCASTPKTS,
IPSTATS_MIB_OUTMCASTPKTS,
IPSTATS_MIB_INBCASTPKTS,
IPSTATS_MIB_OUTBCASTPKTS,
IPSTATS_MIB_INOCTETS,
IPSTATS_MIB_OUTOCTETS,
IPSTATS_MIB_INMCASTOCTETS,
IPSTATS_MIB_OUTMCASTOCTETS,
IPSTATS_MIB_INBCASTOCTETS,
IPSTATS_MIB_OUTBCASTOCTETS,
__IPSTATS_MIB_MAX
};
enum
{
ICMP_MIB_NUM = 0,
ICMP_MIB_INMSGS,
ICMP_MIB_INERRORS,
ICMP_MIB_INDESTUNREACHS,
ICMP_MIB_INTIMEEXCDS,
ICMP_MIB_INPARMPROBS,
ICMP_MIB_INSRCQUENCHS,
ICMP_MIB_INREDIRECTS,
ICMP_MIB_INECHOS,
ICMP_MIB_INECHOREPS,
ICMP_MIB_INTIMESTAMPS,
ICMP_MIB_INTIMESTAMPREPS,
ICMP_MIB_INADDRMASKS,
ICMP_MIB_INADDRMASKREPS,
ICMP_MIB_OUTMSGS,
ICMP_MIB_OUTERRORS,
ICMP_MIB_OUTDESTUNREACHS,
ICMP_MIB_OUTTIMEEXCDS,
ICMP_MIB_OUTPARMPROBS,
ICMP_MIB_OUTSRCQUENCHS,
ICMP_MIB_OUTREDIRECTS,
ICMP_MIB_OUTECHOS,
ICMP_MIB_OUTECHOREPS,
ICMP_MIB_OUTTIMESTAMPS,
ICMP_MIB_OUTTIMESTAMPREPS,
ICMP_MIB_OUTADDRMASKS,
ICMP_MIB_OUTADDRMASKREPS,
__ICMP_MIB_MAX
};
enum
{
ICMP6_MIB_NUM = 0,
ICMP6_MIB_INMSGS,
ICMP6_MIB_INERRORS,
ICMP6_MIB_OUTMSGS,
ICMP6_MIB_OUTERRORS,
__ICMP6_MIB_MAX
};
# 114 "include/linux/snmp.h"
enum
{
TCP_MIB_NUM = 0,
TCP_MIB_RTOALGORITHM,
TCP_MIB_RTOMIN,
TCP_MIB_RTOMAX,
TCP_MIB_MAXCONN,
TCP_MIB_ACTIVEOPENS,
TCP_MIB_PASSIVEOPENS,
TCP_MIB_ATTEMPTFAILS,
TCP_MIB_ESTABRESETS,
TCP_MIB_CURRESTAB,
TCP_MIB_INSEGS,
TCP_MIB_OUTSEGS,
TCP_MIB_RETRANSSEGS,
TCP_MIB_INERRS,
TCP_MIB_OUTRSTS,
__TCP_MIB_MAX
};
enum
{
UDP_MIB_NUM = 0,
UDP_MIB_INDATAGRAMS,
UDP_MIB_NOPORTS,
UDP_MIB_INERRORS,
UDP_MIB_OUTDATAGRAMS,
UDP_MIB_RCVBUFERRORS,
UDP_MIB_SNDBUFERRORS,
__UDP_MIB_MAX
};
enum
{
LINUX_MIB_NUM = 0,
LINUX_MIB_SYNCOOKIESSENT,
LINUX_MIB_SYNCOOKIESRECV,
LINUX_MIB_SYNCOOKIESFAILED,
LINUX_MIB_EMBRYONICRSTS,
LINUX_MIB_PRUNECALLED,
LINUX_MIB_RCVPRUNED,
LINUX_MIB_OFOPRUNED,
LINUX_MIB_OUTOFWINDOWICMPS,
LINUX_MIB_LOCKDROPPEDICMPS,
LINUX_MIB_ARPFILTER,
LINUX_MIB_TIMEWAITED,
LINUX_MIB_TIMEWAITRECYCLED,
LINUX_MIB_TIMEWAITKILLED,
LINUX_MIB_PAWSPASSIVEREJECTED,
LINUX_MIB_PAWSACTIVEREJECTED,
LINUX_MIB_PAWSESTABREJECTED,
LINUX_MIB_DELAYEDACKS,
LINUX_MIB_DELAYEDACKLOCKED,
LINUX_MIB_DELAYEDACKLOST,
LINUX_MIB_LISTENOVERFLOWS,
LINUX_MIB_LISTENDROPS,
LINUX_MIB_TCPPREQUEUED,
LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG,
LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE,
LINUX_MIB_TCPPREQUEUEDROPPED,
LINUX_MIB_TCPHPHITS,
LINUX_MIB_TCPHPHITSTOUSER,
LINUX_MIB_TCPPUREACKS,
LINUX_MIB_TCPHPACKS,
LINUX_MIB_TCPRENORECOVERY,
LINUX_MIB_TCPSACKRECOVERY,
LINUX_MIB_TCPSACKRENEGING,
LINUX_MIB_TCPFACKREORDER,
LINUX_MIB_TCPSACKREORDER,
LINUX_MIB_TCPRENOREORDER,
LINUX_MIB_TCPTSREORDER,
LINUX_MIB_TCPFULLUNDO,
LINUX_MIB_TCPPARTIALUNDO,
LINUX_MIB_TCPDSACKUNDO,
LINUX_MIB_TCPLOSSUNDO,
LINUX_MIB_TCPLOSS,
LINUX_MIB_TCPLOSTRETRANSMIT,
LINUX_MIB_TCPRENOFAILURES,
LINUX_MIB_TCPSACKFAILURES,
LINUX_MIB_TCPLOSSFAILURES,
LINUX_MIB_TCPFASTRETRANS,
LINUX_MIB_TCPFORWARDRETRANS,
LINUX_MIB_TCPSLOWSTARTRETRANS,
LINUX_MIB_TCPTIMEOUTS,
LINUX_MIB_TCPRENORECOVERYFAIL,
LINUX_MIB_TCPSACKRECOVERYFAIL,
LINUX_MIB_TCPSCHEDULERFAILED,
LINUX_MIB_TCPRCVCOLLAPSED,
LINUX_MIB_TCPDSACKOLDSENT,
LINUX_MIB_TCPDSACKOFOSENT,
LINUX_MIB_TCPDSACKRECV,
LINUX_MIB_TCPDSACKOFORECV,
LINUX_MIB_TCPABORTONSYN,
LINUX_MIB_TCPABORTONDATA,
LINUX_MIB_TCPABORTONCLOSE,
LINUX_MIB_TCPABORTONMEMORY,
LINUX_MIB_TCPABORTONTIMEOUT,
LINUX_MIB_TCPABORTONLINGER,
LINUX_MIB_TCPABORTFAILED,
LINUX_MIB_TCPMEMORYPRESSURES,
LINUX_MIB_TCPSACKDISCARD,
LINUX_MIB_TCPDSACKIGNOREDOLD,
LINUX_MIB_TCPDSACKIGNOREDNOUNDO,
LINUX_MIB_TCPSPURIOUSRTOS,
LINUX_MIB_TCPMD5NOTFOUND,
LINUX_MIB_TCPMD5UNEXPECTED,
LINUX_MIB_SACKSHIFTED,
LINUX_MIB_SACKMERGED,
LINUX_MIB_SACKSHIFTFALLBACK,
LINUX_MIB_TCPBACKLOGDROP,
LINUX_MIB_TCPMINTTLDROP,
LINUX_MIB_TCPDEFERACCEPTDROP,
LINUX_MIB_IPRPFILTER,
__LINUX_MIB_MAX
};
enum
{
LINUX_MIB_XFRMNUM = 0,
LINUX_MIB_XFRMINERROR,
LINUX_MIB_XFRMINBUFFERERROR,
LINUX_MIB_XFRMINHDRERROR,
LINUX_MIB_XFRMINNOSTATES,
LINUX_MIB_XFRMINSTATEPROTOERROR,
LINUX_MIB_XFRMINSTATEMODEERROR,
LINUX_MIB_XFRMINSTATESEQERROR,
LINUX_MIB_XFRMINSTATEEXPIRED,
LINUX_MIB_XFRMINSTATEMISMATCH,
LINUX_MIB_XFRMINSTATEINVALID,
LINUX_MIB_XFRMINTMPLMISMATCH,
LINUX_MIB_XFRMINNOPOLS,
LINUX_MIB_XFRMINPOLBLOCK,
LINUX_MIB_XFRMINPOLERROR,
LINUX_MIB_XFRMOUTERROR,
LINUX_MIB_XFRMOUTBUNDLEGENERROR,
LINUX_MIB_XFRMOUTBUNDLECHECKERROR,
LINUX_MIB_XFRMOUTNOSTATES,
LINUX_MIB_XFRMOUTSTATEPROTOERROR,
LINUX_MIB_XFRMOUTSTATEMODEERROR,
LINUX_MIB_XFRMOUTSTATESEQERROR,
LINUX_MIB_XFRMOUTSTATEEXPIRED,
LINUX_MIB_XFRMOUTPOLBLOCK,
LINUX_MIB_XFRMOUTPOLDEAD,
LINUX_MIB_XFRMOUTPOLERROR,
LINUX_MIB_XFRMFWDHDRERROR,
__LINUX_MIB_XFRMMAX
};
# 24 "include/net/snmp.h" 2
# 34 "include/net/snmp.h"
struct snmp_mib {
const char *name;
int entry;
};
# 52 "include/net/snmp.h"
# 1 "include/linux/u64_stats_sync.h" 1
# 64 "include/linux/u64_stats_sync.h"
struct u64_stats_sync {
seqcount_t seq;
};
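/*
 * u64_stats_sync wraps a seqcount so 64-bit counters can be read
 * consistently on 32-bit SMP: writers bracket updates with
 * u64_stats_update_begin()/end(), readers loop on fetch_begin()/
 * fetch_retry() until they see an unchanged sequence.
 */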
static void inline __attribute__((always_inline)) u64_stats_update_begin(struct u64_stats_sync *syncp)
{
write_seqcount_begin(&syncp->seq);
}
static void inline __attribute__((always_inline)) u64_stats_update_end(struct u64_stats_sync *syncp)
{
write_seqcount_end(&syncp->seq);
}
static unsigned int inline __attribute__((always_inline)) u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
return read_seqcount_begin(&syncp->seq);
}
static int inline __attribute__((always_inline)) u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
unsigned int start)
{
return read_seqcount_retry(&syncp->seq, start);
}
static unsigned int inline __attribute__((always_inline)) u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
{
return read_seqcount_begin(&syncp->seq);
}
static int inline __attribute__((always_inline)) u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
unsigned int start)
{
return read_seqcount_retry(&syncp->seq, start);
}
# 53 "include/net/snmp.h" 2
struct ipstats_mib {
u64 mibs[__IPSTATS_MIB_MAX];
struct u64_stats_sync syncp;
};
struct icmp_mib {
unsigned long mibs[(__ICMP_MIB_MAX + 1)];
};
struct icmpmsg_mib {
unsigned long mibs[512];
};
struct icmpv6_mib {
unsigned long mibs[__ICMP6_MIB_MAX];
};
struct icmpv6msg_mib {
unsigned long mibs[512];
};
struct tcp_mib {
unsigned long mibs[__TCP_MIB_MAX];
};
struct udp_mib {
unsigned long mibs[__UDP_MIB_MAX];
};
struct linux_mib {
unsigned long mibs[__LINUX_MIB_MAX];
};
struct linux_xfrm_mib {
unsigned long mibs[__LINUX_MIB_XFRMMAX];
};
# 5 "include/net/netns/mib.h" 2
struct netns_mib {
__typeof__(struct tcp_mib) *tcp_statistics[2];
__typeof__(struct ipstats_mib) *ip_statistics[2];
__typeof__(struct linux_mib) *net_statistics[2];
__typeof__(struct udp_mib) *udp_statistics[2];
__typeof__(struct udp_mib) *udplite_statistics[2];
__typeof__(struct icmp_mib) *icmp_statistics[2];
__typeof__(struct icmpmsg_mib) *icmpmsg_statistics[2];
struct proc_dir_entry *proc_net_devsnmp6;
__typeof__(struct udp_mib) *udp_stats_in6[2];
__typeof__(struct udp_mib) *udplite_stats_in6[2];
__typeof__(struct ipstats_mib) *ipv6_statistics[2];
__typeof__(struct icmpv6_mib) *icmpv6_statistics[2];
__typeof__(struct icmpv6msg_mib) *icmpv6msg_statistics[2];
};
# 13 "include/net/net_namespace.h" 2
# 1 "include/net/netns/unix.h" 1
struct ctl_table_header;
struct netns_unix {
int sysctl_max_dgram_qlen;
struct ctl_table_header *ctl;
};
# 14 "include/net/net_namespace.h" 2
# 1 "include/net/netns/packet.h" 1
# 10 "include/net/netns/packet.h"
struct netns_packet {
spinlock_t sklist_lock;
struct hlist_head sklist;
};
# 15 "include/net/net_namespace.h" 2
# 1 "include/net/netns/ipv4.h" 1
# 1 "include/net/inet_frag.h" 1
struct netns_frags {
int nqueues;
atomic_t mem;
struct list_head lru_list;
int timeout;
int high_thresh;
int low_thresh;
};
struct inet_frag_queue {
struct hlist_node list;
struct netns_frags *net;
struct list_head lru_list;
spinlock_t lock;
atomic_t refcnt;
struct timer_list timer;
struct sk_buff *fragments;
struct sk_buff *fragments_tail;
ktime_t stamp;
int len;
int meat;
__u8 last_in;
};
struct inet_frags {
struct hlist_head hash[64];
rwlock_t lock;
u32 rnd;
int qsize;
int secret_interval;
struct timer_list secret_timer;
unsigned int (*hashfn)(struct inet_frag_queue *);
void (*constructor)(struct inet_frag_queue *q,
void *arg);
void (*destructor)(struct inet_frag_queue *);
void (*skb_free)(struct sk_buff *);
int (*match)(struct inet_frag_queue *q,
void *arg);
void (*frag_expire)(unsigned long data);
};
void inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);
void inet_frags_init_net(struct netns_frags *nf);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
void inet_frag_destroy(struct inet_frag_queue *q,
struct inet_frags *f, int *work);
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
struct inet_frags *f, void *key, unsigned int hash)
;
static inline __attribute__((always_inline)) void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
if ((atomic_sub_return(1, (&q->refcnt)) == 0))
inet_frag_destroy(q, f, ((void *)0));
}
# 9 "include/net/netns/ipv4.h" 2
struct ctl_table_header;
struct ipv4_devconf;
struct fib_rules_ops;
struct hlist_head;
struct sock;
struct netns_ipv4 {
struct ctl_table_header *forw_hdr;
struct ctl_table_header *frags_hdr;
struct ctl_table_header *ipv4_hdr;
struct ctl_table_header *route_hdr;
struct ipv4_devconf *devconf_all;
struct ipv4_devconf *devconf_dflt;
struct fib_rules_ops *rules_ops;
struct hlist_head *fib_table_hash;
struct sock *fibnl;
struct sock **icmp_sk;
struct sock *tcp_sock;
struct netns_frags frags;
struct xt_table *iptable_filter;
struct xt_table *iptable_mangle;
struct xt_table *iptable_raw;
struct xt_table *arptable_filter;
struct xt_table *nat_table;
struct hlist_head *nat_bysource;
unsigned int nat_htable_size;
int nat_vmalloced;
int sysctl_icmp_echo_ignore_all;
int sysctl_icmp_echo_ignore_broadcasts;
int sysctl_icmp_ignore_bogus_error_responses;
int sysctl_icmp_ratelimit;
int sysctl_icmp_ratemask;
int sysctl_icmp_errors_use_inbound_ifaddr;
int sysctl_rt_cache_rebuild_count;
int current_rt_cache_rebuild_count;
atomic_t rt_genid;
struct mr_table *mrt;
};
# 16 "include/net/net_namespace.h" 2
# 1 "include/net/netns/ipv6.h" 1
# 9 "include/net/netns/ipv6.h"
# 1 "include/net/dst_ops.h" 1
struct dst_entry;
struct kmem_cachep;
struct net_device;
struct sk_buff;
struct dst_ops {
unsigned short family;
__be16 protocol;
unsigned gc_thresh;
int (*gc)(struct dst_ops *ops);
struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
void (*destroy)(struct dst_entry *);
void (*ifdown)(struct dst_entry *,
struct net_device *dev, int how);
struct dst_entry * (*negative_advice)(struct dst_entry *);
void (*link_failure)(struct sk_buff *);
void (*update_pmtu)(struct dst_entry *dst, u32 mtu);
int (*local_out)(struct sk_buff *skb);
atomic_t entries;
struct kmem_cache *kmem_cachep;
};
# 10 "include/net/netns/ipv6.h" 2
struct ctl_table_header;
struct netns_sysctl_ipv6 {
struct ctl_table_header *table;
struct ctl_table_header *frags_hdr;
int bindv6only;
int flush_delay;
int ip6_rt_max_size;
int ip6_rt_gc_min_interval;
int ip6_rt_gc_timeout;
int ip6_rt_gc_interval;
int ip6_rt_gc_elasticity;
int ip6_rt_mtu_expires;
int ip6_rt_min_advmss;
int icmpv6_time;
};
struct netns_ipv6 {
struct netns_sysctl_ipv6 sysctl;
struct ipv6_devconf *devconf_all;
struct ipv6_devconf *devconf_dflt;
struct netns_frags frags;
struct xt_table *ip6table_filter;
struct xt_table *ip6table_mangle;
struct xt_table *ip6table_raw;
struct rt6_info *ip6_null_entry;
struct rt6_statistics *rt6_stats;
struct timer_list ip6_fib_timer;
struct hlist_head *fib_table_hash;
struct fib6_table *fib6_main_tbl;
struct dst_ops ip6_dst_ops;
unsigned int ip6_rt_gc_expire;
unsigned long ip6_rt_last_gc;
struct rt6_info *ip6_prohibit_entry;
struct rt6_info *ip6_blk_hole_entry;
struct fib6_table *fib6_local_tbl;
struct fib_rules_ops *fib6_rules_ops;
struct sock **icmp_sk;
struct sock *ndisc_sk;
struct sock *tcp_sk;
struct sock *igmp_sk;
# 69 "include/net/netns/ipv6.h"
};
# 17 "include/net/net_namespace.h" 2
# 1 "include/net/netns/dccp.h" 1
struct sock;
struct netns_dccp {
struct sock *v4_ctl_sk;
struct sock *v6_ctl_sk;
};
# 18 "include/net/net_namespace.h" 2
# 1 "include/net/netns/x_tables.h" 1
# 1 "include/linux/netfilter.h" 1
# 9 "include/linux/netfilter.h"
# 1 "include/linux/in.h" 1
# 25 "include/linux/in.h"
enum {
IPPROTO_IP = 0,
IPPROTO_ICMP = 1,
IPPROTO_IGMP = 2,
IPPROTO_IPIP = 4,
IPPROTO_TCP = 6,
IPPROTO_EGP = 8,
IPPROTO_PUP = 12,
IPPROTO_UDP = 17,
IPPROTO_IDP = 22,
IPPROTO_DCCP = 33,
IPPROTO_RSVP = 46,
IPPROTO_GRE = 47,
IPPROTO_IPV6 = 41,
IPPROTO_ESP = 50,
IPPROTO_AH = 51,
IPPROTO_BEETPH = 94,
IPPROTO_PIM = 103,
IPPROTO_COMP = 108,
IPPROTO_SCTP = 132,
IPPROTO_UDPLITE = 136,
IPPROTO_RAW = 255,
IPPROTO_MAX
};
struct in_addr {
__be32 s_addr;
};
# 142 "include/linux/in.h"
struct ip_mreq {
struct in_addr imr_multiaddr;
struct in_addr imr_interface;
};
struct ip_mreqn {
struct in_addr imr_multiaddr;
struct in_addr imr_address;
int imr_ifindex;
};
struct ip_mreq_source {
__be32 imr_multiaddr;
__be32 imr_interface;
__be32 imr_sourceaddr;
};
struct ip_msfilter {
__be32 imsf_multiaddr;
__be32 imsf_interface;
__u32 imsf_fmode;
__u32 imsf_numsrc;
__be32 imsf_slist[1];
};
struct group_req {
__u32 gr_interface;
struct __kernel_sockaddr_storage gr_group;
};
struct group_source_req {
__u32 gsr_interface;
struct __kernel_sockaddr_storage gsr_group;
struct __kernel_sockaddr_storage gsr_source;
};
struct group_filter {
__u32 gf_interface;
struct __kernel_sockaddr_storage gf_group;
__u32 gf_fmode;
__u32 gf_numsrc;
struct __kernel_sockaddr_storage gf_slist[1];
};
struct in_pktinfo {
int ipi_ifindex;
struct in_addr ipi_spec_dst;
struct in_addr ipi_addr;
};
struct sockaddr_in {
sa_family_t sin_family;
__be16 sin_port;
struct in_addr sin_addr;
unsigned char __pad[16 - sizeof(short int) -
sizeof(unsigned short int) - sizeof(struct in_addr)];
};
# 271 "include/linux/in.h"
static inline __attribute__((always_inline)) int ipv4_is_loopback(__be32 addr)
{
return (addr & (( __be32)(__u32)(0xff000000))) == (( __be32)(__u32)(0x7f000000));
}
static inline __attribute__((always_inline)) int ipv4_is_multicast(__be32 addr)
{
return (addr & (( __be32)(__u32)(0xf0000000))) == (( __be32)(__u32)(0xe0000000));
}
static inline __attribute__((always_inline)) int ipv4_is_local_multicast(__be32 addr)
{
return (addr & (( __be32)(__u32)(0xffffff00))) == (( __be32)(__u32)(0xe0000000));
}
static inline __attribute__((always_inline)) int ipv4_is_lbcast(__be32 addr)
{
return addr == (( __be32)(__u32)(((unsigned long int) 0xffffffff)));
}
static inline __attribute__((always_inline)) int ipv4_is_zeronet(__be32 addr)
{
return (addr & (( __be32)(__u32)(0xff000000))) == (( __be32)(__u32)(0x00000000));
}
static inline __attribute__((always_inline)) int ipv4_is_private_10(__be32 addr)
{
return (addr & (( __be32)(__u32)(0xff000000))) == (( __be32)(__u32)(0x0a000000));
}
static inline __attribute__((always_inline)) int ipv4_is_private_172(__be32 addr)
{
return (addr & (( __be32)(__u32)(0xfff00000))) == (( __be32)(__u32)(0xac100000));
}
static inline __attribute__((always_inline)) int ipv4_is_private_192(__be32 addr)
{
return (addr & (( __be32)(__u32)(0xffff0000))) == (( __be32)(__u32)(0xc0a80000));
}
static inline __attribute__((always_inline)) int ipv4_is_linklocal_169(__be32 addr)
{
return (addr & (( __be32)(__u32)(0xffff0000))) == (( __be32)(__u32)(0xa9fe0000));
}
static inline __attribute__((always_inline)) int ipv4_is_anycast_6to4(__be32 addr)
{
return (addr & (( __be32)(__u32)(0xffffff00))) == (( __be32)(__u32)(0xc0586300));
}
static inline __attribute__((always_inline)) int ipv4_is_test_192(__be32 addr)
{
return (addr & (( __be32)(__u32)(0xffffff00))) == (( __be32)(__u32)(0xc0000200));
}
static inline __attribute__((always_inline)) int ipv4_is_test_198(__be32 addr)
{
return (addr & (( __be32)(__u32)(0xfffe0000))) == (( __be32)(__u32)(0xc6120000));
}
# 10 "include/linux/netfilter.h" 2
# 44 "include/linux/netfilter.h"
enum nf_inet_hooks {
NF_INET_PRE_ROUTING,
NF_INET_LOCAL_IN,
NF_INET_FORWARD,
NF_INET_LOCAL_OUT,
NF_INET_POST_ROUTING,
NF_INET_NUMHOOKS
};
enum {
NFPROTO_UNSPEC = 0,
NFPROTO_IPV4 = 2,
NFPROTO_ARP = 3,
NFPROTO_BRIDGE = 7,
NFPROTO_IPV6 = 10,
NFPROTO_DECNET = 12,
NFPROTO_NUMPROTO,
};
union nf_inet_addr {
__u32 all[4];
__be32 ip;
__be32 ip6[4];
struct in_addr in;
struct in6_addr in6;
};
static inline __attribute__((always_inline)) int nf_inet_addr_cmp(const union nf_inet_addr *a1,
const union nf_inet_addr *a2)
{
return a1->all[0] == a2->all[0] &&
a1->all[1] == a2->all[1] &&
a1->all[2] == a2->all[2] &&
a1->all[3] == a2->all[3];
}
extern void netfilter_init(void);
struct sk_buff;
typedef unsigned int nf_hookfn(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *));
struct nf_hook_ops {
struct list_head list;
nf_hookfn *hook;
struct module *owner;
u_int8_t pf;
unsigned int hooknum;
int priority;
};
struct nf_sockopt_ops {
struct list_head list;
u_int8_t pf;
int set_optmin;
int set_optmax;
int (*set)(struct sock *sk, int optval, void *user, unsigned int len);
int get_optmin;
int get_optmax;
int (*get)(struct sock *sk, int optval, void *user, int *len);
struct module *owner;
};
int nf_register_hook(struct nf_hook_ops *reg);
void nf_unregister_hook(struct nf_hook_ops *reg);
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
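/*
 * Editor's illustrative sketch (hypothetical names, not part of
 * netfilter.h): a minimal consumer of the registration API above. The
 * literal 1 is the numeric value of NF_ACCEPT; the macro itself does not
 * survive preprocessing and so cannot appear in this listing.
 */
struct net_device; /* forward declaration (may be redundant here) */
static unsigned int example_nf_hookfn(unsigned int hooknum,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return 1; /* NF_ACCEPT: let hook traversal continue */
}
static struct nf_hook_ops example_nf_ops = {
.hook = example_nf_hookfn,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = 0,
};
/* A module would call nf_register_hook(&example_nf_ops) from its init path
 * and nf_unregister_hook(&example_nf_ops) on unload. */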
extern struct ctl_path nf_net_netfilter_sysctl_path[];
extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[];
extern struct list_head nf_hooks[NFPROTO_NUMPROTO][8];
int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
struct net_device *indev, struct net_device *outdev,
int (*okfn)(struct sk_buff *), int thresh);
# 162 "include/linux/netfilter.h"
static inline __attribute__((always_inline)) int nf_hook_thresh(u_int8_t pf, unsigned int hook,
struct sk_buff *skb,
struct net_device *indev,
struct net_device *outdev,
int (*okfn)(struct sk_buff *), int thresh)
{
if (list_empty(&nf_hooks[pf][hook]))
return 1;
return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh);
}
static inline __attribute__((always_inline)) int nf_hook(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
struct net_device *indev, struct net_device *outdev,
int (*okfn)(struct sk_buff *))
{
return nf_hook_thresh(pf, hook, skb, indev, outdev, okfn, (-((int)(~0U>>1)) - 1)); /* threshold INT_MIN: consult every registered hook */
}
# 199 "include/linux/netfilter.h"
static inline __attribute__((always_inline)) int
NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sk_buff *skb,
struct net_device *in, struct net_device *out,
int (*okfn)(struct sk_buff *), int thresh)
{
int ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, thresh);
if (ret == 1)
ret = okfn(skb);
return ret;
}
static inline __attribute__((always_inline)) int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sk_buff *skb,
struct net_device *in, struct net_device *out,
int (*okfn)(struct sk_buff *), int cond)
{
int ret;
if (!cond ||
(ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, (-((int)(~0U>>1)) - 1)) == 1))
ret = okfn(skb);
return ret;
}
static inline __attribute__((always_inline)) int
NF_HOOK(uint8_t pf, unsigned int hook, struct sk_buff *skb,
struct net_device *in, struct net_device *out,
int (*okfn)(struct sk_buff *))
{
return NF_HOOK_THRESH(pf, hook, skb, in, out, okfn, (-((int)(~0U>>1)) - 1));
}
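/*
 * Editor's note: the NF_HOOK* wrappers above pass INT_MIN (spelled
 * (-((int)(~0U>>1)) - 1) after macro expansion) as the priority threshold,
 * so no registered hook is skipped. A return value of 1 from
 * nf_hook_thresh() means the packet was accepted (or no hooks were
 * registered), at which point okfn() continues normal processing.
 */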
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char *opt,
unsigned int len);
int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char *opt,
int *len);
# 246 "include/linux/netfilter.h"
extern int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
struct flowi;
struct nf_queue_entry;
struct nf_afinfo {
unsigned short family;
__sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
__sum16 (*checksum_partial)(struct sk_buff *skb,
unsigned int hook,
unsigned int dataoff,
unsigned int len,
u_int8_t protocol);
int (*route)(struct dst_entry **dst, struct flowi *fl);
void (*saveroute)(const struct sk_buff *skb,
struct nf_queue_entry *entry);
int (*reroute)(struct sk_buff *skb,
const struct nf_queue_entry *entry);
int route_key_size;
};
extern const struct nf_afinfo *nf_afinfo[NFPROTO_NUMPROTO];
static inline __attribute__((always_inline)) const struct nf_afinfo *nf_get_afinfo(unsigned short family)
{
return ({ typeof(nf_afinfo[family]) _________p1 = (*(volatile typeof(nf_afinfo[family]) *)&(nf_afinfo[family])); do { } while(0); (_________p1); }); /* expanded rcu_dereference(nf_afinfo[family]) */
}
static inline __attribute__((always_inline)) __sum16
nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff,
u_int8_t protocol, unsigned short family)
{
const struct nf_afinfo *afinfo;
__sum16 csum = 0;
rcu_read_lock();
afinfo = nf_get_afinfo(family);
if (afinfo)
csum = afinfo->checksum(skb, hook, dataoff, protocol);
rcu_read_unlock();
return csum;
}
static inline __attribute__((always_inline)) __sum16
nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, unsigned int len,
u_int8_t protocol, unsigned short family)
{
const struct nf_afinfo *afinfo;
__sum16 csum = 0;
rcu_read_lock();
afinfo = nf_get_afinfo(family);
if (afinfo)
csum = afinfo->checksum_partial(skb, hook, dataoff, len,
protocol);
rcu_read_unlock();
return csum;
}
extern int nf_register_afinfo(const struct nf_afinfo *afinfo);
extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
# 1 "include/net/flow.h" 1
# 13 "include/net/flow.h"
struct flowi {
int oif;
int iif;
__u32 mark;
union {
struct {
__be32 daddr;
__be32 saddr;
__u8 tos;
__u8 scope;
} ip4_u;
struct {
struct in6_addr daddr;
struct in6_addr saddr;
__be32 flowlabel;
} ip6_u;
struct {
__le16 daddr;
__le16 saddr;
__u8 scope;
} dn_u;
} nl_u;
# 49 "include/net/flow.h"
__u8 proto;
__u8 flags;
union {
struct {
__be16 sport;
__be16 dport;
} ports;
struct {
__u8 type;
__u8 code;
} icmpt;
struct {
__le16 sport;
__le16 dport;
} dnports;
__be32 spi;
struct {
__u8 type;
} mht;
} uli_u;
__u32 secid;
} __attribute__((__aligned__(32/8)));
struct net;
struct sock;
struct flow_cache_ops;
struct flow_cache_object {
const struct flow_cache_ops *ops;
};
struct flow_cache_ops {
struct flow_cache_object *(*get)(struct flow_cache_object *);
int (*check)(struct flow_cache_object *);
void (*delete)(struct flow_cache_object *);
};
typedef struct flow_cache_object *(*flow_resolve_t)(
struct net *net, struct flowi *key, u16 family,
u8 dir, struct flow_cache_object *oldobj, void *ctx);
extern struct flow_cache_object *flow_cache_lookup(
struct net *net, struct flowi *key, u16 family,
u8 dir, flow_resolve_t resolver, void *ctx);
extern void flow_cache_flush(void);
extern atomic_t flow_cache_genid;
static inline __attribute__((always_inline)) int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
{
return (fl1->proto == fl2->proto &&
!memcmp(&fl1->uli_u, &fl2->uli_u, sizeof(fl1->uli_u)));
}
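/*
 * Editor's illustrative sketch (hypothetical helper, not part of flow.h):
 * building a struct flowi lookup key for an IPv4/TCP flow. In the original
 * header the fl4_dst/fl4_src/fl_ip_sport convenience macros name these
 * fields; being macros they do not appear in this preprocessed listing, so
 * the union members are addressed directly. 6 is IPPROTO_TCP.
 */
static inline __attribute__((always_inline)) struct flowi example_ipv4_tcp_flowi(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport)
{
struct flowi fl = {
.proto = 6, /* IPPROTO_TCP */
.nl_u.ip4_u = { .saddr = saddr, .daddr = daddr },
.uli_u.ports = { .sport = sport, .dport = dport },
};
return fl;
}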
# 310 "include/linux/netfilter.h" 2
extern void (*ip_nat_decode_session)(struct sk_buff *, struct flowi *);
static inline __attribute__((always_inline)) void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
void (*decodefn)(struct sk_buff *, struct flowi *);
if (family == 2) { /* AF_INET */
rcu_read_lock();
decodefn = ({ typeof(ip_nat_decode_session) _________p1 = (*(volatile typeof(ip_nat_decode_session) *)&(ip_nat_decode_session)); do { } while(0); (_________p1); }); /* expanded rcu_dereference(ip_nat_decode_session) */
if (decodefn)
decodefn(skb, fl);
rcu_read_unlock();
}
}
# 1 "include/linux/proc_fs.h" 1
# 1 "include/linux/fs.h" 1
# 9 "include/linux/fs.h"
# 1 "include/linux/limits.h" 1
# 10 "include/linux/fs.h" 2
# 1 "include/linux/blk_types.h" 1
# 12 "include/linux/blk_types.h"
struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
typedef void (bio_end_io_t) (struct bio *, int);
typedef void (bio_destructor_t) (struct bio *);
struct bio_vec {
struct page *bv_page;
unsigned int bv_len;
unsigned int bv_offset;
};
struct bio {
sector_t bi_sector;
struct bio *bi_next;
struct block_device *bi_bdev;
unsigned long bi_flags;
unsigned long bi_rw;
unsigned short bi_vcnt;
unsigned short bi_idx;
unsigned int bi_phys_segments;
unsigned int bi_size;
unsigned int bi_seg_front_size;
unsigned int bi_seg_back_size;
unsigned int bi_max_vecs;
unsigned int bi_comp_cpu;
atomic_t bi_cnt;
struct bio_vec *bi_io_vec;
bio_end_io_t *bi_end_io;
void *bi_private;
bio_destructor_t *bi_destructor;
struct bio_vec bi_inline_vecs[0];
};
# 117 "include/linux/blk_types.h"
enum rq_flag_bits {
__REQ_WRITE,
__REQ_FAILFAST_DEV,
__REQ_FAILFAST_TRANSPORT,
__REQ_FAILFAST_DRIVER,
__REQ_HARDBARRIER,
__REQ_SYNC,
__REQ_META,
__REQ_DISCARD,
__REQ_NOIDLE,
__REQ_UNPLUG,
__REQ_RAHEAD,
__REQ_SORTED,
__REQ_SOFTBARRIER,
__REQ_FUA,
__REQ_NOMERGE,
__REQ_STARTED,
__REQ_DONTPREP,
__REQ_QUEUED,
__REQ_ELVPRIV,
__REQ_FAILED,
__REQ_QUIET,
__REQ_PREEMPT,
__REQ_ORDERED_COLOR,
__REQ_ALLOCED,
__REQ_COPY_USER,
__REQ_INTEGRITY,
__REQ_FLUSH,
__REQ_IO_STAT,
__REQ_MIXED_MERGE,
__REQ_SECURE,
__REQ_NR_BITS,
};
# 12 "include/linux/fs.h" 2
# 36 "include/linux/fs.h"
struct files_stat_struct {
int nr_files;
int nr_free_files;
int max_files;
};
struct inodes_stat_t {
int nr_inodes;
int nr_unused;
int dummy[5];
};
# 371 "include/linux/fs.h"
# 1 "include/linux/kdev_t.h" 1
# 21 "include/linux/kdev_t.h"
static inline __attribute__((always_inline)) int old_valid_dev(dev_t dev)
{
return ((unsigned int) ((dev) >> 20)) < 256 && ((unsigned int) ((dev) & ((1U << 20) - 1))) < 256;
}
static inline __attribute__((always_inline)) u16 old_encode_dev(dev_t dev)
{
return (((unsigned int) ((dev) >> 20)) << 8) | ((unsigned int) ((dev) & ((1U << 20) - 1)));
}
static inline __attribute__((always_inline)) dev_t old_decode_dev(u16 val)
{
return ((((val >> 8) & 255) << 20) | (val & 255));
}
static inline __attribute__((always_inline)) int new_valid_dev(dev_t dev)
{
return 1;
}
static inline __attribute__((always_inline)) u32 new_encode_dev(dev_t dev)
{
unsigned major = ((unsigned int) ((dev) >> 20));
unsigned minor = ((unsigned int) ((dev) & ((1U << 20) - 1)));
return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
}
static inline __attribute__((always_inline)) dev_t new_decode_dev(u32 dev)
{
unsigned major = (dev & 0xfff00) >> 8;
unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
return (((major) << 20) | (minor));
}
static inline __attribute__((always_inline)) int huge_valid_dev(dev_t dev)
{
return 1;
}
static inline __attribute__((always_inline)) u64 huge_encode_dev(dev_t dev)
{
return new_encode_dev(dev);
}
static inline __attribute__((always_inline)) dev_t huge_decode_dev(u64 dev)
{
return new_decode_dev(dev);
}
static inline __attribute__((always_inline)) int sysv_valid_dev(dev_t dev)
{
return ((unsigned int) ((dev) >> 20)) < (1<<14) && ((unsigned int) ((dev) & ((1U << 20) - 1))) < (1<<18);
}
static inline __attribute__((always_inline)) u32 sysv_encode_dev(dev_t dev)
{
return ((unsigned int) ((dev) & ((1U << 20) - 1))) | (((unsigned int) ((dev) >> 20)) << 18);
}
static inline __attribute__((always_inline)) unsigned sysv_major(u32 dev)
{
return (dev >> 18) & 0x3fff;
}
static inline __attribute__((always_inline)) unsigned sysv_minor(u32 dev)
{
return dev & 0x3ffff;
}
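/*
 * Editor's note with an illustrative sketch (hypothetical helper): in the
 * functions above, MAJOR()/MINOR()/MKDEV() have been expanded into the
 * ">> 20" and "& ((1U << 20) - 1)" forms, i.e. a 12-bit-major/20-bit-minor
 * split of dev_t. The round trip below holds for any major below 4096 and
 * minor below 2^20.
 */
static inline __attribute__((always_inline)) int example_new_dev_roundtrip_ok(unsigned int major, unsigned int minor)
{
dev_t dev = (major << 20) | minor; /* MKDEV(major, minor) */
return new_decode_dev(new_encode_dev(dev)) == dev;
}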
# 372 "include/linux/fs.h" 2
# 1 "include/linux/dcache.h" 1
# 11 "include/linux/dcache.h"
struct nameidata;
struct path;
struct vfsmount;
# 33 "include/linux/dcache.h"
struct qstr {
unsigned int hash;
unsigned int len;
const unsigned char *name;
};
struct dentry_stat_t {
int nr_dentry;
int nr_unused;
int age_limit;
int want_pages;
int dummy[2];
};
extern struct dentry_stat_t dentry_stat;
static inline __attribute__((always_inline)) unsigned long
partial_name_hash(unsigned long c, unsigned long prevhash)
{
return (prevhash + (c << 4) + (c >> 4)) * 11;
}
static inline __attribute__((always_inline)) unsigned long end_name_hash(unsigned long hash)
{
return (unsigned int) hash;
}
static inline __attribute__((always_inline)) unsigned int
full_name_hash(const unsigned char *name, unsigned int len)
{
unsigned long hash = 0;
while (len--)
hash = partial_name_hash(*name++, hash);
return end_name_hash(hash);
}
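/*
 * Editor's illustrative sketch (hypothetical helper, not part of dcache.h):
 * how callers of d_lookup()/__d_lookup() typically build the struct qstr
 * key with the hash helpers above.
 */
static inline __attribute__((always_inline)) struct qstr example_make_qstr(const unsigned char *name, unsigned int len)
{
struct qstr q;
q.name = name;
q.len = len;
q.hash = full_name_hash(name, len);
return q;
}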
# 89 "include/linux/dcache.h"
struct dentry {
atomic_t d_count;
unsigned int d_flags;
spinlock_t d_lock;
int d_mounted;
struct inode *d_inode;
struct hlist_node d_hash;
struct dentry *d_parent;
struct qstr d_name;
struct list_head d_lru;
union {
struct list_head d_child;
struct rcu_head d_rcu;
} d_u;
struct list_head d_subdirs;
struct list_head d_alias;
unsigned long d_time;
const struct dentry_operations *d_op;
struct super_block *d_sb;
void *d_fsdata;
unsigned char d_iname[40];
};
enum dentry_d_lock_class
{
DENTRY_D_LOCK_NORMAL,
DENTRY_D_LOCK_NESTED
};
struct dentry_operations {
int (*d_revalidate)(struct dentry *, struct nameidata *);
int (*d_hash) (struct dentry *, struct qstr *);
int (*d_compare) (struct dentry *, struct qstr *, struct qstr *);
int (*d_delete)(struct dentry *);
void (*d_release)(struct dentry *);
void (*d_iput)(struct dentry *, struct inode *);
char *(*d_dname)(struct dentry *, char *, int);
};
# 191 "include/linux/dcache.h"
extern spinlock_t dcache_lock;
extern seqlock_t rename_lock;
# 210 "include/linux/dcache.h"
static inline __attribute__((always_inline)) void __d_drop(struct dentry *dentry)
{
if (!(dentry->d_flags & 0x0010)) {
dentry->d_flags |= 0x0010;
hlist_del_rcu(&dentry->d_hash);
}
}
static inline __attribute__((always_inline)) void d_drop(struct dentry *dentry)
{
spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_lock);
}
static inline __attribute__((always_inline)) int dname_external(struct dentry *dentry)
{
return dentry->d_name.name != dentry->d_iname;
}
extern void d_instantiate(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
extern struct dentry * d_materialise_unique(struct dentry *, struct inode *);
extern void d_delete(struct dentry *);
extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
extern struct dentry * d_obtain_alias(struct inode *);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
extern void shrink_dcache_for_umount(struct super_block *);
extern int d_invalidate(struct dentry *);
extern struct dentry * d_alloc_root(struct inode *);
extern void d_genocide(struct dentry *);
extern struct dentry *d_find_alias(struct inode *);
extern void d_prune_aliases(struct inode *);
extern int have_submounts(struct dentry *);
extern void d_rehash(struct dentry *);
# 276 "include/linux/dcache.h"
static inline __attribute__((always_inline)) void d_add(struct dentry *entry, struct inode *inode)
{
d_instantiate(entry, inode);
d_rehash(entry);
}
# 290 "include/linux/dcache.h"
static inline __attribute__((always_inline)) struct dentry *d_add_unique(struct dentry *entry, struct inode *inode)
{
struct dentry *res;
res = d_instantiate_unique(entry, inode);
d_rehash(res != ((void *)0) ? res : entry);
return res;
}
extern void d_move(struct dentry *, struct dentry *);
extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
extern struct dentry * d_lookup(struct dentry *, struct qstr *);
extern struct dentry * __d_lookup(struct dentry *, struct qstr *);
extern struct dentry * d_hash_and_lookup(struct dentry *, struct qstr *);
extern int d_validate(struct dentry *, struct dentry *);
extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
extern char *__d_path(const struct path *path, struct path *root, char *, int);
extern char *d_path(const struct path *, char *, int);
extern char *d_path_with_unreachable(const struct path *, char *, int);
extern char *__dentry_path(struct dentry *, char *, int);
extern char *dentry_path(struct dentry *, char *, int);
# 337 "include/linux/dcache.h"
static inline __attribute__((always_inline)) struct dentry *dget(struct dentry *dentry)
{
if (dentry) {
__BUG_ON((unsigned long)(!(*(volatile int *)&(&dentry->d_count)->counter))); /* expanded BUG_ON(!atomic_read(&dentry->d_count)) */
atomic_add(1, (&dentry->d_count)); /* atomic_inc() */
}
return dentry;
}
extern struct dentry * dget_locked(struct dentry *);
# 355 "include/linux/dcache.h"
static inline __attribute__((always_inline)) int d_unhashed(struct dentry *dentry)
{
return (dentry->d_flags & 0x0010); /* DCACHE_UNHASHED */
}
static inline __attribute__((always_inline)) int d_unlinked(struct dentry *dentry)
{
return d_unhashed(dentry) && !((dentry) == (dentry)->d_parent);
}
static inline __attribute__((always_inline)) int cant_mount(struct dentry *dentry)
{
return (dentry->d_flags & 0x0100); /* DCACHE_CANT_MOUNT */
}
static inline __attribute__((always_inline)) void dont_mount(struct dentry *dentry)
{
spin_lock(&dentry->d_lock);
dentry->d_flags |= 0x0100;
spin_unlock(&dentry->d_lock);
}
static inline __attribute__((always_inline)) struct dentry *dget_parent(struct dentry *dentry)
{
struct dentry *ret;
spin_lock(&dentry->d_lock);
ret = dget(dentry->d_parent);
spin_unlock(&dentry->d_lock);
return ret;
}
extern void dput(struct dentry *);
static inline __attribute__((always_inline)) int d_mountpoint(struct dentry *dentry)
{
return dentry->d_mounted;
}
extern struct vfsmount *lookup_mnt(struct path *);
extern struct dentry *lookup_create(struct nameidata *nd, int is_dir);
extern int sysctl_vfs_cache_pressure;
# 373 "include/linux/fs.h" 2
# 1 "include/linux/path.h" 1
struct dentry;
struct vfsmount;
struct path {
struct vfsmount *mnt;
struct dentry *dentry;
};
extern void path_get(struct path *);
extern void path_put(struct path *);
static inline __attribute__((always_inline)) int path_equal(const struct path *path1, const struct path *path2)
{
return path1->mnt == path2->mnt && path1->dentry == path2->dentry;
}
# 374 "include/linux/fs.h" 2
# 1 "include/linux/radix-tree.h" 1
# 41 "include/linux/radix-tree.h"
static inline __attribute__((always_inline)) void *radix_tree_ptr_to_indirect(void *ptr)
{
return (void *)((unsigned long)ptr | 1);
}
static inline __attribute__((always_inline)) void *radix_tree_indirect_to_ptr(void *ptr)
{
return (void *)((unsigned long)ptr & ~1);
}
static inline __attribute__((always_inline)) int radix_tree_is_indirect_ptr(void *ptr)
{
return (int)((unsigned long)ptr & 1);
}
struct radix_tree_root {
unsigned int height;
gfp_t gfp_mask;
struct radix_tree_node *rnode;
};
# 144 "include/linux/radix-tree.h"
static inline __attribute__((always_inline)) void *radix_tree_deref_slot(void **pslot)
{
void *ret = ({ typeof(*pslot) _________p1 = (*(volatile typeof(*pslot) *)&(*pslot)); do { } while(0); (_________p1); });
if (__builtin_expect(!!(radix_tree_is_indirect_ptr(ret)), 0))
ret = ((void *)-1UL);
return ret;
}
# 159 "include/linux/radix-tree.h"
static inline __attribute__((always_inline)) void radix_tree_replace_slot(void **pslot, void *item)
{
__BUG_ON((unsigned long)(radix_tree_is_indirect_ptr(item)));
({ if (!__builtin_constant_p(item) || ((item) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (*pslot) = (item); });
}
int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items);
unsigned int
radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
unsigned long first_index, unsigned int max_items);
unsigned long radix_tree_next_hole(struct radix_tree_root *root,
unsigned long index, unsigned long max_scan);
unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
unsigned long index, unsigned long max_scan);
int radix_tree_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *root,
unsigned long index, unsigned int tag);
int radix_tree_tag_get(struct radix_tree_root *root,
unsigned long index, unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
unsigned long first_index, unsigned int max_items,
unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
unsigned long first_index, unsigned int max_items,
unsigned int tag);
unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
unsigned long *first_indexp, unsigned long last_index,
unsigned long nr_to_tag,
unsigned int fromtag, unsigned int totag);
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
static inline __attribute__((always_inline)) void radix_tree_preload_end(void)
{
do { } while (0);
}
# 379 "include/linux/fs.h" 2
# 1 "include/linux/pid.h" 1
enum pid_type
{
PIDTYPE_PID,
PIDTYPE_PGID,
PIDTYPE_SID,
PIDTYPE_MAX
};
# 50 "include/linux/pid.h"
struct upid {
int nr;
struct pid_namespace *ns;
struct hlist_node pid_chain;
};
struct pid
{
atomic_t count;
unsigned int level;
struct hlist_head tasks[PIDTYPE_MAX];
struct rcu_head rcu;
struct upid numbers[1];
};
extern struct pid init_struct_pid;
struct pid_link
{
struct hlist_node node;
struct pid *pid;
};
static inline __attribute__((always_inline)) struct pid *get_pid(struct pid *pid)
{
if (pid)
atomic_add(1, (&pid->count));
return pid;
}
extern void put_pid(struct pid *pid);
extern struct task_struct *pid_task(struct pid *pid, enum pid_type);
extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type);
extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
extern void attach_pid(struct task_struct *task, enum pid_type type,
struct pid *pid);
extern void detach_pid(struct task_struct *task, enum pid_type);
extern void change_pid(struct task_struct *task, enum pid_type,
struct pid *pid);
extern void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type);
struct pid_namespace;
extern struct pid_namespace init_pid_ns;
# 112 "include/linux/pid.h"
extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns);
extern struct pid *find_vpid(int nr);
extern struct pid *find_get_pid(int nr);
extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
int next_pidmap(struct pid_namespace *pid_ns, int last);
extern struct pid *alloc_pid(struct pid_namespace *ns);
extern void free_pid(struct pid *pid);
# 135 "include/linux/pid.h"
static inline __attribute__((always_inline)) struct pid_namespace *ns_of_pid(struct pid *pid)
{
struct pid_namespace *ns = ((void *)0);
if (pid)
ns = pid->numbers[pid->level].ns;
return ns;
}
# 154 "include/linux/pid.h"
static inline __attribute__((always_inline)) pid_t pid_nr(struct pid *pid)
{
pid_t nr = 0;
if (pid)
nr = pid->numbers[0].nr;
return nr;
}
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns);
pid_t pid_vnr(struct pid *pid);
# 382 "include/linux/fs.h" 2
# 1 "include/linux/semaphore.h" 1
# 16 "include/linux/semaphore.h"
struct semaphore {
spinlock_t lock;
unsigned int count;
struct list_head wait_list;
};
# 38 "include/linux/semaphore.h"
static inline __attribute__((always_inline)) void sema_init(struct semaphore *sem, int val)
{
static struct lock_class_key __key;
*sem = (struct semaphore) { .lock = (spinlock_t ) { { .rlock = { .raw_lock = { .lock = 0 }, } } }, .count = val, .wait_list = { &((*sem).wait_list), &((*sem).wait_list) }, }; /* expanded __SEMAPHORE_INITIALIZER(): unlocked spinlock, count = val, empty wait list */
do { (void)("semaphore->lock"); (void)(&__key); } while (0); /* lockdep annotation compiled out */
}
extern void down(struct semaphore *sem);
extern int __attribute__((warn_unused_result)) down_interruptible(struct semaphore *sem);
extern int __attribute__((warn_unused_result)) down_killable(struct semaphore *sem);
extern int __attribute__((warn_unused_result)) down_trylock(struct semaphore *sem);
extern int __attribute__((warn_unused_result)) down_timeout(struct semaphore *sem, long jiffies);
extern void up(struct semaphore *sem);
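/*
 * Editor's illustrative sketch (hypothetical, not part of semaphore.h): the
 * usual pattern around the API above, assuming the caller initialised the
 * semaphore once with sema_init(sem, 1) so it behaves as a sleeping binary
 * semaphore.
 */
static inline void example_with_semaphore(struct semaphore *sem, void (*work)(void))
{
down(sem); /* may sleep until the count becomes positive */
work();
up(sem); /* release; wakes one waiter if any */
}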
# 385 "include/linux/fs.h" 2
# 1 "include/linux/fiemap.h" 1
# 16 "include/linux/fiemap.h"
struct fiemap_extent {
__u64 fe_logical;
__u64 fe_physical;
__u64 fe_length;
__u64 fe_reserved64[2];
__u32 fe_flags;
__u32 fe_reserved[3];
};
struct fiemap {
__u64 fm_start;
__u64 fm_length;
__u32 fm_flags;
__u32 fm_mapped_extents;
__u32 fm_extent_count;
__u32 fm_reserved;
struct fiemap_extent fm_extents[0];
};
# 386 "include/linux/fs.h" 2
struct export_operations;
struct hd_geometry;
struct iovec;
struct nameidata;
struct kiocb;
struct pipe_inode_info;
struct poll_table_struct;
struct kstatfs;
struct vm_area_struct;
struct vfsmount;
struct cred;
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) inode_init(void);
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) inode_init_early(void);
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) files_init(unsigned long);
extern struct files_stat_struct files_stat;
extern int get_max_files(void);
extern int sysctl_nr_open;
extern struct inodes_stat_t inodes_stat;
extern int leases_enable, lease_break_time;
struct buffer_head;
typedef int (get_block_t)(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
ssize_t bytes, void *private, int ret,
int is_async);
# 450 "include/linux/fs.h"
struct iattr {
unsigned int ia_valid;
umode_t ia_mode;
uid_t ia_uid;
gid_t ia_gid;
loff_t ia_size;
struct timespec ia_atime;
struct timespec ia_mtime;
struct timespec ia_ctime;
struct file *ia_file;
};
# 1 "include/linux/quota.h" 1
# 88 "include/linux/quota.h"
enum {
QIF_BLIMITS_B = 0,
QIF_SPACE_B,
QIF_ILIMITS_B,
QIF_INODES_B,
QIF_BTIME_B,
QIF_ITIME_B,
};
# 108 "include/linux/quota.h"
struct if_dqblk {
__u64 dqb_bhardlimit;
__u64 dqb_bsoftlimit;
__u64 dqb_curspace;
__u64 dqb_ihardlimit;
__u64 dqb_isoftlimit;
__u64 dqb_curinodes;
__u64 dqb_btime;
__u64 dqb_itime;
__u32 dqb_valid;
};
# 129 "include/linux/quota.h"
struct if_dqinfo {
__u64 dqi_bgrace;
__u64 dqi_igrace;
__u32 dqi_flags;
__u32 dqi_valid;
};
# 151 "include/linux/quota.h"
enum {
QUOTA_NL_C_UNSPEC,
QUOTA_NL_C_WARNING,
__QUOTA_NL_C_MAX,
};
enum {
QUOTA_NL_A_UNSPEC,
QUOTA_NL_A_QTYPE,
QUOTA_NL_A_EXCESS_ID,
QUOTA_NL_A_WARNING,
QUOTA_NL_A_DEV_MAJOR,
QUOTA_NL_A_DEV_MINOR,
QUOTA_NL_A_CAUSED_ID,
__QUOTA_NL_A_MAX,
};
# 177 "include/linux/quota.h"
# 1 "include/linux/percpu_counter.h" 1
# 18 "include/linux/percpu_counter.h"
struct percpu_counter {
spinlock_t lock;
s64 count;
s32 *counters;
};
extern int percpu_counter_batch;
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
struct lock_class_key *key);
# 39 "include/linux/percpu_counter.h"
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);
static inline __attribute__((always_inline)) void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
__percpu_counter_add(fbc, amount, percpu_counter_batch);
}
static inline __attribute__((always_inline)) s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
s64 ret = __percpu_counter_sum(fbc);
return ret < 0 ? 0 : ret;
}
static inline __attribute__((always_inline)) s64 percpu_counter_sum(struct percpu_counter *fbc)
{
return __percpu_counter_sum(fbc);
}
static inline __attribute__((always_inline)) s64 percpu_counter_read(struct percpu_counter *fbc)
{
return fbc->count;
}
static inline __attribute__((always_inline)) s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
s64 ret = fbc->count;
__asm__ __volatile__("": : :"memory");
if (ret >= 0)
return ret;
return 1;
}
# 148 "include/linux/percpu_counter.h"
static inline __attribute__((always_inline)) void percpu_counter_inc(struct percpu_counter *fbc)
{
percpu_counter_add(fbc, 1);
}
static inline __attribute__((always_inline)) void percpu_counter_dec(struct percpu_counter *fbc)
{
percpu_counter_add(fbc, -1);
}
static inline __attribute__((always_inline)) void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
percpu_counter_add(fbc, -amount);
}
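/*
 * Editor's illustrative sketch (hypothetical helpers): typical use of the
 * percpu_counter API above. The percpu_counter_init() wrapper is a macro in
 * the original header (it supplies a lock_class_key for __percpu_counter_init),
 * so it does not appear in this preprocessed listing and is only mentioned here.
 */
static inline void example_counter_event(struct percpu_counter *fbc, int up_not_down)
{
if (up_not_down)
percpu_counter_inc(fbc);
else
percpu_counter_dec(fbc);
}
static inline s64 example_counter_estimate(struct percpu_counter *fbc)
{
return percpu_counter_read_positive(fbc); /* cheap, approximate, never negative */
}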
# 178 "include/linux/quota.h" 2
# 1 "include/linux/dqblk_xfs.h" 1
# 50 "include/linux/dqblk_xfs.h"
typedef struct fs_disk_quota {
__s8 d_version;
__s8 d_flags;
__u16 d_fieldmask;
__u32 d_id;
__u64 d_blk_hardlimit;
__u64 d_blk_softlimit;
__u64 d_ino_hardlimit;
__u64 d_ino_softlimit;
__u64 d_bcount;
__u64 d_icount;
__s32 d_itimer;
__s32 d_btimer;
__u16 d_iwarns;
__u16 d_bwarns;
__s32 d_padding2;
__u64 d_rtb_hardlimit;
__u64 d_rtb_softlimit;
__u64 d_rtbcount;
__s32 d_rtbtimer;
__u16 d_rtbwarns;
__s16 d_padding3;
char d_padding4[8];
} fs_disk_quota_t;
# 146 "include/linux/dqblk_xfs.h"
typedef struct fs_qfilestat {
__u64 qfs_ino;
__u64 qfs_nblks;
__u32 qfs_nextents;
} fs_qfilestat_t;
typedef struct fs_quota_stat {
__s8 qs_version;
__u16 qs_flags;
__s8 qs_pad;
fs_qfilestat_t qs_uquota;
fs_qfilestat_t qs_gquota;
__u32 qs_incoredqs;
__s32 qs_btimelimit;
__s32 qs_itimelimit;
__s32 qs_rtbtimelimit;
__u16 qs_bwarnlimit;
__u16 qs_iwarnlimit;
} fs_quota_stat_t;
# 180 "include/linux/quota.h" 2
# 1 "include/linux/dqblk_v1.h" 1
# 181 "include/linux/quota.h" 2
# 1 "include/linux/dqblk_v2.h" 1
# 1 "include/linux/dqblk_qtree.h" 1
# 17 "include/linux/dqblk_qtree.h"
struct dquot;
struct qtree_fmt_operations {
void (*mem2disk_dqblk)(void *disk, struct dquot *dquot);
void (*disk2mem_dqblk)(struct dquot *dquot, void *disk);
int (*is_id)(void *disk, struct dquot *dquot);
};
struct qtree_mem_dqinfo {
struct super_block *dqi_sb;
int dqi_type;
unsigned int dqi_blocks;
unsigned int dqi_free_blk;
unsigned int dqi_free_entry;
unsigned int dqi_blocksize_bits;
unsigned int dqi_entry_size;
unsigned int dqi_usable_bs;
unsigned int dqi_qtree_depth;
struct qtree_fmt_operations *dqi_ops;
};
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk);
static inline __attribute__((always_inline)) int qtree_depth(struct qtree_mem_dqinfo *info)
{
unsigned int epb = info->dqi_usable_bs >> 2;
unsigned long long entries = epb;
int i;
for (i = 1; entries < (1ULL << 32); i++)
entries *= epb;
return i;
}
# 9 "include/linux/dqblk_v2.h" 2
# 182 "include/linux/quota.h" 2
typedef __kernel_uid32_t qid_t;
typedef long long qsize_t;
extern spinlock_t dq_data_lock;
# 200 "include/linux/quota.h"
struct mem_dqblk {
qsize_t dqb_bhardlimit;
qsize_t dqb_bsoftlimit;
qsize_t dqb_curspace;
qsize_t dqb_rsvspace;
qsize_t dqb_ihardlimit;
qsize_t dqb_isoftlimit;
qsize_t dqb_curinodes;
time_t dqb_btime;
time_t dqb_itime;
};
struct quota_format_type;
struct mem_dqinfo {
struct quota_format_type *dqi_format;
int dqi_fmt_id;
struct list_head dqi_dirty_list;
unsigned long dqi_flags;
unsigned int dqi_bgrace;
unsigned int dqi_igrace;
qsize_t dqi_maxblimit;
qsize_t dqi_maxilimit;
void *dqi_priv;
};
struct super_block;
extern void mark_info_dirty(struct super_block *sb, int type);
static inline __attribute__((always_inline)) int info_dirty(struct mem_dqinfo *info)
{
return test_bit(16, &info->dqi_flags);
}
enum {
DQST_LOOKUPS,
DQST_DROPS,
DQST_READS,
DQST_WRITES,
DQST_CACHE_HITS,
DQST_ALLOC_DQUOTS,
DQST_FREE_DQUOTS,
DQST_SYNCS,
_DQST_DQSTAT_LAST
};
struct dqstats {
int stat[_DQST_DQSTAT_LAST];
struct percpu_counter counter[_DQST_DQSTAT_LAST];
};
extern struct dqstats *dqstats_pcpu;
extern struct dqstats dqstats;
static inline __attribute__((always_inline)) void dqstats_inc(unsigned int type)
{
percpu_counter_inc(&dqstats.counter[type]);
}
static inline __attribute__((always_inline)) void dqstats_dec(unsigned int type)
{
percpu_counter_dec(&dqstats.counter[type]);
}
# 284 "include/linux/quota.h"
struct dquot {
struct hlist_node dq_hash;
struct list_head dq_inuse;
struct list_head dq_free;
struct list_head dq_dirty;
struct mutex dq_lock;
atomic_t dq_count;
wait_queue_head_t dq_wait_unused;
struct super_block *dq_sb;
unsigned int dq_id;
loff_t dq_off;
unsigned long dq_flags;
short dq_type;
struct mem_dqblk dq_dqb;
};
struct quota_format_ops {
int (*check_quota_file)(struct super_block *sb, int type);
int (*read_file_info)(struct super_block *sb, int type);
int (*write_file_info)(struct super_block *sb, int type);
int (*free_file_info)(struct super_block *sb, int type);
int (*read_dqblk)(struct dquot *dquot);
int (*commit_dqblk)(struct dquot *dquot);
int (*release_dqblk)(struct dquot *dquot);
};
struct dquot_operations {
int (*write_dquot) (struct dquot *);
struct dquot *(*alloc_dquot)(struct super_block *, int);
void (*destroy_dquot)(struct dquot *);
int (*acquire_dquot) (struct dquot *);
int (*release_dquot) (struct dquot *);
int (*mark_dirty) (struct dquot *);
int (*write_info) (struct super_block *, int);
qsize_t *(*get_reserved_space) (struct inode *);
};
struct quotactl_ops {
int (*quota_on)(struct super_block *, int, int, char *);
int (*quota_off)(struct super_block *, int);
int (*quota_sync)(struct super_block *, int, int);
int (*get_info)(struct super_block *, int, struct if_dqinfo *);
int (*set_info)(struct super_block *, int, struct if_dqinfo *);
int (*get_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
int (*set_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
int (*set_xstate)(struct super_block *, unsigned int, int);
};
struct quota_format_type {
int qf_fmt_id;
const struct quota_format_ops *qf_ops;
struct module *qf_owner;
struct quota_format_type *qf_next;
};
enum {
_DQUOT_USAGE_ENABLED = 0,
_DQUOT_LIMITS_ENABLED,
_DQUOT_SUSPENDED,
_DQUOT_STATE_FLAGS
};
# 371 "include/linux/quota.h"
static inline __attribute__((always_inline)) unsigned int dquot_state_flag(unsigned int flags, int type)
{
return flags << _DQUOT_STATE_FLAGS * type;
}
static inline __attribute__((always_inline)) unsigned int dquot_generic_flag(unsigned int flags, int type)
{
return (flags >> _DQUOT_STATE_FLAGS * type) & ((1 << _DQUOT_USAGE_ENABLED) | (1 << _DQUOT_LIMITS_ENABLED) | (1 << _DQUOT_SUSPENDED));
}
static inline __attribute__((always_inline)) void quota_send_warning(short type, unsigned int id, dev_t dev,
const char warntype)
{
return;
}
struct quota_info {
unsigned int flags;
struct mutex dqio_mutex;
struct mutex dqonoff_mutex;
struct rw_semaphore dqptr_sem;
struct inode *files[2];
struct mem_dqinfo info[2];
const struct quota_format_ops *ops[2];
};
int register_quota_format(struct quota_format_type *fmt);
void unregister_quota_format(struct quota_format_type *fmt);
struct quota_module_name {
int qm_fmt_id;
char *qm_mod_name;
};
# 472 "include/linux/fs.h" 2
# 499 "include/linux/fs.h"
enum positive_aop_returns {
AOP_WRITEPAGE_ACTIVATE = 0x80000,
AOP_TRUNCATED_PAGE = 0x80001,
};
# 513 "include/linux/fs.h"
struct page;
struct address_space;
struct writeback_control;
struct iov_iter {
const struct iovec *iov;
unsigned long nr_segs;
size_t iov_offset;
size_t count;
};
size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes);
size_t iov_iter_copy_from_user(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(struct iov_iter *i);
static inline __attribute__((always_inline)) void iov_iter_init(struct iov_iter *i,
const struct iovec *iov, unsigned long nr_segs,
size_t count, size_t written)
{
i->iov = iov;
i->nr_segs = nr_segs;
i->iov_offset = 0;
i->count = count + written;
iov_iter_advance(i, written);
}
static inline __attribute__((always_inline)) size_t iov_iter_count(struct iov_iter *i)
{
return i->count;
}
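/*
 * Editor's illustrative sketch (hypothetical helper): initialising an
 * iov_iter over a single caller-supplied iovec, the common case in the
 * generic write path; `written' is 0 because nothing has been copied yet.
 */
static inline __attribute__((always_inline)) void example_single_seg_iter(struct iov_iter *i,
const struct iovec *iov, size_t count)
{
iov_iter_init(i, iov, 1, count, 0);
}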
# 558 "include/linux/fs.h"
typedef struct {
size_t written;
size_t count;
union {
char *buf;
void *data;
} arg;
int error;
} read_descriptor_t;
typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
unsigned long, unsigned long);
struct address_space_operations {
int (*writepage)(struct page *page, struct writeback_control *wbc);
int (*readpage)(struct file *, struct page *);
void (*sync_page)(struct page *);
int (*writepages)(struct address_space *, struct writeback_control *);
int (*set_page_dirty)(struct page *page);
int (*readpages)(struct file *filp, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages);
int (*write_begin)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata);
int (*write_end)(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
sector_t (*bmap)(struct address_space *, sector_t);
void (*invalidatepage) (struct page *, unsigned long);
int (*releasepage) (struct page *, gfp_t);
ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
loff_t offset, unsigned long nr_segs);
int (*get_xip_mem)(struct address_space *, unsigned long, int,
void **, unsigned long *);
int (*migratepage) (struct address_space *,
struct page *, struct page *);
int (*launder_page) (struct page *);
int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
unsigned long);
int (*error_remove_page)(struct address_space *, struct page *);
};
int pagecache_write_begin(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata);
int pagecache_write_end(struct file *, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
struct backing_dev_info;
struct address_space {
struct inode *host;
struct radix_tree_root page_tree;
spinlock_t tree_lock;
unsigned int i_mmap_writable;
struct prio_tree_root i_mmap;
struct list_head i_mmap_nonlinear;
spinlock_t i_mmap_lock;
unsigned int truncate_count;
unsigned long nrpages;
unsigned long writeback_index;
const struct address_space_operations *a_ops;
unsigned long flags;
struct backing_dev_info *backing_dev_info;
spinlock_t private_lock;
struct list_head private_list;
struct address_space *assoc_mapping;
} __attribute__((aligned(sizeof(long))));
struct block_device {
dev_t bd_dev;
struct inode * bd_inode;
struct super_block * bd_super;
int bd_openers;
struct mutex bd_mutex;
struct list_head bd_inodes;
void * bd_claiming;
void * bd_holder;
int bd_holders;
struct block_device * bd_contains;
unsigned bd_block_size;
struct hd_struct * bd_part;
unsigned bd_part_count;
int bd_invalidated;
struct gendisk * bd_disk;
struct list_head bd_list;
unsigned long bd_private;
int bd_fsfreeze_count;
struct mutex bd_fsfreeze_mutex;
};
# 689 "include/linux/fs.h"
int mapping_tagged(struct address_space *mapping, int tag);
static inline __attribute__((always_inline)) int mapping_mapped(struct address_space *mapping)
{
return !prio_tree_empty(&mapping->i_mmap) ||
!list_empty(&mapping->i_mmap_nonlinear);
}
static inline __attribute__((always_inline)) int mapping_writably_mapped(struct address_space *mapping)
{
return mapping->i_mmap_writable != 0;
}
# 722 "include/linux/fs.h"
struct posix_acl;
struct inode {
struct hlist_node i_hash;
struct list_head i_list;
struct list_head i_sb_list;
struct list_head i_dentry;
unsigned long i_ino;
atomic_t i_count;
unsigned int i_nlink;
uid_t i_uid;
gid_t i_gid;
dev_t i_rdev;
unsigned int i_blkbits;
u64 i_version;
loff_t i_size;
seqcount_t i_size_seqcount;
struct timespec i_atime;
struct timespec i_mtime;
struct timespec i_ctime;
blkcnt_t i_blocks;
unsigned short i_bytes;
umode_t i_mode;
spinlock_t i_lock;
struct mutex i_mutex;
struct rw_semaphore i_alloc_sem;
const struct inode_operations *i_op;
const struct file_operations *i_fop;
struct super_block *i_sb;
struct file_lock *i_flock;
struct address_space *i_mapping;
struct address_space i_data;
struct list_head i_devices;
union {
struct pipe_inode_info *i_pipe;
struct block_device *i_bdev;
struct cdev *i_cdev;
};
__u32 i_generation;
__u32 i_fsnotify_mask;
struct hlist_head i_fsnotify_marks;
unsigned long i_state;
unsigned long dirtied_when;
unsigned int i_flags;
atomic_t i_writecount;
struct posix_acl *i_acl;
struct posix_acl *i_default_acl;
void *i_private;
};
# 801 "include/linux/fs.h"
enum inode_i_mutex_lock_class
{
I_MUTEX_NORMAL,
I_MUTEX_PARENT,
I_MUTEX_CHILD,
I_MUTEX_XATTR,
I_MUTEX_QUOTA
};
# 820 "include/linux/fs.h"
static inline __attribute__((always_inline)) loff_t i_size_read(const struct inode *inode)
{
loff_t i_size;
unsigned int seq;
do {
seq = read_seqcount_begin(&inode->i_size_seqcount);
i_size = inode->i_size;
} while (read_seqcount_retry(&inode->i_size_seqcount, seq));
return i_size;
# 841 "include/linux/fs.h"
}
static inline __attribute__((always_inline)) void i_size_write(struct inode *inode, loff_t i_size)
{
write_seqcount_begin(&inode->i_size_seqcount);
inode->i_size = i_size;
write_seqcount_end(&inode->i_size_seqcount);
}
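/*
 * Editor's note: i_size_read()/i_size_write() above are the 32-bit variant
 * selected by this configuration: loff_t is 64 bits wide and cannot be
 * loaded atomically on a 32-bit MIPS core, so a seqcount retry loop guards
 * readers against observing a torn i_size while a writer updates it.
 */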
static inline __attribute__((always_inline)) unsigned iminor(const struct inode *inode)
{
return ((unsigned int) ((inode->i_rdev) & ((1U << 20) - 1))); /* MINOR(i_rdev): low 20 bits */
}
static inline __attribute__((always_inline)) unsigned imajor(const struct inode *inode)
{
return ((unsigned int) ((inode->i_rdev) >> 20)); /* MAJOR(i_rdev): high 12 bits */
}
extern struct block_device *I_BDEV(struct inode *inode);
struct fown_struct {
rwlock_t lock;
struct pid *pid;
enum pid_type pid_type;
uid_t uid, euid;
int signum;
};
struct file_ra_state {
unsigned long start;
unsigned int size;
unsigned int async_size;
unsigned int ra_pages;
unsigned int mmap_miss;
loff_t prev_pos;
};
static inline __attribute__((always_inline)) int ra_has_index(struct file_ra_state *ra, unsigned long index)
{
return (index >= ra->start &&
index < ra->start + ra->size);
}
struct file {
union {
struct list_head fu_list;
struct rcu_head fu_rcuhead;
} f_u;
struct path f_path;
const struct file_operations *f_op;
spinlock_t f_lock;
int f_sb_list_cpu;
atomic_long_t f_count;
unsigned int f_flags;
fmode_t f_mode;
loff_t f_pos;
struct fown_struct f_owner;
const struct cred *f_cred;
struct file_ra_state f_ra;
u64 f_version;
void *private_data;
struct list_head f_ep_links;
struct address_space *f_mapping;
};
# 988 "include/linux/fs.h"
static inline __attribute__((always_inline)) void file_take_write(struct file *filp) {}
static inline __attribute__((always_inline)) void file_release_write(struct file *filp) {}
static inline __attribute__((always_inline)) void file_reset_write(struct file *filp) {}
static inline __attribute__((always_inline)) void file_check_state(struct file *filp) {}
static inline __attribute__((always_inline)) int file_check_writeable(struct file *filp)
{
return 0;
}
# 1029 "include/linux/fs.h"
typedef struct files_struct *fl_owner_t;
struct file_lock_operations {
void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
void (*fl_release_private)(struct file_lock *);
};
struct lock_manager_operations {
int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
void (*fl_notify)(struct file_lock *);
int (*fl_grant)(struct file_lock *, struct file_lock *, int);
void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
void (*fl_release_private)(struct file_lock *);
void (*fl_break)(struct file_lock *);
int (*fl_mylease)(struct file_lock *, struct file_lock *);
int (*fl_change)(struct file_lock **, int);
};
struct lock_manager {
struct list_head list;
};
void locks_start_grace(struct lock_manager *);
void locks_end_grace(struct lock_manager *);
int locks_in_grace(void);
# 1 "include/linux/nfs_fs_i.h" 1
# 1 "include/linux/nfs.h" 1
# 40 "include/linux/nfs.h"
enum nfs_stat {
NFS_OK = 0,
NFSERR_PERM = 1,
NFSERR_NOENT = 2,
NFSERR_IO = 5,
NFSERR_NXIO = 6,
NFSERR_EAGAIN = 11,
NFSERR_ACCES = 13,
NFSERR_EXIST = 17,
NFSERR_XDEV = 18,
NFSERR_NODEV = 19,
NFSERR_NOTDIR = 20,
NFSERR_ISDIR = 21,
NFSERR_INVAL = 22,
NFSERR_FBIG = 27,
NFSERR_NOSPC = 28,
NFSERR_ROFS = 30,
NFSERR_MLINK = 31,
NFSERR_OPNOTSUPP = 45,
NFSERR_NAMETOOLONG = 63,
NFSERR_NOTEMPTY = 66,
NFSERR_DQUOT = 69,
NFSERR_STALE = 70,
NFSERR_REMOTE = 71,
NFSERR_WFLUSH = 99,
NFSERR_BADHANDLE = 10001,
NFSERR_NOT_SYNC = 10002,
NFSERR_BAD_COOKIE = 10003,
NFSERR_NOTSUPP = 10004,
NFSERR_TOOSMALL = 10005,
NFSERR_SERVERFAULT = 10006,
NFSERR_BADTYPE = 10007,
NFSERR_JUKEBOX = 10008,
NFSERR_SAME = 10009,
NFSERR_DENIED = 10010,
NFSERR_EXPIRED = 10011,
NFSERR_LOCKED = 10012,
NFSERR_GRACE = 10013,
NFSERR_FHEXPIRED = 10014,
NFSERR_SHARE_DENIED = 10015,
NFSERR_WRONGSEC = 10016,
NFSERR_CLID_INUSE = 10017,
NFSERR_RESOURCE = 10018,
NFSERR_MOVED = 10019,
NFSERR_NOFILEHANDLE = 10020,
NFSERR_MINOR_VERS_MISMATCH = 10021,
NFSERR_STALE_CLIENTID = 10022,
NFSERR_STALE_STATEID = 10023,
NFSERR_OLD_STATEID = 10024,
NFSERR_BAD_STATEID = 10025,
NFSERR_BAD_SEQID = 10026,
NFSERR_NOT_SAME = 10027,
NFSERR_LOCK_RANGE = 10028,
NFSERR_SYMLINK = 10029,
NFSERR_RESTOREFH = 10030,
NFSERR_LEASE_MOVED = 10031,
NFSERR_ATTRNOTSUPP = 10032,
NFSERR_NO_GRACE = 10033,
NFSERR_RECLAIM_BAD = 10034,
NFSERR_RECLAIM_CONFLICT = 10035,
NFSERR_BAD_XDR = 10036,
NFSERR_LOCKS_HELD = 10037,
NFSERR_OPENMODE = 10038,
NFSERR_BADOWNER = 10039,
NFSERR_BADCHAR = 10040,
NFSERR_BADNAME = 10041,
NFSERR_BAD_RANGE = 10042,
NFSERR_LOCK_NOTSUPP = 10043,
NFSERR_OP_ILLEGAL = 10044,
NFSERR_DEADLOCK = 10045,
NFSERR_FILE_OPEN = 10046,
NFSERR_ADMIN_REVOKED = 10047,
NFSERR_CB_PATH_DOWN = 10048,
};
enum nfs_ftype {
NFNON = 0,
NFREG = 1,
NFDIR = 2,
NFBLK = 3,
NFCHR = 4,
NFLNK = 5,
NFSOCK = 6,
NFBAD = 7,
NFFIFO = 8
};
# 1 "include/linux/sunrpc/msg_prot.h" 1
# 18 "include/linux/sunrpc/msg_prot.h"
typedef u32 rpc_authflavor_t;
enum rpc_auth_flavors {
RPC_AUTH_NULL = 0,
RPC_AUTH_UNIX = 1,
RPC_AUTH_SHORT = 2,
RPC_AUTH_DES = 3,
RPC_AUTH_KRB = 4,
RPC_AUTH_GSS = 6,
RPC_AUTH_MAXFLAVOR = 8,
RPC_AUTH_GSS_KRB5 = 390003,
RPC_AUTH_GSS_KRB5I = 390004,
RPC_AUTH_GSS_KRB5P = 390005,
RPC_AUTH_GSS_LKEY = 390006,
RPC_AUTH_GSS_LKEYI = 390007,
RPC_AUTH_GSS_LKEYP = 390008,
RPC_AUTH_GSS_SPKM = 390009,
RPC_AUTH_GSS_SPKMI = 390010,
RPC_AUTH_GSS_SPKMP = 390011,
};
enum rpc_msg_type {
RPC_CALL = 0,
RPC_REPLY = 1
};
enum rpc_reply_stat {
RPC_MSG_ACCEPTED = 0,
RPC_MSG_DENIED = 1
};
enum rpc_accept_stat {
RPC_SUCCESS = 0,
RPC_PROG_UNAVAIL = 1,
RPC_PROG_MISMATCH = 2,
RPC_PROC_UNAVAIL = 3,
RPC_GARBAGE_ARGS = 4,
RPC_SYSTEM_ERR = 5,
RPC_DROP_REPLY = 60000,
};
enum rpc_reject_stat {
RPC_MISMATCH = 0,
RPC_AUTH_ERROR = 1
};
enum rpc_auth_stat {
RPC_AUTH_OK = 0,
RPC_AUTH_BADCRED = 1,
RPC_AUTH_REJECTEDCRED = 2,
RPC_AUTH_BADVERF = 3,
RPC_AUTH_REJECTEDVERF = 4,
RPC_AUTH_TOOWEAK = 5,
RPCSEC_GSS_CREDPROBLEM = 13,
RPCSEC_GSS_CTXPROBLEM = 14
};
# 102 "include/linux/sunrpc/msg_prot.h"
typedef __be32 rpc_fraghdr;
# 193 "include/linux/sunrpc/msg_prot.h"
# 1 "include/linux/inet.h" 1
# 54 "include/linux/inet.h"
extern __be32 in_aton(const char *str);
extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
# 194 "include/linux/sunrpc/msg_prot.h" 2
# 131 "include/linux/nfs.h" 2
struct nfs_fh {
unsigned short size;
unsigned char data[128];
};
static inline __attribute__((always_inline)) int nfs_compare_fh(const struct nfs_fh *a, const struct nfs_fh *b)
{
return a->size != b->size || memcmp(a->data, b->data, a->size) != 0;
}
static inline __attribute__((always_inline)) void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *source)
{
target->size = source->size;
memcpy(target->data, source->data, source->size);
}
# 165 "include/linux/nfs.h"
enum nfs3_stable_how {
NFS_UNSTABLE = 0,
NFS_DATA_SYNC = 1,
NFS_FILE_SYNC = 2
};
# 7 "include/linux/nfs_fs_i.h" 2
struct nlm_lockowner;
struct nfs_lock_info {
u32 state;
struct nlm_lockowner *owner;
struct list_head list;
};
struct nfs4_lock_state;
struct nfs4_lock_info {
struct nfs4_lock_state *owner;
};
# 1057 "include/linux/fs.h" 2
struct file_lock {
struct file_lock *fl_next;
struct list_head fl_link;
struct list_head fl_block;
fl_owner_t fl_owner;
unsigned char fl_flags;
unsigned char fl_type;
unsigned int fl_pid;
struct pid *fl_nspid;
wait_queue_head_t fl_wait;
struct file *fl_file;
loff_t fl_start;
loff_t fl_end;
struct fasync_struct * fl_fasync;
unsigned long fl_break_time;
const struct file_lock_operations *fl_ops;
const struct lock_manager_operations *fl_lmops;
union {
struct nfs_lock_info nfs_fl;
struct nfs4_lock_info nfs4_fl;
struct {
struct list_head link;
int state;
} afs;
} fl_u;
};
# 1100 "include/linux/fs.h"
extern void send_sigio(struct fown_struct *fown, int fd, int band);
extern int fcntl_getlk(struct file *, struct flock *);
extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
struct flock *);
extern int fcntl_getlk64(struct file *, struct flock64 *);
extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
struct flock64 *);
extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
extern int fcntl_getlease(struct file *filp);
extern void locks_init_lock(struct file_lock *);
extern void locks_copy_lock(struct file_lock *, struct file_lock *);
extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
extern void locks_remove_posix(struct file *, fl_owner_t);
extern void locks_remove_flock(struct file *);
extern void locks_release_private(struct file_lock *);
extern void posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
extern int posix_lock_file_wait(struct file *, struct file_lock *);
extern int posix_unblock_lock(struct file *, struct file_lock *);
extern int vfs_test_lock(struct file *, struct file_lock *);
extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags);
extern void lease_get_mtime(struct inode *, struct timespec *time);
extern int generic_setlease(struct file *, long, struct file_lock **);
extern int vfs_setlease(struct file *, long, struct file_lock **);
extern int lease_modify(struct file_lock **, int);
extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
# 1283 "include/linux/fs.h"
struct fasync_struct {
spinlock_t fa_lock;
int magic;
int fa_fd;
struct fasync_struct *fa_next;
struct file *fa_file;
struct rcu_head fa_rcu;
};
extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
extern void kill_fasync(struct fasync_struct **, int, int);
extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
extern int f_setown(struct file *filp, unsigned long arg, int force);
extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
extern int send_sigurg(struct fown_struct *fown);
# 1315 "include/linux/fs.h"
extern struct list_head super_blocks;
extern spinlock_t sb_lock;
struct super_block {
struct list_head s_list;
dev_t s_dev;
unsigned char s_dirt;
unsigned char s_blocksize_bits;
unsigned long s_blocksize;
loff_t s_maxbytes;
struct file_system_type *s_type;
const struct super_operations *s_op;
const struct dquot_operations *dq_op;
const struct quotactl_ops *s_qcop;
const struct export_operations *s_export_op;
unsigned long s_flags;
unsigned long s_magic;
struct dentry *s_root;
struct rw_semaphore s_umount;
struct mutex s_lock;
int s_count;
atomic_t s_active;
const struct xattr_handler **s_xattr;
struct list_head s_inodes;
struct hlist_head s_anon;
struct list_head *s_files;
struct list_head s_dentry_lru;
int s_nr_dentry_unused;
struct block_device *s_bdev;
struct backing_dev_info *s_bdi;
struct mtd_info *s_mtd;
struct list_head s_instances;
struct quota_info s_dquot;
int s_frozen;
wait_queue_head_t s_wait_unfrozen;
char s_id[32];
void *s_fs_info;
fmode_t s_mode;
u32 s_time_gran;
struct mutex s_vfs_rename_mutex;
char *s_subtype;
char *s_options;
};
extern struct timespec current_fs_time(struct super_block *sb);
enum {
SB_UNFROZEN = 0,
SB_FREEZE_WRITE = 1,
SB_FREEZE_TRANS = 2,
};
# 1412 "include/linux/fs.h"
extern void lock_super(struct super_block *);
extern void unlock_super(struct super_block *);
extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *);
extern int vfs_mkdir(struct inode *, struct dentry *, int);
extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t);
extern int vfs_symlink(struct inode *, struct dentry *, const char *);
extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
extern int vfs_rmdir(struct inode *, struct dentry *);
extern int vfs_unlink(struct inode *, struct dentry *);
extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
extern void dentry_unhash(struct dentry *dentry);
extern int file_permission(struct file *, int);
extern void inode_init_owner(struct inode *inode, const struct inode *dir,
mode_t mode);
struct fiemap_extent_info {
unsigned int fi_flags;
unsigned int fi_extents_mapped;
unsigned int fi_extents_max;
struct fiemap_extent *fi_extents_start;
};
int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
u64 phys, u64 len, u32 flags);
int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
# 1474 "include/linux/fs.h"
typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned);
struct block_device_operations;
# 1488 "include/linux/fs.h"
struct file_operations {
struct module *owner;
loff_t (*llseek) (struct file *, loff_t, int);
ssize_t (*read) (struct file *, char *, size_t, loff_t *);
ssize_t (*write) (struct file *, const char *, size_t, loff_t *);
ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
int (*readdir) (struct file *, void *, filldir_t);
unsigned int (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
int (*fsync) (struct file *, int datasync);
int (*aio_fsync) (struct kiocb *, int datasync);
int (*fasync) (int, struct file *, int);
int (*lock) (struct file *, int, struct file_lock *);
ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
int (*check_flags)(int);
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
int (*setlease)(struct file *, long, struct file_lock **);
};
struct inode_operations {
int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
int (*symlink) (struct inode *,struct dentry *,const char *);
int (*mkdir) (struct inode *,struct dentry *,int);
int (*rmdir) (struct inode *,struct dentry *);
int (*mknod) (struct inode *,struct dentry *,int,dev_t);
int (*rename) (struct inode *, struct dentry *,
struct inode *, struct dentry *);
int (*readlink) (struct dentry *, char *,int);
void * (*follow_link) (struct dentry *, struct nameidata *);
void (*put_link) (struct dentry *, struct nameidata *, void *);
void (*truncate) (struct inode *);
int (*permission) (struct inode *, int);
int (*check_acl)(struct inode *, int);
int (*setattr) (struct dentry *, struct iattr *);
int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
void (*truncate_range)(struct inode *, loff_t, loff_t);
long (*fallocate)(struct inode *inode, int mode, loff_t offset,
loff_t len);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
};
struct seq_file;
ssize_t rw_copy_check_uvector(int type, const struct iovec * uvector,
unsigned long nr_segs, unsigned long fast_segs,
struct iovec *fast_pointer,
struct iovec **ret_pointer);
extern ssize_t vfs_read(struct file *, char *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec *,
unsigned long, loff_t *);
extern ssize_t vfs_writev(struct file *, const struct iovec *,
unsigned long, loff_t *);
struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
void (*dirty_inode) (struct inode *);
int (*write_inode) (struct inode *, struct writeback_control *wbc);
int (*drop_inode) (struct inode *);
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
void (*write_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
int (*freeze_fs) (struct super_block *);
int (*unfreeze_fs) (struct super_block *);
int (*statfs) (struct dentry *, struct kstatfs *);
int (*remount_fs) (struct super_block *, int *, char *);
void (*umount_begin) (struct super_block *);
int (*show_options)(struct seq_file *, struct vfsmount *);
int (*show_stats)(struct seq_file *, struct vfsmount *);
int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
};
# 1647 "include/linux/fs.h"
extern void __mark_inode_dirty(struct inode *, int);
static inline __attribute__((always_inline)) void mark_inode_dirty(struct inode *inode)
{
__mark_inode_dirty(inode, (1 | 2 | 4));
}
static inline __attribute__((always_inline)) void mark_inode_dirty_sync(struct inode *inode)
{
__mark_inode_dirty(inode, 1);
}
# 1666 "include/linux/fs.h"
static inline __attribute__((always_inline)) void inc_nlink(struct inode *inode)
{
inode->i_nlink++;
}
static inline __attribute__((always_inline)) void inode_inc_link_count(struct inode *inode)
{
inc_nlink(inode);
mark_inode_dirty(inode);
}
# 1688 "include/linux/fs.h"
static inline __attribute__((always_inline)) void drop_nlink(struct inode *inode)
{
inode->i_nlink--;
}
# 1701 "include/linux/fs.h"
static inline __attribute__((always_inline)) void clear_nlink(struct inode *inode)
{
inode->i_nlink = 0;
}
static inline __attribute__((always_inline)) void inode_dec_link_count(struct inode *inode)
{
drop_nlink(inode);
mark_inode_dirty(inode);
}
# 1720 "include/linux/fs.h"
static inline __attribute__((always_inline)) void inode_inc_iversion(struct inode *inode)
{
spin_lock(&inode->i_lock);
inode->i_version++;
spin_unlock(&inode->i_lock);
}
extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry);
static inline __attribute__((always_inline)) void file_accessed(struct file *file)
{
if (!(file->f_flags & 01000000))
touch_atime(file->f_path.mnt, file->f_path.dentry);
}
int sync_inode(struct inode *inode, struct writeback_control *wbc);
struct file_system_type {
const char *name;
int fs_flags;
int (*get_sb) (struct file_system_type *, int,
const char *, void *, struct vfsmount *);
void (*kill_sb) (struct super_block *);
struct module *owner;
struct file_system_type * next;
struct list_head fs_supers;
struct lock_class_key s_lock_key;
struct lock_class_key s_umount_key;
struct lock_class_key s_vfs_rename_key;
struct lock_class_key i_lock_key;
struct lock_class_key i_mutex_key;
struct lock_class_key i_mutex_dir_key;
struct lock_class_key i_alloc_sem_key;
};
extern int get_sb_ns(struct file_system_type *fs_type, int flags, void *data,
int (*fill_super)(struct super_block *, void *, int),
struct vfsmount *mnt);
extern int get_sb_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int),
struct vfsmount *mnt);
extern int get_sb_single(struct file_system_type *fs_type,
int flags, void *data,
int (*fill_super)(struct super_block *, void *, int),
struct vfsmount *mnt);
extern int get_sb_nodev(struct file_system_type *fs_type,
int flags, void *data,
int (*fill_super)(struct super_block *, void *, int),
struct vfsmount *mnt);
void generic_shutdown_super(struct super_block *sb);
void kill_block_super(struct super_block *sb);
void kill_anon_super(struct super_block *sb);
void kill_litter_super(struct super_block *sb);
void deactivate_super(struct super_block *sb);
void deactivate_locked_super(struct super_block *sb);
int set_anon_super(struct super_block *s, void *data);
struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
void *data);
extern int get_sb_pseudo(struct file_system_type *, char *,
const struct super_operations *ops, unsigned long,
struct vfsmount *mnt);
extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
static inline __attribute__((always_inline)) void sb_mark_dirty(struct super_block *sb)
{
sb->s_dirt = 1;
}
static inline __attribute__((always_inline)) void sb_mark_clean(struct super_block *sb)
{
sb->s_dirt = 0;
}
static inline __attribute__((always_inline)) int sb_is_dirty(struct super_block *sb)
{
return sb->s_dirt;
}
extern int register_filesystem(struct file_system_type *);
extern int unregister_filesystem(struct file_system_type *);
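/*
 * Illustrative sketch, not part of the original preprocessed unit: on 2.6.36
 * a small in-memory filesystem is published by giving a fill_super routine to
 * one of the get_sb_* helpers declared above and registering the resulting
 * file_system_type. Every epon_examplefs_* identifier is hypothetical.
 */
static int epon_examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
 /* a real fill_super would set sb->s_op and allocate the root inode/dentry */
 return 0;
}
static int epon_examplefs_get_sb(struct file_system_type *fs_type, int flags,
  const char *dev_name, void *data, struct vfsmount *mnt)
{
 return get_sb_nodev(fs_type, flags, data, epon_examplefs_fill_super, mnt);
}
static struct file_system_type epon_examplefs_type = {
 .name = "epon_examplefs",
 .get_sb = epon_examplefs_get_sb,
 .kill_sb = kill_anon_super, /* pairs with get_sb_nodev */
};
/* register_filesystem(&epon_examplefs_type) in module init,
 * unregister_filesystem(&epon_examplefs_type) in module exit. */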
extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
extern int may_umount_tree(struct vfsmount *);
extern int may_umount(struct vfsmount *);
extern long do_mount(char *, char *, char *, unsigned long, void *);
extern struct vfsmount *collect_mounts(struct path *);
extern void drop_collected_mounts(struct vfsmount *);
extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
struct vfsmount *);
extern int vfs_statfs(struct path *, struct kstatfs *);
extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
extern int current_umask(void);
extern struct kobject *fs_kobj;
extern int rw_verify_area(int, struct file *, loff_t *, size_t);
extern int locks_mandatory_locked(struct inode *);
extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t);
static inline __attribute__((always_inline)) int __mandatory_lock(struct inode *ino)
{
return (ino->i_mode & (0002000 | 00010)) == 0002000;
}
static inline __attribute__((always_inline)) int mandatory_lock(struct inode *ino)
{
return ((ino)->i_sb->s_flags & (64)) && __mandatory_lock(ino);
}
static inline __attribute__((always_inline)) int locks_verify_locked(struct inode *inode)
{
if (mandatory_lock(inode))
return locks_mandatory_locked(inode);
return 0;
}
static inline __attribute__((always_inline)) int locks_verify_truncate(struct inode *inode,
struct file *filp,
loff_t size)
{
if (inode->i_flock && mandatory_lock(inode))
return locks_mandatory_area(
2, inode, filp,
size < inode->i_size ? size : inode->i_size,
(size < inode->i_size ? inode->i_size - size
: size - inode->i_size)
);
return 0;
}
static inline __attribute__((always_inline)) int break_lease(struct inode *inode, unsigned int mode)
{
if (inode->i_flock)
return __break_lease(inode, mode);
return 0;
}
# 1926 "include/linux/fs.h"
extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
struct file *filp);
extern int do_fallocate(struct file *file, int mode, loff_t offset,
loff_t len);
extern long do_sys_open(int dfd, const char *filename, int flags,
int mode);
extern struct file *filp_open(const char *, int, int);
extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
extern char * getname(const char *);
extern int ioctl_preallocate(struct file *filp, void *argp);
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vfs_caches_init_early(void);
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vfs_caches_init(unsigned long);
extern struct kmem_cache *names_cachep;
# 1958 "include/linux/fs.h"
extern int register_blkdev(unsigned int, const char *);
extern void unregister_blkdev(unsigned int, const char *);
extern struct block_device *bdget(dev_t);
extern struct block_device *bdgrab(struct block_device *bdev);
extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern struct block_device *open_by_devnum(dev_t, fmode_t);
extern void invalidate_bdev(struct block_device *);
extern int sync_blockdev(struct block_device *bdev);
extern struct super_block *freeze_bdev(struct block_device *);
extern void emergency_thaw_all(void);
extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
extern int fsync_bdev(struct block_device *);
# 1987 "include/linux/fs.h"
extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
extern const struct file_operations def_chr_fops;
extern const struct file_operations bad_sock_fops;
extern const struct file_operations def_fifo_fops;
extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
extern int blkdev_get(struct block_device *, fmode_t);
extern int blkdev_put(struct block_device *, fmode_t);
extern int bd_claim(struct block_device *, void *);
extern void bd_release(struct block_device *);
# 2011 "include/linux/fs.h"
extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
extern int register_chrdev_region(dev_t, unsigned, const char *);
extern int __register_chrdev(unsigned int major, unsigned int baseminor,
unsigned int count, const char *name,
const struct file_operations *fops);
extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
unsigned int count, const char *name);
extern void unregister_chrdev_region(dev_t, unsigned);
extern void chrdev_show(struct seq_file *,off_t);
static inline __attribute__((always_inline)) int register_chrdev(unsigned int major, const char *name,
const struct file_operations *fops)
{
return __register_chrdev(major, 0, 256, name, fops);
}
static inline __attribute__((always_inline)) void unregister_chrdev(unsigned int major, const char *name)
{
__unregister_chrdev(major, 0, 256, name);
}
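/*
 * Illustrative sketch, not part of the original preprocessed unit: wiring a
 * file_operations table to a character device with the legacy helpers just
 * defined. Passing major == 0 asks the kernel to pick a free major number,
 * which register_chrdev() then returns. epon_example_fops is the hypothetical
 * table sketched after the file_operations definition earlier in this unit.
 */
static int epon_example_major;

static int epon_example_chrdev_init(void)
{
 int ret = register_chrdev(0, "epon_example", &epon_example_fops);
 if (ret < 0)
  return ret; /* registration failed */
 epon_example_major = ret; /* dynamically allocated major number */
 return 0;
}

static void epon_example_chrdev_exit(void)
{
 unregister_chrdev(epon_example_major, "epon_example");
}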
extern const char *__bdevname(dev_t, char *buffer);
extern const char *bdevname(struct block_device *bdev, char *buffer);
extern struct block_device *lookup_bdev(const char *);
extern struct block_device *open_bdev_exclusive(const char *, fmode_t, void *);
extern void close_bdev_exclusive(struct block_device *, fmode_t);
extern void blkdev_show(struct seq_file *,off_t);
extern void init_special_inode(struct inode *, umode_t, dev_t);
extern void make_bad_inode(struct inode *);
extern int is_bad_inode(struct inode *);
extern const struct file_operations read_pipefifo_fops;
extern const struct file_operations write_pipefifo_fops;
extern const struct file_operations rdwr_pipefifo_fops;
extern int fs_may_remount_ro(struct super_block *);
# 2072 "include/linux/fs.h"
extern void check_disk_size_change(struct gendisk *disk,
struct block_device *bdev);
extern int revalidate_disk(struct gendisk *);
extern int check_disk_change(struct block_device *);
extern int __invalidate_device(struct block_device *);
extern int invalidate_partition(struct gendisk *, int);
extern int invalidate_inodes(struct super_block *);
unsigned long invalidate_mapping_pages(struct address_space *mapping,
unsigned long start, unsigned long end);
static inline __attribute__((always_inline)) void invalidate_remote_inode(struct inode *inode)
{
if ((((inode->i_mode) & 00170000) == 0100000) || (((inode->i_mode) & 00170000) == 0040000) ||
(((inode->i_mode) & 00170000) == 0120000))
invalidate_mapping_pages(inode->i_mapping, 0, -1);
}
extern int invalidate_inode_pages2(struct address_space *mapping);
extern int invalidate_inode_pages2_range(struct address_space *mapping,
unsigned long start, unsigned long end);
extern int write_inode_now(struct inode *, int);
extern int filemap_fdatawrite(struct address_space *);
extern int filemap_flush(struct address_space *);
extern int filemap_fdatawait(struct address_space *);
extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
loff_t lend);
extern int filemap_write_and_wait(struct address_space *mapping);
extern int filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend);
extern int __filemap_fdatawrite_range(struct address_space *mapping,
loff_t start, loff_t end, int sync_mode);
extern int filemap_fdatawrite_range(struct address_space *mapping,
loff_t start, loff_t end);
extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
extern int generic_write_sync(struct file *file, loff_t pos, loff_t count);
extern void sync_supers(void);
extern void emergency_sync(void);
extern void emergency_remount(void);
extern sector_t bmap(struct inode *, sector_t);
extern int notify_change(struct dentry *, struct iattr *);
extern int inode_permission(struct inode *, int);
extern int generic_permission(struct inode *, int,
int (*check_acl)(struct inode *, int));
static inline __attribute__((always_inline)) int execute_ok(struct inode *inode)
{
return (inode->i_mode & (00100|00010|00001)) || (((inode->i_mode) & 00170000) == 0040000);
}
extern int get_write_access(struct inode *);
extern int deny_write_access(struct file *);
static inline __attribute__((always_inline)) void put_write_access(struct inode * inode)
{
atomic_sub(1, (&inode->i_writecount));
}
static inline __attribute__((always_inline)) void allow_write_access(struct file *file)
{
if (file)
atomic_add(1, (&file->f_path.dentry->d_inode->i_writecount));
}
extern int do_pipe_flags(int *, int);
extern struct file *create_read_pipe(struct file *f, int flags);
extern struct file *create_write_pipe(int flags);
extern void free_write_pipe(struct file *);
extern struct file *do_filp_open(int dfd, const char *pathname,
int open_flag, int mode, int acc_mode);
extern int may_open(struct path *, int, int);
extern int kernel_read(struct file *, loff_t, char *, unsigned long);
extern struct file * open_exec(const char *);
extern int is_subdir(struct dentry *, struct dentry *);
extern int path_is_under(struct path *, struct path *);
extern ino_t find_inode_number(struct dentry *, struct qstr *);
extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
extern int inode_init_always(struct super_block *, struct inode *);
extern void inode_init_once(struct inode *);
extern void inode_add_to_lists(struct super_block *, struct inode *);
extern void iput(struct inode *);
extern struct inode * igrab(struct inode *);
extern ino_t iunique(struct super_block *, ino_t);
extern int inode_needs_sync(struct inode *inode);
extern int generic_delete_inode(struct inode *inode);
extern int generic_drop_inode(struct inode *inode);
extern struct inode *ilookup5_nowait(struct super_block *sb,
unsigned long hashval, int (*test)(struct inode *, void *),
void *data);
extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
int (*test)(struct inode *, void *), void *data);
extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
extern struct inode * iget_locked(struct super_block *, unsigned long);
extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
extern int insert_inode_locked(struct inode *);
extern void unlock_new_inode(struct inode *);
extern void __iget(struct inode * inode);
extern void iget_failed(struct inode *);
extern void end_writeback(struct inode *);
extern void destroy_inode(struct inode *);
extern void __destroy_inode(struct inode *);
extern struct inode *new_inode(struct super_block *);
extern int should_remove_suid(struct dentry *);
extern int file_remove_suid(struct file *);
extern void __insert_inode_hash(struct inode *, unsigned long hashval);
extern void remove_inode_hash(struct inode *);
static inline __attribute__((always_inline)) void insert_inode_hash(struct inode *inode) {
__insert_inode_hash(inode, inode->i_ino);
}
extern void submit_bio(int, struct bio *);
extern int bdev_read_only(struct block_device *);
extern int set_blocksize(struct block_device *, int);
extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int);
extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long,
loff_t *);
extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *,
unsigned long *, loff_t, loff_t *, size_t, size_t);
extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *,
unsigned long, loff_t, loff_t *, size_t, ssize_t);
extern ssize_t do_sync_read(struct file *filp, char *buf, size_t len, loff_t *ppos);
extern ssize_t do_sync_write(struct file *filp, const char *buf, size_t len, loff_t *ppos);
extern int generic_segment_checks(const struct iovec *iov,
unsigned long *nr_segs, size_t *count, int access_flags);
extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
extern int blkdev_fsync(struct file *filp, int datasync);
extern ssize_t generic_file_splice_read(struct file *, loff_t *,
struct pipe_inode_info *, size_t, unsigned int);
extern ssize_t default_file_splice_read(struct file *, loff_t *,
struct pipe_inode_info *, size_t, unsigned int);
extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
struct file *, loff_t *, size_t, unsigned int);
extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
struct file *out, loff_t *, size_t len, unsigned int flags);
extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
size_t len, unsigned int flags);
extern void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern loff_t noop_llseek(struct file *file, loff_t offset, int origin);
extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset,
int origin);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
extern ssize_t xip_file_read(struct file *filp, char *buf, size_t len,
loff_t *ppos);
extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma);
extern ssize_t xip_file_write(struct file *filp, const char *buf,
size_t len, loff_t *ppos);
extern int xip_truncate_page(struct address_space *mapping, loff_t from);
# 2266 "include/linux/fs.h"
typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
loff_t file_offset);
enum {
DIO_LOCKING = 0x01,
DIO_SKIP_HOLES = 0x02,
};
void dio_end_io(struct bio *bio, int error);
ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, const struct iovec *iov, loff_t offset,
unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
dio_submit_t submit_io, int flags);
static inline __attribute__((always_inline)) ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
struct inode *inode, struct block_device *bdev, const struct iovec *iov,
loff_t offset, unsigned long nr_segs, get_block_t get_block,
dio_iodone_t end_io)
{
return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
nr_segs, get_block, end_io, ((void *)0),
DIO_LOCKING | DIO_SKIP_HOLES);
}
extern const struct file_operations generic_ro_fops;
extern int vfs_readlink(struct dentry *, char *, int, const char *);
extern int vfs_follow_link(struct nameidata *, const char *);
extern int page_readlink(struct dentry *, char *, int);
extern void *page_follow_link_light(struct dentry *, struct nameidata *);
extern void page_put_link(struct dentry *, struct nameidata *, void *);
extern int __page_symlink(struct inode *inode, const char *symname, int len,
int nofs);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern int generic_readlink(struct dentry *, char *, int);
extern void generic_fillattr(struct inode *, struct kstat *);
extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
void inode_add_bytes(struct inode *inode, loff_t bytes);
void inode_sub_bytes(struct inode *inode, loff_t bytes);
loff_t inode_get_bytes(struct inode *inode);
void inode_set_bytes(struct inode *inode, loff_t bytes);
extern int vfs_readdir(struct file *, filldir_t, void *);
extern int vfs_stat(const char *, struct kstat *);
extern int vfs_lstat(const char *, struct kstat *);
extern int vfs_fstat(unsigned int, struct kstat *);
extern int vfs_fstatat(int , const char *, struct kstat *, int);
extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
unsigned long arg);
extern int __generic_block_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo,
loff_t start, loff_t len,
get_block_t *get_block);
extern int generic_block_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo, u64 start,
u64 len, get_block_t *get_block);
extern void get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
extern struct file_system_type *get_fs_type(const char *name);
extern struct super_block *get_super(struct block_device *);
extern struct super_block *get_active_super(struct block_device *bdev);
extern struct super_block *user_get_super(dev_t);
extern void drop_super(struct super_block *sb);
extern void iterate_supers(void (*)(struct super_block *, void *), void *);
extern int dcache_dir_open(struct inode *, struct file *);
extern int dcache_dir_close(struct inode *, struct file *);
extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
extern int dcache_readdir(struct file *, void *, filldir_t);
extern int simple_setattr(struct dentry *, struct iattr *);
extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
extern int simple_statfs(struct dentry *, struct kstatfs *);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
extern int noop_fsync(struct file *, int);
extern int simple_empty(struct dentry *);
extern int simple_readpage(struct file *file, struct page *page);
extern int simple_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata);
extern int simple_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *);
extern ssize_t generic_read_dir(struct file *, char *, size_t, loff_t *);
extern const struct file_operations simple_dir_operations;
extern const struct inode_operations simple_dir_inode_operations;
struct tree_descr { char *name; const struct file_operations *ops; int mode; };
struct dentry *d_alloc_name(struct dentry *, const char *);
extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *);
extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
extern void simple_release_fs(struct vfsmount **mount, int *count);
extern ssize_t simple_read_from_buffer(void *to, size_t count,
loff_t *ppos, const void *from, size_t available);
extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
const void *from, size_t count);
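/*
 * Illustrative sketch, not part of the original preprocessed unit:
 * simple_read_from_buffer() does the offset/length clamping and the copy to
 * user space, so a read-only status file needs only a few lines. The buffer
 * contents and the epon_example_status_read name are hypothetical.
 */
static const char epon_example_status[] = "link up\n";

static ssize_t epon_example_status_read(struct file *filp, char *buf,
  size_t count, loff_t *ppos)
{
 return simple_read_from_buffer(buf, count, ppos, epon_example_status,
  sizeof(epon_example_status) - 1 /* exclude the trailing NUL */);
}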
extern int generic_file_fsync(struct file *, int);
# 2388 "include/linux/fs.h"
extern int inode_change_ok(const struct inode *, struct iattr *);
extern int inode_newsize_ok(const struct inode *, loff_t offset);
extern void setattr_copy(struct inode *inode, const struct iattr *attr);
extern void file_update_time(struct file *file);
extern int generic_show_options(struct seq_file *m, struct vfsmount *mnt);
extern void save_mount_options(struct super_block *sb, char *options);
extern void replace_mount_options(struct super_block *sb, char *options);
static inline __attribute__((always_inline)) ino_t parent_ino(struct dentry *dentry)
{
ino_t res;
spin_lock(&dentry->d_lock);
res = dentry->d_parent->d_inode->i_ino;
spin_unlock(&dentry->d_lock);
return res;
}
struct simple_transaction_argresp {
ssize_t size;
char data[0];
};
char *simple_transaction_get(struct file *file, const char *buf,
size_t size);
ssize_t simple_transaction_read(struct file *file, char *buf,
size_t size, loff_t *pos);
int simple_transaction_release(struct inode *inode, struct file *file);
void simple_transaction_set(struct file *file, size_t n);
# 2459 "include/linux/fs.h"
static inline __attribute__((always_inline)) void __attribute__((format(printf, 1, 2)))
__simple_attr_check_format(const char *fmt, ...)
{
}
int simple_attr_open(struct inode *inode, struct file *file,
int (*get)(void *, u64 *), int (*set)(void *, u64),
const char *fmt);
int simple_attr_release(struct inode *inode, struct file *file);
ssize_t simple_attr_read(struct file *file, char *buf,
size_t len, loff_t *ppos);
ssize_t simple_attr_write(struct file *file, const char *buf,
size_t len, loff_t *ppos);
struct ctl_table;
int proc_nr_files(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) get_filesystem_list(char *buf);
# 6 "include/linux/proc_fs.h" 2
# 1 "include/linux/magic.h" 1
# 8 "include/linux/proc_fs.h" 2
struct net;
struct completion;
struct mm_struct;
# 30 "include/linux/proc_fs.h"
enum {
PROC_ROOT_INO = 1,
};
# 46 "include/linux/proc_fs.h"
typedef int (read_proc_t)(char *page, char **start, off_t off,
int count, int *eof, void *data);
typedef int (write_proc_t)(struct file *file, const char *buffer,
unsigned long count, void *data);
struct proc_dir_entry {
unsigned int low_ino;
unsigned short namelen;
const char *name;
mode_t mode;
nlink_t nlink;
uid_t uid;
gid_t gid;
loff_t size;
const struct inode_operations *proc_iops;
# 69 "include/linux/proc_fs.h"
const struct file_operations *proc_fops;
struct proc_dir_entry *next, *parent, *subdir;
void *data;
read_proc_t *read_proc;
write_proc_t *write_proc;
atomic_t count;
int pde_users;
spinlock_t pde_unload_lock;
struct completion *pde_unload_completion;
struct list_head pde_openers;
};
enum kcore_type {
KCORE_TEXT,
KCORE_VMALLOC,
KCORE_RAM,
KCORE_VMEMMAP,
KCORE_OTHER,
};
struct kcore_list {
struct list_head list;
unsigned long addr;
size_t size;
int type;
};
struct vmcore {
struct list_head list;
unsigned long long paddr;
unsigned long long size;
loff_t offset;
};
extern void proc_root_init(void);
void proc_flush_task(struct task_struct *task);
extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
struct proc_dir_entry *parent);
struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
struct proc_dir_entry *parent,
const struct file_operations *proc_fops,
void *data);
extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent);
struct pid_namespace;
extern int pid_ns_prepare_proc(struct pid_namespace *ns);
extern void pid_ns_release_proc(struct pid_namespace *ns);
struct tty_driver;
extern void proc_tty_init(void);
extern void proc_tty_register_driver(struct tty_driver *driver);
extern void proc_tty_unregister_driver(struct tty_driver *driver);
# 146 "include/linux/proc_fs.h"
extern struct proc_dir_entry *proc_symlink(const char *,
struct proc_dir_entry *, const char *);
extern struct proc_dir_entry *proc_mkdir(const char *,struct proc_dir_entry *);
extern struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
struct proc_dir_entry *parent);
static inline __attribute__((always_inline)) struct proc_dir_entry *proc_create(const char *name, mode_t mode,
struct proc_dir_entry *parent, const struct file_operations *proc_fops)
{
return proc_create_data(name, mode, parent, proc_fops, ((void *)0));
}
static inline __attribute__((always_inline)) struct proc_dir_entry *create_proc_read_entry(const char *name,
mode_t mode, struct proc_dir_entry *base,
read_proc_t *read_proc, void * data)
{
struct proc_dir_entry *res=create_proc_entry(name,mode,base);
if (res) {
res->read_proc=read_proc;
res->data=data;
}
return res;
}
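/*
 * Illustrative sketch, not part of the original preprocessed unit: the legacy
 * read_proc interface used by 2.6.36-era drivers. The handler formats into
 * the page it is given, sets *eof, and returns the byte count; sprintf() is
 * assumed to be declared elsewhere in this translation unit. All
 * epon_example_proc_* names are hypothetical.
 */
static int epon_example_proc_read(char *page, char **start, off_t off,
  int count, int *eof, void *data)
{
 int len = sprintf(page, "example counter: %d\n", 0);
 *eof = 1; /* everything fits in one page, no continuation needed */
 return len;
}
/* In module init:
 *   create_proc_read_entry("driver/epon_example", 0, ((void *)0),
 *                          epon_example_proc_read, ((void *)0));
 * In module exit:
 *   remove_proc_entry("driver/epon_example", ((void *)0));
 */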
extern struct proc_dir_entry *proc_net_fops_create(struct net *net,
const char *name, mode_t mode, const struct file_operations *fops);
extern void proc_net_remove(struct net *net, const char *name);
extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
struct proc_dir_entry *parent);
extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
extern void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm);
# 245 "include/linux/proc_fs.h"
static inline __attribute__((always_inline)) void
kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
}
union proc_op {
int (*proc_get_link)(struct inode *, struct path *);
int (*proc_read)(struct task_struct *task, char *page);
int (*proc_show)(struct seq_file *m,
struct pid_namespace *ns, struct pid *pid,
struct task_struct *task);
};
struct ctl_table_header;
struct ctl_table;
struct proc_inode {
struct pid *pid;
int fd;
union proc_op op;
struct proc_dir_entry *pde;
struct ctl_table_header *sysctl;
struct ctl_table *sysctl_entry;
struct inode vfs_inode;
};
static inline __attribute__((always_inline)) struct proc_inode *PROC_I(const struct inode *inode)
{
return ({ const typeof( ((struct proc_inode *)0)->vfs_inode ) *__mptr = (inode); (struct proc_inode *)( (char *)__mptr - __builtin_offsetof(struct proc_inode,vfs_inode) );});
}
static inline __attribute__((always_inline)) struct proc_dir_entry *PDE(const struct inode *inode)
{
return PROC_I(inode)->pde;
}
static inline __attribute__((always_inline)) struct net *PDE_NET(struct proc_dir_entry *pde)
{
return pde->parent->data;
}
struct proc_maps_private {
struct pid *pid;
struct task_struct *task;
struct vm_area_struct *tail_vma;
};
# 330 "include/linux/netfilter.h" 2
extern struct proc_dir_entry *proc_net_netfilter;
# 358 "include/linux/netfilter.h"
extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
extern void nf_ct_attach(struct sk_buff *, struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *);
# 6 "include/net/netns/x_tables.h" 2
struct ebt_table;
struct netns_xt {
struct list_head tables[NFPROTO_NUMPROTO];
struct ebt_table *broute_table;
struct ebt_table *frame_filter;
struct ebt_table *frame_nat;
};
# 19 "include/net/net_namespace.h" 2
# 1 "include/net/netns/conntrack.h" 1
# 1 "include/linux/list_nulls.h" 1
# 17 "include/linux/list_nulls.h"
struct hlist_nulls_head {
struct hlist_nulls_node *first;
};
struct hlist_nulls_node {
struct hlist_nulls_node *next, **pprev;
};
# 33 "include/linux/list_nulls.h"
static inline __attribute__((always_inline)) int is_a_nulls(const struct hlist_nulls_node *ptr)
{
return ((unsigned long)ptr & 1);
}
static inline __attribute__((always_inline)) unsigned long get_nulls_value(const struct hlist_nulls_node *ptr)
{
return ((unsigned long)ptr) >> 1;
}
static inline __attribute__((always_inline)) int hlist_nulls_unhashed(const struct hlist_nulls_node *h)
{
return !h->pprev;
}
static inline __attribute__((always_inline)) int hlist_nulls_empty(const struct hlist_nulls_head *h)
{
return is_a_nulls(h->first);
}
static inline __attribute__((always_inline)) void hlist_nulls_add_head(struct hlist_nulls_node *n,
struct hlist_nulls_head *h)
{
struct hlist_nulls_node *first = h->first;
n->next = first;
n->pprev = &h->first;
h->first = n;
if (!is_a_nulls(first))
first->pprev = &n->next;
}
static inline __attribute__((always_inline)) void __hlist_nulls_del(struct hlist_nulls_node *n)
{
struct hlist_nulls_node *next = n->next;
struct hlist_nulls_node **pprev = n->pprev;
*pprev = next;
if (!is_a_nulls(next))
next->pprev = pprev;
}
static inline __attribute__((always_inline)) void hlist_nulls_del(struct hlist_nulls_node *n)
{
__hlist_nulls_del(n);
n->pprev = ((void *) 0x00200200 + 0);
}
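/*
 * Illustrative sketch, not part of the original preprocessed unit: an
 * hlist_nulls chain is terminated not by NULL but by an odd "nulls" value, so
 * a lockless reader that lands on the wrong chain after an RCU resize can
 * detect it by checking get_nulls_value() of the terminator. The
 * epon_example_nulls_demo name and the marker value 7 are arbitrary.
 */
static unsigned long epon_example_nulls_demo(struct hlist_nulls_head *head,
  struct hlist_nulls_node *node)
{
 struct hlist_nulls_node *pos;

 /* terminate the empty chain with nulls marker 7, encoded as (7 << 1) | 1 */
 head->first = (struct hlist_nulls_node *)((7UL << 1) | 1);

 hlist_nulls_add_head(node, head);

 for (pos = head->first; !is_a_nulls(pos); pos = pos->next)
  ; /* a real lookup would compare keys here */

 return get_nulls_value(pos); /* 7: the walk ended on the expected chain */
}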
# 6 "include/net/netns/conntrack.h" 2
struct ctl_table_header;
struct nf_conntrack_ecache;
struct netns_ct {
atomic_t count;
unsigned int expect_count;
unsigned int htable_size;
struct kmem_cache *nf_conntrack_cachep;
struct hlist_nulls_head *hash;
struct hlist_head *expect_hash;
struct hlist_nulls_head unconfirmed;
struct hlist_nulls_head dying;
struct ip_conntrack_stat *stat;
int sysctl_events;
unsigned int sysctl_events_retry_timeout;
int sysctl_acct;
int sysctl_checksum;
unsigned int sysctl_log_invalid;
struct ctl_table_header *sysctl_header;
struct ctl_table_header *acct_sysctl_header;
struct ctl_table_header *event_sysctl_header;
int hash_vmalloc;
int expect_vmalloc;
char *slabname;
};
# 21 "include/net/net_namespace.h" 2
# 1 "include/net/netns/xfrm.h" 1
# 1 "include/linux/xfrm.h" 1
# 13 "include/linux/xfrm.h"
typedef union {
__be32 a4;
__be32 a6[4];
} xfrm_address_t;
struct xfrm_id {
xfrm_address_t daddr;
__be32 spi;
__u8 proto;
};
struct xfrm_sec_ctx {
__u8 ctx_doi;
__u8 ctx_alg;
__u16 ctx_len;
__u32 ctx_sid;
char ctx_str[0];
};
# 46 "include/linux/xfrm.h"
struct xfrm_selector {
xfrm_address_t daddr;
xfrm_address_t saddr;
__be16 dport;
__be16 dport_mask;
__be16 sport;
__be16 sport_mask;
__u16 family;
__u8 prefixlen_d;
__u8 prefixlen_s;
__u8 proto;
int ifindex;
__kernel_uid32_t user;
};
struct xfrm_lifetime_cfg {
__u64 soft_byte_limit;
__u64 hard_byte_limit;
__u64 soft_packet_limit;
__u64 hard_packet_limit;
__u64 soft_add_expires_seconds;
__u64 hard_add_expires_seconds;
__u64 soft_use_expires_seconds;
__u64 hard_use_expires_seconds;
};
struct xfrm_lifetime_cur {
__u64 bytes;
__u64 packets;
__u64 add_time;
__u64 use_time;
};
struct xfrm_replay_state {
__u32 oseq;
__u32 seq;
__u32 bitmap;
};
struct xfrm_algo {
char alg_name[64];
unsigned int alg_key_len;
char alg_key[0];
};
struct xfrm_algo_auth {
char alg_name[64];
unsigned int alg_key_len;
unsigned int alg_trunc_len;
char alg_key[0];
};
struct xfrm_algo_aead {
char alg_name[64];
unsigned int alg_key_len;
unsigned int alg_icv_len;
char alg_key[0];
};
struct xfrm_stats {
__u32 replay_window;
__u32 replay;
__u32 integrity_failed;
};
enum {
XFRM_POLICY_TYPE_MAIN = 0,
XFRM_POLICY_TYPE_SUB = 1,
XFRM_POLICY_TYPE_MAX = 2,
XFRM_POLICY_TYPE_ANY = 255
};
enum {
XFRM_POLICY_IN = 0,
XFRM_POLICY_OUT = 1,
XFRM_POLICY_FWD = 2,
XFRM_POLICY_MASK = 3,
XFRM_POLICY_MAX = 3
};
enum {
XFRM_SHARE_ANY,
XFRM_SHARE_SESSION,
XFRM_SHARE_USER,
XFRM_SHARE_UNIQUE
};
# 143 "include/linux/xfrm.h"
enum {
XFRM_MSG_BASE = 0x10,
XFRM_MSG_NEWSA = 0x10,
XFRM_MSG_DELSA,
XFRM_MSG_GETSA,
XFRM_MSG_NEWPOLICY,
XFRM_MSG_DELPOLICY,
XFRM_MSG_GETPOLICY,
XFRM_MSG_ALLOCSPI,
XFRM_MSG_ACQUIRE,
XFRM_MSG_EXPIRE,
XFRM_MSG_UPDPOLICY,
XFRM_MSG_UPDSA,
XFRM_MSG_POLEXPIRE,
XFRM_MSG_FLUSHSA,
XFRM_MSG_FLUSHPOLICY,
XFRM_MSG_NEWAE,
XFRM_MSG_GETAE,
XFRM_MSG_REPORT,
XFRM_MSG_MIGRATE,
XFRM_MSG_NEWSADINFO,
XFRM_MSG_GETSADINFO,
XFRM_MSG_NEWSPDINFO,
XFRM_MSG_GETSPDINFO,
XFRM_MSG_MAPPING,
__XFRM_MSG_MAX
};
# 213 "include/linux/xfrm.h"
struct xfrm_user_sec_ctx {
__u16 len;
__u16 exttype;
__u8 ctx_alg;
__u8 ctx_doi;
__u16 ctx_len;
};
struct xfrm_user_tmpl {
struct xfrm_id id;
__u16 family;
xfrm_address_t saddr;
__u32 reqid;
__u8 mode;
__u8 share;
__u8 optional;
__u32 aalgos;
__u32 ealgos;
__u32 calgos;
};
struct xfrm_encap_tmpl {
__u16 encap_type;
__be16 encap_sport;
__be16 encap_dport;
xfrm_address_t encap_oa;
};
enum xfrm_ae_ftype_t {
XFRM_AE_UNSPEC,
XFRM_AE_RTHR=1,
XFRM_AE_RVAL=2,
XFRM_AE_LVAL=4,
XFRM_AE_ETHR=8,
XFRM_AE_CR=16,
XFRM_AE_CE=32,
XFRM_AE_CU=64,
__XFRM_AE_MAX
};
struct xfrm_userpolicy_type {
__u8 type;
__u16 reserved1;
__u8 reserved2;
};
enum xfrm_attr_type_t {
XFRMA_UNSPEC,
XFRMA_ALG_AUTH,
XFRMA_ALG_CRYPT,
XFRMA_ALG_COMP,
XFRMA_ENCAP,
XFRMA_TMPL,
XFRMA_SA,
XFRMA_POLICY,
XFRMA_SEC_CTX,
XFRMA_LTIME_VAL,
XFRMA_REPLAY_VAL,
XFRMA_REPLAY_THRESH,
XFRMA_ETIMER_THRESH,
XFRMA_SRCADDR,
XFRMA_COADDR,
XFRMA_LASTUSED,
XFRMA_POLICY_TYPE,
XFRMA_MIGRATE,
XFRMA_ALG_AEAD,
XFRMA_KMADDRESS,
XFRMA_ALG_AUTH_TRUNC,
XFRMA_MARK,
__XFRMA_MAX
};
struct xfrm_mark {
__u32 v;
__u32 m;
};
enum xfrm_sadattr_type_t {
XFRMA_SAD_UNSPEC,
XFRMA_SAD_CNT,
XFRMA_SAD_HINFO,
__XFRMA_SAD_MAX
};
struct xfrmu_sadhinfo {
__u32 sadhcnt;
__u32 sadhmcnt;
};
enum xfrm_spdattr_type_t {
XFRMA_SPD_UNSPEC,
XFRMA_SPD_INFO,
XFRMA_SPD_HINFO,
__XFRMA_SPD_MAX
};
struct xfrmu_spdinfo {
__u32 incnt;
__u32 outcnt;
__u32 fwdcnt;
__u32 inscnt;
__u32 outscnt;
__u32 fwdscnt;
};
struct xfrmu_spdhinfo {
__u32 spdhcnt;
__u32 spdhmcnt;
};
struct xfrm_usersa_info {
struct xfrm_selector sel;
struct xfrm_id id;
xfrm_address_t saddr;
struct xfrm_lifetime_cfg lft;
struct xfrm_lifetime_cur curlft;
struct xfrm_stats stats;
__u32 seq;
__u32 reqid;
__u16 family;
__u8 mode;
__u8 replay_window;
__u8 flags;
};
struct xfrm_usersa_id {
xfrm_address_t daddr;
__be32 spi;
__u16 family;
__u8 proto;
};
struct xfrm_aevent_id {
struct xfrm_usersa_id sa_id;
xfrm_address_t saddr;
__u32 flags;
__u32 reqid;
};
struct xfrm_userspi_info {
struct xfrm_usersa_info info;
__u32 min;
__u32 max;
};
struct xfrm_userpolicy_info {
struct xfrm_selector sel;
struct xfrm_lifetime_cfg lft;
struct xfrm_lifetime_cur curlft;
__u32 priority;
__u32 index;
__u8 dir;
__u8 action;
__u8 flags;
__u8 share;
};
struct xfrm_userpolicy_id {
struct xfrm_selector sel;
__u32 index;
__u8 dir;
};
struct xfrm_user_acquire {
struct xfrm_id id;
xfrm_address_t saddr;
struct xfrm_selector sel;
struct xfrm_userpolicy_info policy;
__u32 aalgos;
__u32 ealgos;
__u32 calgos;
__u32 seq;
};
struct xfrm_user_expire {
struct xfrm_usersa_info state;
__u8 hard;
};
struct xfrm_user_polexpire {
struct xfrm_userpolicy_info pol;
__u8 hard;
};
struct xfrm_usersa_flush {
__u8 proto;
};
struct xfrm_user_report {
__u8 proto;
struct xfrm_selector sel;
};
struct xfrm_user_kmaddress {
xfrm_address_t local;
xfrm_address_t remote;
__u32 reserved;
__u16 family;
};
struct xfrm_user_migrate {
xfrm_address_t old_daddr;
xfrm_address_t old_saddr;
xfrm_address_t new_daddr;
xfrm_address_t new_saddr;
__u8 proto;
__u8 mode;
__u16 reserved;
__u32 reqid;
__u16 old_family;
__u16 new_family;
};
struct xfrm_user_mapping {
struct xfrm_usersa_id id;
__u32 reqid;
xfrm_address_t old_saddr;
xfrm_address_t new_saddr;
__be16 old_sport;
__be16 new_sport;
};
# 467 "include/linux/xfrm.h"
enum xfrm_nlgroups {
XFRMNLGRP_NONE,
XFRMNLGRP_ACQUIRE,
XFRMNLGRP_EXPIRE,
XFRMNLGRP_SA,
XFRMNLGRP_POLICY,
XFRMNLGRP_AEVENTS,
XFRMNLGRP_REPORT,
XFRMNLGRP_MIGRATE,
XFRMNLGRP_MAPPING,
__XFRMNLGRP_MAX
};
# 8 "include/net/netns/xfrm.h" 2
struct ctl_table_header;
struct xfrm_policy_hash {
struct hlist_head *table;
unsigned int hmask;
};
struct netns_xfrm {
struct list_head state_all;
# 27 "include/net/netns/xfrm.h"
struct hlist_head *state_bydst;
struct hlist_head *state_bysrc;
struct hlist_head *state_byspi;
unsigned int state_hmask;
unsigned int state_num;
struct work_struct state_hash_work;
struct hlist_head state_gc_list;
struct work_struct state_gc_work;
wait_queue_head_t km_waitq;
struct list_head policy_all;
struct hlist_head *policy_byidx;
unsigned int policy_idx_hmask;
struct hlist_head policy_inexact[XFRM_POLICY_MAX * 2];
struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX * 2];
unsigned int policy_count[XFRM_POLICY_MAX * 2];
struct work_struct policy_hash_work;
struct dst_ops xfrm4_dst_ops;
struct dst_ops xfrm6_dst_ops;
struct sock *nlsk;
struct sock *nlsk_stash;
u32 sysctl_aevent_etime;
u32 sysctl_aevent_rseqth;
int sysctl_larval_drop;
u32 sysctl_acq_expires;
struct ctl_table_header *sysctl_hdr;
};
# 23 "include/net/net_namespace.h" 2
struct proc_dir_entry;
struct net_device;
struct sock;
struct ctl_table_header;
struct net_generic;
struct sock;
struct net {
atomic_t count;
struct list_head list;
struct list_head cleanup_list;
struct list_head exit_list;
struct proc_dir_entry *proc_net;
struct proc_dir_entry *proc_net_stat;
struct ctl_table_set sysctls;
struct net_device *loopback_dev;
struct list_head dev_base_head;
struct hlist_head *dev_name_head;
struct hlist_head *dev_index_head;
struct list_head rules_ops;
spinlock_t rules_mod_lock;
struct sock *rtnl;
struct sock *genl_sock;
struct netns_core core;
struct netns_mib mib;
struct netns_packet packet;
struct netns_unix unx;
struct netns_ipv4 ipv4;
struct netns_ipv6 ipv6;
struct netns_xt xt;
struct netns_ct ct;
struct sock *nfnl;
struct sock *nfnl_stash;
struct netns_xfrm xfrm;
struct sk_buff_head wext_nlevents;
struct net_generic *gen;
};
# 1 "include/linux/seq_file_net.h" 1
# 1 "include/linux/seq_file.h" 1
# 10 "include/linux/seq_file.h"
struct seq_operations;
struct file;
struct path;
struct inode;
struct dentry;
struct seq_file {
char *buf;
size_t size;
size_t from;
size_t count;
loff_t index;
loff_t read_pos;
u64 version;
struct mutex lock;
const struct seq_operations *op;
void *private;
};
struct seq_operations {
void * (*start) (struct seq_file *m, loff_t *pos);
void (*stop) (struct seq_file *m, void *v);
void * (*next) (struct seq_file *m, void *v, loff_t *pos);
int (*show) (struct seq_file *m, void *v);
};
# 46 "include/linux/seq_file.h"
static inline __attribute__((always_inline)) size_t seq_get_buf(struct seq_file *m, char **bufp)
{
__BUG_ON((unsigned long)(m->count > m->size));
if (m->count < m->size)
*bufp = m->buf + m->count;
else
*bufp = ((void *)0);
return m->size - m->count;
}
# 66 "include/linux/seq_file.h"
static inline __attribute__((always_inline)) void seq_commit(struct seq_file *m, int num)
{
if (num < 0) {
m->count = m->size;
} else {
__BUG_ON((unsigned long)(m->count + num > m->size));
m->count += num;
}
}
char *mangle_path(char *s, char *p, char *esc);
int seq_open(struct file *, const struct seq_operations *);
ssize_t seq_read(struct file *, char *, size_t, loff_t *);
loff_t seq_lseek(struct file *, loff_t, int);
int seq_release(struct inode *, struct file *);
int seq_escape(struct seq_file *, const char *, const char *);
int seq_putc(struct seq_file *m, char c);
int seq_puts(struct seq_file *m, const char *s);
int seq_write(struct seq_file *seq, const void *data, size_t len);
int seq_printf(struct seq_file *, const char *, ...)
__attribute__ ((format (printf,2,3)));
int seq_path(struct seq_file *, struct path *, char *);
int seq_dentry(struct seq_file *, struct dentry *, char *);
int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
char *esc);
int seq_bitmap(struct seq_file *m, const unsigned long *bits,
unsigned int nr_bits);
static inline __attribute__((always_inline)) int seq_cpumask(struct seq_file *m, const struct cpumask *mask)
{
return seq_bitmap(m, ((mask)->bits), nr_cpu_ids);
}
static inline __attribute__((always_inline)) int seq_nodemask(struct seq_file *m, nodemask_t *mask)
{
return seq_bitmap(m, mask->bits, (1 << 0));
}
int seq_bitmap_list(struct seq_file *m, const unsigned long *bits,
unsigned int nr_bits);
static inline __attribute__((always_inline)) int seq_cpumask_list(struct seq_file *m,
const struct cpumask *mask)
{
return seq_bitmap_list(m, ((mask)->bits), nr_cpu_ids);
}
static inline __attribute__((always_inline)) int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
{
return seq_bitmap_list(m, mask->bits, (1 << 0));
}
int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
int single_release(struct inode *, struct file *);
void *__seq_open_private(struct file *, const struct seq_operations *, int);
int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
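/*
 * Illustrative sketch, not part of the original preprocessed unit: the usual
 * way to expose read-only text through seq_file on 2.6.36 is a show()
 * routine handed to single_open(), with seq_read/seq_lseek/single_release
 * reused for the rest of the file_operations. The epon_example_seq_* names
 * are hypothetical.
 */
static int epon_example_seq_show(struct seq_file *m, void *v)
{
 seq_printf(m, "example state: %s\n", "idle");
 return 0;
}

static int epon_example_seq_open(struct inode *inode, struct file *filp)
{
 return single_open(filp, epon_example_seq_show, ((void *)0));
}

static const struct file_operations epon_example_seq_fops = {
 .open = epon_example_seq_open,
 .read = seq_read,
 .llseek = seq_lseek,
 .release = single_release,
};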
extern struct list_head *seq_list_start(struct list_head *head,
loff_t pos);
extern struct list_head *seq_list_start_head(struct list_head *head,
loff_t pos);
extern struct list_head *seq_list_next(void *v, struct list_head *head,
loff_t *ppos);
extern struct hlist_node *seq_hlist_start(struct hlist_head *head,
loff_t pos);
extern struct hlist_node *seq_hlist_start_head(struct hlist_head *head,
loff_t pos);
extern struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head,
loff_t *ppos);
extern struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head,
loff_t pos);
extern struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head,
loff_t pos);
extern struct hlist_node *seq_hlist_next_rcu(void *v,
struct hlist_head *head,
loff_t *ppos);
# 5 "include/linux/seq_file_net.h" 2
struct net;
extern struct net init_net;
struct seq_net_private {
};
int seq_open_net(struct inode *, struct file *,
const struct seq_operations *, int);
int single_open_net(struct inode *, struct file *file,
int (*show)(struct seq_file *, void *));
int seq_release_net(struct inode *, struct file *);
int single_release_net(struct inode *, struct file *);
static inline __attribute__((always_inline)) struct net *seq_file_net(struct seq_file *seq)
{
return &init_net;
}
# 98 "include/net/net_namespace.h" 2
extern struct net init_net;
extern struct net *copy_net_ns(unsigned long flags, struct net *net_ns);
# 114 "include/net/net_namespace.h"
extern struct list_head net_namespace_list;
extern struct net *get_net_ns_by_pid(pid_t pid);
# 152 "include/net/net_namespace.h"
static inline __attribute__((always_inline)) struct net *get_net(struct net *net)
{
return net;
}
static inline __attribute__((always_inline)) void put_net(struct net *net)
{
}
static inline __attribute__((always_inline)) struct net *maybe_get_net(struct net *net)
{
return net;
}
static inline __attribute__((always_inline))
int net_eq(const struct net *net1, const struct net *net2)
{
return 1;
}
# 188 "include/net/net_namespace.h"
static inline __attribute__((always_inline)) struct net *hold_net(struct net *net)
{
return net;
}
static inline __attribute__((always_inline)) void release_net(struct net *net)
{
}
# 233 "include/net/net_namespace.h"
struct pernet_operations {
struct list_head list;
int (*init)(struct net *net);
void (*exit)(struct net *net);
void (*exit_batch)(struct list_head *net_exit_list);
int *id;
size_t size;
};
# 261 "include/net/net_namespace.h"
extern int register_pernet_subsys(struct pernet_operations *);
extern void unregister_pernet_subsys(struct pernet_operations *);
extern int register_pernet_device(struct pernet_operations *);
extern void unregister_pernet_device(struct pernet_operations *);
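/*
 * Illustrative sketch, not part of the original preprocessed unit:
 * per-network-namespace state is set up by handing init/exit callbacks to
 * register_pernet_subsys(). In this configuration (no CONFIG_NET_NS, as the
 * stubbed get_net/put_net above show) the callbacks run once against
 * init_net. The epon_example_net_* names are hypothetical.
 */
static int epon_example_net_init(struct net *net)
{
 return 0; /* allocate per-namespace state here */
}

static void epon_example_net_exit(struct net *net)
{
 /* free per-namespace state here */
}

static struct pernet_operations epon_example_net_ops = {
 .init = epon_example_net_init,
 .exit = epon_example_net_exit,
};
/* register_pernet_subsys(&epon_example_net_ops) in module init,
 * unregister_pernet_subsys(&epon_example_net_ops) in module exit. */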
struct ctl_path;
struct ctl_table;
struct ctl_table_header;
extern struct ctl_table_header *register_net_sysctl_table(struct net *net,
const struct ctl_path *path, struct ctl_table *table);
extern struct ctl_table_header *register_net_sysctl_rotable(
const struct ctl_path *path, struct ctl_table *table);
extern void unregister_net_sysctl_table(struct ctl_table_header *header);
# 50 "include/linux/netdevice.h" 2
# 1 "include/net/dsa.h" 1
# 17 "include/net/dsa.h"
struct dsa_chip_data {
struct device *mii_bus;
int sw_addr;
# 31 "include/net/dsa.h"
char *port_names[12];
s8 *rtable;
};
struct dsa_platform_data {
struct device *netdev;
int nr_chips;
struct dsa_chip_data *chip;
};
extern int dsa_uses_dsa_tags(void *dsa_ptr);
extern int dsa_uses_trailer_tags(void *dsa_ptr);
# 51 "include/linux/netdevice.h" 2
struct vlan_group;
struct netpoll_info;
struct phy_device;
struct wireless_dev;
# 111 "include/linux/netdevice.h"
enum netdev_tx {
__NETDEV_TX_MIN = (-((int)(~0U>>1)) - 1),
NETDEV_TX_OK = 0x00,
NETDEV_TX_BUSY = 0x10,
NETDEV_TX_LOCKED = 0x20,
};
typedef enum netdev_tx netdev_tx_t;
static inline __attribute__((always_inline)) int dev_xmit_complete(int rc)
{
if (__builtin_expect(!!(rc < 0x0f), 1))
return true;
return false;
}
# 173 "include/linux/netdevice.h"
struct net_device_stats {
unsigned long rx_packets;
unsigned long tx_packets;
unsigned long rx_bytes;
unsigned long tx_bytes;
unsigned long rx_errors;
unsigned long tx_errors;
unsigned long rx_dropped;
unsigned long tx_dropped;
unsigned long multicast;
unsigned long collisions;
unsigned long rx_length_errors;
unsigned long rx_over_errors;
unsigned long rx_crc_errors;
unsigned long rx_frame_errors;
unsigned long rx_fifo_errors;
unsigned long rx_missed_errors;
unsigned long tx_aborted_errors;
unsigned long tx_carrier_errors;
unsigned long tx_fifo_errors;
unsigned long tx_heartbeat_errors;
unsigned long tx_window_errors;
unsigned long rx_compressed;
unsigned long tx_compressed;
};
# 208 "include/linux/netdevice.h"
enum {
IF_PORT_UNKNOWN = 0,
IF_PORT_10BASE2,
IF_PORT_10BASET,
IF_PORT_AUI,
IF_PORT_100BASET,
IF_PORT_100BASETX,
IF_PORT_100BASEFX
};
struct neighbour;
struct neigh_parms;
struct sk_buff;
struct netdev_hw_addr {
struct list_head list;
unsigned char addr[32];
unsigned char type;
int refcount;
int synced;
int global_use;
struct rcu_head rcu_head;
};
struct netdev_hw_addr_list {
struct list_head list;
int count;
};
# 262 "include/linux/netdevice.h"
struct hh_cache {
struct hh_cache *hh_next;
atomic_t hh_refcnt;
__be16 hh_type __attribute__((__aligned__((1 << 5))));
u16 hh_len;
int (*hh_output)(struct sk_buff *skb);
seqlock_t hh_lock;
unsigned long hh_data[(((96)+(16 -1))&~(16 - 1)) / sizeof(long)];
};
# 307 "include/linux/netdevice.h"
struct header_ops {
int (*create) (struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned len);
int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
int (*rebuild)(struct sk_buff *skb);
int (*cache)(const struct neighbour *neigh, struct hh_cache *hh);
void (*cache_update)(struct hh_cache *hh,
const struct net_device *dev,
const unsigned char *haddr);
};
enum netdev_state_t {
__LINK_STATE_START,
__LINK_STATE_PRESENT,
__LINK_STATE_NOCARRIER,
__LINK_STATE_LINKWATCH_PENDING,
__LINK_STATE_DORMANT,
};
struct netdev_boot_setup {
char name[16];
struct ifmap map;
};
extern int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) netdev_boot_setup(char *str);
struct napi_struct {
struct list_head poll_list;
unsigned long state;
int weight;
int (*poll)(struct napi_struct *, int);
unsigned int gro_count;
struct net_device *dev;
struct list_head dev_list;
struct sk_buff *gro_list;
struct sk_buff *skb;
};
enum {
NAPI_STATE_SCHED,
NAPI_STATE_DISABLE,
NAPI_STATE_NPSVC,
};
enum gro_result {
GRO_MERGED,
GRO_MERGED_FREE,
GRO_HELD,
GRO_NORMAL,
GRO_DROP,
};
typedef enum gro_result gro_result_t;
typedef struct sk_buff *rx_handler_func_t(struct sk_buff *skb);
extern void __napi_schedule(struct napi_struct *n);
static inline __attribute__((always_inline)) int napi_disable_pending(struct napi_struct *n)
{
return test_bit(NAPI_STATE_DISABLE, &n->state);
}
# 407 "include/linux/netdevice.h"
static inline __attribute__((always_inline)) int napi_schedule_prep(struct napi_struct *n)
{
return !napi_disable_pending(n) &&
!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}
# 420 "include/linux/netdevice.h"
static inline __attribute__((always_inline)) void napi_schedule(struct napi_struct *n)
{
if (napi_schedule_prep(n))
__napi_schedule(n);
}
static inline __attribute__((always_inline)) int napi_reschedule(struct napi_struct *napi)
{
if (napi_schedule_prep(napi)) {
__napi_schedule(napi);
return 1;
}
return 0;
}
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);
# 452 "include/linux/netdevice.h"
static inline __attribute__((always_inline)) void napi_disable(struct napi_struct *n)
{
set_bit(NAPI_STATE_DISABLE, &n->state);
while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
msleep(1);
clear_bit(NAPI_STATE_DISABLE, &n->state);
}
# 467 "include/linux/netdevice.h"
static inline __attribute__((always_inline)) void napi_enable(struct napi_struct *n)
{
__BUG_ON((unsigned long)(!test_bit(NAPI_STATE_SCHED, &n->state)));
__asm__ __volatile__(" \n" : : :"memory");
clear_bit(NAPI_STATE_SCHED, &n->state);
}
# 483 "include/linux/netdevice.h"
static inline __attribute__((always_inline)) void napi_synchronize(const struct napi_struct *n)
{
while (test_bit(NAPI_STATE_SCHED, &n->state))
msleep(1);
}
enum netdev_queue_state_t {
__QUEUE_STATE_XOFF,
__QUEUE_STATE_FROZEN,
};
struct netdev_queue {
struct net_device *dev;
struct Qdisc *qdisc;
unsigned long state;
struct Qdisc *qdisc_sleeping;
spinlock_t _xmit_lock __attribute__((__aligned__((1 << 5))));
int xmit_lock_owner;
unsigned long trans_start;
u64 tx_bytes;
u64 tx_packets;
u64 tx_dropped;
} __attribute__((__aligned__((1 << 5))));
# 711 "include/linux/netdevice.h"
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
void (*ndo_uninit)(struct net_device *dev);
int (*ndo_open)(struct net_device *dev);
int (*ndo_stop)(struct net_device *dev);
netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
struct net_device *dev);
u16 (*ndo_select_queue)(struct net_device *dev,
struct sk_buff *skb);
void (*ndo_change_rx_flags)(struct net_device *dev,
int flags);
void (*ndo_set_rx_mode)(struct net_device *dev);
void (*ndo_set_multicast_list)(struct net_device *dev);
int (*ndo_set_mac_address)(struct net_device *dev,
void *addr);
int (*ndo_validate_addr)(struct net_device *dev);
int (*ndo_do_ioctl)(struct net_device *dev,
struct ifreq *ifr, int cmd);
int (*ndo_set_config)(struct net_device *dev,
struct ifmap *map);
int (*ndo_change_mtu)(struct net_device *dev,
int new_mtu);
int (*ndo_neigh_setup)(struct net_device *dev,
struct neigh_parms *);
void (*ndo_tx_timeout) (struct net_device *dev);
struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
struct rtnl_link_stats64 *storage);
struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
void (*ndo_vlan_rx_register)(struct net_device *dev,
struct vlan_group *grp);
void (*ndo_vlan_rx_add_vid)(struct net_device *dev,
unsigned short vid);
void (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
unsigned short vid);
int (*ndo_set_vf_mac)(struct net_device *dev,
int queue, u8 *mac);
int (*ndo_set_vf_vlan)(struct net_device *dev,
int queue, u16 vlan, u8 qos);
int (*ndo_set_vf_tx_rate)(struct net_device *dev,
int vf, int rate);
int (*ndo_get_vf_config)(struct net_device *dev,
int vf,
struct ifla_vf_info *ivf);
int (*ndo_set_vf_port)(struct net_device *dev,
int vf,
struct nlattr *port[]);
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
# 781 "include/linux/netdevice.h"
};
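/*
 * Illustrative sketch, not part of the original preprocessed unit: a minimal
 * net_device_ops for 2.6.36 needs only open/stop and a start_xmit returning
 * one of the netdev_tx codes defined above. The epon_example_ndo_* names are
 * hypothetical; a real start_xmit would hand the skb to the MAC and free it
 * on TX completion rather than merely claiming success.
 */
static int epon_example_ndo_open(struct net_device *dev)
{
 return 0; /* enable the MAC and start the TX queue here */
}

static int epon_example_ndo_stop(struct net_device *dev)
{
 return 0;
}

static netdev_tx_t epon_example_ndo_start_xmit(struct sk_buff *skb,
  struct net_device *dev)
{
 /* queue skb to hardware here; the sketch only reports success */
 return NETDEV_TX_OK;
}

static const struct net_device_ops epon_example_netdev_ops = {
 .ndo_open = epon_example_ndo_open,
 .ndo_stop = epon_example_ndo_stop,
 .ndo_start_xmit = epon_example_ndo_start_xmit,
};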
# 793 "include/linux/netdevice.h"
struct net_device {
char name[16];
struct pm_qos_request_list pm_qos_req;
struct hlist_node name_hlist;
char *ifalias;
unsigned long mem_end;
unsigned long mem_start;
unsigned long base_addr;
unsigned int irq;
unsigned char if_port;
unsigned char dma;
unsigned long state;
struct list_head dev_list;
struct list_head napi_list;
struct list_head unreg_list;
unsigned long features;
# 888 "include/linux/netdevice.h"
int ifindex;
int iflink;
struct net_device_stats stats;
const struct iw_handler_def * wireless_handlers;
struct iw_public_data * wireless_data;
const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
const struct header_ops *header_ops;
unsigned int flags;
unsigned short gflags;
unsigned int priv_flags;
unsigned short padded;
unsigned char operstate;
unsigned char link_mode;
unsigned int mtu;
unsigned short type;
unsigned short hard_header_len;
unsigned short needed_headroom;
unsigned short needed_tailroom;
struct net_device *master;
unsigned char perm_addr[32];
unsigned char addr_assign_type;
unsigned char addr_len;
unsigned short dev_id;
spinlock_t addr_list_lock;
struct netdev_hw_addr_list uc;
struct netdev_hw_addr_list mc;
int uc_promisc;
unsigned int promiscuity;
unsigned int allmulti;
void *atalk_ptr;
void *ip_ptr;
void *dn_ptr;
void *ip6_ptr;
void *ec_ptr;
void *ax25_ptr;
struct wireless_dev *ieee80211_ptr;
unsigned long last_rx;
unsigned char *dev_addr;
struct netdev_hw_addr_list dev_addrs;
unsigned char broadcast[32];
# 985 "include/linux/netdevice.h"
struct netdev_queue rx_queue;
rx_handler_func_t *rx_handler;
void *rx_handler_data;
struct netdev_queue *_tx __attribute__((__aligned__((1 << 5))));
unsigned int num_tx_queues;
unsigned int real_num_tx_queues;
struct Qdisc *qdisc;
unsigned long tx_queue_len;
spinlock_t tx_global_lock;
# 1011 "include/linux/netdevice.h"
unsigned long trans_start;
int watchdog_timeo;
struct timer_list watchdog_timer;
atomic_t refcnt __attribute__((__aligned__((1 << 5))));
struct list_head todo_list;
struct hlist_node index_hlist;
struct list_head link_watch_list;
enum { NETREG_UNINITIALIZED=0,
NETREG_REGISTERED,
NETREG_UNREGISTERING,
NETREG_UNREGISTERED,
NETREG_RELEASED,
NETREG_DUMMY,
} reg_state:16;
enum {
RTNL_LINK_INITIALIZED,
RTNL_LINK_INITIALIZING,
} rtnl_link_state:16;
void (*destructor)(struct net_device *dev);
# 1053 "include/linux/netdevice.h"
void *ml_priv;
struct garp_port *garp_port;
struct device dev;
const struct attribute_group *sysfs_groups[4];
const struct rtnl_link_ops *rtnl_link_ops;
unsigned long vlan_features;
unsigned int gso_max_size;
# 1083 "include/linux/netdevice.h"
struct ethtool_rx_ntuple_list ethtool_ntuple_list;
struct phy_device *phydev;
u32 bind_group[2];
int bind_index;
# 1112 "include/linux/netdevice.h"
u8 bind_type;
# 1122 "include/linux/netdevice.h"
u8 vlan_mode;
u16 tci;
u16 multicast_tci;
u8 ipversion;
struct list_head ext_mvlan_list;
int ppp_flags;
};
static inline __attribute__((always_inline))
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
unsigned int index)
{
return &dev->_tx[index];
}
static inline __attribute__((always_inline)) void netdev_for_each_tx_queue(struct net_device *dev,
void (*f)(struct net_device *,
struct netdev_queue *,
void *),
void *arg)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++)
f(dev, &dev->_tx[i], arg);
}
static inline __attribute__((always_inline))
struct net *dev_net(const struct net_device *dev)
{
return (&init_net);
}
static inline __attribute__((always_inline))
void dev_net_set(struct net_device *dev, struct net *net)
{
}
static inline __attribute__((always_inline)) int netdev_uses_dsa_tags(struct net_device *dev)
{
return 0;
}
static inline __attribute__((always_inline)) void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
skb->dev = dev;
}
static inline __attribute__((always_inline)) int netdev_uses_trailer_tags(struct net_device *dev)
{
return 0;
}
static inline __attribute__((always_inline)) void *netdev_priv(const struct net_device *dev)
{
return (char *)dev + ((((sizeof(struct net_device))) + ((typeof((sizeof(struct net_device))))((32)) - 1)) & ~((typeof((sizeof(struct net_device))))((32)) - 1));
}
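/*
 * Usage sketch (illustrative only; the struct, field and function names of
 * the form example_* are hypothetical): a driver keeps its private state in
 * the area reserved behind struct net_device and reaches it through
 * netdev_priv() above.
 *
 *	struct example_netdev_priv {
 *		spinlock_t lock;
 *		unsigned long rx_packets;
 *	};
 *
 *	static void example_count_rx(struct net_device *dev)
 *	{
 *		struct example_netdev_priv *priv = netdev_priv(dev);
 *
 *		spin_lock(&priv->lock);
 *		priv->rx_packets++;
 *		spin_unlock(&priv->lock);
 *	}
 */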
# 1236 "include/linux/netdevice.h"
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight);
void netif_napi_del(struct napi_struct *napi);
struct napi_gro_cb {
void *frag0;
unsigned int frag0_len;
int data_offset;
int same_flow;
int flush;
int count;
int free;
};
struct packet_type {
__be16 type;
struct net_device *dev;
int (*func) (struct sk_buff *,
struct net_device *,
struct packet_type *,
struct net_device *);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
int features);
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb);
void *af_packet_priv;
struct list_head list;
};
# 1 "include/linux/interrupt.h" 1
# 10 "include/linux/interrupt.h"
# 1 "include/linux/irqreturn.h" 1
# 10 "include/linux/irqreturn.h"
enum irqreturn {
IRQ_NONE,
IRQ_HANDLED,
IRQ_WAKE_THREAD,
};
typedef enum irqreturn irqreturn_t;
# 11 "include/linux/interrupt.h" 2
# 1 "include/linux/hardirq.h" 1
# 9 "include/linux/hardirq.h"
# 1 "include/linux/ftrace_irq.h" 1
# 9 "include/linux/ftrace_irq.h"
static inline __attribute__((always_inline)) void ftrace_nmi_enter(void) { }
static inline __attribute__((always_inline)) void ftrace_nmi_exit(void) { }
# 10 "include/linux/hardirq.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/hardirq.h" 1
# 13 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/hardirq.h"
extern void ack_bad_irq(unsigned int irq);
# 1 "include/asm-generic/hardirq.h" 1
# 1 "include/linux/irq.h" 1
# 27 "include/linux/irq.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irq.h" 1
# 15 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irq.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mipsmtregs.h" 1
# 168 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mipsmtregs.h"
static inline __attribute__((always_inline)) unsigned int dvpe(void)
{
int res = 0;
__asm__ __volatile__(
" .set push \n"
" .set noreorder \n"
" .set noat \n"
" .set mips32r2 \n"
" .word 0x41610001 # dvpe $1 \n"
" move %0, $1 \n"
" ehb \n"
" .set pop \n"
: "=r" (res));
do { unsigned long tmp; __asm__ __volatile__( " .set mips64r2 \n" " dla %0, 1f \n" " jr.hb %0 \n" " .set mips0 \n" "1: \n" : "=r" (tmp)); } while (0);
return res;
}
static inline __attribute__((always_inline)) void __raw_evpe(void)
{
__asm__ __volatile__(
" .set push \n"
" .set noreorder \n"
" .set noat \n"
" .set mips32r2 \n"
" .word 0x41600021 # evpe \n"
" ehb \n"
" .set pop \n");
}
static inline __attribute__((always_inline)) void evpe(int previous)
{
if ((previous & ((unsigned long)(1))))
__raw_evpe();
}
static inline __attribute__((always_inline)) unsigned int dmt(void)
{
int res;
__asm__ __volatile__(
" .set push \n"
" .set mips32r2 \n"
" .set noat \n"
" .word 0x41610BC1 # dmt $1 \n"
" ehb \n"
" move %0, $1 \n"
" .set pop \n"
: "=r" (res));
do { unsigned long tmp; __asm__ __volatile__( " .set mips64r2 \n" " dla %0, 1f \n" " jr.hb %0 \n" " .set mips0 \n" "1: \n" : "=r" (tmp)); } while (0);
return res;
}
static inline __attribute__((always_inline)) void __raw_emt(void)
{
__asm__ __volatile__(
" .set noreorder \n"
" .set mips32r2 \n"
" .word 0x41600be1 # emt \n"
" ehb \n"
" .set mips0 \n"
" .set reorder");
}
static inline __attribute__((always_inline)) void emt(int previous)
{
if ((previous & ((unsigned long)(1) << 15)))
__raw_emt();
}
static inline __attribute__((always_inline)) void ehb(void)
{
__asm__ __volatile__(
" .set mips32r2 \n"
" ehb \n"
" .set mips0 \n");
}
# 391 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mipsmtregs.h"
static inline __attribute__((always_inline)) unsigned int set_c0_mvpcontrol(unsigned int set) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; if (1 == 0) __asm__ __volatile__( "mfc0\t%0, " "$0" "\n\t" : "=r" (__res)); else __asm__ __volatile__( ".set\tmips32\n\t" "mfc0\t%0, " "$0" ", " "1" "\n\t" ".set\tmips0\n\t" : "=r" (__res)); __res; }); new = res | set; do { if (1 == 0) __asm__ __volatile__( "mtc0\t%z0, " "$0" "\n\t" : : "Jr" ((unsigned int)(new))); else __asm__ __volatile__( ".set\tmips32\n\t" "mtc0\t%z0, " "$0" ", " "1" "\n\t" ".set\tmips0" : : "Jr" ((unsigned int)(new))); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; } static inline __attribute__((always_inline)) unsigned int clear_c0_mvpcontrol(unsigned int clear) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; if (1 == 0) __asm__ __volatile__( "mfc0\t%0, " "$0" "\n\t" : "=r" (__res)); else __asm__ __volatile__( ".set\tmips32\n\t" "mfc0\t%0, " "$0" ", " "1" "\n\t" ".set\tmips0\n\t" : "=r" (__res)); __res; }); new = res & ~clear; do { if (1 == 0) __asm__ __volatile__( "mtc0\t%z0, " "$0" "\n\t" : : "Jr" ((unsigned int)(new))); else __asm__ __volatile__( ".set\tmips32\n\t" "mtc0\t%z0, " "$0" ", " "1" "\n\t" ".set\tmips0" : : "Jr" ((unsigned int)(new))); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; } static inline __attribute__((always_inline)) unsigned int change_c0_mvpcontrol(unsigned int change, unsigned int newbits) { unsigned int res; unsigned int new; unsigned int omt; unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); omt = __dmt(); res = ({ int __res; if (1 == 0) __asm__ __volatile__( "mfc0\t%0, " "$0" "\n\t" : "=r" (__res)); else __asm__ __volatile__( ".set\tmips32\n\t" "mfc0\t%0, " "$0" ", " "1" "\n\t" ".set\tmips0\n\t" : "=r" (__res)); __res; }); new = res & ~change; new |= (newbits & change); do { if (1 == 0) __asm__ __volatile__( "mtc0\t%z0, " "$0" "\n\t" : : "Jr" ((unsigned int)(new))); else __asm__ __volatile__( ".set\tmips32\n\t" "mtc0\t%z0, " "$0" ", " "1" "\n\t" ".set\tmips0" : : "Jr" ((unsigned int)(new))); } while (0); __emt(omt); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); return res; }
# 16 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irq.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/tc3162/irq.h" 1
# 11 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/tc3162/irq.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/mach-generic/irq.h" 1
# 12 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/tc3162/irq.h" 2
# 18 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irq.h" 2
# 30 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irq.h"
struct irqaction;
extern unsigned long irq_hwmask[];
extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
unsigned long hwmask);
static inline __attribute__((always_inline)) void smtc_im_ack_irq(unsigned int irq)
{
if (irq_hwmask[irq] & 0x0000ff00)
set_c0_status(irq_hwmask[irq] & 0x0000ff00);
}
# 53 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irq.h"
extern int plat_set_irq_affinity(unsigned int irq,
const struct cpumask *affinity);
extern void smtc_forward_irq(unsigned int irq);
# 116 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irq.h"
extern void do_IRQ(int irq);
extern void do_IRQ_no_affinity(unsigned int irq);
extern void arch_init_irq(void);
extern void spurious_interrupt(void);
extern int allocate_irqno(void);
extern void alloc_legacy_irqno(void);
extern void free_irqno(unsigned int irq);
extern int cp0_compare_irq;
extern int cp0_compare_irq_shift;
extern int cp0_perfcount_irq;
# 28 "include/linux/irq.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/ptrace.h" 1
# 29 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/ptrace.h"
struct pt_regs {
unsigned long pad0[6];
unsigned long regs[32];
unsigned long cp0_status;
unsigned long hi;
unsigned long lo;
unsigned long cp0_badvaddr;
unsigned long cp0_cause;
unsigned long cp0_epc;
unsigned long cp0_tcstatus;
} __attribute__ ((aligned (8)));
# 78 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/ptrace.h"
enum pt_watch_style {
pt_watch_style_mips32,
pt_watch_style_mips64
};
struct mips32_watch_regs {
unsigned int watchlo[8];
unsigned short watchhi[8];
unsigned short watch_masks[8];
unsigned int num_valid;
} __attribute__((aligned(8)));
struct mips64_watch_regs {
unsigned long long watchlo[8];
unsigned short watchhi[8];
unsigned short watch_masks[8];
unsigned int num_valid;
} __attribute__((aligned(8)));
struct pt_watch_regs {
enum pt_watch_style style;
union {
struct mips32_watch_regs mips32;
struct mips64_watch_regs mips64;
};
};
# 120 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/ptrace.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/isadep.h" 1
# 121 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/ptrace.h" 2
struct task_struct;
extern int ptrace_getregs(struct task_struct *child, __s64 *data);
extern int ptrace_setregs(struct task_struct *child, __s64 *data);
extern int ptrace_getfpregs(struct task_struct *child, __u32 *data);
extern int ptrace_setfpregs(struct task_struct *child, __u32 *data);
extern int ptrace_get_watch_regs(struct task_struct *child,
struct pt_watch_regs *addr);
extern int ptrace_set_watch_regs(struct task_struct *child,
struct pt_watch_regs *addr);
# 144 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/ptrace.h"
extern void do_syscall_trace(struct pt_regs *regs, int entryexit);
extern void die(const char *, struct pt_regs *) __attribute__((noreturn));
static inline __attribute__((always_inline)) void die_if_kernel(const char *str, struct pt_regs *regs)
{
if (__builtin_expect(!!(!(((regs)->cp0_status & 0x18) == 0x10)), 0))
die(str, regs);
}
# 29 "include/linux/irq.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irq_regs.h" 1
# 16 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/irq_regs.h"
static inline __attribute__((always_inline)) struct pt_regs *get_irq_regs(void)
{
return __current_thread_info->regs;
}
# 30 "include/linux/irq.h" 2
struct irq_desc;
typedef void (*irq_flow_handler_t)(unsigned int irq,
struct irq_desc *desc);
# 83 "include/linux/irq.h"
struct proc_dir_entry;
struct msi_desc;
# 111 "include/linux/irq.h"
struct irq_chip {
const char *name;
unsigned int (*startup)(unsigned int irq);
void (*shutdown)(unsigned int irq);
void (*enable)(unsigned int irq);
void (*disable)(unsigned int irq);
void (*ack)(unsigned int irq);
void (*mask)(unsigned int irq);
void (*mask_ack)(unsigned int irq);
void (*unmask)(unsigned int irq);
void (*eoi)(unsigned int irq);
void (*end)(unsigned int irq);
int (*set_affinity)(unsigned int irq,
const struct cpumask *dest);
int (*retrigger)(unsigned int irq);
int (*set_type)(unsigned int irq, unsigned int flow_type);
int (*set_wake)(unsigned int irq, unsigned int on);
void (*bus_lock)(unsigned int irq);
void (*bus_sync_unlock)(unsigned int irq);
# 142 "include/linux/irq.h"
const char *typename;
};
struct timer_rand_state;
struct irq_2_iommu;
# 175 "include/linux/irq.h"
struct irq_desc {
unsigned int irq;
struct timer_rand_state *timer_rand_state;
unsigned int *kstat_irqs;
irq_flow_handler_t handle_irq;
struct irq_chip *chip;
struct msi_desc *msi_desc;
void *handler_data;
void *chip_data;
struct irqaction *action;
unsigned int status;
unsigned int depth;
unsigned int wake_depth;
unsigned int irq_count;
unsigned long last_unhandled;
unsigned int irqs_unhandled;
raw_spinlock_t lock;
cpumask_var_t affinity;
const struct cpumask *affinity_hint;
unsigned int node;
atomic_t threads_active;
wait_queue_head_t wait_for_threads;
struct proc_dir_entry *dir;
const char *name;
} __attribute__((__aligned__(1 << (5))));
extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
struct irq_desc *desc, int node);
extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
extern struct irq_desc irq_desc[64];
static inline __attribute__((always_inline)) struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
{
return desc;
}
extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/hw_irq.h" 1
# 13 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/hw_irq.h"
extern atomic_t irq_err_count;
# 235 "include/linux/irq.h" 2
extern int setup_irq(unsigned int irq, struct irqaction *new);
extern void remove_irq(unsigned int irq, struct irqaction *act);
# 250 "include/linux/irq.h"
static inline __attribute__((always_inline)) void move_irq(int irq)
{
}
static inline __attribute__((always_inline)) void move_native_irq(int irq)
{
}
static inline __attribute__((always_inline)) void move_masked_irq(int irq)
{
}
# 271 "include/linux/irq.h"
extern int no_irq_affinity;
static inline __attribute__((always_inline)) int irq_balancing_disabled(unsigned int irq)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
return desc->status & (0x00010000 | 0x00400000);
}
extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
# 309 "include/linux/irq.h"
static inline __attribute__((always_inline)) void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
desc->handle_irq(irq, desc);
}
static inline __attribute__((always_inline)) void generic_handle_irq(unsigned int irq)
{
generic_handle_irq_desc(irq, irq_to_desc(irq));
}
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
irqreturn_t action_ret);
void check_irq_resend(struct irq_desc *desc, unsigned int irq);
extern int noirqdebug_setup(char *str);
extern int can_request_irq(unsigned int irq, unsigned long irqflags);
extern struct irq_chip no_irq_chip;
extern struct irq_chip dummy_irq_chip;
extern void
set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
irq_flow_handler_t handle);
extern void
set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
irq_flow_handler_t handle, const char *name);
extern void
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name);
static inline __attribute__((always_inline)) void __set_irq_handler_unlocked(int irq,
irq_flow_handler_t handler)
{
struct irq_desc *desc;
desc = irq_to_desc(irq);
desc->handle_irq = handler;
}
static inline __attribute__((always_inline)) void
set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
{
__set_irq_handler(irq, handle, 0, ((void *)0));
}
static inline __attribute__((always_inline)) void
set_irq_chained_handler(unsigned int irq,
irq_flow_handler_t handle)
{
__set_irq_handler(irq, handle, 1, ((void *)0));
}
extern void set_irq_nested_thread(unsigned int irq, int nest);
extern void set_irq_noprobe(unsigned int irq);
extern void set_irq_probe(unsigned int irq);
extern unsigned int create_irq_nr(unsigned int irq_want, int node);
extern int create_irq(void);
extern void destroy_irq(unsigned int irq);
static inline __attribute__((always_inline)) int irq_has_action(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
return desc->action != ((void *)0);
}
extern void dynamic_irq_init(unsigned int irq);
void dynamic_irq_init_keep_chip_data(unsigned int irq);
extern void dynamic_irq_cleanup(unsigned int irq);
void dynamic_irq_cleanup_keep_chip_data(unsigned int irq);
extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
extern int set_irq_data(unsigned int irq, void *data);
extern int set_irq_chip_data(unsigned int irq, void *data);
extern int set_irq_type(unsigned int irq, unsigned int type);
extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
# 439 "include/linux/irq.h"
static inline __attribute__((always_inline)) int alloc_desc_masks(struct irq_desc *desc, int node,
int boot)
{
gfp_t gfp = ((( gfp_t)0x20u));
if (boot)
gfp = (((( gfp_t)0x20u)) & ~(( gfp_t)0x20u));
# 458 "include/linux/irq.h"
return true;
}
static inline __attribute__((always_inline)) void init_desc_masks(struct irq_desc *desc)
{
cpumask_setall(desc->affinity);
}
# 479 "include/linux/irq.h"
static inline __attribute__((always_inline)) void init_copy_desc_masks(struct irq_desc *old_desc,
struct irq_desc *new_desc)
{
}
static inline __attribute__((always_inline)) void free_desc_masks(struct irq_desc *old_desc,
struct irq_desc *new_desc)
{
free_cpumask_var(old_desc->affinity);
}
# 7 "include/asm-generic/hardirq.h" 2
typedef struct {
unsigned int __softirq_pending;
} __attribute__((__aligned__((1 << 5)))) irq_cpustat_t;
# 1 "include/linux/irq_cpustat.h" 1
# 20 "include/linux/irq_cpustat.h"
extern irq_cpustat_t irq_stat[];
# 13 "include/asm-generic/hardirq.h" 2
# 17 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/hardirq.h" 2
# 11 "include/linux/hardirq.h" 2
# 128 "include/linux/hardirq.h"
extern void synchronize_irq(unsigned int irq);
struct task_struct;
static inline __attribute__((always_inline)) void account_system_vtime(struct task_struct *tsk)
{
}
# 193 "include/linux/hardirq.h"
extern void irq_enter(void);
# 208 "include/linux/hardirq.h"
extern void irq_exit(void);
# 13 "include/linux/interrupt.h" 2
# 79 "include/linux/interrupt.h"
enum {
IRQTF_RUNTHREAD,
IRQTF_DIED,
IRQTF_WARNED,
IRQTF_AFFINITY,
};
# 93 "include/linux/interrupt.h"
enum {
IRQC_IS_HARDIRQ = 0,
IRQC_IS_NESTED,
};
typedef irqreturn_t (*irq_handler_t)(int, void *);
# 113 "include/linux/interrupt.h"
struct irqaction {
irq_handler_t handler;
unsigned long flags;
const char *name;
void *dev_id;
struct irqaction *next;
int irq;
struct proc_dir_entry *dir;
irq_handler_t thread_fn;
struct task_struct *thread;
unsigned long thread_flags;
};
extern irqreturn_t no_action(int cpl, void *dev_id);
extern int __attribute__((warn_unused_result))
request_threaded_irq(unsigned int irq, irq_handler_t handler,
irq_handler_t thread_fn,
unsigned long flags, const char *name, void *dev);
static inline __attribute__((always_inline)) int __attribute__((warn_unused_result))
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
const char *name, void *dev)
{
return request_threaded_irq(irq, handler, ((void *)0), flags, name, dev);
}
extern int __attribute__((warn_unused_result))
request_any_context_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *name, void *dev_id);
extern void exit_irq_thread(void);
# 175 "include/linux/interrupt.h"
extern void free_irq(unsigned int, void *);
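/*
 * Usage sketch (illustrative; example_isr(), example_irq_pending() and
 * example_ack_irq() are hypothetical device-specific names): claiming and
 * releasing an interrupt line with the request_irq()/free_irq() pair
 * declared above, typically from ndo_open()/ndo_stop(). IRQF_SHARED is the
 * standard sharing flag from this header's macro layer.
 *
 *	static irqreturn_t example_isr(int irq, void *dev_id)
 *	{
 *		struct net_device *dev = dev_id;
 *
 *		if (!example_irq_pending(dev))
 *			return IRQ_NONE;
 *		example_ack_irq(dev);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(dev->irq, example_isr, IRQF_SHARED, dev->name, dev);
 *	if (err)
 *		return err;
 *	...
 *	free_irq(dev->irq, dev);
 */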
struct device;
extern int __attribute__((warn_unused_result))
devm_request_threaded_irq(struct device *dev, unsigned int irq,
irq_handler_t handler, irq_handler_t thread_fn,
unsigned long irqflags, const char *devname,
void *dev_id);
static inline __attribute__((always_inline)) int __attribute__((warn_unused_result))
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
unsigned long irqflags, const char *devname, void *dev_id)
{
return devm_request_threaded_irq(dev, irq, handler, ((void *)0), irqflags,
devname, dev_id);
}
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
# 213 "include/linux/interrupt.h"
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
static inline __attribute__((always_inline)) int check_wakeup_irqs(void) { return 0; }
# 234 "include/linux/interrupt.h"
extern cpumask_var_t irq_default_affinity;
extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);
extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
# 274 "include/linux/interrupt.h"
static inline __attribute__((always_inline)) void disable_irq_nosync_lockdep(unsigned int irq)
{
disable_irq_nosync(irq);
}
static inline __attribute__((always_inline)) void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
disable_irq_nosync(irq);
}
static inline __attribute__((always_inline)) void disable_irq_lockdep(unsigned int irq)
{
disable_irq(irq);
}
static inline __attribute__((always_inline)) void enable_irq_lockdep(unsigned int irq)
{
enable_irq(irq);
}
static inline __attribute__((always_inline)) void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
enable_irq(irq);
}
extern int set_irq_wake(unsigned int irq, unsigned int on);
static inline __attribute__((always_inline)) int enable_irq_wake(unsigned int irq)
{
return set_irq_wake(irq, 1);
}
static inline __attribute__((always_inline)) int disable_irq_wake(unsigned int irq)
{
return set_irq_wake(irq, 0);
}
# 375 "include/linux/interrupt.h"
enum
{
HI_SOFTIRQ=0,
TIMER_SOFTIRQ,
NET_TX_SOFTIRQ,
NET_RX_SOFTIRQ,
BLOCK_SOFTIRQ,
BLOCK_IOPOLL_SOFTIRQ,
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
HRTIMER_SOFTIRQ,
RCU_SOFTIRQ,
NR_SOFTIRQS
};
extern char *softirq_to_name[NR_SOFTIRQS];
struct softirq_action
{
void (*action)(struct softirq_action *);
};
void do_softirq(void);
void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
extern void wakeup_softirqd(void);
# 421 "include/linux/interrupt.h"
extern __attribute__((section(".data..percpu" ""))) __typeof__(struct list_head [NR_SOFTIRQS]) softirq_work_list;
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
int this_cpu, int softirq);
# 454 "include/linux/interrupt.h"
struct tasklet_struct
{
struct tasklet_struct *next;
unsigned long state;
atomic_t count;
void (*func)(unsigned long);
unsigned long data;
};
# 470 "include/linux/interrupt.h"
enum
{
TASKLET_STATE_SCHED,
TASKLET_STATE_RUN
};
static inline __attribute__((always_inline)) int tasklet_trylock(struct tasklet_struct *t)
{
return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}
static inline __attribute__((always_inline)) void tasklet_unlock(struct tasklet_struct *t)
{
__asm__ __volatile__(" \n" : : :"memory");
clear_bit(TASKLET_STATE_RUN, &(t)->state);
}
static inline __attribute__((always_inline)) void tasklet_unlock_wait(struct tasklet_struct *t)
{
while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { __asm__ __volatile__("": : :"memory"); }
}
extern void __tasklet_schedule(struct tasklet_struct *t);
static inline __attribute__((always_inline)) void tasklet_schedule(struct tasklet_struct *t)
{
if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
__tasklet_schedule(t);
}
extern void __tasklet_hi_schedule(struct tasklet_struct *t);
static inline __attribute__((always_inline)) void tasklet_hi_schedule(struct tasklet_struct *t)
{
if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
__tasklet_hi_schedule(t);
}
extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
static inline __attribute__((always_inline)) void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
__tasklet_hi_schedule_first(t);
}
static inline __attribute__((always_inline)) void tasklet_disable_nosync(struct tasklet_struct *t)
{
atomic_add(1, (&t->count));
__asm__ __volatile__(" \n" : : :"memory");
}
static inline __attribute__((always_inline)) void tasklet_disable(struct tasklet_struct *t)
{
tasklet_disable_nosync(t);
tasklet_unlock_wait(t);
__asm__ __volatile__("": : :"memory");
}
static inline __attribute__((always_inline)) void tasklet_enable(struct tasklet_struct *t)
{
__asm__ __volatile__(" \n" : : :"memory");
atomic_sub(1, (&t->count));
}
static inline __attribute__((always_inline)) void tasklet_hi_enable(struct tasklet_struct *t)
{
__asm__ __volatile__(" \n" : : :"memory");
atomic_sub(1, (&t->count));
}
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data);
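/*
 * Usage sketch (illustrative; example_rx_tasklet and example_rx_work() are
 * hypothetical): deferring work out of hard-interrupt context with the
 * tasklet API above. The handler runs in softirq context, so it must not
 * sleep.
 *
 *	static struct tasklet_struct example_rx_tasklet;
 *
 *	static void example_rx_work(unsigned long data)
 *	{
 *		struct net_device *dev = (struct net_device *)data;
 *		...
 *	}
 *
 *	tasklet_init(&example_rx_tasklet, example_rx_work, (unsigned long)dev);
 *
 *	tasklet_schedule(&example_rx_tasklet);	from the interrupt handler
 *
 *	tasklet_kill(&example_rx_tasklet);	on teardown
 */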
struct tasklet_hrtimer {
struct hrtimer timer;
struct tasklet_struct tasklet;
enum hrtimer_restart (*function)(struct hrtimer *);
};
extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
enum hrtimer_restart (*function)(struct hrtimer *),
clockid_t which_clock, enum hrtimer_mode mode);
static inline __attribute__((always_inline))
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
const enum hrtimer_mode mode)
{
return hrtimer_start(&ttimer->timer, time, mode);
}
static inline __attribute__((always_inline))
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
hrtimer_cancel(&ttimer->timer);
tasklet_kill(&ttimer->tasklet);
}
# 626 "include/linux/interrupt.h"
extern unsigned long probe_irq_on(void);
extern int probe_irq_off(unsigned long);
extern unsigned int probe_irq_mask(unsigned long);
extern void init_irq_proc(void);
struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
struct irq_desc;
extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
extern int arch_init_chip_data(struct irq_desc *desc, int node);
# 1290 "include/linux/netdevice.h" 2
extern rwlock_t dev_base_lock;
# 1309 "include/linux/netdevice.h"
static inline __attribute__((always_inline)) struct net_device *next_net_device(struct net_device *dev)
{
struct list_head *lh;
struct net *net;
net = dev_net(dev);
lh = dev->dev_list.next;
return lh == &net->dev_base_head ? ((void *)0) : ({ const typeof( ((struct net_device *)0)->dev_list ) *__mptr = (lh); (struct net_device *)( (char *)__mptr - __builtin_offsetof(struct net_device,dev_list) );});
}
static inline __attribute__((always_inline)) struct net_device *next_net_device_rcu(struct net_device *dev)
{
struct list_head *lh;
struct net *net;
net = dev_net(dev);
lh = ({ typeof(dev->dev_list.next) _________p1 = (*(volatile typeof(dev->dev_list.next) *)&(dev->dev_list.next)); do { } while(0); (_________p1); });
return lh == &net->dev_base_head ? ((void *)0) : ({ const typeof( ((struct net_device *)0)->dev_list ) *__mptr = (lh); (struct net_device *)( (char *)__mptr - __builtin_offsetof(struct net_device,dev_list) );});
}
static inline __attribute__((always_inline)) struct net_device *first_net_device(struct net *net)
{
return list_empty(&net->dev_base_head) ? ((void *)0) :
({ const typeof( ((struct net_device *)0)->dev_list ) *__mptr = (net->dev_base_head.next); (struct net_device *)( (char *)__mptr - __builtin_offsetof(struct net_device,dev_list) );});
}
extern int netdev_boot_setup_check(struct net_device *dev);
extern unsigned long netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void dev_add_pack(struct packet_type *pt);
extern int pon_check_pack(__u16 type);
extern void dev_remove_pack(struct packet_type *pt);
extern void __dev_remove_pack(struct packet_type *pt);
extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
unsigned short mask);
extern struct net_device *dev_get_by_name(struct net *net, const char *name);
extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
extern int dev_alloc_name(struct net_device *dev, const char *name);
extern int dev_open(struct net_device *dev);
extern int dev_close(struct net_device *dev);
extern void dev_disable_lro(struct net_device *dev);
extern struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb);
extern int dev_queue_xmit(struct sk_buff *skb);
extern int register_netdevice(struct net_device *dev);
extern void unregister_netdevice_queue(struct net_device *dev,
struct list_head *head);
extern void unregister_netdevice_many(struct list_head *head);
static inline __attribute__((always_inline)) void unregister_netdevice(struct net_device *dev)
{
unregister_netdevice_queue(dev, ((void *)0));
}
extern void free_netdev(struct net_device *dev);
extern void synchronize_net(void);
extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
extern int init_dummy_netdev(struct net_device *dev);
extern void netdev_resync_ops(struct net_device *dev);
extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
extern int dev_restart(struct net_device *dev);
extern int skb_gro_receive(struct sk_buff **head,
struct sk_buff *skb);
extern void skb_gro_reset_offset(struct sk_buff *skb);
static inline __attribute__((always_inline)) unsigned int skb_gro_offset(const struct sk_buff *skb)
{
return ((struct napi_gro_cb *)(skb)->cb)->data_offset;
}
static inline __attribute__((always_inline)) unsigned int skb_gro_len(const struct sk_buff *skb)
{
return skb->len - ((struct napi_gro_cb *)(skb)->cb)->data_offset;
}
static inline __attribute__((always_inline)) void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
((struct napi_gro_cb *)(skb)->cb)->data_offset += len;
}
static inline __attribute__((always_inline)) void *skb_gro_header_fast(struct sk_buff *skb,
unsigned int offset)
{
return ((struct napi_gro_cb *)(skb)->cb)->frag0 + offset;
}
static inline __attribute__((always_inline)) int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
return ((struct napi_gro_cb *)(skb)->cb)->frag0_len < hlen;
}
static inline __attribute__((always_inline)) void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
unsigned int offset)
{
((struct napi_gro_cb *)(skb)->cb)->frag0 = ((void *)0);
((struct napi_gro_cb *)(skb)->cb)->frag0_len = 0;
return pskb_may_pull(skb, hlen) ? skb->data + offset : ((void *)0);
}
static inline __attribute__((always_inline)) void *skb_gro_mac_header(struct sk_buff *skb)
{
return ((struct napi_gro_cb *)(skb)->cb)->frag0 ?: skb_mac_header(skb);
}
static inline __attribute__((always_inline)) void *skb_gro_network_header(struct sk_buff *skb)
{
return (((struct napi_gro_cb *)(skb)->cb)->frag0 ?: skb->data) +
skb_network_offset(skb);
}
static inline __attribute__((always_inline)) int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
unsigned len)
{
if (!dev->header_ops || !dev->header_ops->create)
return 0;
return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}
static inline __attribute__((always_inline)) int dev_parse_header(const struct sk_buff *skb,
unsigned char *haddr)
{
const struct net_device *dev = skb->dev;
if (!dev->header_ops || !dev->header_ops->parse)
return 0;
return dev->header_ops->parse(skb, haddr);
}
typedef int gifconf_func_t(struct net_device * dev, char * bufptr, int len);
extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
static inline __attribute__((always_inline)) int unregister_gifconf(unsigned int family)
{
return register_gifconf(family, ((void *)0));
}
struct softnet_data {
struct Qdisc *output_queue;
struct Qdisc **output_queue_tailp;
struct list_head poll_list;
struct sk_buff *completion_queue;
struct sk_buff_head process_queue;
unsigned int processed;
unsigned int time_squeeze;
unsigned int cpu_collision;
unsigned int received_rps;
# 1485 "include/linux/netdevice.h"
unsigned dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
};
static inline __attribute__((always_inline)) void input_queue_head_incr(struct softnet_data *sd)
{
}
static inline __attribute__((always_inline)) void input_queue_tail_incr_save(struct softnet_data *sd,
unsigned int *qtail)
{
}
extern __attribute__((section(".data..percpu" ""))) __typeof__(struct softnet_data) softnet_data __attribute__((__aligned__((1 << 5))));
extern void __netif_schedule(struct Qdisc *q);
static inline __attribute__((always_inline)) void netif_schedule_queue(struct netdev_queue *txq)
{
if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
__netif_schedule(txq->qdisc);
}
static inline __attribute__((always_inline)) void netif_tx_schedule_all(struct net_device *dev)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++)
netif_schedule_queue(netdev_get_tx_queue(dev, i));
}
static inline __attribute__((always_inline)) void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}
static inline __attribute__((always_inline)) void netif_start_queue(struct net_device *dev)
{
netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}
static inline __attribute__((always_inline)) void netif_tx_start_all_queues(struct net_device *dev)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
netif_tx_start_queue(txq);
}
}
static inline __attribute__((always_inline)) void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
__netif_schedule(dev_queue->qdisc);
}
# 1573 "include/linux/netdevice.h"
static inline __attribute__((always_inline)) void netif_wake_queue(struct net_device *dev)
{
netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}
static inline __attribute__((always_inline)) void netif_tx_wake_all_queues(struct net_device *dev)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
netif_tx_wake_queue(txq);
}
}
static inline __attribute__((always_inline)) void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}
# 1600 "include/linux/netdevice.h"
static inline __attribute__((always_inline)) void netif_stop_queue(struct net_device *dev)
{
netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}
static inline __attribute__((always_inline)) void netif_tx_stop_all_queues(struct net_device *dev)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
netif_tx_stop_queue(txq);
}
}
static inline __attribute__((always_inline)) int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}
static inline __attribute__((always_inline)) int netif_queue_stopped(const struct net_device *dev)
{
return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
static inline __attribute__((always_inline)) int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
{
return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
}
static inline __attribute__((always_inline)) int netif_running(const struct net_device *dev)
{
return test_bit(__LINK_STATE_START, &dev->state);
}
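/*
 * Usage sketch (illustrative; the example_tx_ring_*() checks stand in for
 * hypothetical device-specific ring accounting): the usual TX flow-control
 * pattern built from the queue helpers above.
 *
 *	In the transmit path, stop the queue once the TX ring is full:
 *
 *		if (example_tx_ring_full(priv))
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *
 *	In the TX-completion path, wake it once descriptors were reclaimed:
 *
 *		if (netif_queue_stopped(dev) && example_tx_ring_has_room(priv))
 *			netif_wake_queue(dev);
 */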
# 1661 "include/linux/netdevice.h"
static inline __attribute__((always_inline)) void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
netif_tx_start_queue(txq);
}
# 1675 "include/linux/netdevice.h"
static inline __attribute__((always_inline)) void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
netif_tx_stop_queue(txq);
}
# 1692 "include/linux/netdevice.h"
static inline __attribute__((always_inline)) int __netif_subqueue_stopped(const struct net_device *dev,
u16 queue_index)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
return netif_tx_queue_stopped(txq);
}
static inline __attribute__((always_inline)) int netif_subqueue_stopped(const struct net_device *dev,
struct sk_buff *skb)
{
return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
# 1713 "include/linux/netdevice.h"
static inline __attribute__((always_inline)) void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
__netif_schedule(txq->qdisc);
}
static inline __attribute__((always_inline)) int netif_is_multiqueue(const struct net_device *dev)
{
return (dev->num_tx_queues > 1);
}
extern void netif_set_real_num_tx_queues(struct net_device *dev,
unsigned int txq);
extern void dev_kfree_skb_irq(struct sk_buff *skb);
extern void dev_kfree_skb_any(struct sk_buff *skb);
extern int netif_rx(struct sk_buff *skb);
extern int netif_rx_ni(struct sk_buff *skb);
extern int netif_receive_skb(struct sk_buff *skb);
extern gro_result_t dev_gro_receive(struct napi_struct *napi,
struct sk_buff *skb);
extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
extern gro_result_t napi_gro_receive(struct napi_struct *napi,
struct sk_buff *skb);
extern void napi_reuse_skb(struct napi_struct *napi,
struct sk_buff *skb);
extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
extern gro_result_t napi_frags_finish(struct napi_struct *napi,
struct sk_buff *skb,
gro_result_t ret);
extern struct sk_buff * napi_frags_skb(struct napi_struct *napi);
extern gro_result_t napi_gro_frags(struct napi_struct *napi);
static inline __attribute__((always_inline)) void napi_free_frags(struct napi_struct *napi)
{
kfree_skb(napi->skb);
napi->skb = ((void *)0);
}
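/*
 * Usage sketch (illustrative; example_poll() and example_rx_next_skb() are
 * hypothetical, and napi_schedule()/napi_complete() are declared elsewhere
 * in netdevice.h): a minimal NAPI poll callback feeding received frames to
 * the GRO layer via napi_gro_receive() above, registered with
 * netif_napi_add().
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int done = 0;
 *
 *		while (done < budget && (skb = example_rx_next_skb()) != NULL) {
 *			napi_gro_receive(napi, skb);
 *			done++;
 *		}
 *		if (done < budget)
 *			napi_complete(napi);
 *		return done;
 *	}
 *
 *	netif_napi_add(dev, &priv->napi, example_poll, 64);
 */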
extern int netdev_rx_handler_register(struct net_device *dev,
rx_handler_func_t *rx_handler,
void *rx_handler_data);
extern void netdev_rx_handler_unregister(struct net_device *dev);
extern void netif_nit_deliver(struct sk_buff *skb);
extern int dev_valid_name(const char *name);
extern int dev_ioctl(struct net *net, unsigned int cmd, void *);
extern int dev_ethtool(struct net *net, struct ifreq *);
extern unsigned dev_get_flags(const struct net_device *);
extern int __dev_change_flags(struct net_device *, unsigned int flags);
extern int dev_change_flags(struct net_device *, unsigned);
extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
extern int dev_change_name(struct net_device *, const char *);
extern int dev_set_alias(struct net_device *, const char *, size_t);
extern int dev_change_net_namespace(struct net_device *,
struct net *, const char *);
extern int dev_set_mtu(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *,
struct sockaddr *);
extern int dev_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev,
struct netdev_queue *txq);
extern int dev_forward_skb(struct net_device *dev,
struct sk_buff *skb);
extern int netdev_budget;
extern void netdev_run_todo(void);
static inline __attribute__((always_inline)) void dev_put(struct net_device *dev)
{
atomic_sub(1, (&dev->refcnt));
}
static inline __attribute__((always_inline)) void dev_hold(struct net_device *dev)
{
atomic_add(1, (&dev->refcnt));
}
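/*
 * Usage sketch (illustrative; "eth0" is just an example name): lookup
 * helpers such as dev_get_by_name() return the device with a reference
 * already taken via dev_hold(); the caller drops it with dev_put() when
 * finished.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		...
 *		dev_put(dev);
 *	}
 */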
# 1837 "include/linux/netdevice.h"
extern void linkwatch_fire_event(struct net_device *dev);
extern void linkwatch_forget_dev(struct net_device *dev);
static inline __attribute__((always_inline)) int netif_carrier_ok(const struct net_device *dev)
{
return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
extern unsigned long dev_trans_start(struct net_device *dev);
extern void __netdev_watchdog_up(struct net_device *dev);
extern void netif_carrier_on(struct net_device *dev);
extern void netif_carrier_off(struct net_device *dev);
extern void netif_notify_peers(struct net_device *dev);
# 1874 "include/linux/netdevice.h"
static inline __attribute__((always_inline)) void netif_dormant_on(struct net_device *dev)
{
if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
linkwatch_fire_event(dev);
}
static inline __attribute__((always_inline)) void netif_dormant_off(struct net_device *dev)
{
if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
linkwatch_fire_event(dev);
}
static inline __attribute__((always_inline)) int netif_dormant(const struct net_device *dev)
{
return test_bit(__LINK_STATE_DORMANT, &dev->state);
}
# 1910 "include/linux/netdevice.h"
static inline __attribute__((always_inline)) int netif_oper_up(const struct net_device *dev)
{
return (dev->operstate == IF_OPER_UP ||
dev->operstate == IF_OPER_UNKNOWN );
}
static inline __attribute__((always_inline)) int netif_device_present(struct net_device *dev)
{
return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
extern void netif_device_detach(struct net_device *dev);
extern void netif_device_attach(struct net_device *dev);
enum {
NETIF_MSG_DRV = 0x0001,
NETIF_MSG_PROBE = 0x0002,
NETIF_MSG_LINK = 0x0004,
NETIF_MSG_TIMER = 0x0008,
NETIF_MSG_IFDOWN = 0x0010,
NETIF_MSG_IFUP = 0x0020,
NETIF_MSG_RX_ERR = 0x0040,
NETIF_MSG_TX_ERR = 0x0080,
NETIF_MSG_TX_QUEUED = 0x0100,
NETIF_MSG_INTR = 0x0200,
NETIF_MSG_TX_DONE = 0x0400,
NETIF_MSG_RX_STATUS = 0x0800,
NETIF_MSG_PKTDATA = 0x1000,
NETIF_MSG_HW = 0x2000,
NETIF_MSG_WOL = 0x4000,
};
# 1970 "include/linux/netdevice.h"
static inline __attribute__((always_inline)) u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
return default_msg_enable_bits;
if (debug_value == 0)
return 0;
return (1 << debug_value) - 1;
}
static inline __attribute__((always_inline)) void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
spin_lock(&txq->_xmit_lock);
txq->xmit_lock_owner = cpu;
}
static inline __attribute__((always_inline)) void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
txq->xmit_lock_owner = (__current_thread_info->cpu);
}
static inline __attribute__((always_inline)) int __netif_tx_trylock(struct netdev_queue *txq)
{
int ok = spin_trylock(&txq->_xmit_lock);
if (__builtin_expect(!!(ok), 1))
txq->xmit_lock_owner = (__current_thread_info->cpu);
return ok;
}
static inline __attribute__((always_inline)) void __netif_tx_unlock(struct netdev_queue *txq)
{
txq->xmit_lock_owner = -1;
spin_unlock(&txq->_xmit_lock);
}
static inline __attribute__((always_inline)) void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
txq->xmit_lock_owner = -1;
spin_unlock_bh(&txq->_xmit_lock);
}
static inline __attribute__((always_inline)) void txq_trans_update(struct netdev_queue *txq)
{
if (txq->xmit_lock_owner != -1)
txq->trans_start = jiffies;
}
static inline __attribute__((always_inline)) void netif_tx_lock(struct net_device *dev)
{
unsigned int i;
int cpu;
spin_lock(&dev->tx_global_lock);
cpu = (__current_thread_info->cpu);
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
__netif_tx_lock(txq, cpu);
set_bit(__QUEUE_STATE_FROZEN, &txq->state);
__netif_tx_unlock(txq);
}
}
static inline __attribute__((always_inline)) void netif_tx_lock_bh(struct net_device *dev)
{
local_bh_disable();
netif_tx_lock(dev);
}
static inline __attribute__((always_inline)) void netif_tx_unlock(struct net_device *dev)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
netif_schedule_queue(txq);
}
spin_unlock(&dev->tx_global_lock);
}
static inline __attribute__((always_inline)) void netif_tx_unlock_bh(struct net_device *dev)
{
netif_tx_unlock(dev);
local_bh_enable();
}
# 2088 "include/linux/netdevice.h"
static inline __attribute__((always_inline)) void netif_tx_disable(struct net_device *dev)
{
unsigned int i;
int cpu;
local_bh_disable();
cpu = (__current_thread_info->cpu);
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
__netif_tx_lock(txq, cpu);
netif_tx_stop_queue(txq);
__netif_tx_unlock(txq);
}
local_bh_enable();
}
static inline __attribute__((always_inline)) void netif_addr_lock(struct net_device *dev)
{
spin_lock(&dev->addr_list_lock);
}
static inline __attribute__((always_inline)) void netif_addr_lock_bh(struct net_device *dev)
{
spin_lock_bh(&dev->addr_list_lock);
}
static inline __attribute__((always_inline)) void netif_addr_unlock(struct net_device *dev)
{
spin_unlock(&dev->addr_list_lock);
}
static inline __attribute__((always_inline)) void netif_addr_unlock_bh(struct net_device *dev)
{
spin_unlock_bh(&dev->addr_list_lock);
}
# 2134 "include/linux/netdevice.h"
extern void ether_setup(struct net_device *dev);
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
void (*setup)(struct net_device *),
unsigned int queue_count);
extern int register_netdev(struct net_device *dev);
extern void unregister_netdev(struct net_device *dev);
extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list,
int addr_len, unsigned char addr_type);
extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list,
int addr_len, unsigned char addr_type);
extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list,
int addr_len);
extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list,
int addr_len);
extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
extern void __hw_addr_init(struct netdev_hw_addr_list *list);
extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
unsigned char addr_type);
extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
unsigned char addr_type);
extern int dev_addr_add_multiple(struct net_device *to_dev,
struct net_device *from_dev,
unsigned char addr_type);
extern int dev_addr_del_multiple(struct net_device *to_dev,
struct net_device *from_dev,
unsigned char addr_type);
extern void dev_addr_flush(struct net_device *dev);
extern int dev_addr_init(struct net_device *dev);
extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
extern int dev_uc_sync(struct net_device *to, struct net_device *from);
extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
extern void dev_uc_flush(struct net_device *dev);
extern void dev_uc_init(struct net_device *dev);
extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
extern void dev_mc_flush(struct net_device *dev);
extern void dev_mc_init(struct net_device *dev);
extern void dev_set_rx_mode(struct net_device *dev);
extern void __dev_set_rx_mode(struct net_device *dev);
extern int dev_set_promiscuity(struct net_device *dev, int inc);
extern int dev_set_allmulti(struct net_device *dev, int inc);
extern void netdev_state_change(struct net_device *dev);
extern int netdev_bonding_change(struct net_device *dev,
unsigned long event);
extern void netdev_features_change(struct net_device *dev);
extern void dev_load(struct net *net, const char *name);
extern void dev_mcast_init(void);
extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *storage);
extern void dev_txq_stats_fold(const struct net_device *dev,
struct rtnl_link_stats64 *stats);
extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int weight_p;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
extern void netdev_rx_csum_fault(struct net_device *dev);
extern void net_enable_timestamp(void);
extern void net_disable_timestamp(void);
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);
extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
extern void linkwatch_run_queue(void);
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
unsigned long mask);
unsigned long netdev_fix_features(unsigned long features, const char *name);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
static inline __attribute__((always_inline)) int net_gso_ok(int features, int gso_type)
{
int feature = gso_type << 16;
return (features & feature) == feature;
}
static inline __attribute__((always_inline)) int skb_gso_ok(struct sk_buff *skb, int features)
{
return net_gso_ok(features, ((struct skb_shared_info *)(skb_end_pointer(skb)))->gso_type) &&
(!skb_has_frags(skb) || (features & 64));
}
static inline __attribute__((always_inline)) int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
return skb_is_gso(skb) &&
(!skb_gso_ok(skb, dev->features) ||
__builtin_expect(!!(skb->ip_summed != 3), 0));
}
static inline __attribute__((always_inline)) void netif_set_gso_max_size(struct net_device *dev,
unsigned int size)
{
dev->gso_max_size = size;
}
extern int __skb_bond_should_drop(struct sk_buff *skb,
struct net_device *master);
static inline __attribute__((always_inline)) int skb_bond_should_drop(struct sk_buff *skb,
struct net_device *master)
{
if (master)
return __skb_bond_should_drop(skb, master);
return 0;
}
extern struct pernet_operations __attribute__ ((__section__(".init.data"))) loopback_net_ops;
static inline __attribute__((always_inline)) int dev_ethtool_get_settings(struct net_device *dev,
struct ethtool_cmd *cmd)
{
if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
return -122;
return dev->ethtool_ops->get_settings(dev, cmd);
}
static inline __attribute__((always_inline)) u32 dev_ethtool_get_rx_csum(struct net_device *dev)
{
if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
return 0;
return dev->ethtool_ops->get_rx_csum(dev);
}
static inline __attribute__((always_inline)) u32 dev_ethtool_get_flags(struct net_device *dev)
{
if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
return 0;
return dev->ethtool_ops->get_flags(dev);
}
static inline __attribute__((always_inline)) const char *netdev_name(const struct net_device *dev)
{
if (dev->reg_state != NETREG_REGISTERED)
return "(unregistered net_device)";
return dev->name;
}
extern int netdev_printk(const char *level, const struct net_device *dev,
const char *format, ...)
__attribute__ ((format (printf, 3, 4)));
extern int netdev_emerg(const struct net_device *dev, const char *format, ...)
__attribute__ ((format (printf, 2, 3)));
extern int netdev_alert(const struct net_device *dev, const char *format, ...)
__attribute__ ((format (printf, 2, 3)));
extern int netdev_crit(const struct net_device *dev, const char *format, ...)
__attribute__ ((format (printf, 2, 3)));
extern int netdev_err(const struct net_device *dev, const char *format, ...)
__attribute__ ((format (printf, 2, 3)));
extern int netdev_warn(const struct net_device *dev, const char *format, ...)
__attribute__ ((format (printf, 2, 3)));
extern int netdev_notice(const struct net_device *dev, const char *format, ...)
__attribute__ ((format (printf, 2, 3)));
extern int netdev_info(const struct net_device *dev, const char *format, ...)
__attribute__ ((format (printf, 2, 3)));
# 34 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/linux/etherdevice.h" 1
# 30 "include/linux/etherdevice.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/unaligned.h" 1
# 13 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/unaligned.h"
# 1 "include/linux/unaligned/be_struct.h" 1
# 1 "include/linux/unaligned/packed_struct.h" 1
struct __una_u16 { u16 x __attribute__((packed)); };
struct __una_u32 { u32 x __attribute__((packed)); };
struct __una_u64 { u64 x __attribute__((packed)); };
static inline __attribute__((always_inline)) u16 __get_unaligned_cpu16(const void *p)
{
const struct __una_u16 *ptr = (const struct __una_u16 *)p;
return ptr->x;
}
static inline __attribute__((always_inline)) u32 __get_unaligned_cpu32(const void *p)
{
const struct __una_u32 *ptr = (const struct __una_u32 *)p;
return ptr->x;
}
static inline __attribute__((always_inline)) u64 __get_unaligned_cpu64(const void *p)
{
const struct __una_u64 *ptr = (const struct __una_u64 *)p;
return ptr->x;
}
static inline __attribute__((always_inline)) void __put_unaligned_cpu16(u16 val, void *p)
{
struct __una_u16 *ptr = (struct __una_u16 *)p;
ptr->x = val;
}
static inline __attribute__((always_inline)) void __put_unaligned_cpu32(u32 val, void *p)
{
struct __una_u32 *ptr = (struct __una_u32 *)p;
ptr->x = val;
}
static inline __attribute__((always_inline)) void __put_unaligned_cpu64(u64 val, void *p)
{
struct __una_u64 *ptr = (struct __una_u64 *)p;
ptr->x = val;
}
# 5 "include/linux/unaligned/be_struct.h" 2
static inline __attribute__((always_inline)) u16 get_unaligned_be16(const void *p)
{
return __get_unaligned_cpu16((const u8 *)p);
}
static inline __attribute__((always_inline)) u32 get_unaligned_be32(const void *p)
{
return __get_unaligned_cpu32((const u8 *)p);
}
static inline __attribute__((always_inline)) u64 get_unaligned_be64(const void *p)
{
return __get_unaligned_cpu64((const u8 *)p);
}
static inline __attribute__((always_inline)) void put_unaligned_be16(u16 val, void *p)
{
__put_unaligned_cpu16(val, p);
}
static inline __attribute__((always_inline)) void put_unaligned_be32(u32 val, void *p)
{
__put_unaligned_cpu32(val, p);
}
static inline __attribute__((always_inline)) void put_unaligned_be64(u64 val, void *p)
{
__put_unaligned_cpu64(val, p);
}
# 14 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/unaligned.h" 2
# 1 "include/linux/unaligned/le_byteshift.h" 1
static inline __attribute__((always_inline)) u16 __get_unaligned_le16(const u8 *p)
{
return p[0] | p[1] << 8;
}
static inline __attribute__((always_inline)) u32 __get_unaligned_le32(const u8 *p)
{
return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
}
static inline __attribute__((always_inline)) u64 __get_unaligned_le64(const u8 *p)
{
return (u64)__get_unaligned_le32(p + 4) << 32 |
__get_unaligned_le32(p);
}
static inline __attribute__((always_inline)) void __put_unaligned_le16(u16 val, u8 *p)
{
*p++ = val;
*p++ = val >> 8;
}
static inline __attribute__((always_inline)) void __put_unaligned_le32(u32 val, u8 *p)
{
__put_unaligned_le16(val >> 16, p + 2);
__put_unaligned_le16(val, p);
}
static inline __attribute__((always_inline)) void __put_unaligned_le64(u64 val, u8 *p)
{
__put_unaligned_le32(val >> 32, p + 4);
__put_unaligned_le32(val, p);
}
static inline __attribute__((always_inline)) u16 get_unaligned_le16(const void *p)
{
return __get_unaligned_le16((const u8 *)p);
}
static inline __attribute__((always_inline)) u32 get_unaligned_le32(const void *p)
{
return __get_unaligned_le32((const u8 *)p);
}
static inline __attribute__((always_inline)) u64 get_unaligned_le64(const void *p)
{
return __get_unaligned_le64((const u8 *)p);
}
static inline __attribute__((always_inline)) void put_unaligned_le16(u16 val, void *p)
{
__put_unaligned_le16(val, p);
}
static inline __attribute__((always_inline)) void put_unaligned_le32(u32 val, void *p)
{
__put_unaligned_le32(val, p);
}
static inline __attribute__((always_inline)) void put_unaligned_le64(u64 val, void *p)
{
__put_unaligned_le64(val, p);
}
# 15 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/unaligned.h" 2
# 26 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/unaligned.h"
# 1 "include/linux/unaligned/generic.h" 1
extern void __bad_unaligned_access_size(void);
# 27 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/unaligned.h" 2
# 31 "include/linux/etherdevice.h" 2
extern __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
extern int eth_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr, unsigned len);
extern int eth_rebuild_header(struct sk_buff *skb);
extern int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh);
extern void eth_header_cache_update(struct hh_cache *hh,
const struct net_device *dev,
const unsigned char *haddr);
extern int eth_mac_addr(struct net_device *dev, void *p);
extern int eth_change_mtu(struct net_device *dev, int new_mtu);
extern int eth_validate_addr(struct net_device *dev);
extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count);
# 60 "include/linux/etherdevice.h"
static inline __attribute__((always_inline)) int is_zero_ether_addr(const u8 *addr)
{
return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
}
# 72 "include/linux/etherdevice.h"
static inline __attribute__((always_inline)) int is_multicast_ether_addr(const u8 *addr)
{
return (0x01 & addr[0]);
}
static inline __attribute__((always_inline)) int is_local_ether_addr(const u8 *addr)
{
return (0x02 & addr[0]);
}
static inline __attribute__((always_inline)) int is_broadcast_ether_addr(const u8 *addr)
{
return (addr[0] & addr[1] & addr[2] & addr[3] & addr[4] & addr[5]) == 0xff;
}
# 108 "include/linux/etherdevice.h"
static inline __attribute__((always_inline)) int is_valid_ether_addr(const u8 *addr)
{
return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
}
# 122 "include/linux/etherdevice.h"
static inline __attribute__((always_inline)) void random_ether_addr(u8 *addr)
{
get_random_bytes (addr, 6);
addr [0] &= 0xfe;
addr [0] |= 0x02;
}
# 137 "include/linux/etherdevice.h"
static inline __attribute__((always_inline)) void dev_hw_addr_random(struct net_device *dev, u8 *hwaddr)
{
dev->addr_assign_type |= 1;
random_ether_addr(hwaddr);
}
# 150 "include/linux/etherdevice.h"
static inline __attribute__((always_inline)) unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2)
{
const u16 *a = (const u16 *) addr1;
const u16 *b = (const u16 *) addr2;
((void)(sizeof(struct { int:-!!(6 != 6); })));
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
}
static inline __attribute__((always_inline)) unsigned long zap_last_2bytes(unsigned long value)
{
return value >> 16;
}
# 182 "include/linux/etherdevice.h"
static inline __attribute__((always_inline)) unsigned compare_ether_addr_64bits(const u8 addr1[6+2],
const u8 addr2[6+2])
{
# 196 "include/linux/etherdevice.h"
return compare_ether_addr(addr1, addr2);
}
# 211 "include/linux/etherdevice.h"
static inline __attribute__((always_inline)) int is_etherdev_addr(const struct net_device *dev,
const u8 addr[6 + 2])
{
struct netdev_hw_addr *ha;
int res = 1;
rcu_read_lock();
for (ha = ({ const typeof( ((typeof(*ha) *)0)->list ) *__mptr = (({ typeof((&dev->dev_addrs.list)->next) _________p1 = (*(volatile typeof((&dev->dev_addrs.list)->next) *)&((&dev->dev_addrs.list)->next)); do { } while(0); (_________p1); })); (typeof(*ha) *)( (char *)__mptr - __builtin_offsetof(typeof(*ha),list) );}); __builtin_prefetch((ha->list.next), 0, 1), &ha->list != (&dev->dev_addrs.list); ha = ({ const typeof( ((typeof(*ha) *)0)->list ) *__mptr = (({ typeof(ha->list.next) _________p1 = (*(volatile typeof(ha->list.next) *)&(ha->list.next)); do { } while(0); (_________p1); })); (typeof(*ha) *)( (char *)__mptr - __builtin_offsetof(typeof(*ha),list) );})) {
res = compare_ether_addr_64bits(addr, ha->addr);
if (!res)
break;
}
rcu_read_unlock();
return !res;
}
# 240 "include/linux/etherdevice.h"
static inline __attribute__((always_inline)) int compare_ether_header(const void *a, const void *b)
{
u32 *a32 = (u32 *)((u8 *)a + 2);
u32 *b32 = (u32 *)((u8 *)b + 2);
return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
(a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
}
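/* Illustrative sketch, not part of etherdevice.h: a typical use of the
 * address helpers above when a MAC address is taken from configuration.
 * The function name is an assumption for this example only. */
static inline __attribute__((always_inline)) void epon_doc_example_fixup_mac(u8 *mac)
{
 if (!is_valid_ether_addr(mac))
  random_ether_addr(mac); /* random unicast, locally administered address */
}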
# 35 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/linux/signal.h" 1
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/signal.h" 1
# 18 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/signal.h"
typedef struct {
unsigned long sig[(128 / (sizeof(unsigned long) * 8))];
} sigset_t;
typedef unsigned long old_sigset_t;
# 112 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/signal.h"
# 1 "include/asm-generic/signal-defs.h" 1
# 17 "include/asm-generic/signal-defs.h"
typedef void __signalfn_t(int);
typedef __signalfn_t *__sighandler_t;
typedef void __restorefn_t(void);
typedef __restorefn_t *__sigrestore_t;
# 113 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/signal.h" 2
struct sigaction {
unsigned int sa_flags;
__sighandler_t sa_handler;
sigset_t sa_mask;
};
struct k_sigaction {
struct sigaction sa;
};
typedef struct sigaltstack {
void *ss_sp;
size_t ss_size;
int ss_flags;
} stack_t;
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/sigcontext.h" 1
# 21 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/sigcontext.h"
struct sigcontext {
unsigned int sc_regmask;
unsigned int sc_status;
unsigned long long sc_pc;
unsigned long long sc_regs[32];
unsigned long long sc_fpregs[32];
unsigned int sc_acx;
unsigned int sc_fpc_csr;
unsigned int sc_fpc_eir;
unsigned int sc_used_math;
unsigned int sc_dsp;
unsigned long long sc_mdhi;
unsigned long long sc_mdlo;
unsigned long sc_hi1;
unsigned long sc_lo1;
unsigned long sc_hi2;
unsigned long sc_lo2;
unsigned long sc_hi3;
unsigned long sc_lo3;
};
# 133 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/signal.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/siginfo.h" 1
# 23 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/siginfo.h"
struct siginfo;
# 35 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/siginfo.h"
# 1 "include/asm-generic/siginfo.h" 1
typedef union sigval {
int sival_int;
void *sival_ptr;
} sigval_t;
# 259 "include/asm-generic/siginfo.h"
typedef struct sigevent {
sigval_t sigev_value;
int sigev_signo;
int sigev_notify;
union {
int _pad[((64 - (sizeof(long) + 2*sizeof(int))) / sizeof(int))];
int _tid;
struct {
void (*_function)(sigval_t);
void *_attribute;
} _sigev_thread;
} _sigev_un;
} sigevent_t;
struct siginfo;
void do_schedule_next_timer(struct siginfo *info);
# 298 "include/asm-generic/siginfo.h"
extern int copy_siginfo_to_user(struct siginfo *to, struct siginfo *from);
# 36 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/siginfo.h" 2
typedef struct siginfo {
int si_signo;
int si_code;
int si_errno;
int __pad0[128 / sizeof(int) - ((128 - (3 * sizeof(int))) / sizeof(int)) - 3];
union {
int _pad[((128 - (3 * sizeof(int))) / sizeof(int))];
struct {
pid_t _pid;
__kernel_uid32_t _uid;
} _kill;
struct {
timer_t _tid;
int _overrun;
char _pad[sizeof( __kernel_uid32_t) - sizeof(int)];
sigval_t _sigval;
int _sys_private;
} _timer;
struct {
pid_t _pid;
__kernel_uid32_t _uid;
sigval_t _sigval;
} _rt;
struct {
pid_t _pid;
__kernel_uid32_t _uid;
int _status;
clock_t _utime;
clock_t _stime;
} _sigchld;
struct {
pid_t _pid;
clock_t _utime;
int _status;
clock_t _stime;
} _irix_sigchld;
struct {
void *_addr;
short _addr_lsb;
} _sigfault;
struct {
long _band;
int _fd;
} _sigpoll;
} _sifields;
} siginfo_t;
# 120 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/siginfo.h"
static inline __attribute__((always_inline)) void copy_siginfo(struct siginfo *to, struct siginfo *from)
{
if (from->si_code < 0)
memcpy(to, from, sizeof(*to));
else
memcpy(to, from, 3*sizeof(int) + sizeof(from->_sifields._sigchld));
}
# 134 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/signal.h" 2
# 5 "include/linux/signal.h" 2
extern int print_fatal_signals;
struct sigqueue {
struct list_head list;
int flags;
siginfo_t info;
struct user_struct *user;
};
struct sigpending {
struct list_head list;
sigset_t signal;
};
# 40 "include/linux/signal.h"
static inline __attribute__((always_inline)) void sigaddset(sigset_t *set, int _sig)
{
unsigned long sig = _sig - 1;
if ((128 / (sizeof(unsigned long) * 8)) == 1)
set->sig[0] |= 1UL << sig;
else
set->sig[sig / (sizeof(unsigned long) * 8)] |= 1UL << (sig % (sizeof(unsigned long) * 8));
}
static inline __attribute__((always_inline)) void sigdelset(sigset_t *set, int _sig)
{
unsigned long sig = _sig - 1;
if ((128 / (sizeof(unsigned long) * 8)) == 1)
set->sig[0] &= ~(1UL << sig);
else
set->sig[sig / (sizeof(unsigned long) * 8)] &= ~(1UL << (sig % (sizeof(unsigned long) * 8)));
}
static inline __attribute__((always_inline)) int sigismember(sigset_t *set, int _sig)
{
unsigned long sig = _sig - 1;
if ((128 / (sizeof(unsigned long) * 8)) == 1)
return 1 & (set->sig[0] >> sig);
else
return 1 & (set->sig[sig / (sizeof(unsigned long) * 8)] >> (sig % (sizeof(unsigned long) * 8)));
}
static inline __attribute__((always_inline)) int sigfindinword(unsigned long word)
{
return __ffs(~(~word));
}
static inline __attribute__((always_inline)) int sigisemptyset(sigset_t *set)
{
extern void _NSIG_WORDS_is_unsupported_size(void);
switch ((128 / (sizeof(unsigned long) * 8))) {
case 4:
return (set->sig[3] | set->sig[2] |
set->sig[1] | set->sig[0]) == 0;
case 2:
return (set->sig[1] | set->sig[0]) == 0;
case 1:
return set->sig[0] == 0;
default:
_NSIG_WORDS_is_unsupported_size();
return 0;
}
}
# 121 "include/linux/signal.h"
static inline __attribute__((always_inline)) void sigorsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((128 / (sizeof(unsigned long) * 8))) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) | (b3)); r->sig[2] = ((a2) | (b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) | (b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) | (b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } }
static inline __attribute__((always_inline)) void sigandsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((128 / (sizeof(unsigned long) * 8))) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) & (b3)); r->sig[2] = ((a2) & (b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) & (b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) & (b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } }
static inline __attribute__((always_inline)) void signandsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((128 / (sizeof(unsigned long) * 8))) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) & ~(b3)); r->sig[2] = ((a2) & ~(b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) & ~(b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) & ~(b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } }
# 151 "include/linux/signal.h"
static inline __attribute__((always_inline)) void signotset(sigset_t *set) { extern void _NSIG_WORDS_is_unsupported_size(void); switch ((128 / (sizeof(unsigned long) * 8))) { case 4: set->sig[3] = (~(set->sig[3])); set->sig[2] = (~(set->sig[2])); case 2: set->sig[1] = (~(set->sig[1])); case 1: set->sig[0] = (~(set->sig[0])); break; default: _NSIG_WORDS_is_unsupported_size(); } }
static inline __attribute__((always_inline)) void sigemptyset(sigset_t *set)
{
switch ((128 / (sizeof(unsigned long) * 8))) {
default:
memset(set, 0, sizeof(sigset_t));
break;
case 2: set->sig[1] = 0;
case 1: set->sig[0] = 0;
break;
}
}
static inline __attribute__((always_inline)) void sigfillset(sigset_t *set)
{
switch ((128 / (sizeof(unsigned long) * 8))) {
default:
memset(set, -1, sizeof(sigset_t));
break;
case 2: set->sig[1] = -1;
case 1: set->sig[0] = -1;
break;
}
}
static inline __attribute__((always_inline)) void sigaddsetmask(sigset_t *set, unsigned long mask)
{
set->sig[0] |= mask;
}
static inline __attribute__((always_inline)) void sigdelsetmask(sigset_t *set, unsigned long mask)
{
set->sig[0] &= ~mask;
}
static inline __attribute__((always_inline)) int sigtestsetmask(sigset_t *set, unsigned long mask)
{
return (set->sig[0] & mask) != 0;
}
static inline __attribute__((always_inline)) void siginitset(sigset_t *set, unsigned long mask)
{
set->sig[0] = mask;
switch ((128 / (sizeof(unsigned long) * 8))) {
default:
memset(&set->sig[1], 0, sizeof(long)*((128 / (sizeof(unsigned long) * 8))-1));
break;
case 2: set->sig[1] = 0;
case 1: ;
}
}
static inline __attribute__((always_inline)) void siginitsetinv(sigset_t *set, unsigned long mask)
{
set->sig[0] = ~mask;
switch ((128 / (sizeof(unsigned long) * 8))) {
default:
memset(&set->sig[1], -1, sizeof(long)*((128 / (sizeof(unsigned long) * 8))-1));
break;
case 2: set->sig[1] = -1;
case 1: ;
}
}
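/* Illustrative sketch, not part of signal.h: building a signal mask with the
 * helpers above. The signal number 15 (SIGTERM) is written literally because
 * the SIG* constants are not visible at this point; the function name is an
 * assumption for this example only. */
static inline __attribute__((always_inline)) int epon_doc_example_sigmask(void)
{
 sigset_t mask;
 sigemptyset(&mask); /* clear every word of the set */
 sigaddset(&mask, 15); /* mark SIGTERM */
 return sigismember(&mask, 15); /* 1: the bit set above is reported back */
}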
static inline __attribute__((always_inline)) void init_sigpending(struct sigpending *sig)
{
sigemptyset(&sig->signal);
INIT_LIST_HEAD(&sig->list);
}
extern void flush_sigqueue(struct sigpending *queue);
static inline __attribute__((always_inline)) int valid_signal(unsigned long sig)
{
return sig <= 128 ? 1 : 0;
}
extern int next_signal(struct sigpending *pending, sigset_t *mask);
extern int do_send_sig_info(int sig, struct siginfo *info,
struct task_struct *p, int group);
extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p);
extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *);
extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
siginfo_t *info);
extern long do_sigpending(void *, unsigned long);
extern int sigprocmask(int, sigset_t *, sigset_t *);
extern int show_unhandled_signals;
struct pt_regs;
extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie);
extern void exit_signals(struct task_struct *tsk);
extern struct kmem_cache *sighand_cachep;
int unhandled_signal(struct task_struct *tsk, int sig);
# 379 "include/linux/signal.h"
void signals_init(void);
# 38 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/linux/sched.h" 1
# 46 "include/linux/sched.h"
struct sched_param {
int sched_priority;
};
# 68 "include/linux/sched.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cputime.h" 1
# 1 "include/asm-generic/cputime.h" 1
typedef unsigned long cputime_t;
# 25 "include/asm-generic/cputime.h"
typedef u64 cputime64_t;
# 5 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cputime.h" 2
# 69 "include/linux/sched.h" 2
# 1 "include/linux/sem.h" 1
# 1 "include/linux/ipc.h" 1
# 9 "include/linux/ipc.h"
struct ipc_perm
{
__kernel_key_t key;
__kernel_uid_t uid;
__kernel_gid_t gid;
__kernel_uid_t cuid;
__kernel_gid_t cgid;
__kernel_mode_t mode;
unsigned short seq;
};
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/ipcbuf.h" 1
# 14 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/ipcbuf.h"
struct ipc64_perm
{
__kernel_key_t key;
__kernel_uid_t uid;
__kernel_gid_t gid;
__kernel_uid_t cuid;
__kernel_gid_t cgid;
__kernel_mode_t mode;
unsigned short seq;
unsigned short __pad1;
unsigned long __unused1;
unsigned long __unused2;
};
# 22 "include/linux/ipc.h" 2
# 57 "include/linux/ipc.h"
struct ipc_kludge {
struct msgbuf *msgp;
long msgtyp;
};
# 86 "include/linux/ipc.h"
struct kern_ipc_perm
{
spinlock_t lock;
int deleted;
int id;
key_t key;
uid_t uid;
gid_t gid;
uid_t cuid;
gid_t cgid;
mode_t mode;
unsigned long seq;
void *security;
};
# 5 "include/linux/sem.h" 2
# 23 "include/linux/sem.h"
struct semid_ds {
struct ipc_perm sem_perm;
__kernel_time_t sem_otime;
__kernel_time_t sem_ctime;
struct sem *sem_base;
struct sem_queue *sem_pending;
struct sem_queue **sem_pending_last;
struct sem_undo *undo;
unsigned short sem_nsems;
};
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/sembuf.h" 1
# 13 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/sembuf.h"
struct semid64_ds {
struct ipc64_perm sem_perm;
__kernel_time_t sem_otime;
__kernel_time_t sem_ctime;
unsigned long sem_nsems;
unsigned long __unused1;
unsigned long __unused2;
};
# 36 "include/linux/sem.h" 2
struct sembuf {
unsigned short sem_num;
short sem_op;
short sem_flg;
};
union semun {
int val;
struct semid_ds *buf;
unsigned short *array;
struct seminfo *__buf;
void *__pad;
};
struct seminfo {
int semmap;
int semmni;
int semmns;
int semmnu;
int semmsl;
int semopm;
int semume;
int semusz;
int semvmx;
int semaem;
};
# 84 "include/linux/sem.h"
struct task_struct;
struct sem {
int semval;
int sempid;
struct list_head sem_pending;
};
struct sem_array {
struct kern_ipc_perm __attribute__((__aligned__((1 << 5))))
sem_perm;
time_t sem_otime;
time_t sem_ctime;
struct sem *sem_base;
struct list_head sem_pending;
struct list_head list_id;
int sem_nsems;
int complex_count;
};
struct sem_queue {
struct list_head simple_list;
struct list_head list;
struct task_struct *sleeper;
struct sem_undo *undo;
int pid;
int status;
struct sembuf *sops;
int nsops;
int alter;
};
struct sem_undo {
struct list_head list_proc;
struct rcu_head rcu;
struct sem_undo_list *ulp;
struct list_head list_id;
int semid;
short * semadj;
};
struct sem_undo_list {
atomic_t refcnt;
spinlock_t lock;
struct list_head list_proc;
};
struct sysv_sem {
struct sem_undo_list *undo_list;
};
extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
extern void exit_sem(struct task_struct *tsk);
# 72 "include/linux/sched.h" 2
# 1 "include/linux/proportions.h" 1
# 16 "include/linux/proportions.h"
struct prop_global {
int shift;
struct percpu_counter events;
};
struct prop_descriptor {
int index;
struct prop_global pg[2];
struct mutex mutex;
};
int prop_descriptor_init(struct prop_descriptor *pd, int shift);
void prop_change_shift(struct prop_descriptor *pd, int new_shift);
struct prop_local_percpu {
struct percpu_counter events;
int shift;
unsigned long period;
spinlock_t lock;
};
int prop_local_init_percpu(struct prop_local_percpu *pl);
void prop_local_destroy_percpu(struct prop_local_percpu *pl);
void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
long *numerator, long *denominator);
static inline __attribute__((always_inline))
void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
{
unsigned long flags;
do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0);
__prop_inc_percpu(pd, pl);
do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0);
}
# 89 "include/linux/proportions.h"
void __prop_inc_percpu_max(struct prop_descriptor *pd,
struct prop_local_percpu *pl, long frac);
struct prop_local_single {
unsigned long events;
unsigned long period;
int shift;
spinlock_t lock;
};
int prop_local_init_single(struct prop_local_single *pl);
void prop_local_destroy_single(struct prop_local_single *pl);
void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
long *numerator, long *denominator);
static inline __attribute__((always_inline))
void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
{
unsigned long flags;
do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0);
__prop_inc_single(pd, pl);
do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0);
}
# 80 "include/linux/sched.h" 2
# 1 "include/linux/seccomp.h" 1
# 26 "include/linux/seccomp.h"
typedef struct { } seccomp_t;
static inline __attribute__((always_inline)) long prctl_get_seccomp(void)
{
return -22;
}
static inline __attribute__((always_inline)) long prctl_set_seccomp(unsigned long arg2)
{
return -22;
}
# 81 "include/linux/sched.h" 2
# 1 "include/linux/rtmutex.h" 1
# 19 "include/linux/rtmutex.h"
extern int max_lock_depth;
# 28 "include/linux/rtmutex.h"
struct rt_mutex {
raw_spinlock_t wait_lock;
struct plist_head wait_list;
struct task_struct *owner;
};
struct rt_mutex_waiter;
struct hrtimer_sleeper;
static inline __attribute__((always_inline)) int rt_mutex_debug_check_no_locks_freed(const void *from,
unsigned long len)
{
return 0;
}
# 82 "include/linux/rtmutex.h"
static inline __attribute__((always_inline)) int rt_mutex_is_locked(struct rt_mutex *lock)
{
return lock->owner != ((void *)0);
}
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
extern void rt_mutex_destroy(struct rt_mutex *lock);
extern void rt_mutex_lock(struct rt_mutex *lock);
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
int detect_deadlock);
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *timeout,
int detect_deadlock);
extern int rt_mutex_trylock(struct rt_mutex *lock);
extern void rt_mutex_unlock(struct rt_mutex *lock);
# 84 "include/linux/sched.h" 2
# 1 "include/linux/resource.h" 1
# 22 "include/linux/resource.h"
struct rusage {
struct timeval ru_utime;
struct timeval ru_stime;
long ru_maxrss;
long ru_ixrss;
long ru_idrss;
long ru_isrss;
long ru_minflt;
long ru_majflt;
long ru_nswap;
long ru_inblock;
long ru_oublock;
long ru_msgsnd;
long ru_msgrcv;
long ru_nsignals;
long ru_nvcsw;
long ru_nivcsw;
};
struct rlimit {
unsigned long rlim_cur;
unsigned long rlim_max;
};
struct rlimit64 {
__u64 rlim_cur;
__u64 rlim_max;
};
# 76 "include/linux/resource.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/resource.h" 1
# 33 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/resource.h"
# 1 "include/asm-generic/resource.h" 1
# 34 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/resource.h" 2
# 77 "include/linux/resource.h" 2
struct task_struct;
int getrusage(struct task_struct *p, int who, struct rusage *ru);
int do_prlimit(struct task_struct *tsk, unsigned int resource,
struct rlimit *new_rlim, struct rlimit *old_rlim);
# 88 "include/linux/sched.h" 2
# 1 "include/linux/task_io_accounting.h" 1
# 11 "include/linux/task_io_accounting.h"
struct task_io_accounting {
# 45 "include/linux/task_io_accounting.h"
};
# 91 "include/linux/sched.h" 2
# 1 "include/linux/latencytop.h" 1
# 41 "include/linux/latencytop.h"
static inline __attribute__((always_inline)) void
account_scheduler_latency(struct task_struct *task, int usecs, int inter)
{
}
static inline __attribute__((always_inline)) void clear_all_latency_tracing(struct task_struct *p)
{
}
# 93 "include/linux/sched.h" 2
# 1 "include/linux/cred.h" 1
# 17 "include/linux/cred.h"
# 1 "include/linux/key.h" 1
# 29 "include/linux/key.h"
typedef int32_t key_serial_t;
typedef uint32_t key_perm_t;
struct key;
# 18 "include/linux/cred.h" 2
# 1 "include/linux/selinux.h" 1
# 17 "include/linux/selinux.h"
struct selinux_audit_rule;
struct audit_context;
struct kern_ipc_perm;
# 71 "include/linux/selinux.h"
static inline __attribute__((always_inline)) int selinux_string_to_sid(const char *str, u32 *sid)
{
*sid = 0;
return 0;
}
static inline __attribute__((always_inline)) int selinux_secmark_relabel_packet_permission(u32 sid)
{
return 0;
}
static inline __attribute__((always_inline)) void selinux_secmark_refcount_inc(void)
{
return;
}
static inline __attribute__((always_inline)) void selinux_secmark_refcount_dec(void)
{
return;
}
static inline __attribute__((always_inline)) int selinux_is_enabled(void)
{
return false;
}
# 19 "include/linux/cred.h" 2
struct user_struct;
struct cred;
struct inode;
struct group_info {
atomic_t usage;
int ngroups;
int nblocks;
gid_t small_block[32];
gid_t *blocks[0];
};
# 48 "include/linux/cred.h"
static inline __attribute__((always_inline)) struct group_info *get_group_info(struct group_info *gi)
{
atomic_add(1, (&gi->usage));
return gi;
}
# 64 "include/linux/cred.h"
extern struct group_info *groups_alloc(int);
extern struct group_info init_groups;
extern void groups_free(struct group_info *);
extern int set_current_groups(struct group_info *);
extern int set_groups(struct cred *, struct group_info *);
extern int groups_search(const struct group_info *, gid_t);
extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);
# 116 "include/linux/cred.h"
struct cred {
atomic_t usage;
uid_t uid;
gid_t gid;
uid_t suid;
gid_t sgid;
uid_t euid;
gid_t egid;
uid_t fsuid;
gid_t fsgid;
unsigned securebits;
kernel_cap_t cap_inheritable;
kernel_cap_t cap_permitted;
kernel_cap_t cap_effective;
kernel_cap_t cap_bset;
# 148 "include/linux/cred.h"
struct user_struct *user;
struct group_info *group_info;
struct rcu_head rcu;
};
extern void __put_cred(struct cred *);
extern void exit_creds(struct task_struct *);
extern int copy_creds(struct task_struct *, unsigned long);
extern const struct cred *get_task_cred(struct task_struct *);
extern struct cred *cred_alloc_blank(void);
extern struct cred *prepare_creds(void);
extern struct cred *prepare_exec_creds(void);
extern int commit_creds(struct cred *);
extern void abort_creds(struct cred *);
extern const struct cred *override_creds(const struct cred *);
extern void revert_creds(const struct cred *);
extern struct cred *prepare_kernel_cred(struct task_struct *);
extern int change_create_files_as(struct cred *, struct inode *);
extern int set_security_override(struct cred *, u32);
extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) cred_init(void);
# 200 "include/linux/cred.h"
static inline __attribute__((always_inline)) void validate_creds(const struct cred *cred)
{
}
static inline __attribute__((always_inline)) void validate_creds_for_do_exit(struct task_struct *tsk)
{
}
static inline __attribute__((always_inline)) void validate_process_creds(void)
{
}
# 218 "include/linux/cred.h"
static inline __attribute__((always_inline)) struct cred *get_new_cred(struct cred *cred)
{
atomic_add(1, (&cred->usage));
return cred;
}
# 237 "include/linux/cred.h"
static inline __attribute__((always_inline)) const struct cred *get_cred(const struct cred *cred)
{
struct cred *nonconst_cred = (struct cred *) cred;
validate_creds(cred);
return get_new_cred(nonconst_cred);
}
# 255 "include/linux/cred.h"
static inline __attribute__((always_inline)) void put_cred(const struct cred *_cred)
{
struct cred *cred = (struct cred *) _cred;
validate_creds(cred);
if ((atomic_sub_return(1, (&(cred)->usage)) == 0))
__put_cred(cred);
}
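/* Illustrative sketch, not part of cred.h: taking and dropping a reference
 * on a cred with the helpers above. The function name is an assumption for
 * this example only. */
static inline __attribute__((always_inline)) uid_t epon_doc_example_cred_uid(const struct cred *cred)
{
 const struct cred *ref = get_cred(cred); /* bumps cred->usage */
 uid_t uid = ref->uid;
 put_cred(ref); /* drops the reference; __put_cred() runs when usage hits 0 */
 return uid;
}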
# 94 "include/linux/sched.h" 2
struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
# 120 "include/linux/sched.h"
extern unsigned long avenrun[];
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
# 135 "include/linux/sched.h"
extern unsigned long total_forks;
extern int nr_threads;
extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned long) process_counts;
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);
extern void calc_global_load(void);
extern unsigned long get_parent_ip(unsigned long addr);
struct seq_file;
struct cfs_rq;
struct task_group;
static inline __attribute__((always_inline)) void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
}
static inline __attribute__((always_inline)) void proc_sched_set_task(struct task_struct *p)
{
}
static inline __attribute__((always_inline)) void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
# 198 "include/linux/sched.h"
extern char ___assert_task_state[1 - 2*!!(
sizeof("RSDTtZXxKW")-1 != ( __builtin_constant_p(512) ? ( (512) < 1 ? ____ilog2_NaN() : (512) & (1ULL << 63) ? 63 : (512) & (1ULL << 62) ? 62 : (512) & (1ULL << 61) ? 61 : (512) & (1ULL << 60) ? 60 : (512) & (1ULL << 59) ? 59 : (512) & (1ULL << 58) ? 58 : (512) & (1ULL << 57) ? 57 : (512) & (1ULL << 56) ? 56 : (512) & (1ULL << 55) ? 55 : (512) & (1ULL << 54) ? 54 : (512) & (1ULL << 53) ? 53 : (512) & (1ULL << 52) ? 52 : (512) & (1ULL << 51) ? 51 : (512) & (1ULL << 50) ? 50 : (512) & (1ULL << 49) ? 49 : (512) & (1ULL << 48) ? 48 : (512) & (1ULL << 47) ? 47 : (512) & (1ULL << 46) ? 46 : (512) & (1ULL << 45) ? 45 : (512) & (1ULL << 44) ? 44 : (512) & (1ULL << 43) ? 43 : (512) & (1ULL << 42) ? 42 : (512) & (1ULL << 41) ? 41 : (512) & (1ULL << 40) ? 40 : (512) & (1ULL << 39) ? 39 : (512) & (1ULL << 38) ? 38 : (512) & (1ULL << 37) ? 37 : (512) & (1ULL << 36) ? 36 : (512) & (1ULL << 35) ? 35 : (512) & (1ULL << 34) ? 34 : (512) & (1ULL << 33) ? 33 : (512) & (1ULL << 32) ? 32 : (512) & (1ULL << 31) ? 31 : (512) & (1ULL << 30) ? 30 : (512) & (1ULL << 29) ? 29 : (512) & (1ULL << 28) ? 28 : (512) & (1ULL << 27) ? 27 : (512) & (1ULL << 26) ? 26 : (512) & (1ULL << 25) ? 25 : (512) & (1ULL << 24) ? 24 : (512) & (1ULL << 23) ? 23 : (512) & (1ULL << 22) ? 22 : (512) & (1ULL << 21) ? 21 : (512) & (1ULL << 20) ? 20 : (512) & (1ULL << 19) ? 19 : (512) & (1ULL << 18) ? 18 : (512) & (1ULL << 17) ? 17 : (512) & (1ULL << 16) ? 16 : (512) & (1ULL << 15) ? 15 : (512) & (1ULL << 14) ? 14 : (512) & (1ULL << 13) ? 13 : (512) & (1ULL << 12) ? 12 : (512) & (1ULL << 11) ? 11 : (512) & (1ULL << 10) ? 10 : (512) & (1ULL << 9) ? 9 : (512) & (1ULL << 8) ? 8 : (512) & (1ULL << 7) ? 7 : (512) & (1ULL << 6) ? 6 : (512) & (1ULL << 5) ? 5 : (512) & (1ULL << 4) ? 4 : (512) & (1ULL << 3) ? 3 : (512) & (1ULL << 2) ? 2 : (512) & (1ULL << 1) ? 1 : (512) & (1ULL << 0) ? 0 : ____ilog2_NaN() ) : (sizeof(512) <= 4) ? __ilog2_u32(512) : __ilog2_u64(512) )+1)];
# 256 "include/linux/sched.h"
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;
struct task_struct;
extern void sched_init(void);
extern void sched_init_smp(void);
extern void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);
extern int runqueue_is_locked(int cpu);
extern cpumask_var_t nohz_cpu_mask;
static inline __attribute__((always_inline)) void select_nohz_load_balancer(int stop_tick) { }
extern void show_state_filter(unsigned long state_filter);
static inline __attribute__((always_inline)) void show_state(void)
{
show_state_filter(0);
}
extern void show_regs(struct pt_regs *);
extern void show_stack(struct task_struct *task, unsigned long *sp);
void io_schedule(void);
long io_schedule_timeout(long timeout);
extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);
extern void sched_show_task(struct task_struct *p);
# 320 "include/linux/sched.h"
static inline __attribute__((always_inline)) void touch_softlockup_watchdog(void)
{
}
static inline __attribute__((always_inline)) void touch_softlockup_watchdog_sync(void)
{
}
static inline __attribute__((always_inline)) void touch_all_softlockup_watchdogs(void)
{
}
# 345 "include/linux/sched.h"
extern char __sched_text_start[], __sched_text_end[];
extern int in_sched_functions(unsigned long addr);
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
void schedule(void);
extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
struct nsproxy;
struct user_namespace;
# 376 "include/linux/sched.h"
extern int sysctl_max_map_count;
# 1 "include/linux/aio.h" 1
# 1 "include/linux/aio_abi.h" 1
# 33 "include/linux/aio_abi.h"
typedef unsigned long aio_context_t;
enum {
IOCB_CMD_PREAD = 0,
IOCB_CMD_PWRITE = 1,
IOCB_CMD_FSYNC = 2,
IOCB_CMD_FDSYNC = 3,
IOCB_CMD_NOOP = 6,
IOCB_CMD_PREADV = 7,
IOCB_CMD_PWRITEV = 8,
};
# 58 "include/linux/aio_abi.h"
struct io_event {
__u64 data;
__u64 obj;
__s64 res;
__s64 res2;
};
# 79 "include/linux/aio_abi.h"
struct iocb {
__u64 aio_data;
__u32 aio_reserved1, aio_key;
__u16 aio_lio_opcode;
__s16 aio_reqprio;
__u32 aio_fildes;
__u64 aio_buf;
__u64 aio_nbytes;
__s64 aio_offset;
__u64 aio_reserved2;
__u32 aio_flags;
__u32 aio_resfd;
};
# 7 "include/linux/aio.h" 2
# 15 "include/linux/aio.h"
struct kioctx;
# 87 "include/linux/aio.h"
struct kiocb {
struct list_head ki_run_list;
unsigned long ki_flags;
int ki_users;
unsigned ki_key;
struct file *ki_filp;
struct kioctx *ki_ctx;
int (*ki_cancel)(struct kiocb *, struct io_event *);
ssize_t (*ki_retry)(struct kiocb *);
void (*ki_dtor)(struct kiocb *);
union {
void *user;
struct task_struct *tsk;
} ki_obj;
__u64 ki_user_data;
loff_t ki_pos;
void *private;
unsigned short ki_opcode;
size_t ki_nbytes;
char *ki_buf;
size_t ki_left;
struct iovec ki_inline_vec;
struct iovec *ki_iovec;
unsigned long ki_nr_segs;
unsigned long ki_cur_seg;
struct list_head ki_list;
struct eventfd_ctx *ki_eventfd;
};
# 147 "include/linux/aio.h"
struct aio_ring {
unsigned id;
unsigned nr;
unsigned head;
unsigned tail;
unsigned magic;
unsigned compat_features;
unsigned incompat_features;
unsigned header_length;
struct io_event io_events[0];
};
struct aio_ring_info {
unsigned long mmap_base;
unsigned long mmap_size;
struct page **ring_pages;
spinlock_t ring_lock;
long nr_pages;
unsigned nr, tail;
struct page *internal_pages[8];
};
struct kioctx {
atomic_t users;
int dead;
struct mm_struct *mm;
unsigned long user_id;
struct hlist_node list;
wait_queue_head_t wait;
spinlock_t ctx_lock;
int reqs_active;
struct list_head active_reqs;
struct list_head run_list;
unsigned max_reqs;
struct aio_ring_info ring_info;
struct delayed_work wq;
struct rcu_head rcu_head;
};
extern unsigned aio_max_size;
extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
extern int aio_put_req(struct kiocb *iocb);
extern void kick_iocb(struct kiocb *iocb);
extern int aio_complete(struct kiocb *iocb, long res, long res2);
struct mm_struct;
extern void exit_aio(struct mm_struct *mm);
extern long do_io_submit(aio_context_t ctx_id, long nr,
struct iocb * *iocbpp, int compat);
# 229 "include/linux/aio.h"
static inline __attribute__((always_inline)) struct kiocb *list_kiocb(struct list_head *h)
{
return ({ const typeof( ((struct kiocb *)0)->ki_list ) *__mptr = (h); (struct kiocb *)( (char *)__mptr - __builtin_offsetof(struct kiocb,ki_list) );});
}
extern unsigned long aio_nr;
extern unsigned long aio_max_nr;
# 379 "include/linux/sched.h" 2
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);
# 434 "include/linux/sched.h"
struct sighand_struct {
atomic_t count;
struct k_sigaction action[128];
spinlock_t siglock;
wait_queue_head_t signalfd_wqh;
};
struct pacct_struct {
int ac_flag;
long ac_exitcode;
unsigned long ac_mem;
cputime_t ac_utime, ac_stime;
unsigned long ac_minflt, ac_majflt;
};
struct cpu_itimer {
cputime_t expires;
cputime_t incr;
u32 error;
u32 incr_error;
};
# 467 "include/linux/sched.h"
struct task_cputime {
cputime_t utime;
cputime_t stime;
unsigned long long sum_exec_runtime;
};
# 503 "include/linux/sched.h"
struct thread_group_cputimer {
struct task_cputime cputime;
int running;
spinlock_t lock;
};
# 516 "include/linux/sched.h"
struct signal_struct {
atomic_t sigcnt;
atomic_t live;
int nr_threads;
wait_queue_head_t wait_chldexit;
struct task_struct *curr_target;
struct sigpending shared_pending;
int group_exit_code;
int notify_count;
struct task_struct *group_exit_task;
int group_stop_count;
unsigned int flags;
struct list_head posix_timers;
struct hrtimer real_timer;
struct pid *leader_pid;
ktime_t it_real_incr;
struct cpu_itimer it[2];
struct thread_group_cputimer cputimer;
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
struct pid *tty_old_pgrp;
int leader;
struct tty_struct *tty;
cputime_t utime, stime, cutime, cstime;
cputime_t gtime;
cputime_t cgtime;
cputime_t prev_utime, prev_stime;
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
unsigned long inblock, oublock, cinblock, coublock;
unsigned long maxrss, cmaxrss;
struct task_io_accounting ioac;
unsigned long long sum_sched_runtime;
# 611 "include/linux/sched.h"
struct rlimit rlim[16];
# 624 "include/linux/sched.h"
int oom_adj;
long oom_score_adj;
};
# 654 "include/linux/sched.h"
static inline __attribute__((always_inline)) int signal_group_exit(const struct signal_struct *sig)
{
return (sig->flags & 0x00000008) ||
(sig->group_exit_task != ((void *)0));
}
struct user_struct {
atomic_t __count;
atomic_t processes;
atomic_t files;
atomic_t sigpending;
atomic_t inotify_watches;
atomic_t inotify_devs;
atomic_t epoll_watches;
unsigned long locked_shm;
struct hlist_node uidhash_node;
uid_t uid;
struct user_namespace *user_ns;
};
extern int uids_sysfs_init(void);
extern struct user_struct *find_user(uid_t);
extern struct user_struct root_user;
struct backing_dev_info;
struct reclaim_state;
# 757 "include/linux/sched.h"
static inline __attribute__((always_inline)) int sched_info_on(void)
{
return 0;
}
enum cpu_idle_type {
CPU_IDLE,
CPU_NOT_IDLE,
CPU_NEWLY_IDLE,
CPU_MAX_IDLE_TYPES
};
# 803 "include/linux/sched.h"
enum powersavings_balance_level {
POWERSAVINGS_BALANCE_NONE = 0,
POWERSAVINGS_BALANCE_BASIC,
POWERSAVINGS_BALANCE_WAKEUP,
MAX_POWERSAVINGS_BALANCE_LEVELS
};
extern int sched_mc_power_savings, sched_smt_power_savings;
static inline __attribute__((always_inline)) int sd_balance_for_mc_power(void)
{
if (sched_smt_power_savings)
return 0x0100;
if (!sched_mc_power_savings)
return 0x1000;
return 0;
}
static inline __attribute__((always_inline)) int sd_balance_for_package_power(void)
{
if (sched_mc_power_savings | sched_smt_power_savings)
return 0x0100;
return 0x1000;
}
extern int __attribute__((weak)) arch_sd_sibiling_asym_packing(void);
static inline __attribute__((always_inline)) int sd_power_saving_flags(void)
{
if (sched_mc_power_savings | sched_smt_power_savings)
return 0x0002;
return 0;
}
struct sched_group {
struct sched_group *next;
unsigned int cpu_power, cpu_power_orig;
# 870 "include/linux/sched.h"
unsigned long cpumask[0];
};
static inline __attribute__((always_inline)) struct cpumask *sched_group_cpus(struct sched_group *sg)
{
return ((struct cpumask *)(1 ? (sg->cpumask) : (void *)sizeof(__check_is_bitmap(sg->cpumask))));
}
enum sched_domain_level {
SD_LV_NONE = 0,
SD_LV_SIBLING,
SD_LV_MC,
SD_LV_CPU,
SD_LV_NODE,
SD_LV_ALLNODES,
SD_LV_MAX
};
struct sched_domain_attr {
int relax_domain_level;
};
struct sched_domain {
struct sched_domain *parent;
struct sched_domain *child;
struct sched_group *groups;
unsigned long min_interval;
unsigned long max_interval;
unsigned int busy_factor;
unsigned int imbalance_pct;
unsigned int cache_nice_tries;
unsigned int busy_idx;
unsigned int idle_idx;
unsigned int newidle_idx;
unsigned int wake_idx;
unsigned int forkexec_idx;
unsigned int smt_gain;
int flags;
enum sched_domain_level level;
unsigned long last_balance;
unsigned int balance_interval;
unsigned int nr_balance_failed;
u64 last_update;
# 957 "include/linux/sched.h"
unsigned int span_weight;
# 968 "include/linux/sched.h"
unsigned long span[0];
};
static inline __attribute__((always_inline)) struct cpumask *sched_domain_span(struct sched_domain *sd)
{
return ((struct cpumask *)(1 ? (sd->span) : (void *)sizeof(__check_is_bitmap(sd->span))));
}
extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new);
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
static inline __attribute__((always_inline)) int test_sd_parent(struct sched_domain *sd, int flag)
{
if (sd->parent && (sd->parent->flags & flag))
return 1;
return 0;
}
unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
# 1007 "include/linux/sched.h"
struct io_context;
static inline __attribute__((always_inline)) void prefetch_stack(struct task_struct *t) { }
struct audit_context;
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;
struct rq;
struct sched_domain;
# 1036 "include/linux/sched.h"
struct sched_class {
const struct sched_class *next;
void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
void (*yield_task) (struct rq *rq);
void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
struct task_struct * (*pick_next_task) (struct rq *rq);
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
int (*select_task_rq)(struct rq *rq, struct task_struct *p,
int sd_flag, int flags);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
void (*post_schedule) (struct rq *this_rq);
void (*task_waking) (struct rq *this_rq, struct task_struct *task);
void (*task_woken) (struct rq *this_rq, struct task_struct *task);
void (*set_cpus_allowed)(struct task_struct *p,
const struct cpumask *newmask);
void (*rq_online)(struct rq *rq);
void (*rq_offline)(struct rq *rq);
void (*set_curr_task) (struct rq *rq);
void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
void (*task_fork) (struct task_struct *p);
void (*switched_from) (struct rq *this_rq, struct task_struct *task,
int running);
void (*switched_to) (struct rq *this_rq, struct task_struct *task,
int running);
void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
int oldprio, int running);
unsigned int (*get_rr_interval) (struct rq *rq,
struct task_struct *task);
void (*moved_group) (struct task_struct *p, int on_rq);
};
struct load_weight {
unsigned long weight, inv_weight;
};
# 1123 "include/linux/sched.h"
struct sched_entity {
struct load_weight load;
struct rb_node run_node;
struct list_head group_node;
unsigned int on_rq;
u64 exec_start;
u64 sum_exec_runtime;
u64 vruntime;
u64 prev_sum_exec_runtime;
u64 nr_migrations;
struct sched_entity *parent;
struct cfs_rq *cfs_rq;
struct cfs_rq *my_q;
};
struct sched_rt_entity {
struct list_head run_list;
unsigned long timeout;
unsigned int time_slice;
int nr_cpus_allowed;
struct sched_rt_entity *back;
};
struct rcu_node;
struct task_struct {
volatile long state;
void *stack;
atomic_t usage;
unsigned int flags;
unsigned int ptrace;
int lock_depth;
int oncpu;
int prio, static_prio, normal_prio;
unsigned int rt_priority;
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
# 1201 "include/linux/sched.h"
unsigned char fpu_counter;
unsigned int policy;
cpumask_t cpus_allowed;
# 1220 "include/linux/sched.h"
struct list_head tasks;
struct plist_node pushable_tasks;
struct mm_struct *mm, *active_mm;
struct task_rss_stat rss_stat;
int exit_state;
int exit_code, exit_signal;
int pdeath_signal;
unsigned int personality;
unsigned did_exec:1;
unsigned in_execve:1;
unsigned in_iowait:1;
unsigned sched_reset_on_fork:1;
pid_t pid;
pid_t tgid;
# 1255 "include/linux/sched.h"
struct task_struct *real_parent;
struct task_struct *parent;
struct list_head children;
struct list_head sibling;
struct task_struct *group_leader;
struct list_head ptraced;
struct list_head ptrace_entry;
struct pid_link pids[PIDTYPE_MAX];
struct list_head thread_group;
struct completion *vfork_done;
int *set_child_tid;
int *clear_child_tid;
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
cputime_t prev_utime, prev_stime;
unsigned long nvcsw, nivcsw;
struct timespec start_time;
struct timespec real_start_time;
unsigned long min_flt, maj_flt;
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
const struct cred *real_cred;
const struct cred *cred;
struct mutex cred_guard_mutex;
struct cred *replacement_session_keyring;
char comm[16];
int link_count, total_link_count;
struct sysv_sem sysvsem;
struct thread_struct thread;
struct fs_struct *fs;
struct files_struct *files;
struct nsproxy *nsproxy;
struct signal_struct *signal;
struct sighand_struct *sighand;
sigset_t blocked, real_blocked;
sigset_t saved_sigmask;
struct sigpending pending;
unsigned long sas_ss_sp;
size_t sas_ss_size;
int (*notifier)(void *priv);
void *notifier_data;
sigset_t *notifier_mask;
struct audit_context *audit_context;
seccomp_t seccomp;
u32 parent_exec_id;
u32 self_exec_id;
spinlock_t alloc_lock;
struct irqaction *irqaction;
raw_spinlock_t pi_lock;
struct plist_head pi_waiters;
struct rt_mutex_waiter *pi_blocked_on;
# 1397 "include/linux/sched.h"
void *journal_info;
struct bio_list *bio_list;
struct reclaim_state *reclaim_state;
struct backing_dev_info *backing_dev_info;
struct io_context *io_context;
unsigned long ptrace_message;
siginfo_t *last_siginfo;
struct task_io_accounting ioac;
# 1425 "include/linux/sched.h"
struct css_set *cgroups;
struct list_head cg_list;
struct robust_list_head *robust_list;
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
# 1446 "include/linux/sched.h"
atomic_t fs_excl;
struct rcu_head rcu;
struct pipe_inode_info *splice_pipe;
struct prop_local_single dirties;
# 1468 "include/linux/sched.h"
unsigned long timer_slack_ns;
unsigned long default_timer_slack_ns;
struct list_head *scm_work_list;
# 1494 "include/linux/sched.h"
struct memcg_batch_info {
int do_batch;
struct mem_cgroup *memcg;
unsigned long bytes;
unsigned long memsw_bytes;
} memcg_batch;
};
# 1525 "include/linux/sched.h"
static inline __attribute__((always_inline)) int rt_prio(int prio)
{
if (__builtin_expect(!!(prio < 100), 0))
return 1;
return 0;
}
static inline __attribute__((always_inline)) int rt_task(struct task_struct *p)
{
return rt_prio(p->prio);
}
static inline __attribute__((always_inline)) struct pid *task_pid(struct task_struct *task)
{
return task->pids[PIDTYPE_PID].pid;
}
static inline __attribute__((always_inline)) struct pid *task_tgid(struct task_struct *task)
{
return task->group_leader->pids[PIDTYPE_PID].pid;
}
static inline __attribute__((always_inline)) struct pid *task_pgrp(struct task_struct *task)
{
return task->group_leader->pids[PIDTYPE_PGID].pid;
}
static inline __attribute__((always_inline)) struct pid *task_session(struct task_struct *task)
{
return task->group_leader->pids[PIDTYPE_SID].pid;
}
struct pid_namespace;
# 1577 "include/linux/sched.h"
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
struct pid_namespace *ns);
static inline __attribute__((always_inline)) pid_t task_pid_nr(struct task_struct *tsk)
{
return tsk->pid;
}
static inline __attribute__((always_inline)) pid_t task_pid_nr_ns(struct task_struct *tsk,
struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}
static inline __attribute__((always_inline)) pid_t task_pid_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PID, ((void *)0));
}
static inline __attribute__((always_inline)) pid_t task_tgid_nr(struct task_struct *tsk)
{
return tsk->tgid;
}
pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
static inline __attribute__((always_inline)) pid_t task_tgid_vnr(struct task_struct *tsk)
{
return pid_vnr(task_tgid(tsk));
}
static inline __attribute__((always_inline)) pid_t task_pgrp_nr_ns(struct task_struct *tsk,
struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}
static inline __attribute__((always_inline)) pid_t task_pgrp_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ((void *)0));
}
static inline __attribute__((always_inline)) pid_t task_session_nr_ns(struct task_struct *tsk,
struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}
static inline __attribute__((always_inline)) pid_t task_session_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_SID, ((void *)0));
}
static inline __attribute__((always_inline)) pid_t task_pgrp_nr(struct task_struct *tsk)
{
return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
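/* Illustrative sketch, not part of sched.h: reading a task's global ids with
 * the accessors above. The function name is an assumption for this example
 * only. */
static inline __attribute__((always_inline)) int epon_doc_example_task_ids(struct task_struct *tsk)
{
 pid_t pid = task_pid_nr(tsk); /* tsk->pid, the per-thread id */
 pid_t tgid = task_tgid_nr(tsk); /* tsk->tgid, shared by the thread group */
 return pid == tgid; /* non-zero when tsk is the thread-group leader */
}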
# 1647 "include/linux/sched.h"
static inline __attribute__((always_inline)) int pid_alive(struct task_struct *p)
{
return p->pids[PIDTYPE_PID].pid != ((void *)0);
}
static inline __attribute__((always_inline)) int is_global_init(struct task_struct *tsk)
{
return tsk->pid == 1;
}
extern int is_container_init(struct task_struct *tsk);
extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
extern void __put_task_struct(struct task_struct *t);
static inline __attribute__((always_inline)) void put_task_struct(struct task_struct *t)
{
if ((atomic_sub_return(1, (&t->usage)) == 0))
__put_task_struct(t);
}
extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
# 1762 "include/linux/sched.h"
static inline __attribute__((always_inline)) void rcu_copy_process(struct task_struct *p)
{
}
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
# 1782 "include/linux/sched.h"
static inline __attribute__((always_inline)) int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
return set_cpus_allowed_ptr(p, &new_mask);
}
# 1796 "include/linux/sched.h"
extern unsigned long long __attribute__((no_instrument_function)) sched_clock(void);
extern u64 cpu_clock(int cpu);
extern u64 local_clock(void);
extern u64 sched_clock_cpu(int cpu);
extern void sched_clock_init(void);
static inline __attribute__((always_inline)) void sched_clock_tick(void)
{
}
static inline __attribute__((always_inline)) void sched_clock_idle_sleep_event(void)
{
}
static inline __attribute__((always_inline)) void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
# 1833 "include/linux/sched.h"
extern unsigned long long
task_sched_runtime(struct task_struct *task);
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
extern void sched_exec(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
static inline __attribute__((always_inline)) void idle_task_exit(void) {}
extern void sched_idle_next(void);
static inline __attribute__((always_inline)) void wake_up_idle_cpu(int cpu) { }
extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern unsigned int sysctl_sched_shares_ratelimit;
extern unsigned int sysctl_sched_shares_thresh;
extern unsigned int sysctl_sched_child_runs_first;
enum sched_tunable_scaling {
SCHED_TUNABLESCALING_NONE,
SCHED_TUNABLESCALING_LOG,
SCHED_TUNABLESCALING_LINEAR,
SCHED_TUNABLESCALING_END,
};
extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
# 1893 "include/linux/sched.h"
static inline __attribute__((always_inline)) unsigned int get_sysctl_timer_migration(void)
{
return 1;
}
extern unsigned int sysctl_sched_rt_period;
extern int sysctl_sched_rt_runtime;
int sched_rt_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp,
loff_t *ppos);
extern unsigned int sysctl_sched_compat_yield;
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern void rt_mutex_adjust_pi(struct task_struct *p);
# 1919 "include/linux/sched.h"
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
extern int task_nice(const struct task_struct *p);
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
struct sched_param *);
extern struct task_struct *idle_task(int cpu);
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);
void yield(void);
extern struct exec_domain default_exec_domain;
union thread_union {
struct thread_info thread_info;
unsigned long stack[(((1UL) << 12) << (2))/sizeof(long)];
};
static inline __attribute__((always_inline)) int kstack_end(void *addr)
{
return !(((unsigned long)addr+sizeof(void*)-1) & ((((1UL) << 12) << (2))-sizeof(void*)));
}
extern union thread_union init_thread_union;
extern struct task_struct init_task;
extern struct mm_struct init_mm;
extern struct pid_namespace init_pid_ns;
# 1972 "include/linux/sched.h"
extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
struct pid_namespace *ns);
extern void __set_special_pids(struct pid *pid);
extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
static inline __attribute__((always_inline)) struct user_struct *get_uid(struct user_struct *u)
{
atomic_add(1, (&u->__count));
return u;
}
extern void free_uid(struct user_struct *);
extern void release_uids(struct user_namespace *ns);
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/current.h" 1
# 1989 "include/linux/sched.h" 2
extern void do_timer(unsigned long ticks);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk,
unsigned long clone_flags);
extern void kick_process(struct task_struct *tsk);
extern void sched_fork(struct task_struct *p, int clone_flags);
extern void sched_dead(struct task_struct *p);
extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void __flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
static inline __attribute__((always_inline)) int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
unsigned long flags;
int ret;
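/* Expanded form of spin_lock_irqsave(&tsk->sighand->siglock, flags). */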
do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&tsk->sighand->siglock)); } while (0); } while (0);
ret = dequeue_signal(tsk, mask, info);
spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
return ret;
}
extern void block_all_signals(int (*notifier)(void *priv), void *priv,
sigset_t *mask);
extern void unblock_all_signals(void);
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern int do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long);
static inline __attribute__((always_inline)) int kill_cad_pid(int sig, int priv)
{
return kill_pid(cad_pid, sig, priv);
}
# 2060 "include/linux/sched.h"
static inline __attribute__((always_inline)) int on_sig_stack(unsigned long sp)
{
return sp > (__current_thread_info->task)->sas_ss_sp &&
sp - (__current_thread_info->task)->sas_ss_sp <= (__current_thread_info->task)->sas_ss_size;
}
static inline __attribute__((always_inline)) int sas_ss_flags(unsigned long sp)
{
return ((__current_thread_info->task)->sas_ss_size == 0 ? 2
: on_sig_stack(sp) ? 1 : 0);
}
extern struct mm_struct * mm_alloc(void);
extern void __mmdrop(struct mm_struct *);
static inline __attribute__((always_inline)) void mmdrop(struct mm_struct * mm)
{
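/* The test below is atomic_dec_and_test(&mm->mm_count) as expanded for MIPS:
 * free the mm_struct once the last reference is dropped. */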
if (__builtin_expect(!!((atomic_sub_return(1, (&mm->mm_count)) == 0)), 0))
__mmdrop(mm);
}
extern void mmput(struct mm_struct *);
extern struct mm_struct *get_task_mm(struct task_struct *task);
extern void mm_release(struct task_struct *, struct mm_struct *);
extern struct mm_struct *dup_mm(struct task_struct *tsk);
extern int copy_thread(unsigned long, unsigned long, unsigned long,
struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);
extern void exit_files(struct task_struct *);
extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
extern void do_group_exit(int);
extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);
extern int do_execve(const char *,
const char * const *,
const char * const *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int *, int *);
struct task_struct *fork_idle(int);
extern void set_task_comm(struct task_struct *tsk, char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
# 2141 "include/linux/sched.h"
extern int current_is_single_threaded(void);
# 2153 "include/linux/sched.h"
static inline __attribute__((always_inline)) int get_nr_threads(struct task_struct *tsk)
{
return tsk->signal->nr_threads;
}
# 2167 "include/linux/sched.h"
static inline __attribute__((always_inline)) int has_group_leader_pid(struct task_struct *p)
{
return p->pid == p->tgid;
}
static inline __attribute__((always_inline))
int same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
return p1->tgid == p2->tgid;
}
static inline __attribute__((always_inline)) struct task_struct *next_thread(const struct task_struct *p)
{
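/* Expansion of list_entry_rcu(p->thread_group.next, struct task_struct, thread_group):
 * an rcu_dereference of the next pointer followed by container_of(). */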
return ({ const typeof( ((struct task_struct *)0)->thread_group ) *__mptr = (({ typeof(p->thread_group.next) _________p1 = (*(volatile typeof(p->thread_group.next) *)&(p->thread_group.next)); do { } while(0); (_________p1); })); (struct task_struct *)( (char *)__mptr - __builtin_offsetof(struct task_struct,thread_group) );})
;
}
static inline __attribute__((always_inline)) int thread_group_empty(struct task_struct *p)
{
return list_empty(&p->thread_group);
}
static inline __attribute__((always_inline)) int task_detached(struct task_struct *p)
{
return p->exit_signal == -1;
}
# 2207 "include/linux/sched.h"
static inline __attribute__((always_inline)) void task_lock(struct task_struct *p)
{
spin_lock(&p->alloc_lock);
}
static inline __attribute__((always_inline)) void task_unlock(struct task_struct *p)
{
spin_unlock(&p->alloc_lock);
}
extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
unsigned long *flags);
static inline __attribute__((always_inline)) void unlock_task_sighand(struct task_struct *tsk,
unsigned long *flags)
{
spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
static inline __attribute__((always_inline)) void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
*((struct thread_info *)(p)->stack) = *((struct thread_info *)(org)->stack);
((struct thread_info *)(p)->stack)->task = p;
}
static inline __attribute__((always_inline)) unsigned long *end_of_stack(struct task_struct *p)
{
return (unsigned long *)(((struct thread_info *)(p)->stack) + 1);
}
static inline __attribute__((always_inline)) int object_is_on_stack(void *obj)
{
void *stack = (((__current_thread_info->task))->stack);
return (obj >= stack) && (obj < (stack + (((1UL) << 12) << (2))));
}
extern void thread_info_cache_init(void);
static inline __attribute__((always_inline)) unsigned long stack_not_used(struct task_struct *p)
{
unsigned long *n = end_of_stack(p);
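/* Walk upward from just above thread_info, skipping words that are still zero;
 * the result is the number of stack bytes never touched. */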
do {
n++;
} while (!*n);
return (unsigned long)n - (unsigned long)end_of_stack(p);
}
static inline __attribute__((always_inline)) void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
set_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
}
static inline __attribute__((always_inline)) void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
clear_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
}
static inline __attribute__((always_inline)) int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_set_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
}
static inline __attribute__((always_inline)) int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_clear_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
}
static inline __attribute__((always_inline)) int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag);
}
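/* Thread flag 2 used in the helpers below is TIF_NEED_RESCHED on MIPS;
 * flag 1 (used by the signal helpers further down) is TIF_SIGPENDING. */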
static inline __attribute__((always_inline)) void set_tsk_need_resched(struct task_struct *tsk)
{
set_tsk_thread_flag(tsk,2);
}
static inline __attribute__((always_inline)) void clear_tsk_need_resched(struct task_struct *tsk)
{
clear_tsk_thread_flag(tsk,2);
}
static inline __attribute__((always_inline)) int test_tsk_need_resched(struct task_struct *tsk)
{
return __builtin_expect(!!(test_tsk_thread_flag(tsk,2)), 0);
}
static inline __attribute__((always_inline)) int restart_syscall(void)
{
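/* Flag 1 is TIF_SIGPENDING; -513 is -ERESTARTNOINTR. */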
set_tsk_thread_flag((__current_thread_info->task), 1);
return -513;
}
static inline __attribute__((always_inline)) int signal_pending(struct task_struct *p)
{
return __builtin_expect(!!(test_tsk_thread_flag(p,1)), 0);
}
static inline __attribute__((always_inline)) int __fatal_signal_pending(struct task_struct *p)
{
return __builtin_expect(!!(sigismember(&p->pending.signal, 9)), 0);
}
static inline __attribute__((always_inline)) int fatal_signal_pending(struct task_struct *p)
{
return signal_pending(p) && __fatal_signal_pending(p);
}
static inline __attribute__((always_inline)) int signal_pending_state(long state, struct task_struct *p)
{
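/* 1 == TASK_INTERRUPTIBLE, 128 == TASK_WAKEKILL; the 9 tested in
 * __fatal_signal_pending() above is SIGKILL. */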
if (!(state & (1 | 128)))
return 0;
if (!signal_pending(p))
return 0;
return (state & 1) || __fatal_signal_pending(p);
}
static inline __attribute__((always_inline)) int need_resched(void)
{
return __builtin_expect(!!(test_ti_thread_flag(__current_thread_info, 2)), 0);
}
# 2352 "include/linux/sched.h"
extern int _cond_resched(void);
extern int __cond_resched_lock(spinlock_t *lock);
# 2372 "include/linux/sched.h"
extern int __cond_resched_softirq(void);
# 2384 "include/linux/sched.h"
static inline __attribute__((always_inline)) int spin_needbreak(spinlock_t *lock)
{
return 0;
}
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
static inline __attribute__((always_inline)) void thread_group_cputime_init(struct signal_struct *sig)
{
do { spinlock_check(&sig->cputimer.lock); do { *(&(&sig->cputimer.lock)->rlock) = (raw_spinlock_t) { .raw_lock = { .lock = 0 }, }; } while (0); } while (0);
}
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void signal_wake_up(struct task_struct *t, int resume_stopped);
static inline __attribute__((always_inline)) unsigned int task_cpu(const struct task_struct *p)
{
return ((struct thread_info *)(p)->stack)->cpu;
}
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
# 2440 "include/linux/sched.h"
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
extern void normalize_rt_tasks(void);
extern struct task_group init_task_group;
extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
extern void sched_move_task(struct task_struct *tsk);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
extern unsigned long sched_group_shares(struct task_group *tg);
# 2467 "include/linux/sched.h"
extern int task_can_switch_user(struct user_struct *up,
struct task_struct *tsk);
# 2491 "include/linux/sched.h"
static inline __attribute__((always_inline)) void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}
static inline __attribute__((always_inline)) void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}
static inline __attribute__((always_inline)) void inc_syscr(struct task_struct *tsk)
{
}
static inline __attribute__((always_inline)) void inc_syscw(struct task_struct *tsk)
{
}
# 2515 "include/linux/sched.h"
extern void task_oncpu_function_call(struct task_struct *p,
void (*func) (void *info), void *info);
extern void mm_update_next_owner(struct mm_struct *mm);
extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
# 2532 "include/linux/sched.h"
static inline __attribute__((always_inline)) unsigned long task_rlimit(const struct task_struct *tsk,
unsigned int limit)
{
return (*(volatile typeof(tsk->signal->rlim[limit].rlim_cur) *)&(tsk->signal->rlim[limit].rlim_cur));
}
static inline __attribute__((always_inline)) unsigned long task_rlimit_max(const struct task_struct *tsk,
unsigned int limit)
{
return (*(volatile typeof(tsk->signal->rlim[limit].rlim_max) *)&(tsk->signal->rlim[limit].rlim_max));
}
static inline __attribute__((always_inline)) unsigned long rlimit(unsigned int limit)
{
return task_rlimit((__current_thread_info->task), limit);
}
static inline __attribute__((always_inline)) unsigned long rlimit_max(unsigned int limit)
{
return task_rlimit_max((__current_thread_info->task), limit);
}
# 39 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/linux/mii.h" 1
# 143 "include/linux/mii.h"
struct mii_ioctl_data {
__u16 phy_id;
__u16 reg_num;
__u16 val_in;
__u16 val_out;
};
struct ethtool_cmd;
struct mii_if_info {
int phy_id;
int advertising;
int phy_id_mask;
int reg_num_mask;
unsigned int full_duplex : 1;
unsigned int force_media : 1;
unsigned int supports_gmii : 1;
struct net_device *dev;
int (*mdio_read) (struct net_device *dev, int phy_id, int location);
void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val);
};
extern int mii_link_ok (struct mii_if_info *mii);
extern int mii_nway_restart (struct mii_if_info *mii);
extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
extern int mii_check_gmii_support(struct mii_if_info *mii);
extern void mii_check_link (struct mii_if_info *mii);
extern unsigned int mii_check_media (struct mii_if_info *mii,
unsigned int ok_to_print,
unsigned int init_media);
extern int generic_mii_ioctl(struct mii_if_info *mii_if,
struct mii_ioctl_data *mii_data, int cmd,
unsigned int *duplex_changed);
static inline __attribute__((always_inline)) struct mii_ioctl_data *if_mii(struct ifreq *rq)
{
return (struct mii_ioctl_data *) &rq->ifr_ifru;
}
# 205 "include/linux/mii.h"
static inline __attribute__((always_inline)) unsigned int mii_nway_result (unsigned int negotiated)
{
unsigned int ret;
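/* Link-partner ability bits: 0x0100 LPA_100FULL, 0x0200 LPA_100BASE4,
 * 0x0080 LPA_100HALF, 0x0040 LPA_10FULL, 0x0020 LPA_10HALF. */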
if (negotiated & 0x0100)
ret = 0x0100;
else if (negotiated & 0x0200)
ret = 0x0200;
else if (negotiated & 0x0080)
ret = 0x0080;
else if (negotiated & 0x0040)
ret = 0x0040;
else
ret = 0x0020;
return ret;
}
# 232 "include/linux/mii.h"
static inline __attribute__((always_inline)) unsigned int mii_duplex (unsigned int duplex_lock,
unsigned int negotiated)
{
if (duplex_lock)
return 1;
if (mii_nway_result(negotiated) & (0x0040 | 0x0100))
return 1;
return 0;
}
static inline __attribute__((always_inline)) u16 mii_advertise_flowctrl(int cap)
{
u16 adv = 0;
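/* 0x02/0x01 are FLOW_CTRL_RX/FLOW_CTRL_TX; 0x0400/0x0800 are ADVERTISE_PAUSE_CAP/
 * ADVERTISE_PAUSE_ASYM. The same pause bits are decoded by mii_resolve_flowctrl_fdx() below. */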
if (cap & 0x02)
adv = 0x0400 | 0x0800;
if (cap & 0x01)
adv ^= 0x0800;
return adv;
}
# 265 "include/linux/mii.h"
static inline __attribute__((always_inline)) u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv)
{
u8 cap = 0;
if (lcladv & rmtadv & 0x0400) {
cap = 0x01 | 0x02;
} else if (lcladv & rmtadv & 0x0800) {
if (lcladv & 0x0400)
cap = 0x02;
else if (rmtadv & 0x0400)
cap = 0x01;
}
return cap;
}
# 46 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/linux/version.h" 1
# 47 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/linux/pktflow.h" 1
# 16 "include/linux/pktflow.h"
struct sk_buff;
struct net_device;
struct nf_conn;
struct pktflow_stats {
unsigned long rx_packets;
unsigned long tx_packets;
unsigned long rx_bytes;
unsigned long tx_bytes;
unsigned long rx_dropped;
unsigned long tx_dropped;
unsigned long tx_packets_q0;
unsigned long tx_packets_q1;
unsigned long tx_packets_q2;
unsigned long tx_packets_q3;
};
typedef enum {
ETH_802x,
TC2206_STAG,
OUT_VLAN_8021Q,
VLAN_8021Q,
PPPoE_2516,
ETH_IPV4,
ETH_IPV6,
PROTO_MAX
} PktflowEncap_t;
typedef struct ip6_addr {
union
{
__u8 u6_addr8[16];
__be16 u6_addr16[8];
__be32 u6_addr32[4];
} in6_u;
} ip6_addr_t;
typedef struct {
uint8_t itf;
uint8_t count;
uint8_t multicast;
union {
struct {
uint8_t ETH_IPV6 : 1;
uint8_t ETH_IPV4 : 1;
uint8_t PPPoE_2516 : 1;
uint8_t VLAN_8021Q : 1;
uint8_t OUT_VLAN_8021Q : 1;
uint8_t TC2206_STAG : 1;
uint8_t ETH_802x : 1;
} bmap;
uint8_t hdrs;
};
} __attribute__((packed)) PktflowInfo_t;
struct pktflowTuple_t {
union {
struct {
uint32_t saddr;
uint32_t daddr;
union {
struct {
uint16_t source;
uint16_t dest;
} port;
uint32_t ports;
};
} ip4_u;
struct {
ip6_addr_t daddr;
} ip6_u;
};
} __attribute__((packed)) __attribute__((__aligned__((1 << 5))));
typedef struct pktflowTuple_t PktflowTuple_t;
struct pktflowHeader_t {
PktflowTuple_t tuple;
union {
struct net_device *dev_p;
struct nf_conn *ct_p;
};
union {
PktflowInfo_t info;
uint32_t word;
};
union {
uint8_t reserved;
uint8_t nf_dir;
};
uint8_t length;
PktflowEncap_t encap[ 6 ];
uint8_t l2hdr[ 32 ];
} __attribute__((packed)) __attribute__((__aligned__((1 << 5))));
typedef struct pktflowHeader_t PktflowHeader_t;
struct pktflow_t {
struct list_head list;
union {
void *void_p;
struct pktflow_t *pktflow_p;
struct sk_buff *skb;
};
uint16_t flags;
uint16_t hit_count;
uint8_t hash;
uint8_t protocol;
uint16_t nfct_events;
struct net_bridge_fdb_entry *fdb_src;
PktflowHeader_t rx;
PktflowHeader_t tx;
atomic_t refcnt;
struct timer_list timer;
volatile unsigned long timeout;
} __attribute__((packed)) __attribute__((__aligned__((1 << 5))));
typedef struct pktflow_t Pktflow_t;
extern int (*pktflow_rx_hook)(struct sk_buff *skb, int itf, unsigned int *dbg_info);
extern int (*pktflow_tx_hook)(struct sk_buff *skb, int itf);
extern void (*pktflow_nfct_close_hook)(struct nf_conn *nf);
extern void (*pktflow_fdb_delete_hook)(struct net_bridge_fdb_entry *f);
extern void (*pktflow_get_stats_hook)(struct pktflow_stats *stats, int itf);
extern void (*pktflow_clear_stats_hook)(int itf);
extern void (*pktflow_update_ipv4_mc_hook)(__be32 group, unsigned int portmap);
extern void (*pktflow_update_ip6_mc_hook)(struct in6_addr group, unsigned int portmap);
extern void pktflow_register(int (*pktflow_rx)(struct sk_buff *skb, int itf, unsigned int *dbg_info),
int (*pktflow_tx)(struct sk_buff *skb, int itf),
void (*pktflow_nfct_close)(struct nf_conn *nf),
void (*pktflow_fdb_delete)(struct net_bridge_fdb_entry *f),
void (*pktflow_get_stats)(struct pktflow_stats *stats, int itf),
void (*pktflow_clear_stats)(int itf),
void (*pktflow_update_ipv4_mc)(__be32 group, unsigned int portmap),
void (*pktflow_update_ip6_mc)(struct in6_addr group, unsigned int portmap));
extern void pktflow_unregister(void);
extern Pktflow_t *pktflow_get(void);
extern Pktflow_t *pktflow_hp_get(void);
extern void pktflow_put(Pktflow_t *pktflow_p);
extern void pktflow_free(struct sk_buff *skb);
extern void pktflow_xfer(struct sk_buff *skb, const struct sk_buff *prev_p);
extern void pktflow_nfct(struct sk_buff *skb, struct nf_conn *nfct_p);
extern void pktflow_refresh_nfct(struct nf_conn *ct_p, uint32_t jiffies);
typedef void (*pktflow_refresh_t)(struct nf_conn *nfct, uint32_t ctinfo,
struct sk_buff *skb,
uint32_t jiffies, int do_acct);
extern pktflow_refresh_t pktflow_refresh_fn;
# 49 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/linux/tcp.h" 1
# 24 "include/linux/tcp.h"
struct tcphdr {
__be16 source;
__be16 dest;
__be32 seq;
__be32 ack_seq;
# 41 "include/linux/tcp.h"
__u16 doff:4,
res1:4,
cwr:1,
ece:1,
urg:1,
ack:1,
psh:1,
rst:1,
syn:1,
fin:1;
__be16 window;
__sum16 check;
__be16 urg_ptr;
};
union tcp_word_hdr {
struct tcphdr hdr;
__be32 words[5];
};
enum {
TCP_FLAG_CWR = (( __be32)(__u32)(0x00800000)),
TCP_FLAG_ECE = (( __be32)(__u32)(0x00400000)),
TCP_FLAG_URG = (( __be32)(__u32)(0x00200000)),
TCP_FLAG_ACK = (( __be32)(__u32)(0x00100000)),
TCP_FLAG_PSH = (( __be32)(__u32)(0x00080000)),
TCP_FLAG_RST = (( __be32)(__u32)(0x00040000)),
TCP_FLAG_SYN = (( __be32)(__u32)(0x00020000)),
TCP_FLAG_FIN = (( __be32)(__u32)(0x00010000)),
TCP_RESERVED_BITS = (( __be32)(__u32)(0x0F000000)),
TCP_DATA_OFFSET = (( __be32)(__u32)(0xF0000000))
};
# 115 "include/linux/tcp.h"
enum tcp_ca_state {
TCP_CA_Open = 0,
TCP_CA_Disorder = 1,
TCP_CA_CWR = 2,
TCP_CA_Recovery = 3,
TCP_CA_Loss = 4
};
struct tcp_info {
__u8 tcpi_state;
__u8 tcpi_ca_state;
__u8 tcpi_retransmits;
__u8 tcpi_probes;
__u8 tcpi_backoff;
__u8 tcpi_options;
__u8 tcpi_snd_wscale : 4, tcpi_rcv_wscale : 4;
__u32 tcpi_rto;
__u32 tcpi_ato;
__u32 tcpi_snd_mss;
__u32 tcpi_rcv_mss;
__u32 tcpi_unacked;
__u32 tcpi_sacked;
__u32 tcpi_lost;
__u32 tcpi_retrans;
__u32 tcpi_fackets;
__u32 tcpi_last_data_sent;
__u32 tcpi_last_ack_sent;
__u32 tcpi_last_data_recv;
__u32 tcpi_last_ack_recv;
__u32 tcpi_pmtu;
__u32 tcpi_rcv_ssthresh;
__u32 tcpi_rtt;
__u32 tcpi_rttvar;
__u32 tcpi_snd_ssthresh;
__u32 tcpi_snd_cwnd;
__u32 tcpi_advmss;
__u32 tcpi_reordering;
__u32 tcpi_rcv_rtt;
__u32 tcpi_rcv_space;
__u32 tcpi_total_retrans;
};
struct tcp_md5sig {
struct __kernel_sockaddr_storage tcpm_addr;
__u16 __tcpm_pad1;
__u16 tcpm_keylen;
__u32 __tcpm_pad2;
__u8 tcpm_key[80];
};
# 196 "include/linux/tcp.h"
struct tcp_cookie_transactions {
__u16 tcpct_flags;
__u8 __tcpct_pad1;
__u8 tcpct_cookie_desired;
__u16 tcpct_s_data_desired;
__u16 tcpct_used;
__u8 tcpct_value[536U];
};
# 1 "include/net/sock.h" 1
# 53 "include/net/sock.h"
# 1 "include/linux/security.h" 1
# 26 "include/linux/security.h"
# 1 "include/linux/fsnotify.h" 1
# 14 "include/linux/fsnotify.h"
# 1 "include/linux/fsnotify_backend.h" 1
# 12 "include/linux/fsnotify_backend.h"
# 1 "include/linux/idr.h" 1
# 51 "include/linux/idr.h"
struct idr_layer {
unsigned long bitmap;
struct idr_layer *ary[1<<5];
int count;
int layer;
struct rcu_head rcu_head;
};
struct idr {
struct idr_layer *top;
struct idr_layer *id_free;
int layers;
int id_free_cnt;
spinlock_t lock;
};
# 103 "include/linux/idr.h"
void *idr_find(struct idr *idp, int id);
int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
int idr_get_new(struct idr *idp, void *ptr, int *id);
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
int idr_for_each(struct idr *idp,
int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *idp, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id);
void idr_remove_all(struct idr *idp);
void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp);
# 125 "include/linux/idr.h"
struct ida_bitmap {
long nr_busy;
unsigned long bitmap[(128 / sizeof(long) - 1)];
};
struct ida {
struct idr idr;
struct ida_bitmap *free_bitmap;
};
int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
int ida_get_new(struct ida *ida, int *p_id);
void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida);
void ida_init(struct ida *ida);
void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) idr_init_cache(void);
# 13 "include/linux/fsnotify_backend.h" 2
# 76 "include/linux/fsnotify_backend.h"
struct fsnotify_group;
struct fsnotify_event;
struct fsnotify_mark;
struct fsnotify_event_private_data;
# 93 "include/linux/fsnotify_backend.h"
struct fsnotify_ops {
int (*should_send_event)(struct fsnotify_group *group, struct inode *inode,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
__u32 mask, void *data, int data_type);
int (*handle_event)(struct fsnotify_group *group,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
struct fsnotify_event *event);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
void (*free_event_priv)(struct fsnotify_event_private_data *priv);
};
struct fsnotify_group {
# 122 "include/linux/fsnotify_backend.h"
atomic_t refcnt;
const struct fsnotify_ops *ops;
struct mutex notification_mutex;
struct list_head notification_list;
wait_queue_head_t notification_waitq;
unsigned int q_len;
unsigned int max_events;
spinlock_t mark_lock;
atomic_t num_marks;
struct list_head marks_list;
union {
void *private;
struct inotify_group_private_data {
spinlock_t idr_lock;
struct idr idr;
u32 last_wd;
struct fasync_struct *fa;
struct user_struct *user;
} inotify_data;
# 164 "include/linux/fsnotify_backend.h"
};
};
# 177 "include/linux/fsnotify_backend.h"
struct fsnotify_event_holder {
struct fsnotify_event *event;
struct list_head event_list;
};
struct fsnotify_event_private_data {
struct fsnotify_group *group;
struct list_head event_list;
};
struct fsnotify_event {
struct fsnotify_event_holder holder;
spinlock_t lock;
struct inode *to_tell;
# 214 "include/linux/fsnotify_backend.h"
union {
struct path path;
struct inode *inode;
};
int data_type;
atomic_t refcnt;
__u32 mask;
u32 sync_cookie;
const unsigned char *file_name;
size_t name_len;
struct pid *tgid;
struct list_head private_data_list;
};
struct fsnotify_inode_mark {
struct inode *inode;
struct hlist_node i_list;
struct list_head free_i_list;
};
struct fsnotify_vfsmount_mark {
struct vfsmount *mnt;
struct hlist_node m_list;
struct list_head free_m_list;
};
# 266 "include/linux/fsnotify_backend.h"
struct fsnotify_mark {
__u32 mask;
atomic_t refcnt;
struct fsnotify_group *group;
struct list_head g_list;
spinlock_t lock;
union {
struct fsnotify_inode_mark i;
struct fsnotify_vfsmount_mark m;
};
__u32 ignored_mask;
struct list_head free_g_list;
unsigned int flags;
struct list_head destroy_list;
void (*free_mark)(struct fsnotify_mark *mark);
};
extern int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
const unsigned char *name, u32 cookie);
extern void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask);
extern void __fsnotify_inode_delete(struct inode *inode);
extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
extern u32 fsnotify_get_cookie(void);
static inline __attribute__((always_inline)) int fsnotify_inode_watches_children(struct inode *inode)
{
if (!(inode->i_fsnotify_mask & 0x08000000))
return 0;
return inode->i_fsnotify_mask & (0x00000001 | 0x00000002 | 0x00000004 | 0x00000008 | 0x00000010 | 0x00000020 | 0x00000040 | 0x00000080 | 0x00000100 | 0x00000200);
}
static inline __attribute__((always_inline)) void __fsnotify_update_dcache_flags(struct dentry *dentry)
{
struct dentry *parent;
assert_spin_locked(&dcache_lock);
assert_spin_locked(&dentry->d_lock);
parent = dentry->d_parent;
if (parent->d_inode && fsnotify_inode_watches_children(parent->d_inode))
dentry->d_flags |= 0x0080;
else
dentry->d_flags &= ~0x0080;
}
static inline __attribute__((always_inline)) void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
{
if (!inode)
return;
assert_spin_locked(&dcache_lock);
spin_lock(&dentry->d_lock);
__fsnotify_update_dcache_flags(dentry);
spin_unlock(&dentry->d_lock);
}
extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops);
extern void fsnotify_put_group(struct fsnotify_group *group);
extern void fsnotify_get_event(struct fsnotify_event *event);
extern void fsnotify_put_event(struct fsnotify_event *event);
extern struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnotify_group *group,
struct fsnotify_event *event);
extern struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group,
struct fsnotify_event *event,
struct fsnotify_event_private_data *priv,
struct fsnotify_event *(*merge)(struct list_head *,
struct fsnotify_event *));
extern int fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
extern struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group);
extern struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group);
extern void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt);
extern void fsnotify_recalc_inode_mask(struct inode *inode);
extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(struct fsnotify_mark *mark));
extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode);
extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt);
extern void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old);
extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask);
extern void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask);
extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
struct inode *inode, struct vfsmount *mnt, int allow_dups);
extern void fsnotify_destroy_mark(struct fsnotify_mark *mark);
extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group);
extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags);
extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_unmount_inodes(struct list_head *list);
extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
void *data, int data_is,
const unsigned char *name,
u32 cookie, gfp_t gfp);
extern struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_event);
extern int fsnotify_replace_event(struct fsnotify_event_holder *old_holder,
struct fsnotify_event *new_event);
# 15 "include/linux/fsnotify.h" 2
# 1 "include/linux/audit.h" 1
# 257 "include/linux/audit.h"
enum {
Audit_equal,
Audit_not_equal,
Audit_bitmask,
Audit_bittest,
Audit_lt,
Audit_gt,
Audit_le,
Audit_ge,
Audit_bad
};
# 317 "include/linux/audit.h"
struct audit_status {
__u32 mask;
__u32 enabled;
__u32 failure;
__u32 pid;
__u32 rate_limit;
__u32 backlog_limit;
__u32 lost;
__u32 backlog;
};
struct audit_tty_status {
__u32 enabled;
};
struct audit_rule_data {
__u32 flags;
__u32 action;
__u32 field_count;
__u32 mask[64];
__u32 fields[64];
__u32 values[64];
__u32 fieldflags[64];
__u32 buflen;
char buf[0];
};
struct audit_rule {
__u32 flags;
__u32 action;
__u32 field_count;
__u32 mask[64];
__u32 fields[64];
__u32 values[64];
};
struct audit_sig_info {
uid_t uid;
pid_t pid;
char ctx[0];
};
struct audit_buffer;
struct audit_context;
struct inode;
struct netlink_skb_parms;
struct linux_binprm;
struct mq_attr;
struct mqstat;
struct audit_watch;
struct audit_tree;
struct audit_krule {
int vers_ops;
u32 flags;
u32 listnr;
u32 action;
u32 mask[64];
u32 buflen;
u32 field_count;
char *filterkey;
struct audit_field *fields;
struct audit_field *arch_f;
struct audit_field *inode_f;
struct audit_watch *watch;
struct audit_tree *tree;
struct list_head rlist;
struct list_head list;
u64 prio;
};
struct audit_field {
u32 type;
u32 val;
u32 op;
char *lsm_str;
void *lsm_rule;
};
extern int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) audit_register_class(int class, unsigned *list);
extern int audit_classify_syscall(int abi, unsigned syscall);
extern int audit_classify_arch(int arch);
# 16 "include/linux/fsnotify.h" 2
static inline __attribute__((always_inline)) void fsnotify_d_instantiate(struct dentry *dentry,
struct inode *inode)
{
__fsnotify_d_instantiate(dentry, inode);
}
static inline __attribute__((always_inline)) void fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{
if (!dentry)
dentry = path->dentry;
__fsnotify_parent(path, dentry, mask);
}
static inline __attribute__((always_inline)) int fsnotify_perm(struct file *file, int mask)
{
struct path *path = &file->f_path;
struct inode *inode = path->dentry->d_inode;
__u32 fsnotify_mask = 0;
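/* MAY_READ == 4, MAY_OPEN == 32; FS_OPEN_PERM == 0x00010000, FS_ACCESS_PERM == 0x00020000. */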
if (file->f_mode & (( fmode_t)0x1000000))
return 0;
if (!(mask & (4 | 32)))
return 0;
if (mask & 32)
fsnotify_mask = 0x00010000;
else if (mask & 4)
fsnotify_mask = 0x00020000;
else
BUG();
return fsnotify(inode, fsnotify_mask, path, 1, ((void *)0), 0);
}
static inline __attribute__((always_inline)) void fsnotify_d_move(struct dentry *dentry)
{
__fsnotify_update_dcache_flags(dentry);
}
static inline __attribute__((always_inline)) void fsnotify_link_count(struct inode *inode)
{
fsnotify(inode, 0x00000004, inode, 2, ((void *)0), 0);
}
static inline __attribute__((always_inline)) void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
const unsigned char *old_name,
int isdir, struct inode *target, struct dentry *moved)
{
struct inode *source = moved->d_inode;
u32 fs_cookie = fsnotify_get_cookie();
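/* Mask bits: FS_EVENT_ON_CHILD 0x08000000, FS_MOVED_FROM 0x40, FS_MOVED_TO 0x80,
 * FS_DN_RENAME 0x10000000, FS_ISDIR 0x40000000, FS_MOVE_SELF 0x00000800. */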
__u32 old_dir_mask = (0x08000000 | 0x00000040);
__u32 new_dir_mask = (0x08000000 | 0x00000080);
const unsigned char *new_name = moved->d_name.name;
if (old_dir == new_dir)
old_dir_mask |= 0x10000000;
if (isdir) {
old_dir_mask |= 0x40000000;
new_dir_mask |= 0x40000000;
}
fsnotify(old_dir, old_dir_mask, old_dir, 2, old_name, fs_cookie);
fsnotify(new_dir, new_dir_mask, new_dir, 2, new_name, fs_cookie);
if (target)
fsnotify_link_count(target);
if (source)
fsnotify(source, 0x00000800, moved->d_inode, 2, ((void *)0), 0);
do { ; } while (0);
}
static inline __attribute__((always_inline)) void fsnotify_inode_delete(struct inode *inode)
{
__fsnotify_inode_delete(inode);
}
static inline __attribute__((always_inline)) void fsnotify_vfsmount_delete(struct vfsmount *mnt)
{
__fsnotify_vfsmount_delete(mnt);
}
static inline __attribute__((always_inline)) void fsnotify_nameremove(struct dentry *dentry, int isdir)
{
__u32 mask = 0x00000200;
if (isdir)
mask |= 0x40000000;
fsnotify_parent(((void *)0), dentry, mask);
}
static inline __attribute__((always_inline)) void fsnotify_inoderemove(struct inode *inode)
{
fsnotify(inode, 0x00000400, inode, 2, ((void *)0), 0);
__fsnotify_inode_delete(inode);
}
static inline __attribute__((always_inline)) void fsnotify_create(struct inode *inode, struct dentry *dentry)
{
do { ; } while (0);
fsnotify(inode, 0x00000100, dentry->d_inode, 2, dentry->d_name.name, 0);
}
static inline __attribute__((always_inline)) void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry)
{
fsnotify_link_count(inode);
do { ; } while (0);
fsnotify(dir, 0x00000100, inode, 2, new_dentry->d_name.name, 0);
}
static inline __attribute__((always_inline)) void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
{
__u32 mask = (0x00000100 | 0x40000000);
struct inode *d_inode = dentry->d_inode;
do { ; } while (0);
fsnotify(inode, mask, d_inode, 2, dentry->d_name.name, 0);
}
static inline __attribute__((always_inline)) void fsnotify_access(struct file *file)
{
struct path *path = &file->f_path;
struct inode *inode = path->dentry->d_inode;
__u32 mask = 0x00000001;
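/* The ((i_mode & 00170000) == 0040000) test here and in the helpers below is
 * S_ISDIR(inode->i_mode); the 0x1000000 fmode bit is FMODE_NONOTIFY. */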
if ((((inode->i_mode) & 00170000) == 0040000))
mask |= 0x40000000;
if (!(file->f_mode & (( fmode_t)0x1000000))) {
fsnotify_parent(path, ((void *)0), mask);
fsnotify(inode, mask, path, 1, ((void *)0), 0);
}
}
static inline __attribute__((always_inline)) void fsnotify_modify(struct file *file)
{
struct path *path = &file->f_path;
struct inode *inode = path->dentry->d_inode;
__u32 mask = 0x00000002;
if ((((inode->i_mode) & 00170000) == 0040000))
mask |= 0x40000000;
if (!(file->f_mode & (( fmode_t)0x1000000))) {
fsnotify_parent(path, ((void *)0), mask);
fsnotify(inode, mask, path, 1, ((void *)0), 0);
}
}
static inline __attribute__((always_inline)) void fsnotify_open(struct file *file)
{
struct path *path = &file->f_path;
struct inode *inode = path->dentry->d_inode;
__u32 mask = 0x00000020;
if ((((inode->i_mode) & 00170000) == 0040000))
mask |= 0x40000000;
if (!(file->f_mode & (( fmode_t)0x1000000))) {
fsnotify_parent(path, ((void *)0), mask);
fsnotify(inode, mask, path, 1, ((void *)0), 0);
}
}
static inline __attribute__((always_inline)) void fsnotify_close(struct file *file)
{
struct path *path = &file->f_path;
struct inode *inode = file->f_path.dentry->d_inode;
fmode_t mode = file->f_mode;
__u32 mask = (mode & (( fmode_t)0x2)) ? 0x00000008 : 0x00000010;
if ((((inode->i_mode) & 00170000) == 0040000))
mask |= 0x40000000;
if (!(file->f_mode & (( fmode_t)0x1000000))) {
fsnotify_parent(path, ((void *)0), mask);
fsnotify(inode, mask, path, 1, ((void *)0), 0);
}
}
static inline __attribute__((always_inline)) void fsnotify_xattr(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
__u32 mask = 0x00000004;
if ((((inode->i_mode) & 00170000) == 0040000))
mask |= 0x40000000;
fsnotify_parent(((void *)0), dentry, mask);
fsnotify(inode, mask, inode, 2, ((void *)0), 0);
}
static inline __attribute__((always_inline)) void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
{
struct inode *inode = dentry->d_inode;
__u32 mask = 0;
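/* ia_valid bits: ATTR_MODE 1<<0, ATTR_UID 1<<1, ATTR_GID 1<<2,
 * ATTR_SIZE 1<<3, ATTR_ATIME 1<<4, ATTR_MTIME 1<<5. */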
if (ia_valid & (1 << 1))
mask |= 0x00000004;
if (ia_valid & (1 << 2))
mask |= 0x00000004;
if (ia_valid & (1 << 3))
mask |= 0x00000002;
if ((ia_valid & ((1 << 4) | (1 << 5))) == ((1 << 4) | (1 << 5)))
mask |= 0x00000004;
else if (ia_valid & (1 << 4))
mask |= 0x00000001;
else if (ia_valid & (1 << 5))
mask |= 0x00000002;
if (ia_valid & (1 << 0))
mask |= 0x00000004;
if (mask) {
if ((((inode->i_mode) & 00170000) == 0040000))
mask |= 0x40000000;
fsnotify_parent(((void *)0), dentry, mask);
fsnotify(inode, mask, inode, 2, ((void *)0), 0);
}
}
static inline __attribute__((always_inline)) const unsigned char *fsnotify_oldname_init(const unsigned char *name)
{
return kstrdup(name, ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u)));
}
static inline __attribute__((always_inline)) void fsnotify_oldname_free(const unsigned char *old_name)
{
kfree(old_name);
}
# 27 "include/linux/security.h" 2
# 1 "include/linux/binfmts.h" 1
struct pt_regs;
# 28 "include/linux/binfmts.h"
struct linux_binprm{
char buf[128];
struct vm_area_struct *vma;
struct mm_struct *mm;
unsigned long p;
unsigned int
cred_prepared:1,
cap_effective:1;
unsigned int recursion_depth;
struct file * file;
struct cred *cred;
int unsafe;
unsigned int per_clear;
int argc, envc;
const char * filename;
const char * interp;
unsigned interp_flags;
unsigned interp_data;
unsigned long loader, exec;
};
# 72 "include/linux/binfmts.h"
struct coredump_params {
long signr;
struct pt_regs *regs;
struct file *file;
unsigned long limit;
unsigned long mm_flags;
};
struct linux_binfmt {
struct list_head lh;
struct module *module;
int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
int (*load_shlib)(struct file *);
int (*core_dump)(struct coredump_params *cprm);
unsigned long min_coredump;
int hasvdso;
};
extern int __register_binfmt(struct linux_binfmt *fmt, int insert);
static inline __attribute__((always_inline)) int register_binfmt(struct linux_binfmt *fmt)
{
return __register_binfmt(fmt, 0);
}
static inline __attribute__((always_inline)) int insert_binfmt(struct linux_binfmt *fmt)
{
return __register_binfmt(fmt, 1);
}
extern void unregister_binfmt(struct linux_binfmt *);
extern int prepare_binprm(struct linux_binprm *);
extern int __attribute__((warn_unused_result)) remove_arg_zero(struct linux_binprm *);
extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
extern int flush_old_exec(struct linux_binprm * bprm);
extern void setup_new_exec(struct linux_binprm * bprm);
extern int suid_dumpable;
# 125 "include/linux/binfmts.h"
extern int setup_arg_pages(struct linux_binprm * bprm,
unsigned long stack_top,
int executable_stack);
extern int bprm_mm_init(struct linux_binprm *bprm);
extern int copy_strings_kernel(int argc, const char *const *argv,
struct linux_binprm *bprm);
extern int prepare_bprm_creds(struct linux_binprm *bprm);
extern void install_exec_creds(struct linux_binprm *bprm);
extern void do_coredump(long signr, int exit_code, struct pt_regs *regs);
extern void set_binfmt(struct linux_binfmt *new);
extern void free_bprm(struct linux_binprm *);
# 28 "include/linux/security.h" 2
# 1 "include/linux/shm.h" 1
# 28 "include/linux/shm.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/shmparam.h" 1
# 29 "include/linux/shm.h" 2
struct shmid_ds {
struct ipc_perm shm_perm;
int shm_segsz;
__kernel_time_t shm_atime;
__kernel_time_t shm_dtime;
__kernel_time_t shm_ctime;
__kernel_ipc_pid_t shm_cpid;
__kernel_ipc_pid_t shm_lpid;
unsigned short shm_nattch;
unsigned short shm_unused;
void *shm_unused2;
void *shm_unused3;
};
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/shmbuf.h" 1
# 13 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/shmbuf.h"
struct shmid64_ds {
struct ipc64_perm shm_perm;
size_t shm_segsz;
__kernel_time_t shm_atime;
__kernel_time_t shm_dtime;
__kernel_time_t shm_ctime;
__kernel_pid_t shm_cpid;
__kernel_pid_t shm_lpid;
unsigned long shm_nattch;
unsigned long __unused1;
unsigned long __unused2;
};
struct shminfo64 {
unsigned long shmmax;
unsigned long shmmin;
unsigned long shmmni;
unsigned long shmseg;
unsigned long shmall;
unsigned long __unused1;
unsigned long __unused2;
unsigned long __unused3;
unsigned long __unused4;
};
# 48 "include/linux/shm.h" 2
# 68 "include/linux/shm.h"
struct shminfo {
int shmmax;
int shmmin;
int shmmni;
int shmseg;
int shmall;
};
struct shm_info {
int used_ids;
unsigned long shm_tot;
unsigned long shm_rss;
unsigned long shm_swp;
unsigned long swap_attempts;
unsigned long swap_successes;
};
struct shmid_kernel
{
struct kern_ipc_perm shm_perm;
struct file * shm_file;
unsigned long shm_nattch;
unsigned long shm_segsz;
time_t shm_atim;
time_t shm_dtim;
time_t shm_ctim;
pid_t shm_cprid;
pid_t shm_lprid;
struct user_struct *mlock_user;
};
# 107 "include/linux/shm.h"
long do_shmat(int shmid, char *shmaddr, int shmflg, unsigned long *addr);
extern int is_file_shm_hugepages(struct file *file);
# 32 "include/linux/security.h" 2
# 1 "include/linux/msg.h" 1
# 15 "include/linux/msg.h"
struct msqid_ds {
struct ipc_perm msg_perm;
struct msg *msg_first;
struct msg *msg_last;
__kernel_time_t msg_stime;
__kernel_time_t msg_rtime;
__kernel_time_t msg_ctime;
unsigned long msg_lcbytes;
unsigned long msg_lqbytes;
unsigned short msg_cbytes;
unsigned short msg_qnum;
unsigned short msg_qbytes;
__kernel_ipc_pid_t msg_lspid;
__kernel_ipc_pid_t msg_lrpid;
};
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/msgbuf.h" 1
# 15 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/msgbuf.h"
struct msqid64_ds {
struct ipc64_perm msg_perm;
unsigned long __unused1;
__kernel_time_t msg_stime;
unsigned long __unused2;
__kernel_time_t msg_rtime;
unsigned long __unused3;
__kernel_time_t msg_ctime;
unsigned long msg_cbytes;
unsigned long msg_qnum;
unsigned long msg_qbytes;
__kernel_pid_t msg_lspid;
__kernel_pid_t msg_lrpid;
unsigned long __unused4;
unsigned long __unused5;
};
# 33 "include/linux/msg.h" 2
struct msgbuf {
long mtype;
char mtext[1];
};
struct msginfo {
int msgpool;
int msgmap;
int msgmax;
int msgmnb;
int msgmni;
int msgssz;
int msgtql;
unsigned short msgseg;
};
# 78 "include/linux/msg.h"
struct msg_msg {
struct list_head m_list;
long m_type;
int m_ts;
struct msg_msgseg* next;
void *security;
};
struct msg_queue {
struct kern_ipc_perm q_perm;
time_t q_stime;
time_t q_rtime;
time_t q_ctime;
unsigned long q_cbytes;
unsigned long q_qnum;
unsigned long q_qbytes;
pid_t q_lspid;
pid_t q_lrpid;
struct list_head q_messages;
struct list_head q_receivers;
struct list_head q_senders;
};
extern long do_msgsnd(int msqid, long mtype, void *mtext,
size_t msgsz, int msgflg);
extern long do_msgrcv(int msqid, long *pmtype, void *mtext,
size_t msgsz, long msgtyp, int msgflg);
# 34 "include/linux/security.h" 2
# 47 "include/linux/security.h"
struct ctl_table;
struct audit_krule;
extern int cap_capable(struct task_struct *tsk, const struct cred *cred,
int cap, int audit);
extern int cap_settime(struct timespec *ts, struct timezone *tz);
extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
extern int cap_ptrace_traceme(struct task_struct *parent);
extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
extern int cap_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted);
extern int cap_bprm_set_creds(struct linux_binprm *bprm);
extern int cap_bprm_secureexec(struct linux_binprm *bprm);
extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
extern int cap_inode_need_killpriv(struct dentry *dentry);
extern int cap_inode_killpriv(struct dentry *dentry);
extern int cap_file_mmap(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags,
unsigned long addr, unsigned long addr_only);
extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5);
extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
extern int cap_task_setioprio(struct task_struct *p, int ioprio);
extern int cap_task_setnice(struct task_struct *p, int nice);
extern int cap_syslog(int type, int from_file);
extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
struct msghdr;
struct sk_buff;
struct sock;
struct sockaddr;
struct socket;
struct flowi;
struct dst_entry;
struct xfrm_selector;
struct xfrm_policy;
struct xfrm_state;
struct xfrm_user_sec_ctx;
struct seq_file;
extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
extern int cap_netlink_recv(struct sk_buff *skb, int cap);
void reset_security_ops(void);
extern unsigned long mmap_min_addr;
extern unsigned long dac_mmap_min_addr;
# 124 "include/linux/security.h"
struct sched_param;
struct request_sock;
# 137 "include/linux/security.h"
static inline __attribute__((always_inline)) unsigned long round_hint_to_min(unsigned long hint)
{
hint &= (~((1 << 12) - 1));
if (((void *)hint != ((void *)0)) &&
(hint < mmap_min_addr))
return ((((mmap_min_addr)) + ((typeof((mmap_min_addr)))((((1UL) << 12))) - 1)) & ~((typeof((mmap_min_addr)))((((1UL) << 12))) - 1));
return hint;
}
extern int mmap_min_addr_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
# 1801 "include/linux/security.h"
struct security_mnt_opts {
};
static inline __attribute__((always_inline)) void security_init_mnt_opts(struct security_mnt_opts *opts)
{
}
static inline __attribute__((always_inline)) void security_free_mnt_opts(struct security_mnt_opts *opts)
{
}
static inline __attribute__((always_inline)) int security_init(void)
{
return 0;
}
static inline __attribute__((always_inline)) int security_ptrace_access_check(struct task_struct *child,
unsigned int mode)
{
return cap_ptrace_access_check(child, mode);
}
static inline __attribute__((always_inline)) int security_ptrace_traceme(struct task_struct *parent)
{
return cap_ptrace_traceme(parent);
}
static inline __attribute__((always_inline)) int security_capget(struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted)
{
return cap_capget(target, effective, inheritable, permitted);
}
static inline __attribute__((always_inline)) int security_capset(struct cred *new,
const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted)
{
return cap_capset(new, old, effective, inheritable, permitted);
}
static inline __attribute__((always_inline)) int security_capable(int cap)
{
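/* The final argument 1 is SECURITY_CAP_AUDIT; the 0 passed in
 * security_real_capable_noaudit() below is SECURITY_CAP_NOAUDIT. */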
return cap_capable((__current_thread_info->task), ((__current_thread_info->task)->cred), cap, 1);
}
static inline __attribute__((always_inline)) int security_real_capable(struct task_struct *tsk, int cap)
{
int ret;
rcu_read_lock();
ret = cap_capable(tsk, ({ const struct task_struct *__t = (tsk); ({ typeof(__t->real_cred) _________p1 = (*(volatile typeof(__t->real_cred) *)&(__t->real_cred)); do { } while(0); (_________p1); }); }), cap, 1);
rcu_read_unlock();
return ret;
}
static inline __attribute__((always_inline))
int security_real_capable_noaudit(struct task_struct *tsk, int cap)
{
int ret;
rcu_read_lock();
ret = cap_capable(tsk, ({ const struct task_struct *__t = (tsk); ({ typeof(__t->real_cred) _________p1 = (*(volatile typeof(__t->real_cred) *)&(__t->real_cred)); do { } while(0); (_________p1); }); }), cap,
0);
rcu_read_unlock();
return ret;
}
static inline __attribute__((always_inline)) int security_sysctl(struct ctl_table *table, int op)
{
return 0;
}
static inline __attribute__((always_inline)) int security_quotactl(int cmds, int type, int id,
struct super_block *sb)
{
return 0;
}
static inline __attribute__((always_inline)) int security_quota_on(struct dentry *dentry)
{
return 0;
}
static inline __attribute__((always_inline)) int security_syslog(int type, int from_file)
{
return cap_syslog(type, from_file);
}
static inline __attribute__((always_inline)) int security_settime(struct timespec *ts, struct timezone *tz)
{
return cap_settime(ts, tz);
}
static inline __attribute__((always_inline)) int security_vm_enough_memory(long pages)
{
({ int __ret_warn_on = !!((__current_thread_info->task)->mm == ((void *)0)); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_slowpath_null("include/linux/security.h", 1905); __builtin_expect(!!(__ret_warn_on), 0); });
return cap_vm_enough_memory((__current_thread_info->task)->mm, pages);
}
static inline __attribute__((always_inline)) int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
({ int __ret_warn_on = !!(mm == ((void *)0)); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_slowpath_null("include/linux/security.h", 1911); __builtin_expect(!!(__ret_warn_on), 0); });
return cap_vm_enough_memory(mm, pages);
}
static inline __attribute__((always_inline)) int security_vm_enough_memory_kern(long pages)
{
return cap_vm_enough_memory((__current_thread_info->task)->mm, pages);
}
static inline __attribute__((always_inline)) int security_bprm_set_creds(struct linux_binprm *bprm)
{
return cap_bprm_set_creds(bprm);
}
static inline __attribute__((always_inline)) int security_bprm_check(struct linux_binprm *bprm)
{
return 0;
}
static inline __attribute__((always_inline)) void security_bprm_committing_creds(struct linux_binprm *bprm)
{
}
static inline __attribute__((always_inline)) void security_bprm_committed_creds(struct linux_binprm *bprm)
{
}
static inline __attribute__((always_inline)) int security_bprm_secureexec(struct linux_binprm *bprm)
{
return cap_bprm_secureexec(bprm);
}
static inline __attribute__((always_inline)) int security_sb_alloc(struct super_block *sb)
{
return 0;
}
static inline __attribute__((always_inline)) void security_sb_free(struct super_block *sb)
{ }
static inline __attribute__((always_inline)) int security_sb_copy_data(char *orig, char *copy)
{
return 0;
}
static inline __attribute__((always_inline)) int security_sb_kern_mount(struct super_block *sb, int flags, void *data)
{
return 0;
}
static inline __attribute__((always_inline)) int security_sb_show_options(struct seq_file *m,
struct super_block *sb)
{
return 0;
}
static inline __attribute__((always_inline)) int security_sb_statfs(struct dentry *dentry)
{
return 0;
}
static inline __attribute__((always_inline)) int security_sb_mount(char *dev_name, struct path *path,
char *type, unsigned long flags,
void *data)
{
return 0;
}
static inline __attribute__((always_inline)) int security_sb_umount(struct vfsmount *mnt, int flags)
{
return 0;
}
static inline __attribute__((always_inline)) int security_sb_pivotroot(struct path *old_path,
struct path *new_path)
{
return 0;
}
static inline __attribute__((always_inline)) int security_sb_set_mnt_opts(struct super_block *sb,
struct security_mnt_opts *opts)
{
return 0;
}
static inline __attribute__((always_inline)) void security_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb)
{ }
static inline __attribute__((always_inline)) int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts)
{
return 0;
}
static inline __attribute__((always_inline)) int security_inode_alloc(struct inode *inode)
{
return 0;
}
static inline __attribute__((always_inline)) void security_inode_free(struct inode *inode)
{ }
static inline __attribute__((always_inline)) int security_inode_init_security(struct inode *inode,
struct inode *dir,
char **name,
void **value,
size_t *len)
{
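/* -122 is -EOPNOTSUPP on MIPS (also returned by the getsecurity/setsecurity stubs below). */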
return -122;
}
static inline __attribute__((always_inline)) int security_inode_create(struct inode *dir,
struct dentry *dentry,
int mode)
{
return 0;
}
static inline __attribute__((always_inline)) int security_inode_link(struct dentry *old_dentry,
struct inode *dir,
struct dentry *new_dentry)
{
return 0;
}
static inline __attribute__((always_inline)) int security_inode_unlink(struct inode *dir,
struct dentry *dentry)
{
return 0;
}
static inline __attribute__((always_inline)) int security_inode_symlink(struct inode *dir,
struct dentry *dentry,
const char *old_name)
{
return 0;
}
static inline __attribute__((always_inline)) int security_inode_mkdir(struct inode *dir,
struct dentry *dentry,
int mode)
{
return 0;
}
static inline __attribute__((always_inline)) int security_inode_rmdir(struct inode *dir,
struct dentry *dentry)
{
return 0;
}
static inline __attribute__((always_inline)) int security_inode_mknod(struct inode *dir,
struct dentry *dentry,
int mode, dev_t dev)
{
return 0;
}
static inline __attribute__((always_inline)) int security_inode_rename(struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
struct dentry *new_dentry)
{
return 0;
}
static inline __attribute__((always_inline)) int security_inode_readlink(struct dentry *dentry)
{
return 0;
}
static inline __attribute__((always_inline)) int security_inode_follow_link(struct dentry *dentry,
struct nameidata *nd)
{
return 0;
}
static inline __attribute__((always_inline)) int security_inode_permission(struct inode *inode, int mask)
{
return 0;
}
static inline __attribute__((always_inline)) int security_inode_setattr(struct dentry *dentry,
struct iattr *attr)
{
return 0;
}
static inline __attribute__((always_inline)) int security_inode_getattr(struct vfsmount *mnt,
struct dentry *dentry)
{
return 0;
}
static inline __attribute__((always_inline)) int security_inode_setxattr(struct dentry *dentry,
const char *name, const void *value, size_t size, int flags)
{
return cap_inode_setxattr(dentry, name, value, size, flags);
}
static inline __attribute__((always_inline)) void security_inode_post_setxattr(struct dentry *dentry,
const char *name, const void *value, size_t size, int flags)
{ }
static inline __attribute__((always_inline)) int security_inode_getxattr(struct dentry *dentry,
const char *name)
{
return 0;
}
static inline __attribute__((always_inline)) int security_inode_listxattr(struct dentry *dentry)
{
return 0;
}
static inline __attribute__((always_inline)) int security_inode_removexattr(struct dentry *dentry,
const char *name)
{
return cap_inode_removexattr(dentry, name);
}
static inline __attribute__((always_inline)) int security_inode_need_killpriv(struct dentry *dentry)
{
return cap_inode_need_killpriv(dentry);
}
static inline __attribute__((always_inline)) int security_inode_killpriv(struct dentry *dentry)
{
return cap_inode_killpriv(dentry);
}
static inline __attribute__((always_inline)) int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, int alloc)
{
return -122;
}
static inline __attribute__((always_inline)) int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
{
return -122;
}
static inline __attribute__((always_inline)) int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
{
return 0;
}
static inline __attribute__((always_inline)) void security_inode_getsecid(const struct inode *inode, u32 *secid)
{
*secid = 0;
}
static inline __attribute__((always_inline)) int security_file_permission(struct file *file, int mask)
{
return 0;
}
static inline __attribute__((always_inline)) int security_file_alloc(struct file *file)
{
return 0;
}
static inline __attribute__((always_inline)) void security_file_free(struct file *file)
{ }
static inline __attribute__((always_inline)) int security_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return 0;
}
static inline __attribute__((always_inline)) int security_file_mmap(struct file *file, unsigned long reqprot,
unsigned long prot,
unsigned long flags,
unsigned long addr,
unsigned long addr_only)
{
return cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
}
static inline __attribute__((always_inline)) int security_file_mprotect(struct vm_area_struct *vma,
unsigned long reqprot,
unsigned long prot)
{
return 0;
}
static inline __attribute__((always_inline)) int security_file_lock(struct file *file, unsigned int cmd)
{
return 0;
}
static inline __attribute__((always_inline)) int security_file_fcntl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return 0;
}
static inline __attribute__((always_inline)) int security_file_set_fowner(struct file *file)
{
return 0;
}
static inline __attribute__((always_inline)) int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown,
int sig)
{
return 0;
}
static inline __attribute__((always_inline)) int security_file_receive(struct file *file)
{
return 0;
}
static inline __attribute__((always_inline)) int security_dentry_open(struct file *file,
const struct cred *cred)
{
return 0;
}
static inline __attribute__((always_inline)) int security_task_create(unsigned long clone_flags)
{
return 0;
}
static inline __attribute__((always_inline)) int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
return 0;
}
static inline __attribute__((always_inline)) void security_cred_free(struct cred *cred)
{ }
static inline __attribute__((always_inline)) int security_prepare_creds(struct cred *new,
const struct cred *old,
gfp_t gfp)
{
return 0;
}
static inline __attribute__((always_inline)) void security_transfer_creds(struct cred *new,
const struct cred *old)
{
}
static inline __attribute__((always_inline)) int security_kernel_act_as(struct cred *cred, u32 secid)
{
return 0;
}
static inline __attribute__((always_inline)) int security_kernel_create_files_as(struct cred *cred,
struct inode *inode)
{
return 0;
}
static inline __attribute__((always_inline)) int security_kernel_module_request(char *kmod_name)
{
return 0;
}
static inline __attribute__((always_inline)) int security_task_fix_setuid(struct cred *new,
const struct cred *old,
int flags)
{
return cap_task_fix_setuid(new, old, flags);
}
static inline __attribute__((always_inline)) int security_task_setpgid(struct task_struct *p, pid_t pgid)
{
return 0;
}
static inline __attribute__((always_inline)) int security_task_getpgid(struct task_struct *p)
{
return 0;
}
static inline __attribute__((always_inline)) int security_task_getsid(struct task_struct *p)
{
return 0;
}
static inline __attribute__((always_inline)) void security_task_getsecid(struct task_struct *p, u32 *secid)
{
*secid = 0;
}
static inline __attribute__((always_inline)) int security_task_setnice(struct task_struct *p, int nice)
{
return cap_task_setnice(p, nice);
}
static inline __attribute__((always_inline)) int security_task_setioprio(struct task_struct *p, int ioprio)
{
return cap_task_setioprio(p, ioprio);
}
static inline __attribute__((always_inline)) int security_task_getioprio(struct task_struct *p)
{
return 0;
}
static inline __attribute__((always_inline)) int security_task_setrlimit(struct task_struct *p,
unsigned int resource,
struct rlimit *new_rlim)
{
return 0;
}
static inline __attribute__((always_inline)) int security_task_setscheduler(struct task_struct *p,
int policy,
struct sched_param *lp)
{
return cap_task_setscheduler(p, policy, lp);
}
static inline __attribute__((always_inline)) int security_task_getscheduler(struct task_struct *p)
{
return 0;
}
static inline __attribute__((always_inline)) int security_task_movememory(struct task_struct *p)
{
return 0;
}
static inline __attribute__((always_inline)) int security_task_kill(struct task_struct *p,
struct siginfo *info, int sig,
u32 secid)
{
return 0;
}
static inline __attribute__((always_inline)) int security_task_wait(struct task_struct *p)
{
return 0;
}
static inline __attribute__((always_inline)) int security_task_prctl(int option, unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
unsigned long arg5)
{
return cap_task_prctl(option, arg2, arg3, arg4, arg5);
}
static inline __attribute__((always_inline)) void security_task_to_inode(struct task_struct *p, struct inode *inode)
{ }
static inline __attribute__((always_inline)) int security_ipc_permission(struct kern_ipc_perm *ipcp,
short flag)
{
return 0;
}
static inline __attribute__((always_inline)) void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
{
*secid = 0;
}
static inline __attribute__((always_inline)) int security_msg_msg_alloc(struct msg_msg *msg)
{
return 0;
}
static inline __attribute__((always_inline)) void security_msg_msg_free(struct msg_msg *msg)
{ }
static inline __attribute__((always_inline)) int security_msg_queue_alloc(struct msg_queue *msq)
{
return 0;
}
static inline __attribute__((always_inline)) void security_msg_queue_free(struct msg_queue *msq)
{ }
static inline __attribute__((always_inline)) int security_msg_queue_associate(struct msg_queue *msq,
int msqflg)
{
return 0;
}
static inline __attribute__((always_inline)) int security_msg_queue_msgctl(struct msg_queue *msq, int cmd)
{
return 0;
}
static inline __attribute__((always_inline)) int security_msg_queue_msgsnd(struct msg_queue *msq,
struct msg_msg *msg, int msqflg)
{
return 0;
}
static inline __attribute__((always_inline)) int security_msg_queue_msgrcv(struct msg_queue *msq,
struct msg_msg *msg,
struct task_struct *target,
long type, int mode)
{
return 0;
}
static inline __attribute__((always_inline)) int security_shm_alloc(struct shmid_kernel *shp)
{
return 0;
}
static inline __attribute__((always_inline)) void security_shm_free(struct shmid_kernel *shp)
{ }
static inline __attribute__((always_inline)) int security_shm_associate(struct shmid_kernel *shp,
int shmflg)
{
return 0;
}
static inline __attribute__((always_inline)) int security_shm_shmctl(struct shmid_kernel *shp, int cmd)
{
return 0;
}
static inline __attribute__((always_inline)) int security_shm_shmat(struct shmid_kernel *shp,
char *shmaddr, int shmflg)
{
return 0;
}
static inline __attribute__((always_inline)) int security_sem_alloc(struct sem_array *sma)
{
return 0;
}
static inline __attribute__((always_inline)) void security_sem_free(struct sem_array *sma)
{ }
static inline __attribute__((always_inline)) int security_sem_associate(struct sem_array *sma, int semflg)
{
return 0;
}
static inline __attribute__((always_inline)) int security_sem_semctl(struct sem_array *sma, int cmd)
{
return 0;
}
static inline __attribute__((always_inline)) int security_sem_semop(struct sem_array *sma,
struct sembuf *sops, unsigned nsops,
int alter)
{
return 0;
}
static inline __attribute__((always_inline)) void security_d_instantiate(struct dentry *dentry, struct inode *inode)
{ }
static inline __attribute__((always_inline)) int security_getprocattr(struct task_struct *p, char *name, char **value)
{
return -22;
}
static inline __attribute__((always_inline)) int security_setprocattr(struct task_struct *p, char *name, void *value, size_t size)
{
return -22;
}
static inline __attribute__((always_inline)) int security_netlink_send(struct sock *sk, struct sk_buff *skb)
{
return cap_netlink_send(sk, skb);
}
static inline __attribute__((always_inline)) int security_netlink_recv(struct sk_buff *skb, int cap)
{
return cap_netlink_recv(skb, cap);
}
static inline __attribute__((always_inline)) int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
return -122;
}
static inline __attribute__((always_inline)) int security_secctx_to_secid(const char *secdata,
u32 seclen,
u32 *secid)
{
return -122;
}
static inline __attribute__((always_inline)) void security_release_secctx(char *secdata, u32 seclen)
{
}
static inline __attribute__((always_inline)) int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
{
return -122;
}
static inline __attribute__((always_inline)) int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
{
return -122;
}
static inline __attribute__((always_inline)) int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
{
return -122;
}
# 2559 "include/linux/security.h"
static inline __attribute__((always_inline)) int security_unix_stream_connect(struct socket *sock,
struct socket *other,
struct sock *newsk)
{
return 0;
}
static inline __attribute__((always_inline)) int security_unix_may_send(struct socket *sock,
struct socket *other)
{
return 0;
}
static inline __attribute__((always_inline)) int security_socket_create(int family, int type,
int protocol, int kern)
{
return 0;
}
static inline __attribute__((always_inline)) int security_socket_post_create(struct socket *sock,
int family,
int type,
int protocol, int kern)
{
return 0;
}
static inline __attribute__((always_inline)) int security_socket_bind(struct socket *sock,
struct sockaddr *address,
int addrlen)
{
return 0;
}
static inline __attribute__((always_inline)) int security_socket_connect(struct socket *sock,
struct sockaddr *address,
int addrlen)
{
return 0;
}
static inline __attribute__((always_inline)) int security_socket_listen(struct socket *sock, int backlog)
{
return 0;
}
static inline __attribute__((always_inline)) int security_socket_accept(struct socket *sock,
struct socket *newsock)
{
return 0;
}
static inline __attribute__((always_inline)) int security_socket_sendmsg(struct socket *sock,
struct msghdr *msg, int size)
{
return 0;
}
static inline __attribute__((always_inline)) int security_socket_recvmsg(struct socket *sock,
struct msghdr *msg, int size,
int flags)
{
return 0;
}
static inline __attribute__((always_inline)) int security_socket_getsockname(struct socket *sock)
{
return 0;
}
static inline __attribute__((always_inline)) int security_socket_getpeername(struct socket *sock)
{
return 0;
}
static inline __attribute__((always_inline)) int security_socket_getsockopt(struct socket *sock,
int level, int optname)
{
return 0;
}
static inline __attribute__((always_inline)) int security_socket_setsockopt(struct socket *sock,
int level, int optname)
{
return 0;
}
static inline __attribute__((always_inline)) int security_socket_shutdown(struct socket *sock, int how)
{
return 0;
}
static inline __attribute__((always_inline)) int security_sock_rcv_skb(struct sock *sk,
struct sk_buff *skb)
{
return 0;
}
static inline __attribute__((always_inline)) int security_socket_getpeersec_stream(struct socket *sock, char *optval,
int *optlen, unsigned len)
{
return -99;
}
static inline __attribute__((always_inline)) int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
{
return -99;
}
static inline __attribute__((always_inline)) int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
{
return 0;
}
static inline __attribute__((always_inline)) void security_sk_free(struct sock *sk)
{
}
static inline __attribute__((always_inline)) void security_sk_clone(const struct sock *sk, struct sock *newsk)
{
}
static inline __attribute__((always_inline)) void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
{
}
static inline __attribute__((always_inline)) void security_req_classify_flow(const struct request_sock *req, struct flowi *fl)
{
}
static inline __attribute__((always_inline)) void security_sock_graft(struct sock *sk, struct socket *parent)
{
}
static inline __attribute__((always_inline)) int security_inet_conn_request(struct sock *sk,
struct sk_buff *skb, struct request_sock *req)
{
return 0;
}
static inline __attribute__((always_inline)) void security_inet_csk_clone(struct sock *newsk,
const struct request_sock *req)
{
}
static inline __attribute__((always_inline)) void security_inet_conn_established(struct sock *sk,
struct sk_buff *skb)
{
}
static inline __attribute__((always_inline)) int security_tun_dev_create(void)
{
return 0;
}
static inline __attribute__((always_inline)) void security_tun_dev_post_create(struct sock *sk)
{
}
static inline __attribute__((always_inline)) int security_tun_dev_attach(struct sock *sk)
{
return 0;
}
# 2742 "include/linux/security.h"
static inline __attribute__((always_inline)) int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx)
{
return 0;
}
static inline __attribute__((always_inline)) int security_xfrm_policy_clone(struct xfrm_sec_ctx *old, struct xfrm_sec_ctx **new_ctxp)
{
return 0;
}
static inline __attribute__((always_inline)) void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
{
}
static inline __attribute__((always_inline)) int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
{
return 0;
}
static inline __attribute__((always_inline)) int security_xfrm_state_alloc(struct xfrm_state *x,
struct xfrm_user_sec_ctx *sec_ctx)
{
return 0;
}
static inline __attribute__((always_inline)) int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid)
{
return 0;
}
static inline __attribute__((always_inline)) void security_xfrm_state_free(struct xfrm_state *x)
{
}
static inline __attribute__((always_inline)) int security_xfrm_state_delete(struct xfrm_state *x)
{
return 0;
}
static inline __attribute__((always_inline)) int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
{
return 0;
}
static inline __attribute__((always_inline)) int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp, struct flowi *fl)
{
return 1;
}
static inline __attribute__((always_inline)) int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
{
return 0;
}
static inline __attribute__((always_inline)) void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
{
}
# 2822 "include/linux/security.h"
static inline __attribute__((always_inline)) int security_path_unlink(struct path *dir, struct dentry *dentry)
{
return 0;
}
static inline __attribute__((always_inline)) int security_path_mkdir(struct path *dir, struct dentry *dentry,
int mode)
{
return 0;
}
static inline __attribute__((always_inline)) int security_path_rmdir(struct path *dir, struct dentry *dentry)
{
return 0;
}
static inline __attribute__((always_inline)) int security_path_mknod(struct path *dir, struct dentry *dentry,
int mode, unsigned int dev)
{
return 0;
}
static inline __attribute__((always_inline)) int security_path_truncate(struct path *path)
{
return 0;
}
static inline __attribute__((always_inline)) int security_path_symlink(struct path *dir, struct dentry *dentry,
const char *old_name)
{
return 0;
}
static inline __attribute__((always_inline)) int security_path_link(struct dentry *old_dentry,
struct path *new_dir,
struct dentry *new_dentry)
{
return 0;
}
static inline __attribute__((always_inline)) int security_path_rename(struct path *old_dir,
struct dentry *old_dentry,
struct path *new_dir,
struct dentry *new_dentry)
{
return 0;
}
static inline __attribute__((always_inline)) int security_path_chmod(struct dentry *dentry,
struct vfsmount *mnt,
mode_t mode)
{
return 0;
}
static inline __attribute__((always_inline)) int security_path_chown(struct path *path, uid_t uid, gid_t gid)
{
return 0;
}
static inline __attribute__((always_inline)) int security_path_chroot(struct path *path)
{
return 0;
}
# 2969 "include/linux/security.h"
static inline __attribute__((always_inline)) struct dentry *securityfs_create_dir(const char *name,
struct dentry *parent)
{
return ERR_PTR(-19);
}
static inline __attribute__((always_inline)) struct dentry *securityfs_create_file(const char *name,
mode_t mode,
struct dentry *parent,
void *data,
const struct file_operations *fops)
{
return ERR_PTR(-19);
}
static inline __attribute__((always_inline)) void securityfs_remove(struct dentry *dentry)
{}
# 3003 "include/linux/security.h"
static inline __attribute__((always_inline)) char *alloc_secdata(void)
{
return (char *)1;
}
static inline __attribute__((always_inline)) void free_secdata(void *secdata)
{ }
# 54 "include/net/sock.h" 2
# 1 "include/linux/filter.h" 1
# 26 "include/linux/filter.h"
struct sock_filter {
__u16 code;
__u8 jt;
__u8 jf;
__u32 k;
};
struct sock_fprog {
unsigned short len;
struct sock_filter *filter;
};
# 94 "include/linux/filter.h"
enum {
BPF_S_RET_K = 0,
BPF_S_RET_A,
BPF_S_ALU_ADD_K,
BPF_S_ALU_ADD_X,
BPF_S_ALU_SUB_K,
BPF_S_ALU_SUB_X,
BPF_S_ALU_MUL_K,
BPF_S_ALU_MUL_X,
BPF_S_ALU_DIV_X,
BPF_S_ALU_AND_K,
BPF_S_ALU_AND_X,
BPF_S_ALU_OR_K,
BPF_S_ALU_OR_X,
BPF_S_ALU_LSH_K,
BPF_S_ALU_LSH_X,
BPF_S_ALU_RSH_K,
BPF_S_ALU_RSH_X,
BPF_S_ALU_NEG,
BPF_S_LD_W_ABS,
BPF_S_LD_H_ABS,
BPF_S_LD_B_ABS,
BPF_S_LD_W_LEN,
BPF_S_LD_W_IND,
BPF_S_LD_H_IND,
BPF_S_LD_B_IND,
BPF_S_LD_IMM,
BPF_S_LDX_W_LEN,
BPF_S_LDX_B_MSH,
BPF_S_LDX_IMM,
BPF_S_MISC_TAX,
BPF_S_MISC_TXA,
BPF_S_ALU_DIV_K,
BPF_S_LD_MEM,
BPF_S_LDX_MEM,
BPF_S_ST,
BPF_S_STX,
BPF_S_JMP_JA,
BPF_S_JMP_JEQ_K,
BPF_S_JMP_JEQ_X,
BPF_S_JMP_JGE_K,
BPF_S_JMP_JGE_X,
BPF_S_JMP_JGT_K,
BPF_S_JMP_JGT_X,
BPF_S_JMP_JSET_K,
BPF_S_JMP_JSET_X,
};
# 180 "include/linux/filter.h"
struct sk_filter
{
atomic_t refcnt;
unsigned int len;
struct rcu_head rcu;
struct sock_filter insns[0];
};
static inline __attribute__((always_inline)) unsigned int sk_filter_len(const struct sk_filter *fp)
{
return fp->len * sizeof(struct sock_filter) + sizeof(*fp);
}
struct sk_buff;
struct sock;
extern int sk_filter(struct sock *sk, struct sk_buff *skb);
extern unsigned int sk_run_filter(struct sk_buff *skb,
struct sock_filter *filter, int flen);
extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
extern int sk_detach_filter(struct sock *sk);
extern int sk_chk_filter(struct sock_filter *filter, int flen);
# 57 "include/net/sock.h" 2
# 1 "include/linux/rculist_nulls.h" 1
# 32 "include/linux/rculist_nulls.h"
static inline __attribute__((always_inline)) void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
{
if (!hlist_nulls_unhashed(n)) {
__hlist_nulls_del(n);
n->pprev = ((void *)0);
}
}
# 59 "include/linux/rculist_nulls.h"
static inline __attribute__((always_inline)) void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
{
__hlist_nulls_del(n);
n->pprev = ((void *) 0x00200200 + 0);
}
# 84 "include/linux/rculist_nulls.h"
static inline __attribute__((always_inline)) void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
struct hlist_nulls_head *h)
{
struct hlist_nulls_node *first = h->first;
n->next = first;
n->pprev = &h->first;
({ if (!__builtin_constant_p(n) || ((n) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (h->first) = (n); });
if (!is_a_nulls(first))
first->pprev = &n->next;
}
# 58 "include/net/sock.h" 2
# 1 "include/linux/poll.h" 1
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/poll.h" 1
# 1 "include/asm-generic/poll.h" 1
# 31 "include/asm-generic/poll.h"
struct pollfd {
int fd;
short events;
short revents;
};
# 8 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/poll.h" 2
# 5 "include/linux/poll.h" 2
# 16 "include/linux/poll.h"
extern struct ctl_table epoll_table[];
# 28 "include/linux/poll.h"
struct poll_table_struct;
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
typedef struct poll_table_struct {
poll_queue_proc qproc;
unsigned long key;
} poll_table;
static inline __attribute__((always_inline)) void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
{
if (p && wait_address)
p->qproc(filp, wait_address, p);
}
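/*
 * poll_wait() is what a driver's ->poll() method calls to register the wait
 * queues it may later wake.  Illustrative sketch only (my_dev, my_waitq and
 * data_ready() are hypothetical names, not part of this tree):
 *
 *   static unsigned int my_poll(struct file *filp, poll_table *wait)
 *   {
 *           poll_wait(filp, &my_dev.my_waitq, wait);
 *           return data_ready(&my_dev) ? (POLLIN | POLLRDNORM) : 0;
 *   }
 */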
static inline __attribute__((always_inline)) void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
pt->qproc = qproc;
pt->key = ~0UL;
}
struct poll_table_entry {
struct file *filp;
unsigned long key;
wait_queue_t wait;
wait_queue_head_t *wait_address;
};
struct poll_wqueues {
poll_table pt;
struct poll_table_page *table;
struct task_struct *polling_task;
int triggered;
int error;
int inline_index;
struct poll_table_entry inline_entries[((832 - 256) / sizeof(struct poll_table_entry))];
};
extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
ktime_t *expires, unsigned long slack);
static inline __attribute__((always_inline)) int poll_schedule(struct poll_wqueues *pwq, int state)
{
return poll_schedule_timeout(pwq, state, ((void *)0), 0);
}
typedef struct {
unsigned long *in, *out, *ex;
unsigned long *res_in, *res_out, *res_ex;
} fd_set_bits;
# 104 "include/linux/poll.h"
static inline __attribute__((always_inline))
int get_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
{
nr = ((((nr)+(8*sizeof(long))-1)/(8*sizeof(long)))*sizeof(long));
if (ufdset)
return ({ void *__cu_to; const void *__cu_from; long __cu_len; __cu_to = (fdset); __cu_from = (ufdset); __cu_len = (nr); if (__builtin_expect(!!(({ unsigned long __addr = (unsigned long) ((__cu_from)); unsigned long __size = (__cu_len); unsigned long __mask = (__current_thread_info->addr_limit).seg; unsigned long __ok; (void)0; __ok = (signed long)(__mask & (__addr | (__addr + __size) | ((__builtin_constant_p(__size) && (signed long) (__size) > 0) ? 0 : (__size)))); __ok == 0; })), 1)) { might_fault(); __cu_len = ({ register void *__cu_to_r __asm__("$4"); register const void *__cu_from_r __asm__("$5"); register long __cu_len_r __asm__("$6"); __cu_to_r = (__cu_to); __cu_from_r = (__cu_from); __cu_len_r = (__cu_len); __asm__ __volatile__( ".set\tnoreorder\n\t" ".set\tnoat\n\t" "la" "\t$1, " "__copy_user" "\n\t" "jalr\t$1\n\t" ".set\tat\n\t" ".set\tnoat\n\t" "addu" "\t$1, %1, %2\n\t" ".set\tat\n\t" ".set\treorder" : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) : : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", "$0", "memory"); __cu_len_r; }); } __cu_len; }) ? -14 : 0;
memset(fdset, 0, nr);
return 0;
}
static inline __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result))
set_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
{
if (ufdset)
return ({ void *__cu_to; const void *__cu_from; long __cu_len; __cu_to = (ufdset); __cu_from = (fdset); __cu_len = (((((nr)+(8*sizeof(long))-1)/(8*sizeof(long)))*sizeof(long))); might_fault(); __cu_len = ({ register void *__cu_to_r __asm__("$4"); register const void *__cu_from_r __asm__("$5"); register long __cu_len_r __asm__("$6"); __cu_to_r = (__cu_to); __cu_from_r = (__cu_from); __cu_len_r = (__cu_len); __asm__ __volatile__( ".set\tnoat\n\t" "la" "\t$1, " "__copy_user" "\n\t" "jalr\t$1\n\t" ".set\tat\n\t" : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) : : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", "$0", "memory"); __cu_len_r; }); __cu_len; });
return 0;
}
static inline __attribute__((always_inline))
void zero_fd_set(unsigned long nr, unsigned long *fdset)
{
memset(fdset, 0, ((((nr)+(8*sizeof(long))-1)/(8*sizeof(long)))*sizeof(long)));
}
extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time);
extern int do_sys_poll(struct pollfd * ufds, unsigned int nfds,
struct timespec *end_time);
extern int core_sys_select(int n, fd_set *inp, fd_set *outp,
fd_set *exp, struct timespec *end_time);
extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec);
# 59 "include/net/sock.h" 2
# 1 "include/net/dst.h" 1
# 13 "include/net/dst.h"
# 1 "include/linux/rtnetlink.h" 1
# 1 "include/linux/if_addr.h" 1
struct ifaddrmsg {
__u8 ifa_family;
__u8 ifa_prefixlen;
__u8 ifa_flags;
__u8 ifa_scope;
__u32 ifa_index;
};
# 22 "include/linux/if_addr.h"
enum {
IFA_UNSPEC,
IFA_ADDRESS,
IFA_LOCAL,
IFA_LABEL,
IFA_BROADCAST,
IFA_ANYCAST,
IFA_CACHEINFO,
IFA_MULTICAST,
__IFA_MAX,
};
# 48 "include/linux/if_addr.h"
struct ifa_cacheinfo {
__u32 ifa_prefered;
__u32 ifa_valid;
__u32 cstamp;
__u32 tstamp;
};
# 8 "include/linux/rtnetlink.h" 2
# 1 "include/linux/neighbour.h" 1
struct ndmsg {
__u8 ndm_family;
__u8 ndm_pad1;
__u16 ndm_pad2;
__s32 ndm_ifindex;
__u16 ndm_state;
__u8 ndm_flags;
__u8 ndm_type;
};
enum {
NDA_UNSPEC,
NDA_DST,
NDA_LLADDR,
NDA_CACHEINFO,
NDA_PROBES,
__NDA_MAX
};
# 57 "include/linux/neighbour.h"
struct nda_cacheinfo {
__u32 ndm_confirmed;
__u32 ndm_used;
__u32 ndm_updated;
__u32 ndm_refcnt;
};
# 89 "include/linux/neighbour.h"
struct ndt_stats {
__u64 ndts_allocs;
__u64 ndts_destroys;
__u64 ndts_hash_grows;
__u64 ndts_res_failed;
__u64 ndts_lookups;
__u64 ndts_hits;
__u64 ndts_rcv_probes_mcast;
__u64 ndts_rcv_probes_ucast;
__u64 ndts_periodic_gc_runs;
__u64 ndts_forced_gc_runs;
};
enum {
NDTPA_UNSPEC,
NDTPA_IFINDEX,
NDTPA_REFCNT,
NDTPA_REACHABLE_TIME,
NDTPA_BASE_REACHABLE_TIME,
NDTPA_RETRANS_TIME,
NDTPA_GC_STALETIME,
NDTPA_DELAY_PROBE_TIME,
NDTPA_QUEUE_LEN,
NDTPA_APP_PROBES,
NDTPA_UCAST_PROBES,
NDTPA_MCAST_PROBES,
NDTPA_ANYCAST_DELAY,
NDTPA_PROXY_DELAY,
NDTPA_PROXY_QLEN,
NDTPA_LOCKTIME,
__NDTPA_MAX
};
struct ndtmsg {
__u8 ndtm_family;
__u8 ndtm_pad1;
__u16 ndtm_pad2;
};
struct ndt_config {
__u16 ndtc_key_len;
__u16 ndtc_entry_size;
__u32 ndtc_entries;
__u32 ndtc_last_flush;
__u32 ndtc_last_rand;
__u32 ndtc_hash_rnd;
__u32 ndtc_hash_mask;
__u32 ndtc_hash_chain_gc;
__u32 ndtc_proxy_qlen;
};
enum {
NDTA_UNSPEC,
NDTA_NAME,
NDTA_THRESH1,
NDTA_THRESH2,
NDTA_THRESH3,
NDTA_CONFIG,
NDTA_PARMS,
NDTA_STATS,
NDTA_GC_INTERVAL,
__NDTA_MAX
};
# 9 "include/linux/rtnetlink.h" 2
# 23 "include/linux/rtnetlink.h"
enum {
RTM_BASE = 16,
RTM_NEWLINK = 16,
RTM_DELLINK,
RTM_GETLINK,
RTM_SETLINK,
RTM_NEWADDR = 20,
RTM_DELADDR,
RTM_GETADDR,
RTM_NEWROUTE = 24,
RTM_DELROUTE,
RTM_GETROUTE,
RTM_NEWNEIGH = 28,
RTM_DELNEIGH,
RTM_GETNEIGH,
RTM_NEWRULE = 32,
RTM_DELRULE,
RTM_GETRULE,
RTM_NEWQDISC = 36,
RTM_DELQDISC,
RTM_GETQDISC,
RTM_NEWTCLASS = 40,
RTM_DELTCLASS,
RTM_GETTCLASS,
RTM_NEWTFILTER = 44,
RTM_DELTFILTER,
RTM_GETTFILTER,
RTM_NEWACTION = 48,
RTM_DELACTION,
RTM_GETACTION,
RTM_NEWPREFIX = 52,
RTM_GETMULTICAST = 58,
RTM_GETANYCAST = 62,
RTM_NEWNEIGHTBL = 64,
RTM_GETNEIGHTBL = 66,
RTM_SETNEIGHTBL,
RTM_NEWNDUSEROPT = 68,
RTM_NEWADDRLABEL = 72,
RTM_DELADDRLABEL,
RTM_GETADDRLABEL,
RTM_GETDCB = 78,
RTM_SETDCB,
__RTM_MAX,
};
# 137 "include/linux/rtnetlink.h"
struct rtattr {
unsigned short rta_len;
unsigned short rta_type;
};
# 163 "include/linux/rtnetlink.h"
struct rtmsg {
unsigned char rtm_family;
unsigned char rtm_dst_len;
unsigned char rtm_src_len;
unsigned char rtm_tos;
unsigned char rtm_table;
unsigned char rtm_protocol;
unsigned char rtm_scope;
unsigned char rtm_type;
unsigned rtm_flags;
};
enum {
RTN_UNSPEC,
RTN_UNICAST,
RTN_LOCAL,
RTN_BROADCAST,
RTN_ANYCAST,
RTN_MULTICAST,
RTN_BLACKHOLE,
RTN_UNREACHABLE,
RTN_PROHIBIT,
RTN_THROW,
RTN_NAT,
RTN_XRESOLVE,
__RTN_MAX
};
# 237 "include/linux/rtnetlink.h"
enum rt_scope_t {
RT_SCOPE_UNIVERSE=0,
RT_SCOPE_SITE=200,
RT_SCOPE_LINK=253,
RT_SCOPE_HOST=254,
RT_SCOPE_NOWHERE=255
};
# 255 "include/linux/rtnetlink.h"
enum rt_class_t {
RT_TABLE_UNSPEC=0,
RT_TABLE_COMPAT=252,
RT_TABLE_DEFAULT=253,
RT_TABLE_MAIN=254,
RT_TABLE_LOCAL=255,
RT_TABLE_MAX=0xFFFFFFFF
};
enum rtattr_type_t {
RTA_UNSPEC,
RTA_DST,
RTA_SRC,
RTA_IIF,
RTA_OIF,
RTA_GATEWAY,
RTA_PRIORITY,
RTA_PREFSRC,
RTA_METRICS,
RTA_MULTIPATH,
RTA_PROTOINFO,
RTA_FLOW,
RTA_CACHEINFO,
RTA_SESSION,
RTA_MP_ALGO,
RTA_TABLE,
RTA_MARK,
__RTA_MAX
};
# 303 "include/linux/rtnetlink.h"
struct rtnexthop {
unsigned short rtnh_len;
unsigned char rtnh_flags;
unsigned char rtnh_hops;
int rtnh_ifindex;
};
# 329 "include/linux/rtnetlink.h"
struct rta_cacheinfo {
__u32 rta_clntref;
__u32 rta_lastuse;
__s32 rta_expires;
__u32 rta_error;
__u32 rta_used;
__u32 rta_id;
__u32 rta_ts;
__u32 rta_tsage;
};
enum {
RTAX_UNSPEC,
RTAX_LOCK,
RTAX_MTU,
RTAX_WINDOW,
RTAX_RTT,
RTAX_RTTVAR,
RTAX_SSTHRESH,
RTAX_CWND,
RTAX_ADVMSS,
RTAX_REORDERING,
RTAX_HOPLIMIT,
RTAX_INITCWND,
RTAX_FEATURES,
RTAX_RTO_MIN,
RTAX_INITRWND,
__RTAX_MAX
};
# 385 "include/linux/rtnetlink.h"
struct rta_session {
__u8 proto;
__u8 pad1;
__u16 pad2;
union {
struct {
__u16 sport;
__u16 dport;
} ports;
struct {
__u8 type;
__u8 code;
__u16 ident;
} icmpt;
__u32 spi;
} u;
};
struct rtgenmsg {
unsigned char rtgen_family;
};
# 423 "include/linux/rtnetlink.h"
struct ifinfomsg {
unsigned char ifi_family;
unsigned char __ifi_pad;
unsigned short ifi_type;
int ifi_index;
unsigned ifi_flags;
unsigned ifi_change;
};
struct prefixmsg {
unsigned char prefix_family;
unsigned char prefix_pad1;
unsigned short prefix_pad2;
int prefix_ifindex;
unsigned char prefix_type;
unsigned char prefix_len;
unsigned char prefix_flags;
unsigned char prefix_pad3;
};
enum
{
PREFIX_UNSPEC,
PREFIX_ADDRESS,
PREFIX_CACHEINFO,
__PREFIX_MAX
};
struct prefix_cacheinfo {
__u32 preferred_time;
__u32 valid_time;
};
struct tcmsg {
unsigned char tcm_family;
unsigned char tcm__pad1;
unsigned short tcm__pad2;
int tcm_ifindex;
__u32 tcm_handle;
__u32 tcm_parent;
__u32 tcm_info;
};
enum {
TCA_UNSPEC,
TCA_KIND,
TCA_OPTIONS,
TCA_STATS,
TCA_XSTATS,
TCA_RATE,
TCA_FCNT,
TCA_STATS2,
TCA_STAB,
__TCA_MAX
};
# 499 "include/linux/rtnetlink.h"
struct nduseroptmsg {
unsigned char nduseropt_family;
unsigned char nduseropt_pad1;
unsigned short nduseropt_opts_len;
int nduseropt_ifindex;
__u8 nduseropt_icmp_type;
__u8 nduseropt_icmp_code;
unsigned short nduseropt_pad2;
unsigned int nduseropt_pad3;
};
enum {
NDUSEROPT_UNSPEC,
NDUSEROPT_SRCADDR,
__NDUSEROPT_MAX
};
# 543 "include/linux/rtnetlink.h"
enum rtnetlink_groups {
RTNLGRP_NONE,
RTNLGRP_LINK,
RTNLGRP_NOTIFY,
RTNLGRP_NEIGH,
RTNLGRP_TC,
RTNLGRP_IPV4_IFADDR,
RTNLGRP_IPV4_MROUTE,
RTNLGRP_IPV4_ROUTE,
RTNLGRP_IPV4_RULE,
RTNLGRP_IPV6_IFADDR,
RTNLGRP_IPV6_MROUTE,
RTNLGRP_IPV6_ROUTE,
RTNLGRP_IPV6_IFINFO,
RTNLGRP_DECnet_IFADDR,
RTNLGRP_NOP2,
RTNLGRP_DECnet_ROUTE,
RTNLGRP_DECnet_RULE,
RTNLGRP_NOP4,
RTNLGRP_IPV6_PREFIX,
RTNLGRP_IPV6_RULE,
RTNLGRP_ND_USEROPT,
RTNLGRP_PHONET_IFADDR,
RTNLGRP_PHONET_ROUTE,
__RTNLGRP_MAX
};
struct tcamsg {
unsigned char tca_family;
unsigned char tca__pad1;
unsigned short tca__pad2;
};
# 609 "include/linux/rtnetlink.h"
static __inline__ __attribute__((always_inline)) int rtattr_strcmp(const struct rtattr *rta, const char *str)
{
int len = strlen(str) + 1;
return len > rta->rta_len || memcmp(((void*)(((char*)(rta)) + (( ((sizeof(struct rtattr))+4 -1) & ~(4 -1) ) + (0)))), str, len);
}
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
u32 group, struct nlmsghdr *nlh, gfp_t flags);
extern void rtnl_set_sk_err(struct net *net, u32 group, int error);
extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
u32 id, u32 ts, u32 tsage, long expires,
u32 error);
extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
# 723 "include/linux/rtnetlink.h"
static inline __attribute__((always_inline)) struct rtattr *
__rta_reserve(struct sk_buff *skb, int attrtype, int attrlen)
{
struct rtattr *rta;
int size = (( ((sizeof(struct rtattr))+4 -1) & ~(4 -1) ) + (attrlen));
rta = (struct rtattr*)skb_put(skb, ( ((size)+4 -1) & ~(4 -1) ));
rta->rta_type = attrtype;
rta->rta_len = size;
memset(((void*)(((char*)(rta)) + (( ((sizeof(struct rtattr))+4 -1) & ~(4 -1) ) + (0)))) + attrlen, 0, ( ((size)+4 -1) & ~(4 -1) ) - size);
return rta;
}
extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change);
extern void rtnl_lock(void);
extern void rtnl_unlock(void);
extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
extern void rtnetlink_init(void);
extern void __rtnl_unlock(void);
# 763 "include/linux/rtnetlink.h"
static inline __attribute__((always_inline)) u32 rtm_get_table(struct rtattr **rta, u8 table)
{
return ({ if (!rta[RTA_TABLE-1] || ((int)((rta[RTA_TABLE-1])->rta_len) - (( ((sizeof(struct rtattr))+4 -1) & ~(4 -1) ) + (0))) < sizeof(u32)) goto rtattr_failure; *(u32 *) ((void*)(((char*)(rta[RTA_TABLE-1])) + (( ((sizeof(struct rtattr))+4 -1) & ~(4 -1) ) + (0)))); });
rtattr_failure:
return table;
}
# 14 "include/net/dst.h" 2
# 1 "include/net/neighbour.h" 1
# 28 "include/net/neighbour.h"
# 1 "include/net/rtnetlink.h" 1
# 1 "include/net/netlink.h" 1
# 164 "include/net/netlink.h"
enum {
NLA_UNSPEC,
NLA_U8,
NLA_U16,
NLA_U32,
NLA_U64,
NLA_STRING,
NLA_FLAG,
NLA_MSECS,
NLA_NESTED,
NLA_NESTED_COMPAT,
NLA_NUL_STRING,
NLA_BINARY,
__NLA_TYPE_MAX,
};
# 205 "include/net/netlink.h"
struct nla_policy {
u16 type;
u16 len;
};
struct nl_info {
struct nlmsghdr *nlh;
struct net *nl_net;
u32 pid;
};
extern int netlink_rcv_skb(struct sk_buff *skb,
int (*cb)(struct sk_buff *,
struct nlmsghdr *));
extern int nlmsg_notify(struct sock *sk, struct sk_buff *skb,
u32 pid, unsigned int group, int report,
gfp_t flags);
extern int nla_validate(struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy);
extern int nla_parse(struct nlattr *tb[], int maxtype,
struct nlattr *head, int len,
const struct nla_policy *policy);
extern int nla_policy_len(const struct nla_policy *, int);
extern struct nlattr * nla_find(struct nlattr *head, int len, int attrtype);
extern size_t nla_strlcpy(char *dst, const struct nlattr *nla,
size_t dstsize);
extern int nla_memcpy(void *dest, const struct nlattr *src, int count);
extern int nla_memcmp(const struct nlattr *nla, const void *data,
size_t size);
extern int nla_strcmp(const struct nlattr *nla, const char *str);
extern struct nlattr * __nla_reserve(struct sk_buff *skb, int attrtype,
int attrlen);
extern void * __nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
extern struct nlattr * nla_reserve(struct sk_buff *skb, int attrtype,
int attrlen);
extern void * nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
extern void __nla_put(struct sk_buff *skb, int attrtype,
int attrlen, const void *data);
extern void __nla_put_nohdr(struct sk_buff *skb, int attrlen,
const void *data);
extern int nla_put(struct sk_buff *skb, int attrtype,
int attrlen, const void *data);
extern int nla_put_nohdr(struct sk_buff *skb, int attrlen,
const void *data);
extern int nla_append(struct sk_buff *skb, int attrlen,
const void *data);
# 266 "include/net/netlink.h"
static inline __attribute__((always_inline)) int nlmsg_msg_size(int payload)
{
return ((int) ( ((sizeof(struct nlmsghdr))+4 -1) & ~(4 -1) )) + payload;
}
static inline __attribute__((always_inline)) int nlmsg_total_size(int payload)
{
return ( ((nlmsg_msg_size(payload))+4 -1) & ~(4 -1) );
}
static inline __attribute__((always_inline)) int nlmsg_padlen(int payload)
{
return nlmsg_total_size(payload) - nlmsg_msg_size(payload);
}
static inline __attribute__((always_inline)) void *nlmsg_data(const struct nlmsghdr *nlh)
{
return (unsigned char *) nlh + ((int) ( ((sizeof(struct nlmsghdr))+4 -1) & ~(4 -1) ));
}
static inline __attribute__((always_inline)) int nlmsg_len(const struct nlmsghdr *nlh)
{
return nlh->nlmsg_len - ((int) ( ((sizeof(struct nlmsghdr))+4 -1) & ~(4 -1) ));
}
static inline __attribute__((always_inline)) struct nlattr *nlmsg_attrdata(const struct nlmsghdr *nlh,
int hdrlen)
{
unsigned char *data = nlmsg_data(nlh);
return (struct nlattr *) (data + ( ((hdrlen)+4 -1) & ~(4 -1) ));
}
static inline __attribute__((always_inline)) int nlmsg_attrlen(const struct nlmsghdr *nlh, int hdrlen)
{
return nlmsg_len(nlh) - ( ((hdrlen)+4 -1) & ~(4 -1) );
}
static inline __attribute__((always_inline)) int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
{
return (remaining >= (int) sizeof(struct nlmsghdr) &&
nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
nlh->nlmsg_len <= remaining);
}
# 349 "include/net/netlink.h"
static inline __attribute__((always_inline)) struct nlmsghdr *nlmsg_next(struct nlmsghdr *nlh, int *remaining)
{
int totlen = ( ((nlh->nlmsg_len)+4 -1) & ~(4 -1) );
*remaining -= totlen;
return (struct nlmsghdr *) ((unsigned char *) nlh + totlen);
}
# 368 "include/net/netlink.h"
static inline __attribute__((always_inline)) int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
struct nlattr *tb[], int maxtype,
const struct nla_policy *policy)
{
if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
return -22;
return nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
nlmsg_attrlen(nlh, hdrlen), policy);
}
# 387 "include/net/netlink.h"
static inline __attribute__((always_inline)) struct nlattr *nlmsg_find_attr(struct nlmsghdr *nlh,
int hdrlen, int attrtype)
{
return nla_find(nlmsg_attrdata(nlh, hdrlen),
nlmsg_attrlen(nlh, hdrlen), attrtype);
}
# 401 "include/net/netlink.h"
static inline __attribute__((always_inline)) int nlmsg_validate(struct nlmsghdr *nlh, int hdrlen, int maxtype,
const struct nla_policy *policy)
{
if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
return -22;
return nla_validate(nlmsg_attrdata(nlh, hdrlen),
nlmsg_attrlen(nlh, hdrlen), maxtype, policy);
}
static inline __attribute__((always_inline)) int nlmsg_report(const struct nlmsghdr *nlh)
{
return !!(nlh->nlmsg_flags & 8);
}
# 480 "include/net/netlink.h"
static inline __attribute__((always_inline)) struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
int type, int payload, int flags)
{
if (__builtin_expect(!!(skb_tailroom(skb) < nlmsg_total_size(payload)), 0))
return ((void *)0);
return __nlmsg_put(skb, pid, seq, type, payload, flags);
}
# 500 "include/net/netlink.h"
static inline __attribute__((always_inline)) struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb,
struct netlink_callback *cb,
int type, int payload,
int flags)
{
return nlmsg_put(skb, (*(struct netlink_skb_parms*)&((cb->skb)->cb)).pid, cb->nlh->nlmsg_seq,
type, payload, flags);
}
# 517 "include/net/netlink.h"
static inline __attribute__((always_inline)) struct sk_buff *nlmsg_new(size_t payload, gfp_t flags)
{
return alloc_skb(nlmsg_total_size(payload), flags);
}
# 533 "include/net/netlink.h"
static inline __attribute__((always_inline)) int nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
{
nlh->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)nlh;
return skb->len;
}
static inline __attribute__((always_inline)) void *nlmsg_get_pos(struct sk_buff *skb)
{
return skb_tail_pointer(skb);
}
# 558 "include/net/netlink.h"
static inline __attribute__((always_inline)) void nlmsg_trim(struct sk_buff *skb, const void *mark)
{
if (mark)
skb_trim(skb, (unsigned char *) mark - skb->data);
}
# 572 "include/net/netlink.h"
static inline __attribute__((always_inline)) void nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh)
{
nlmsg_trim(skb, nlh);
}
static inline __attribute__((always_inline)) void nlmsg_free(struct sk_buff *skb)
{
kfree_skb(skb);
}
# 594 "include/net/netlink.h"
static inline __attribute__((always_inline)) int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
u32 pid, unsigned int group, gfp_t flags)
{
int err;
(*(struct netlink_skb_parms*)&((skb)->cb)).dst_group = group;
err = netlink_broadcast(sk, skb, pid, group, flags);
if (err > 0)
err = 0;
return err;
}
static inline __attribute__((always_inline)) int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 pid)
{
int err;
err = netlink_unicast(sk, skb, pid, 0x40);
if (err > 0)
err = 0;
return err;
}
# 645 "include/net/netlink.h"
static inline __attribute__((always_inline)) int nla_attr_size(int payload)
{
return ((int) (((sizeof(struct nlattr)) + 4 - 1) & ~(4 - 1))) + payload;
}
static inline __attribute__((always_inline)) int nla_total_size(int payload)
{
return (((nla_attr_size(payload)) + 4 - 1) & ~(4 - 1));
}
static inline __attribute__((always_inline)) int nla_padlen(int payload)
{
return nla_total_size(payload) - nla_attr_size(payload);
}
static inline __attribute__((always_inline)) int nla_type(const struct nlattr *nla)
{
return nla->nla_type & ~((1 << 15) | (1 << 14));
}
static inline __attribute__((always_inline)) void *nla_data(const struct nlattr *nla)
{
return (char *) nla + ((int) (((sizeof(struct nlattr)) + 4 - 1) & ~(4 - 1)));
}
static inline __attribute__((always_inline)) int nla_len(const struct nlattr *nla)
{
return nla->nla_len - ((int) (((sizeof(struct nlattr)) + 4 - 1) & ~(4 - 1)));
}
static inline __attribute__((always_inline)) int nla_ok(const struct nlattr *nla, int remaining)
{
return remaining >= (int) sizeof(*nla) &&
nla->nla_len >= sizeof(*nla) &&
nla->nla_len <= remaining;
}
# 715 "include/net/netlink.h"
static inline __attribute__((always_inline)) struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
{
int totlen = (((nla->nla_len) + 4 - 1) & ~(4 - 1));
*remaining -= totlen;
return (struct nlattr *) ((char *) nla + totlen);
}
# 730 "include/net/netlink.h"
static inline __attribute__((always_inline)) struct nlattr *nla_find_nested(struct nlattr *nla, int attrtype)
{
return nla_find(nla_data(nla), nla_len(nla), attrtype);
}
# 744 "include/net/netlink.h"
static inline __attribute__((always_inline)) int nla_parse_nested(struct nlattr *tb[], int maxtype,
const struct nlattr *nla,
const struct nla_policy *policy)
{
return nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy);
}
static inline __attribute__((always_inline)) int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
{
return nla_put(skb, attrtype, sizeof(u8), &value);
}
static inline __attribute__((always_inline)) int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
{
return nla_put(skb, attrtype, sizeof(u16), &value);
}
static inline __attribute__((always_inline)) int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
{
return nla_put(skb, attrtype, sizeof(u32), &value);
}
static inline __attribute__((always_inline)) int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
{
return nla_put(skb, attrtype, sizeof(u64), &value);
}
static inline __attribute__((always_inline)) int nla_put_string(struct sk_buff *skb, int attrtype,
const char *str)
{
return nla_put(skb, attrtype, strlen(str) + 1, str);
}
static inline __attribute__((always_inline)) int nla_put_flag(struct sk_buff *skb, int attrtype)
{
return nla_put(skb, attrtype, 0, ((void *)0));
}
static inline __attribute__((always_inline)) int nla_put_msecs(struct sk_buff *skb, int attrtype,
unsigned long jiffies)
{
u64 tmp = jiffies_to_msecs(jiffies);
return nla_put(skb, attrtype, sizeof(u64), &tmp);
}
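/*
 * Typical construction of a netlink message with these helpers (illustrative
 * sketch only; MY_MSG_TYPE, MY_ATTR, pid, seq and value are hypothetical):
 *
 *   struct nlmsghdr *nlh = nlmsg_put(skb, pid, seq, MY_MSG_TYPE, 0, 0);
 *   if (!nlh)
 *           return -EMSGSIZE;
 *   if (nla_put_u32(skb, MY_ATTR, value) < 0)
 *           goto nla_put_failure;
 *   return nlmsg_end(skb, nlh);
 */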
# 879 "include/net/netlink.h"
static inline __attribute__((always_inline)) u32 nla_get_u32(const struct nlattr *nla)
{
return *(u32 *) nla_data(nla);
}
static inline __attribute__((always_inline)) __be32 nla_get_be32(const struct nlattr *nla)
{
return *(__be32 *) nla_data(nla);
}
static inline __attribute__((always_inline)) u16 nla_get_u16(const struct nlattr *nla)
{
return *(u16 *) nla_data(nla);
}
static inline __attribute__((always_inline)) __be16 nla_get_be16(const struct nlattr *nla)
{
return *(__be16 *) nla_data(nla);
}
static inline __attribute__((always_inline)) __le16 nla_get_le16(const struct nlattr *nla)
{
return *(__le16 *) nla_data(nla);
}
static inline __attribute__((always_inline)) u8 nla_get_u8(const struct nlattr *nla)
{
return *(u8 *) nla_data(nla);
}
static inline __attribute__((always_inline)) u64 nla_get_u64(const struct nlattr *nla)
{
u64 tmp;
nla_memcpy(&tmp, nla, sizeof(tmp));
return tmp;
}
static inline __attribute__((always_inline)) __be64 nla_get_be64(const struct nlattr *nla)
{
__be64 tmp;
nla_memcpy(&tmp, nla, sizeof(tmp));
return tmp;
}
static inline __attribute__((always_inline)) int nla_get_flag(const struct nlattr *nla)
{
return !!nla;
}
static inline __attribute__((always_inline)) unsigned long nla_get_msecs(const struct nlattr *nla)
{
u64 msecs = nla_get_u64(nla);
return msecs_to_jiffies((unsigned long) msecs);
}
# 984 "include/net/netlink.h"
static inline __attribute__((always_inline)) struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
{
struct nlattr *start = (struct nlattr *)skb_tail_pointer(skb);
if (nla_put(skb, attrtype, 0, ((void *)0)) < 0)
return ((void *)0);
return start;
}
# 1004 "include/net/netlink.h"
static inline __attribute__((always_inline)) int nla_nest_end(struct sk_buff *skb, struct nlattr *start)
{
start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start;
return skb->len;
}
# 1018 "include/net/netlink.h"
static inline __attribute__((always_inline)) void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
{
nlmsg_trim(skb, start);
}
# 1035 "include/net/netlink.h"
static inline __attribute__((always_inline)) int nla_validate_nested(struct nlattr *start, int maxtype,
const struct nla_policy *policy)
{
return nla_validate(nla_data(start), nla_len(start), maxtype, policy);
}
# 6 "include/net/rtnetlink.h" 2
typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, void *);
typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
extern int __rtnl_register(int protocol, int msgtype,
rtnl_doit_func, rtnl_dumpit_func);
extern void rtnl_register(int protocol, int msgtype,
rtnl_doit_func, rtnl_dumpit_func);
extern int rtnl_unregister(int protocol, int msgtype);
extern void rtnl_unregister_all(int protocol);
static inline __attribute__((always_inline)) int rtnl_msg_family(const struct nlmsghdr *nlh)
{
if (nlmsg_len(nlh) >= sizeof(struct rtgenmsg))
return ((struct rtgenmsg *) nlmsg_data(nlh))->rtgen_family;
else
return 0;
}
# 45 "include/net/rtnetlink.h"
struct rtnl_link_ops {
struct list_head list;
const char *kind;
size_t priv_size;
void (*setup)(struct net_device *dev);
int maxtype;
const struct nla_policy *policy;
int (*validate)(struct nlattr *tb[],
struct nlattr *data[]);
int (*newlink)(struct net *src_net,
struct net_device *dev,
struct nlattr *tb[],
struct nlattr *data[]);
int (*changelink)(struct net_device *dev,
struct nlattr *tb[],
struct nlattr *data[]);
void (*dellink)(struct net_device *dev,
struct list_head *head);
size_t (*get_size)(const struct net_device *dev);
int (*fill_info)(struct sk_buff *skb,
const struct net_device *dev);
size_t (*get_xstats_size)(const struct net_device *dev);
int (*fill_xstats)(struct sk_buff *skb,
const struct net_device *dev);
int (*get_tx_queues)(struct net *net, struct nlattr *tb[],
unsigned int *tx_queues,
unsigned int *real_tx_queues);
};
extern int __rtnl_link_register(struct rtnl_link_ops *ops);
extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
extern void rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops);
extern int rtnl_link_register(struct rtnl_link_ops *ops);
extern void rtnl_link_unregister(struct rtnl_link_ops *ops);
extern struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
extern struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]);
extern int rtnl_configure_link(struct net_device *dev,
const struct ifinfomsg *ifm);
extern const struct nla_policy ifla_policy[(__IFLA_MAX - 1)+1];
# 29 "include/net/neighbour.h" 2
# 38 "include/net/neighbour.h"
struct neighbour;
struct neigh_parms {
struct net_device *dev;
struct neigh_parms *next;
int (*neigh_setup)(struct neighbour *);
void (*neigh_cleanup)(struct neighbour *);
struct neigh_table *tbl;
void *sysctl_table;
int dead;
atomic_t refcnt;
struct rcu_head rcu_head;
int base_reachable_time;
int retrans_time;
int gc_staletime;
int reachable_time;
int delay_probe_time;
int queue_len;
int ucast_probes;
int app_probes;
int mcast_probes;
int anycast_delay;
int proxy_delay;
int proxy_qlen;
int locktime;
char dlf_route[64];
};
struct neigh_statistics {
unsigned long allocs;
unsigned long destroys;
unsigned long hash_grows;
unsigned long res_failed;
unsigned long lookups;
unsigned long hits;
unsigned long rcv_probes_mcast;
unsigned long rcv_probes_ucast;
unsigned long periodic_gc_runs;
unsigned long forced_gc_runs;
unsigned long unres_discards;
};
struct neighbour {
struct neighbour *next;
struct neigh_table *tbl;
struct neigh_parms *parms;
struct net_device *dev;
unsigned long used;
unsigned long confirmed;
unsigned long updated;
__u8 flags;
__u8 nud_state;
__u8 type;
__u8 dead;
atomic_t probes;
rwlock_t lock;
unsigned char ha[((((32)) + ((typeof((32)))((sizeof(unsigned long))) - 1)) & ~((typeof((32)))((sizeof(unsigned long))) - 1))];
struct hh_cache *hh;
atomic_t refcnt;
int (*output)(struct sk_buff *skb);
struct sk_buff_head arp_queue;
struct timer_list timer;
const struct neigh_ops *ops;
u8 primary_key[0];
};
struct neigh_ops {
int family;
void (*solicit)(struct neighbour *, struct sk_buff*);
void (*error_report)(struct neighbour *, struct sk_buff*);
int (*output)(struct sk_buff*);
int (*connected_output)(struct sk_buff*);
int (*hh_output)(struct sk_buff*);
int (*queue_xmit)(struct sk_buff*);
};
struct pneigh_entry {
struct pneigh_entry *next;
struct net_device *dev;
u8 flags;
u8 key[0];
};
struct neigh_table {
struct neigh_table *next;
int family;
int entry_size;
int key_len;
__u32 (*hash)(const void *pkey, const struct net_device *);
int (*constructor)(struct neighbour *);
int (*pconstructor)(struct pneigh_entry *);
void (*pdestructor)(struct pneigh_entry *);
void (*proxy_redo)(struct sk_buff *skb);
char *id;
struct neigh_parms parms;
int gc_interval;
int gc_thresh1;
int gc_thresh2;
int gc_thresh3;
unsigned long last_flush;
struct delayed_work gc_work;
struct timer_list proxy_timer;
struct sk_buff_head proxy_queue;
atomic_t entries;
rwlock_t lock;
unsigned long last_rand;
struct kmem_cache *kmem_cachep;
struct neigh_statistics *stats;
struct neighbour **hash_buckets;
unsigned int hash_mask;
__u32 hash_rnd;
struct pneigh_entry **phash_buckets;
};
# 189 "include/net/neighbour.h"
extern void neigh_table_init(struct neigh_table *tbl);
extern void neigh_table_init_no_netlink(struct neigh_table *tbl);
extern int neigh_table_clear(struct neigh_table *tbl);
extern struct neighbour * neigh_lookup(struct neigh_table *tbl,
const void *pkey,
struct net_device *dev);
extern struct neighbour * neigh_lookup_nodev(struct neigh_table *tbl,
struct net *net,
const void *pkey);
extern struct neighbour * neigh_create(struct neigh_table *tbl,
const void *pkey,
struct net_device *dev);
extern void neigh_destroy(struct neighbour *neigh);
extern int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
extern int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
u32 flags);
extern void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
extern int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
extern int neigh_resolve_output(struct sk_buff *skb);
extern int neigh_connected_output(struct sk_buff *skb);
extern int neigh_compat_output(struct sk_buff *skb);
extern struct neighbour *neigh_event_ns(struct neigh_table *tbl,
u8 *lladdr, void *saddr,
struct net_device *dev);
extern struct neigh_parms *neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl);
extern void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);
static inline __attribute__((always_inline))
struct net *neigh_parms_net(const struct neigh_parms *parms)
{
return (&init_net);
}
extern unsigned long neigh_rand_reach_time(unsigned long base);
extern void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
struct sk_buff *skb);
extern struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev, int creat);
extern struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
struct net *net,
const void *key,
struct net_device *dev);
extern int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev);
static inline __attribute__((always_inline))
struct net *pneigh_net(const struct pneigh_entry *pneigh)
{
return (&init_net);
}
extern void neigh_app_ns(struct neighbour *n);
extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie);
extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *));
extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *));
struct neigh_seq_state {
struct seq_net_private p;
struct neigh_table *tbl;
void *(*neigh_sub_iter)(struct neigh_seq_state *state,
struct neighbour *n, loff_t *pos);
unsigned int bucket;
unsigned int flags;
};
extern void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *, unsigned int);
extern void *neigh_seq_next(struct seq_file *, void *, loff_t *);
extern void neigh_seq_stop(struct seq_file *, void *);
extern int neigh_sysctl_register(struct net_device *dev,
struct neigh_parms *p,
char *p_name,
proc_handler *proc_handler);
extern void neigh_sysctl_unregister(struct neigh_parms *p);
static inline __attribute__((always_inline)) void __neigh_parms_put(struct neigh_parms *parms)
{
atomic_sub(1, (&parms->refcnt));
}
static inline __attribute__((always_inline)) struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
{
atomic_add(1, (&parms->refcnt));
return parms;
}
static inline __attribute__((always_inline)) void neigh_release(struct neighbour *neigh)
{
if ((atomic_sub_return(1, (&neigh->refcnt)) == 0))
neigh_destroy(neigh);
}
static inline __attribute__((always_inline)) struct neighbour * neigh_clone(struct neighbour *neigh)
{
if (neigh)
atomic_add(1, (&neigh->refcnt));
return neigh;
}
static inline __attribute__((always_inline)) void neigh_confirm(struct neighbour *neigh)
{
if (neigh)
neigh->confirmed = jiffies;
}
static inline __attribute__((always_inline)) int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
neigh->used = jiffies;
if (!(neigh->nud_state&((0x80|0x40|0x02)|0x08|0x10)))
return __neigh_event_send(neigh, skb);
return 0;
}
# 324 "include/net/neighbour.h"
static inline __attribute__((always_inline)) int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
{
unsigned seq;
int hh_len;
do {
int hh_alen;
seq = read_seqbegin(&hh->hh_lock);
hh_len = hh->hh_len;
hh_alen = (((hh_len)+(16 -1))&~(16 - 1));
memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
} while (read_seqretry(&hh->hh_lock, seq));
skb_push(skb, hh_len);
return hh->hh_output(skb);
}
static inline __attribute__((always_inline)) struct neighbour *
__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat)
{
struct neighbour *n = neigh_lookup(tbl, pkey, dev);
if (n || !creat)
return n;
n = neigh_create(tbl, pkey, dev);
return IS_ERR(n) ? ((void *)0) : n;
}
static inline __attribute__((always_inline)) struct neighbour *
__neigh_lookup_errno(struct neigh_table *tbl, const void *pkey,
struct net_device *dev)
{
struct neighbour *n = neigh_lookup(tbl, pkey, dev);
if (n)
return n;
return neigh_create(tbl, pkey, dev);
}
struct neighbour_cb {
unsigned long sched_next;
unsigned int flags;
};
# 17 "include/net/dst.h" 2
# 37 "include/net/dst.h"
struct sk_buff;
struct dst_entry {
struct rcu_head rcu_head;
struct dst_entry *child;
struct net_device *dev;
short error;
short obsolete;
int flags;
unsigned long expires;
unsigned short header_len;
unsigned short trailer_len;
unsigned int rate_tokens;
unsigned long rate_last;
struct dst_entry *path;
struct neighbour *neighbour;
struct hh_cache *hh;
struct xfrm_state *xfrm;
int (*input)(struct sk_buff*);
int (*output)(struct sk_buff*);
struct dst_ops *ops;
u32 metrics[(__RTAX_MAX - 1)];
__u32 __pad2;
# 92 "include/net/dst.h"
atomic_t __refcnt;
int __use;
unsigned long lastuse;
union {
struct dst_entry *next;
struct rtable *rt_next;
struct rt6_info *rt6_next;
struct dn_route *dn_next;
};
};
static inline __attribute__((always_inline)) u32
dst_metric(const struct dst_entry *dst, int metric)
{
return dst->metrics[metric-1];
}
static inline __attribute__((always_inline)) u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
return dst_metric(dst, RTAX_FEATURES) & feature;
}
static inline __attribute__((always_inline)) u32 dst_mtu(const struct dst_entry *dst)
{
u32 mtu = dst_metric(dst, RTAX_MTU);
__asm__ __volatile__("": : :"memory");
return mtu;
}
static inline __attribute__((always_inline)) unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
return msecs_to_jiffies(dst_metric(dst, metric));
}
static inline __attribute__((always_inline)) void set_dst_metric_rtt(struct dst_entry *dst, int metric,
unsigned long rtt)
{
dst->metrics[metric-1] = jiffies_to_msecs(rtt);
}
static inline __attribute__((always_inline)) u32
dst_allfrag(const struct dst_entry *dst)
{
int ret = dst_feature(dst, 0x00000008);
__asm__ __volatile__("": : :"memory");
return ret;
}
static inline __attribute__((always_inline)) int
dst_metric_locked(struct dst_entry *dst, int metric)
{
return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}
static inline __attribute__((always_inline)) void dst_hold(struct dst_entry * dst)
{
((void)(sizeof(struct { int:-!!(__builtin_offsetof(struct dst_entry,__refcnt) & 63); })));
atomic_add(1, (&dst->__refcnt));
}
static inline __attribute__((always_inline)) void dst_use(struct dst_entry *dst, unsigned long time)
{
dst_hold(dst);
dst->__use++;
dst->lastuse = time;
}
static inline __attribute__((always_inline)) void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
dst->__use++;
dst->lastuse = time;
}
static inline __attribute__((always_inline))
struct dst_entry * dst_clone(struct dst_entry * dst)
{
if (dst)
atomic_add(1, (&dst->__refcnt));
return dst;
}
extern void dst_release(struct dst_entry *dst);
static inline __attribute__((always_inline)) void refdst_drop(unsigned long refdst)
{
if (!(refdst & 1UL))
dst_release((struct dst_entry *)(refdst & ~(1UL)));
}
static inline __attribute__((always_inline)) void skb_dst_drop(struct sk_buff *skb)
{
if (skb->_skb_refdst) {
refdst_drop(skb->_skb_refdst);
skb->_skb_refdst = 0UL;
}
}
static inline __attribute__((always_inline)) void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
nskb->_skb_refdst = oskb->_skb_refdst;
if (!(nskb->_skb_refdst & 1UL))
dst_clone(skb_dst(nskb));
}
static inline __attribute__((always_inline)) void skb_dst_force(struct sk_buff *skb)
{
if (skb_dst_is_noref(skb)) {
({ int __ret_warn_on = !!(!rcu_read_lock_held()); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_slowpath_null("include/net/dst.h", 223); __builtin_expect(!!(__ret_warn_on), 0); });
skb->_skb_refdst &= ~1UL;
dst_clone(skb_dst(skb));
}
}
# 238 "include/net/dst.h"
static inline __attribute__((always_inline)) void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
skb->dev = dev;
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
skb->rxhash = 0;
skb_set_queue_mapping(skb, 0);
skb_dst_drop(skb);
nf_reset(skb);
}
static inline __attribute__((always_inline)) struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
struct dst_entry *child = skb_dst(skb)->child;
skb_dst_drop(skb);
return child;
}
extern int dst_discard(struct sk_buff *skb);
extern void * dst_alloc(struct dst_ops * ops);
extern void __dst_free(struct dst_entry * dst);
extern struct dst_entry *dst_destroy(struct dst_entry * dst);
static inline __attribute__((always_inline)) void dst_free(struct dst_entry * dst)
{
if (dst->obsolete > 1)
return;
if (!(*(volatile int *)&(&dst->__refcnt)->counter)) {
dst = dst_destroy(dst);
if (!dst)
return;
}
__dst_free(dst);
}
static inline __attribute__((always_inline)) void dst_rcu_free(struct rcu_head *head)
{
struct dst_entry *dst = ({ const typeof( ((struct dst_entry *)0)->rcu_head ) *__mptr = (head); (struct dst_entry *)( (char *)__mptr - __builtin_offsetof(struct dst_entry,rcu_head) );});
dst_free(dst);
}
static inline __attribute__((always_inline)) void dst_confirm(struct dst_entry *dst)
{
if (dst)
neigh_confirm(dst->neighbour);
}
static inline __attribute__((always_inline)) void dst_link_failure(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
if (dst && dst->ops && dst->ops->link_failure)
dst->ops->link_failure(skb);
}
static inline __attribute__((always_inline)) void dst_set_expires(struct dst_entry *dst, int timeout)
{
unsigned long expires = jiffies + timeout;
if (expires == 0)
expires = 1;
if (dst->expires == 0 || (({ unsigned long __dummy; typeof(dst->expires) __dummy2; (void)(&__dummy == &__dummy2); 1; }) && ({ unsigned long __dummy; typeof(expires) __dummy2; (void)(&__dummy == &__dummy2); 1; }) && ((long)(expires) - (long)(dst->expires) < 0)))
dst->expires = expires;
}
static inline __attribute__((always_inline)) int dst_output(struct sk_buff *skb)
{
return skb_dst(skb)->output(skb);
}
static inline __attribute__((always_inline)) int dst_input(struct sk_buff *skb)
{
return skb_dst(skb)->input(skb);
}
static inline __attribute__((always_inline)) struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
if (dst->obsolete)
dst = dst->ops->check(dst, cookie);
return dst;
}
extern void dst_init(void);
enum {
XFRM_LOOKUP_WAIT = 1 << 0,
XFRM_LOOKUP_ICMP = 1 << 1,
};
struct flowi;
# 349 "include/net/dst.h"
extern int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
struct flowi *fl, struct sock *sk, int flags);
extern int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
struct flowi *fl, struct sock *sk, int flags);
# 62 "include/net/sock.h" 2
# 1 "include/linux/foe_hook.h" 1
# 52 "include/linux/foe_hook.h"
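/*
 * The definitions from include/linux/foe_hook.h below are not part of
 * the stock 2.6.36 kernel; they appear to belong to the vendor
 * (Ralink/MediaTek-style) hardware NAT / flow offload support in this
 * tclinux tree.  port_info presumably carries the egress port, queue
 * and tagging attributes that the Ethernet/xPON drivers hand to the
 * offload hooks; the exact meaning of the bitfields is an inference
 * from their names, not documented here.
 */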
struct port_info {
unsigned long int txq:4;
unsigned long int channel:8;
unsigned long int tsid:8;
unsigned long int atm_pppoa:1;
unsigned long int atm_ipoa:1;
unsigned long int atm_vc_mux:1;
unsigned long int eth_macSTagEn:1;
unsigned long int eth_is_wan:1;
unsigned long int ds_to_qdma:1;
unsigned long int ds_need_offload:1;
unsigned long int txq_is_valid:1;
unsigned long int resv0:4;
unsigned long int stag:16;
unsigned long int magic:16;
};
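/*
 * FETxMsg_T presumably overlays the two 32-bit TX descriptor message
 * words passed to the frame engine: "raw" and "raw1" look like two
 * alternative bit layouts of the same pair of words (word[2]).  This is
 * an assumption based on the field names.
 */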
typedef union {
struct {
unsigned int resv : 1 ;
unsigned int tsid : 5 ;
unsigned int tse : 1 ;
unsigned int dei : 1 ;
unsigned int gem : 12 ;
unsigned int oam : 1 ;
unsigned int channel : 8 ;
unsigned int queue : 3 ;
# 92 "include/linux/foe_hook.h"
unsigned int ico : 1 ;
unsigned int uco : 1 ;
unsigned int tco : 1 ;
unsigned int tso : 1 ;
unsigned int pmap : 6 ;
unsigned int fport : 3 ;
unsigned int insv : 1 ;
unsigned int tpid : 2 ;
unsigned int vid : 16 ;
# 112 "include/linux/foe_hook.h"
} raw ;
struct {
unsigned int mtr : 1 ;
unsigned int fport_ppe : 3 ;
unsigned int gem : 16 ;
unsigned int oam : 1 ;
unsigned int channel_ppe : 5 ;
unsigned int channel :3 ;
unsigned int queue : 3 ;
# 134 "include/linux/foe_hook.h"
unsigned int ico : 1 ;
unsigned int uco : 1 ;
unsigned int tco : 1 ;
unsigned int tso : 1 ;
unsigned int mtr_index : 6 ;
unsigned int fport : 3 ;
unsigned int insv : 1 ;
unsigned int tpid : 2 ;
unsigned int vid : 16 ;
# 154 "include/linux/foe_hook.h"
} raw1 ;
unsigned int word[2] ;
}FETxMsg_T ;
# 183 "include/linux/foe_hook.h"
struct SkbFoeInfo{
unsigned short ppe_magic;
unsigned short ppe_foe_entry;
unsigned char ppe_ai;
};
struct sk_buff;
struct net_device;
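/*
 * The ra_sw_nat_* symbols below are function-pointer hooks that remain
 * NULL until the hardware-NAT module registers its handlers; callers in
 * the stack are expected to test each pointer before invoking it.
 * Their precise semantics are vendor specific.
 */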
extern int (*ra_sw_nat_hook_rx) (struct sk_buff * skb);
extern int (*ra_sw_nat_ds_offload) (struct sk_buff * skb, int *dp);
extern int (*ra_sw_nat_hook_update_dp)(int index, int dp);
extern int (*ra_sw_nat_hook_update_vlan)(int index,int outer_vlan,int inner_vlan);
extern int (*ra_sw_nat_hook_save_rxinfo)(struct sk_buff *skb);
extern int (*ra_sw_nat_hook_restore_rxinfo)(struct sk_buff *skb);
extern int (*ra_sw_nat_hook_save_txinfo)(struct sk_buff *skb);
extern int (*ra_sw_nat_hook_restore_txinfo)(struct sk_buff *skb);
extern int (*ra_sw_nat_hook_is_hwnat_pkt)(struct sk_buff *skb);
extern int (*ra_sw_nat_hook_sendto_ppe)(struct sk_buff *skb);
extern int (*ra_sw_nat_hook_set_l2tp_dev)(struct net_device *dev);
extern struct net_device* (*ra_sw_nat_hook_read_l2tp_dev)(void);
extern int (*ra_sw_nat_rtsp_offload_restore) (struct sk_buff * skb);
extern int (*ra_sw_nat_rtsp_data_handle) (struct sk_buff * skb, char *rb_ptr, unsigned int datalen);
extern int (*ra_sw_nat_hook_tx) (struct sk_buff * skb, struct port_info * pinfo, int magic);
extern int (*ra_sw_nat_hook_free) (struct sk_buff * skb);
extern int (*ra_sw_nat_hook_rxinfo) (struct sk_buff * skb, int magic, char *data, int data_length);
extern int (*ra_sw_nat_hook_txq) (struct sk_buff * skb, int txq);
extern int (*ra_sw_nat_hook_magic) (struct sk_buff * skb, int magic);
extern int (*ra_sw_nat_hook_set_magic) (struct sk_buff * skb, int magic);
extern int (*ra_sw_nat_hook_xfer) (struct sk_buff *skb, const struct sk_buff *prev_p);
extern int (*ra_sw_nat_hook_foeentry) (void * inputvalue,int operation);
extern int (*ra_sw_nat_hook_is_alive_pkt)(unsigned int crsn);
extern int (*MT7530LanPortMap2Switch_hook)(int port);
extern int (*ra_sw_nat_hook_drop_packet) (struct sk_buff * skb);
extern int (*ra_sw_nat_hook_clean_table) (void);
extern void (*restore_offload_info_hook)(struct sk_buff *skb, struct port_info *pinfo, int magic);
extern int (*ra_sw_nat_hook_cpu_meter)(struct sk_buff* skb,FETxMsg_T* txMsg,struct port_info* pinfo,unsigned char dir,unsigned short mtrIndex,unsigned int rateScale);
# 64 "include/net/sock.h" 2
# 88 "include/net/sock.h"
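/*
 * socket_lock_t implements the two-level socket lock: "slock" is the
 * bottom-half spinlock, "owned" is set while process context holds the
 * socket via lock_sock(), and "wq" is where other lockers sleep.
 * Packets that arrive while "owned" is set are queued on sk_backlog and
 * replayed when release_sock() runs.
 */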
typedef struct {
spinlock_t slock;
int owned;
wait_queue_head_t wq;
# 101 "include/net/sock.h"
} socket_lock_t;
struct sock;
struct proto;
struct net;
# 127 "include/net/sock.h"
struct sock_common {
union {
struct hlist_node skc_node;
struct hlist_nulls_node skc_nulls_node;
};
atomic_t skc_refcnt;
int skc_tx_queue_mapping;
union {
unsigned int skc_hash;
__u16 skc_u16hashes[2];
};
unsigned short skc_family;
volatile unsigned char skc_state;
unsigned char skc_reuse;
int skc_bound_dev_if;
union {
struct hlist_node skc_bind_node;
struct hlist_nulls_node skc_portaddr_node;
};
struct proto *skc_prot;
};
# 225 "include/net/sock.h"
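/*
 * struct sock is the network-layer representation of a socket.
 * Compared with the stock 2.6.36 layout, this tree adds fields that
 * appear to be vendor extensions: tcp_dbg, sk_foe_info (hardware-NAT
 * flow bookkeeping) and lVlanId/lPbit (per-socket VLAN id and
 * priority); their exact use is an assumption based on the names.
 */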
struct sock {
struct sock_common __sk_common;
# 245 "include/net/sock.h"
;
unsigned int sk_shutdown : 2,
sk_no_check : 2,
sk_userlocks : 4,
sk_protocol : 8,
sk_type : 16;
;
int sk_rcvbuf;
socket_lock_t sk_lock;
struct {
struct sk_buff *head;
struct sk_buff *tail;
int len;
} sk_backlog;
struct socket_wq *sk_wq;
struct dst_entry *sk_dst_cache;
struct xfrm_policy *sk_policy[2];
spinlock_t sk_dst_lock;
atomic_t sk_rmem_alloc;
atomic_t sk_wmem_alloc;
atomic_t sk_omem_alloc;
int sk_sndbuf;
struct sk_buff_head sk_receive_queue;
struct sk_buff_head sk_write_queue;
int sk_wmem_queued;
int sk_forward_alloc;
gfp_t sk_allocation;
int sk_route_caps;
int sk_route_nocaps;
int sk_gso_type;
unsigned int sk_gso_max_size;
int sk_rcvlowat;
int tcp_dbg;
unsigned long sk_flags;
unsigned long sk_lingertime;
struct sk_buff_head sk_error_queue;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err,
sk_err_soft;
atomic_t sk_drops;
unsigned short sk_ack_backlog;
unsigned short sk_max_ack_backlog;
__u32 sk_priority;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
long sk_rcvtimeo;
long sk_sndtimeo;
struct sk_filter *sk_filter;
void *sk_protinfo;
struct timer_list sk_timer;
ktime_t sk_stamp;
struct socket *sk_socket;
void *sk_user_data;
struct page *sk_sndmsg_page;
struct sk_buff *sk_send_head;
__u32 sk_sndmsg_off;
int sk_write_pending;
__u32 sk_mark;
struct SkbFoeInfo sk_foe_info;
__u16 lVlanId;
__u16 lPbit;
u32 sk_classid;
void (*sk_state_change)(struct sock *sk);
void (*sk_data_ready)(struct sock *sk, int bytes);
void (*sk_write_space)(struct sock *sk);
void (*sk_error_report)(struct sock *sk);
int (*sk_backlog_rcv)(struct sock *sk,
struct sk_buff *skb);
void (*sk_destruct)(struct sock *sk);
};
static inline __attribute__((always_inline)) struct sock *sk_entry(const struct hlist_node *node)
{
return ({ const typeof( ((struct sock *)0)->__sk_common.skc_node ) *__mptr = (node); (struct sock *)( (char *)__mptr - __builtin_offsetof(struct sock,__sk_common.skc_node) );});
}
static inline __attribute__((always_inline)) struct sock *__sk_head(const struct hlist_head *head)
{
return ({ const typeof( ((struct sock *)0)->__sk_common.skc_node ) *__mptr = (head->first); (struct sock *)( (char *)__mptr - __builtin_offsetof(struct sock,__sk_common.skc_node) );});
}
static inline __attribute__((always_inline)) struct sock *sk_head(const struct hlist_head *head)
{
return hlist_empty(head) ? ((void *)0) : __sk_head(head);
}
static inline __attribute__((always_inline)) struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
return ({ const typeof( ((struct sock *)0)->__sk_common.skc_nulls_node ) *__mptr = (head->first); (struct sock *)( (char *)__mptr - __builtin_offsetof(struct sock,__sk_common.skc_nulls_node) );});
}
static inline __attribute__((always_inline)) struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
return hlist_nulls_empty(head) ? ((void *)0) : __sk_nulls_head(head);
}
static inline __attribute__((always_inline)) struct sock *sk_next(const struct sock *sk)
{
return sk->__sk_common.skc_node.next ?
({ const typeof( ((struct sock *)0)->__sk_common.skc_node ) *__mptr = (sk->__sk_common.skc_node.next); (struct sock *)( (char *)__mptr - __builtin_offsetof(struct sock,__sk_common.skc_node) );}) : ((void *)0);
}
static inline __attribute__((always_inline)) struct sock *sk_nulls_next(const struct sock *sk)
{
return (!is_a_nulls(sk->__sk_common.skc_nulls_node.next)) ?
({ const typeof( ((struct sock *)0)->__sk_common.skc_nulls_node ) *__mptr = (sk->__sk_common.skc_nulls_node.next); (struct sock *)( (char *)__mptr - __builtin_offsetof(struct sock,__sk_common.skc_nulls_node) );})
:
((void *)0);
}
static inline __attribute__((always_inline)) int sk_unhashed(const struct sock *sk)
{
return hlist_unhashed(&sk->__sk_common.skc_node);
}
static inline __attribute__((always_inline)) int sk_hashed(const struct sock *sk)
{
return !sk_unhashed(sk);
}
static __inline__ __attribute__((always_inline)) void sk_node_init(struct hlist_node *node)
{
node->pprev = ((void *)0);
}
static __inline__ __attribute__((always_inline)) void sk_nulls_node_init(struct hlist_nulls_node *node)
{
node->pprev = ((void *)0);
}
static __inline__ __attribute__((always_inline)) void __sk_del_node(struct sock *sk)
{
__hlist_del(&sk->__sk_common.skc_node);
}
static __inline__ __attribute__((always_inline)) int __sk_del_node_init(struct sock *sk)
{
if (sk_hashed(sk)) {
__sk_del_node(sk);
sk_node_init(&sk->__sk_common.skc_node);
return 1;
}
return 0;
}
static inline __attribute__((always_inline)) void sock_hold(struct sock *sk)
{
atomic_add(1, (&sk->__sk_common.skc_refcnt));
}
static inline __attribute__((always_inline)) void __sock_put(struct sock *sk)
{
atomic_sub(1, (&sk->__sk_common.skc_refcnt));
}
static __inline__ __attribute__((always_inline)) int sk_del_node_init(struct sock *sk)
{
int rc = __sk_del_node_init(sk);
if (rc) {
({ int __ret_warn_on = !!((*(volatile int *)&(&sk->__sk_common.skc_refcnt)->counter) == 1); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_slowpath_null("include/net/sock.h", 451); __builtin_expect(!!(__ret_warn_on), 0); });
__sock_put(sk);
}
return rc;
}
static __inline__ __attribute__((always_inline)) int __sk_nulls_del_node_init_rcu(struct sock *sk)
{
if (sk_hashed(sk)) {
hlist_nulls_del_init_rcu(&sk->__sk_common.skc_nulls_node);
return 1;
}
return 0;
}
static __inline__ __attribute__((always_inline)) int sk_nulls_del_node_init_rcu(struct sock *sk)
{
int rc = __sk_nulls_del_node_init_rcu(sk);
if (rc) {
({ int __ret_warn_on = !!((*(volatile int *)&(&sk->__sk_common.skc_refcnt)->counter) == 1); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_slowpath_null("include/net/sock.h", 473); __builtin_expect(!!(__ret_warn_on), 0); });
__sock_put(sk);
}
return rc;
}
static __inline__ __attribute__((always_inline)) void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
hlist_add_head(&sk->__sk_common.skc_node, list);
}
static __inline__ __attribute__((always_inline)) void sk_add_node(struct sock *sk, struct hlist_head *list)
{
sock_hold(sk);
__sk_add_node(sk, list);
}
static __inline__ __attribute__((always_inline)) void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
sock_hold(sk);
hlist_add_head_rcu(&sk->__sk_common.skc_node, list);
}
static __inline__ __attribute__((always_inline)) void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
hlist_nulls_add_head_rcu(&sk->__sk_common.skc_nulls_node, list);
}
static __inline__ __attribute__((always_inline)) void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
sock_hold(sk);
__sk_nulls_add_node_rcu(sk, list);
}
static __inline__ __attribute__((always_inline)) void __sk_del_bind_node(struct sock *sk)
{
__hlist_del(&sk->__sk_common.skc_bind_node);
}
static __inline__ __attribute__((always_inline)) void sk_add_bind_node(struct sock *sk,
struct hlist_head *list)
{
hlist_add_head(&sk->__sk_common.skc_bind_node, list);
}
# 541 "include/net/sock.h"
enum sock_flags {
SOCK_DEAD,
SOCK_DONE,
SOCK_URGINLINE,
SOCK_KEEPOPEN,
SOCK_LINGER,
SOCK_DESTROY,
SOCK_BROADCAST,
SOCK_TIMESTAMP,
SOCK_ZAPPED,
SOCK_USE_WRITE_QUEUE,
SOCK_DBG,
SOCK_RCVTSTAMP,
SOCK_RCVTSTAMPNS,
SOCK_LOCALROUTE,
SOCK_QUEUE_SHRUNK,
SOCK_TIMESTAMPING_TX_HARDWARE,
SOCK_TIMESTAMPING_TX_SOFTWARE,
SOCK_TIMESTAMPING_RX_HARDWARE,
SOCK_TIMESTAMPING_RX_SOFTWARE,
SOCK_TIMESTAMPING_SOFTWARE,
SOCK_TIMESTAMPING_RAW_HARDWARE,
SOCK_TIMESTAMPING_SYS_HARDWARE,
SOCK_FASYNC,
SOCK_RXQ_OVFL,
};
static inline __attribute__((always_inline)) void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
nsk->sk_flags = osk->sk_flags;
}
static inline __attribute__((always_inline)) void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
__set_bit(flag, &sk->sk_flags);
}
static inline __attribute__((always_inline)) void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
__clear_bit(flag, &sk->sk_flags);
}
static inline __attribute__((always_inline)) int sock_flag(struct sock *sk, enum sock_flags flag)
{
return test_bit(flag, &sk->sk_flags);
}
static inline __attribute__((always_inline)) void sk_acceptq_removed(struct sock *sk)
{
sk->sk_ack_backlog--;
}
static inline __attribute__((always_inline)) void sk_acceptq_added(struct sock *sk)
{
sk->sk_ack_backlog++;
}
static inline __attribute__((always_inline)) int sk_acceptq_is_full(struct sock *sk)
{
return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}
static inline __attribute__((always_inline)) int sk_stream_min_wspace(struct sock *sk)
{
return sk->sk_wmem_queued >> 1;
}
static inline __attribute__((always_inline)) int sk_stream_wspace(struct sock *sk)
{
return sk->sk_sndbuf - sk->sk_wmem_queued;
}
extern void sk_stream_write_space(struct sock *sk);
static inline __attribute__((always_inline)) int sk_stream_memory_free(struct sock *sk)
{
return sk->sk_wmem_queued < sk->sk_sndbuf;
}
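/*
 * Backlog handling: __sk_add_backlog() links an skb onto the tail of
 * sk_backlog while the socket is owned by user context;
 * sk_rcvqueues_full() bounds backlog.len plus sk_rmem_alloc by
 * sk_rcvbuf, and sk_add_backlog() returns -132 (apparently -ENOBUFS in
 * the MIPS errno numbering) when that limit would be exceeded.  Queued
 * skbs are later fed to sk_backlog_rcv() from release_sock().
 */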
static inline __attribute__((always_inline)) void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
skb_dst_force(skb);
if (!sk->sk_backlog.tail)
sk->sk_backlog.head = skb;
else
sk->sk_backlog.tail->next = skb;
sk->sk_backlog.tail = skb;
skb->next = ((void *)0);
}
static inline __attribute__((always_inline)) int sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
{
unsigned int qsize = sk->sk_backlog.len + (*(volatile int *)&(&sk->sk_rmem_alloc)->counter);
return qsize + skb->truesize > sk->sk_rcvbuf;
}
static inline __attribute__((always_inline)) __attribute__((warn_unused_result)) int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
if (sk_rcvqueues_full(sk, skb))
return -132;
__sk_add_backlog(sk, skb);
sk->sk_backlog.len += skb->truesize;
return 0;
}
static inline __attribute__((always_inline)) int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
return sk->sk_backlog_rcv(sk, skb);
}
static inline __attribute__((always_inline)) void sock_rps_record_flow(const struct sock *sk)
{
# 674 "include/net/sock.h"
}
static inline __attribute__((always_inline)) void sock_rps_reset_flow(const struct sock *sk)
{
# 686 "include/net/sock.h"
}
static inline __attribute__((always_inline)) void sock_rps_save_rxhash(struct sock *sk, u32 rxhash)
{
}
# 710 "include/net/sock.h"
extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);
extern int sk_wait_data(struct sock *sk, long *timeo);
struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;
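/*
 * struct proto is the per-transport-protocol operations and accounting
 * table (TCP, UDP, raw, ... each provide one).  The connect/sendmsg/
 * recvmsg/hash callbacks are reached through sk->sk_prot, and the
 * memory_allocated / sysctl_* pointers back the protocol-wide memory
 * pressure limits used by the sk_mem_* helpers further down.
 */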
struct proto {
void (*close)(struct sock *sk,
long timeout);
int (*connect)(struct sock *sk,
struct sockaddr *uaddr,
int addr_len);
int (*disconnect)(struct sock *sk, int flags);
struct sock * (*accept) (struct sock *sk, int flags, int *err);
int (*ioctl)(struct sock *sk, int cmd,
unsigned long arg);
int (*init)(struct sock *sk);
void (*destroy)(struct sock *sk);
void (*shutdown)(struct sock *sk, int how);
int (*setsockopt)(struct sock *sk, int level,
int optname, char *optval,
unsigned int optlen);
int (*getsockopt)(struct sock *sk, int level,
int optname, char *optval,
int *option);
# 758 "include/net/sock.h"
int (*sendmsg)(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len);
int (*recvmsg)(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg,
size_t len, int noblock, int flags,
int *addr_len);
int (*sendpage)(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
int (*bind)(struct sock *sk,
struct sockaddr *uaddr, int addr_len);
int (*backlog_rcv) (struct sock *sk,
struct sk_buff *skb);
void (*hash)(struct sock *sk);
void (*unhash)(struct sock *sk);
void (*rehash)(struct sock *sk);
int (*get_port)(struct sock *sk, unsigned short snum);
unsigned int inuse_idx;
void (*enter_memory_pressure)(struct sock *sk);
atomic_t *memory_allocated;
struct percpu_counter *sockets_allocated;
int *memory_pressure;
int *sysctl_mem;
int *sysctl_wmem;
int *sysctl_rmem;
int max_header;
int no_autobind;
struct kmem_cache *slab;
unsigned int obj_size;
int slab_flags;
struct percpu_counter *orphan_count;
struct request_sock_ops *rsk_prot;
struct timewait_sock_ops *twsk_prot;
union {
struct inet_hashinfo *hashinfo;
struct udp_table *udp_table;
struct raw_hashinfo *raw_hash;
} h;
struct module *owner;
char name[32];
struct list_head node;
};
extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);
# 856 "include/net/sock.h"
extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
# 869 "include/net/sock.h"
static inline __attribute__((always_inline)) void __sk_prot_rehash(struct sock *sk)
{
sk->__sk_common.skc_prot->unhash(sk);
sk->__sk_common.skc_prot->hash(sk);
}
# 891 "include/net/sock.h"
struct sock_iocb {
struct list_head list;
int flags;
int size;
struct socket *sock;
struct sock *sk;
struct scm_cookie *scm;
struct msghdr *msg, async_msg;
struct kiocb *kiocb;
};
static inline __attribute__((always_inline)) struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
return (struct sock_iocb *)iocb->private;
}
static inline __attribute__((always_inline)) struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
return si->kiocb;
}
struct socket_alloc {
struct socket socket;
struct inode vfs_inode;
};
static inline __attribute__((always_inline)) struct socket *SOCKET_I(struct inode *inode)
{
return &({ const typeof( ((struct socket_alloc *)0)->vfs_inode ) *__mptr = (inode); (struct socket_alloc *)( (char *)__mptr - __builtin_offsetof(struct socket_alloc,vfs_inode) );})->socket;
}
static inline __attribute__((always_inline)) struct inode *SOCK_INODE(struct socket *socket)
{
return &({ const typeof( ((struct socket_alloc *)0)->socket ) *__mptr = (socket); (struct socket_alloc *)( (char *)__mptr - __builtin_offsetof(struct socket_alloc,socket) );})->vfs_inode;
}
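/*
 * Per-socket memory accounting: sk_forward_alloc is pre-charged in
 * page-sized (4096-byte, the expanded SK_MEM_QUANTUM) units by
 * __sk_mem_schedule() and consumed by sk_mem_charge();
 * sk_mem_reclaim() hands whole unused pages back to the protocol's
 * memory_allocated counter.  Protocols without a memory_allocated
 * pointer (sk_has_account() == 0) skip the accounting entirely.
 */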
extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
extern void __sk_mem_reclaim(struct sock *sk);
static inline __attribute__((always_inline)) int sk_mem_pages(int amt)
{
return (amt + ((int)((1UL) << 12)) - 1) >> ( __builtin_constant_p(((int)((1UL) << 12))) ? ( (((int)((1UL) << 12))) < 1 ? ____ilog2_NaN() : (((int)((1UL) << 12))) & (1ULL << 63) ? 63 : (((int)((1UL) << 12))) & (1ULL << 62) ? 62 : (((int)((1UL) << 12))) & (1ULL << 61) ? 61 : (((int)((1UL) << 12))) & (1ULL << 60) ? 60 : (((int)((1UL) << 12))) & (1ULL << 59) ? 59 : (((int)((1UL) << 12))) & (1ULL << 58) ? 58 : (((int)((1UL) << 12))) & (1ULL << 57) ? 57 : (((int)((1UL) << 12))) & (1ULL << 56) ? 56 : (((int)((1UL) << 12))) & (1ULL << 55) ? 55 : (((int)((1UL) << 12))) & (1ULL << 54) ? 54 : (((int)((1UL) << 12))) & (1ULL << 53) ? 53 : (((int)((1UL) << 12))) & (1ULL << 52) ? 52 : (((int)((1UL) << 12))) & (1ULL << 51) ? 51 : (((int)((1UL) << 12))) & (1ULL << 50) ? 50 : (((int)((1UL) << 12))) & (1ULL << 49) ? 49 : (((int)((1UL) << 12))) & (1ULL << 48) ? 48 : (((int)((1UL) << 12))) & (1ULL << 47) ? 47 : (((int)((1UL) << 12))) & (1ULL << 46) ? 46 : (((int)((1UL) << 12))) & (1ULL << 45) ? 45 : (((int)((1UL) << 12))) & (1ULL << 44) ? 44 : (((int)((1UL) << 12))) & (1ULL << 43) ? 43 : (((int)((1UL) << 12))) & (1ULL << 42) ? 42 : (((int)((1UL) << 12))) & (1ULL << 41) ? 41 : (((int)((1UL) << 12))) & (1ULL << 40) ? 40 : (((int)((1UL) << 12))) & (1ULL << 39) ? 39 : (((int)((1UL) << 12))) & (1ULL << 38) ? 38 : (((int)((1UL) << 12))) & (1ULL << 37) ? 37 : (((int)((1UL) << 12))) & (1ULL << 36) ? 36 : (((int)((1UL) << 12))) & (1ULL << 35) ? 35 : (((int)((1UL) << 12))) & (1ULL << 34) ? 34 : (((int)((1UL) << 12))) & (1ULL << 33) ? 33 : (((int)((1UL) << 12))) & (1ULL << 32) ? 32 : (((int)((1UL) << 12))) & (1ULL << 31) ? 31 : (((int)((1UL) << 12))) & (1ULL << 30) ? 30 : (((int)((1UL) << 12))) & (1ULL << 29) ? 29 : (((int)((1UL) << 12))) & (1ULL << 28) ? 28 : (((int)((1UL) << 12))) & (1ULL << 27) ? 27 : (((int)((1UL) << 12))) & (1ULL << 26) ? 26 : (((int)((1UL) << 12))) & (1ULL << 25) ? 25 : (((int)((1UL) << 12))) & (1ULL << 24) ? 24 : (((int)((1UL) << 12))) & (1ULL << 23) ? 23 : (((int)((1UL) << 12))) & (1ULL << 22) ? 22 : (((int)((1UL) << 12))) & (1ULL << 21) ? 21 : (((int)((1UL) << 12))) & (1ULL << 20) ? 20 : (((int)((1UL) << 12))) & (1ULL << 19) ? 19 : (((int)((1UL) << 12))) & (1ULL << 18) ? 18 : (((int)((1UL) << 12))) & (1ULL << 17) ? 17 : (((int)((1UL) << 12))) & (1ULL << 16) ? 16 : (((int)((1UL) << 12))) & (1ULL << 15) ? 15 : (((int)((1UL) << 12))) & (1ULL << 14) ? 14 : (((int)((1UL) << 12))) & (1ULL << 13) ? 13 : (((int)((1UL) << 12))) & (1ULL << 12) ? 12 : (((int)((1UL) << 12))) & (1ULL << 11) ? 11 : (((int)((1UL) << 12))) & (1ULL << 10) ? 10 : (((int)((1UL) << 12))) & (1ULL << 9) ? 9 : (((int)((1UL) << 12))) & (1ULL << 8) ? 8 : (((int)((1UL) << 12))) & (1ULL << 7) ? 7 : (((int)((1UL) << 12))) & (1ULL << 6) ? 6 : (((int)((1UL) << 12))) & (1ULL << 5) ? 5 : (((int)((1UL) << 12))) & (1ULL << 4) ? 4 : (((int)((1UL) << 12))) & (1ULL << 3) ? 3 : (((int)((1UL) << 12))) & (1ULL << 2) ? 2 : (((int)((1UL) << 12))) & (1ULL << 1) ? 1 : (((int)((1UL) << 12))) & (1ULL << 0) ? 0 : ____ilog2_NaN() ) : (sizeof(((int)((1UL) << 12))) <= 4) ? __ilog2_u32(((int)((1UL) << 12))) : __ilog2_u64(((int)((1UL) << 12))) );
}
static inline __attribute__((always_inline)) int sk_has_account(struct sock *sk)
{
return !!sk->__sk_common.skc_prot->memory_allocated;
}
static inline __attribute__((always_inline)) int sk_wmem_schedule(struct sock *sk, int size)
{
if (!sk_has_account(sk))
return 1;
return size <= sk->sk_forward_alloc ||
__sk_mem_schedule(sk, size, 0);
}
static inline __attribute__((always_inline)) int sk_rmem_schedule(struct sock *sk, int size)
{
if (!sk_has_account(sk))
return 1;
return size <= sk->sk_forward_alloc ||
__sk_mem_schedule(sk, size, 1);
}
static inline __attribute__((always_inline)) void sk_mem_reclaim(struct sock *sk)
{
if (!sk_has_account(sk))
return;
if (sk->sk_forward_alloc >= ((int)((1UL) << 12)))
__sk_mem_reclaim(sk);
}
static inline __attribute__((always_inline)) void sk_mem_reclaim_partial(struct sock *sk)
{
if (!sk_has_account(sk))
return;
if (sk->sk_forward_alloc > ((int)((1UL) << 12)))
__sk_mem_reclaim(sk);
}
static inline __attribute__((always_inline)) void sk_mem_charge(struct sock *sk, int size)
{
if (!sk_has_account(sk))
return;
sk->sk_forward_alloc -= size;
}
static inline __attribute__((always_inline)) void sk_mem_uncharge(struct sock *sk, int size)
{
if (!sk_has_account(sk))
return;
sk->sk_forward_alloc += size;
}
static inline __attribute__((always_inline)) void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
sk->sk_wmem_queued -= skb->truesize;
sk_mem_uncharge(sk, skb->truesize);
__kfree_skb(skb);
}
# 1038 "include/net/sock.h"
extern void lock_sock_nested(struct sock *sk, int subclass);
static inline __attribute__((always_inline)) void lock_sock(struct sock *sk)
{
lock_sock_nested(sk, 0);
}
extern void release_sock(struct sock *sk);
# 1054 "include/net/sock.h"
extern int lock_sock_fast(struct sock *sk);
# 1063 "include/net/sock.h"
static inline __attribute__((always_inline)) void unlock_sock_fast(struct sock *sk, int slow)
{
if (slow)
release_sock(sk);
else
spin_unlock_bh(&sk->sk_lock.slock);
}
extern struct sock *sk_alloc(struct net *net, int family,
gfp_t priority,
struct proto *prot);
extern void sk_free(struct sock *sk);
extern void sk_release_kernel(struct sock *sk);
extern struct sock *sk_clone(const struct sock *sk,
const gfp_t priority);
extern struct sk_buff *sock_wmalloc(struct sock *sk,
unsigned long size, int force,
gfp_t priority);
extern struct sk_buff *sock_rmalloc(struct sock *sk,
unsigned long size, int force,
gfp_t priority);
extern void sock_wfree(struct sk_buff *skb);
extern void sock_rfree(struct sk_buff *skb);
extern int sock_setsockopt(struct socket *sock, int level,
int op, char *optval,
unsigned int optlen);
extern int sock_getsockopt(struct socket *sock, int level,
int op, char *optval,
int *optlen);
extern struct sk_buff *sock_alloc_send_skb(struct sock *sk,
unsigned long size,
int noblock,
int *errcode);
extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
unsigned long header_len,
unsigned long data_len,
int noblock,
int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);
extern void sock_update_classid(struct sock *sk);
# 1122 "include/net/sock.h"
extern int sock_no_bind(struct socket *,
struct sockaddr *, int);
extern int sock_no_connect(struct socket *,
struct sockaddr *, int, int);
extern int sock_no_socketpair(struct socket *,
struct socket *);
extern int sock_no_accept(struct socket *,
struct socket *, int);
extern int sock_no_getname(struct socket *,
struct sockaddr *, int *, int);
extern unsigned int sock_no_poll(struct file *, struct socket *,
struct poll_table_struct *);
extern int sock_no_ioctl(struct socket *, unsigned int,
unsigned long);
extern int sock_no_listen(struct socket *, int);
extern int sock_no_shutdown(struct socket *, int);
extern int sock_no_getsockopt(struct socket *, int , int,
char *, int *);
extern int sock_no_setsockopt(struct socket *, int, int,
char *, unsigned int);
extern int sock_no_sendmsg(struct kiocb *, struct socket *,
struct msghdr *, size_t);
extern int sock_no_recvmsg(struct kiocb *, struct socket *,
struct msghdr *, size_t, int);
extern int sock_no_mmap(struct file *file,
struct socket *sock,
struct vm_area_struct *vma);
extern ssize_t sock_no_sendpage(struct socket *sock,
struct page *page,
int offset, size_t size,
int flags);
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
char *optval, int *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
char *optval, unsigned int optlen);
extern int compat_sock_common_getsockopt(struct socket *sock, int level,
int optname, char *optval, int *optlen);
extern int compat_sock_common_setsockopt(struct socket *sock, int level,
int optname, char *optval, unsigned int optlen);
extern void sk_common_release(struct sock *sk);
extern void sock_init_data(struct socket *sock, struct sock *sk);
# 1185 "include/net/sock.h"
static inline __attribute__((always_inline)) void sk_filter_release(struct sk_filter *fp)
{
if ((atomic_sub_return(1, (&fp->refcnt)) == 0))
kfree(fp);
}
static inline __attribute__((always_inline)) void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
unsigned int size = sk_filter_len(fp);
atomic_sub(size, &sk->sk_omem_alloc);
sk_filter_release(fp);
}
static inline __attribute__((always_inline)) void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
atomic_add(1, (&fp->refcnt));
atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}
# 1231 "include/net/sock.h"
static inline __attribute__((always_inline)) void sock_put(struct sock *sk)
{
if ((atomic_sub_return(1, (&sk->__sk_common.skc_refcnt)) == 0))
sk_free(sk);
}
extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
const int nested);
static inline __attribute__((always_inline)) void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
sk->__sk_common.skc_tx_queue_mapping = tx_queue;
}
static inline __attribute__((always_inline)) void sk_tx_queue_clear(struct sock *sk)
{
sk->__sk_common.skc_tx_queue_mapping = -1;
}
static inline __attribute__((always_inline)) int sk_tx_queue_get(const struct sock *sk)
{
return sk ? sk->__sk_common.skc_tx_queue_mapping : -1;
}
static inline __attribute__((always_inline)) void sk_set_socket(struct sock *sk, struct socket *sock)
{
sk_tx_queue_clear(sk);
sk->sk_socket = sock;
}
static inline __attribute__((always_inline)) wait_queue_head_t *sk_sleep(struct sock *sk)
{
return &sk->sk_wq->wait;
}
static inline __attribute__((always_inline)) void sock_orphan(struct sock *sk)
{
_raw_write_lock_bh(&sk->sk_callback_lock);
sock_set_flag(sk, SOCK_DEAD);
sk_set_socket(sk, ((void *)0));
sk->sk_wq = ((void *)0);
_raw_write_unlock_bh(&sk->sk_callback_lock);
}
static inline __attribute__((always_inline)) void sock_graft(struct sock *sk, struct socket *parent)
{
_raw_write_lock_bh(&sk->sk_callback_lock);
({ if (!__builtin_constant_p(parent->wq) || ((parent->wq) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (sk->sk_wq) = (parent->wq); });
parent->sk = sk;
sk_set_socket(sk, parent);
security_sock_graft(sk, parent);
_raw_write_unlock_bh(&sk->sk_callback_lock);
}
extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);
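/*
 * sk_dst_cache caches the route for a connected socket.  __sk_dst_get()
 * is the lock-free reader (the expanded rcu_dereference), sk_dst_get()
 * additionally takes a dst reference under rcu_read_lock(), and
 * __sk_dst_set()/sk_dst_reset() swap in a new entry while releasing the
 * old one (sk_dst_set() does so under sk_dst_lock).
 */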
static inline __attribute__((always_inline)) struct dst_entry *
__sk_dst_get(struct sock *sk)
{
return ({ typeof(sk->sk_dst_cache) _________p1 = (*(volatile typeof(sk->sk_dst_cache) *)&(sk->sk_dst_cache)); do { } while(0); (_________p1); })
;
}
static inline __attribute__((always_inline)) struct dst_entry *
sk_dst_get(struct sock *sk)
{
struct dst_entry *dst;
rcu_read_lock();
dst = ({ typeof(sk->sk_dst_cache) _________p1 = (*(volatile typeof(sk->sk_dst_cache) *)&(sk->sk_dst_cache)); do { } while(0); (_________p1); });
if (dst)
dst_hold(dst);
rcu_read_unlock();
return dst;
}
extern void sk_reset_txq(struct sock *sk);
static inline __attribute__((always_inline)) void dst_negative_advice(struct sock *sk)
{
struct dst_entry *ndst, *dst = __sk_dst_get(sk);
if (dst && dst->ops->negative_advice) {
ndst = dst->ops->negative_advice(dst);
if (ndst != dst) {
({ if (!__builtin_constant_p(ndst) || ((ndst) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (sk->sk_dst_cache) = (ndst); });
sk_reset_txq(sk);
}
}
}
static inline __attribute__((always_inline)) void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
struct dst_entry *old_dst;
sk_tx_queue_clear(sk);
old_dst = ({ typeof(sk->sk_dst_cache) _________p1 = (*(volatile typeof(sk->sk_dst_cache) *)&(sk->sk_dst_cache)); do { } while(0); (_________p1); });
({ if (!__builtin_constant_p(dst) || ((dst) != ((void *)0))) __asm__ __volatile__("": : :"memory"); (sk->sk_dst_cache) = (dst); });
dst_release(old_dst);
}
static inline __attribute__((always_inline)) void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
spin_lock(&sk->sk_dst_lock);
__sk_dst_set(sk, dst);
spin_unlock(&sk->sk_dst_lock);
}
static inline __attribute__((always_inline)) void
__sk_dst_reset(struct sock *sk)
{
__sk_dst_set(sk, ((void *)0));
}
static inline __attribute__((always_inline)) void
sk_dst_reset(struct sock *sk)
{
spin_lock(&sk->sk_dst_lock);
__sk_dst_reset(sk);
spin_unlock(&sk->sk_dst_lock);
}
extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
static inline __attribute__((always_inline)) int sk_can_gso(const struct sock *sk)
{
return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}
extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
static inline __attribute__((always_inline)) void sk_nocaps_add(struct sock *sk, int flags)
{
sk->sk_route_nocaps |= flags;
sk->sk_route_caps &= ~flags;
}
static inline __attribute__((always_inline)) int skb_copy_to_page(struct sock *sk, char *from,
struct sk_buff *skb, struct page *page,
int off, int copy)
{
if (skb->ip_summed == 0) {
int err = 0;
__wsum csum = csum_and_copy_from_user(from,
lowmem_page_address(page) + off,
copy, 0, &err);
if (err)
return err;
skb->csum = csum_block_add(skb->csum, csum, skb->len);
} else if (({ void *__cu_to; const void *__cu_from; long __cu_len; __cu_to = (lowmem_page_address(page) + off); __cu_from = (from); __cu_len = (copy); if (__builtin_expect(!!(({ unsigned long __addr = (unsigned long) ((__cu_from)); unsigned long __size = (__cu_len); unsigned long __mask = (__current_thread_info->addr_limit).seg; unsigned long __ok; (void)0; __ok = (signed long)(__mask & (__addr | (__addr + __size) | ((__builtin_constant_p(__size) && (signed long) (__size) > 0) ? 0 : (__size)))); __ok == 0; })), 1)) { might_fault(); __cu_len = ({ register void *__cu_to_r __asm__("$4"); register const void *__cu_from_r __asm__("$5"); register long __cu_len_r __asm__("$6"); __cu_to_r = (__cu_to); __cu_from_r = (__cu_from); __cu_len_r = (__cu_len); __asm__ __volatile__( ".set\tnoreorder\n\t" ".set\tnoat\n\t" "la" "\t$1, " "__copy_user" "\n\t" "jalr\t$1\n\t" ".set\tat\n\t" ".set\tnoat\n\t" "addu" "\t$1, %1, %2\n\t" ".set\tat\n\t" ".set\treorder" : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) : : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", "$0", "memory"); __cu_len_r; }); } __cu_len; }))
return -14;
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
sk->sk_wmem_queued += copy;
sk_mem_charge(sk, copy);
return 0;
}
static inline __attribute__((always_inline)) int sk_wmem_alloc_get(const struct sock *sk)
{
return (*(volatile int *)&(&sk->sk_wmem_alloc)->counter) - 1;
}
static inline __attribute__((always_inline)) int sk_rmem_alloc_get(const struct sock *sk)
{
return (*(volatile int *)&(&sk->sk_rmem_alloc)->counter);
}
static inline __attribute__((always_inline)) int sk_has_allocations(const struct sock *sk)
{
return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}
# 1473 "include/net/sock.h"
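/*
 * wq_has_sleeper() pairs a memory barrier with the one on the sleeping
 * side so a waiter cannot be missed between "queue data" and "check for
 * sleepers".  The bare asm("":::"memory") here is the compiler barrier
 * that the SMP barrier macros expand to in what appears to be a non-SMP
 * configuration of this tree.
 */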
static inline __attribute__((always_inline)) int wq_has_sleeper(struct socket_wq *wq)
{
__asm__ __volatile__("": : :"memory");
return wq && waitqueue_active(&wq->wait);
}
# 1494 "include/net/sock.h"
static inline __attribute__((always_inline)) void sock_poll_wait(struct file *filp,
wait_queue_head_t *wait_address, poll_table *p)
{
if (p && wait_address) {
poll_wait(filp, wait_address, p);
__asm__ __volatile__("": : :"memory");
}
}
# 1518 "include/net/sock.h"
static inline __attribute__((always_inline)) void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
skb_orphan(skb);
skb->sk = sk;
skb->destructor = sock_wfree;
atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}
static inline __attribute__((always_inline)) void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
skb_orphan(skb);
skb->sk = sk;
skb->destructor = sock_rfree;
atomic_add(skb->truesize, &sk->sk_rmem_alloc);
sk_mem_charge(sk, skb->truesize);
}
extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
unsigned long expires);
extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
static inline __attribute__((always_inline)) int sock_error(struct sock *sk)
{
int err;
if (__builtin_expect(!!(!sk->sk_err), 1))
return 0;
err = ({ ((void)(sizeof(struct { int:-!!(sizeof(*(&sk->sk_err)) & ~0xc); }))); ((__typeof__(*(&sk->sk_err))) __xchg((unsigned long)(0), (&sk->sk_err), sizeof(*(&sk->sk_err)))); });
return -err;
}
static inline __attribute__((always_inline)) unsigned long sock_wspace(struct sock *sk)
{
int amt = 0;
if (!(sk->sk_shutdown & 2)) {
amt = sk->sk_sndbuf - (*(volatile int *)&(&sk->sk_wmem_alloc)->counter);
if (amt < 0)
amt = 0;
}
return amt;
}
static inline __attribute__((always_inline)) void sk_wake_async(struct sock *sk, int how, int band)
{
if (sock_flag(sk, SOCK_FASYNC))
sock_wake_async(sk->sk_socket, how, band);
}
static inline __attribute__((always_inline)) void sk_stream_moderate_sndbuf(struct sock *sk)
{
if (!(sk->sk_userlocks & 1)) {
sk->sk_sndbuf = ({ typeof(sk->sk_sndbuf) _min1 = (sk->sk_sndbuf); typeof(sk->sk_wmem_queued >> 1) _min2 = (sk->sk_wmem_queued >> 1); (void) (&_min1 == &_min2); _min1 < _min2 ? _min1 : _min2; });
sk->sk_sndbuf = ({ typeof(sk->sk_sndbuf) _max1 = (sk->sk_sndbuf); typeof(2048) _max2 = (2048); (void) (&_max1 == &_max2); _max1 > _max2 ? _max1 : _max2; });
}
}
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
static inline __attribute__((always_inline)) struct page *sk_stream_alloc_page(struct sock *sk)
{
struct page *page = ((void *)0);
page = alloc_pages_node(numa_node_id(), sk->sk_allocation, 0);
if (!page) {
sk->__sk_common.skc_prot->enter_memory_pressure(sk);
sk_stream_moderate_sndbuf(sk);
}
return page;
}
static inline __attribute__((always_inline)) int sock_writeable(const struct sock *sk)
{
return (*(volatile int *)&(&sk->sk_wmem_alloc)->counter) < (sk->sk_sndbuf >> 1);
}
static inline __attribute__((always_inline)) gfp_t gfp_any(void)
{
return (((__current_thread_info->preempt_count) & (((1UL << (8))-1) << (0 + 8)))) ? ((( gfp_t)0x20u)) : ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u));
}
static inline __attribute__((always_inline)) long sock_rcvtimeo(const struct sock *sk, int noblock)
{
return noblock ? 0 : sk->sk_rcvtimeo;
}
static inline __attribute__((always_inline)) long sock_sndtimeo(const struct sock *sk, int noblock)
{
return noblock ? 0 : sk->sk_sndtimeo;
}
static inline __attribute__((always_inline)) int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
return (waitall ? len : ({ int __min1 = (sk->sk_rcvlowat); int __min2 = (len); __min1 < __min2 ? __min1: __min2; })) ? : 1;
}
static inline __attribute__((always_inline)) int sock_intr_errno(long timeo)
{
return timeo == ((long)(~0UL>>1)) ? -512 : -4;
}
extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
static __inline__ __attribute__((always_inline)) void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
ktime_t kt = skb->tstamp;
struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
# 1660 "include/net/sock.h"
if (sock_flag(sk, SOCK_RCVTSTAMP) ||
sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE) ||
(kt.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) ||
(hwtstamps->hwtstamp.tv64 &&
sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) ||
(hwtstamps->syststamp.tv64 &&
sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE)))
__sock_recv_timestamp(msg, sk, skb);
else
sk->sk_stamp = kt;
}
extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
static inline __attribute__((always_inline)) void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
if (sk->sk_flags & ((1UL << SOCK_RXQ_OVFL) | (1UL << SOCK_RCVTSTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | (1UL << SOCK_TIMESTAMPING_SOFTWARE) | (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE)))
__sock_recv_ts_and_drops(msg, sk, skb);
else
sk->sk_stamp = skb->tstamp;
}
# 1700 "include/net/sock.h"
extern int sock_tx_timestamp(struct msghdr *msg,
struct sock *sk,
union skb_shared_tx *shtx);
# 1724 "include/net/sock.h"
static inline __attribute__((always_inline)) void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
__skb_unlink(skb, &sk->sk_receive_queue);
__kfree_skb(skb);
}
static inline __attribute__((always_inline))
struct net *sock_net(const struct sock *sk)
{
return (&init_net);
}
static inline __attribute__((always_inline))
void sock_net_set(struct sock *sk, struct net *net)
{
do { (void)(net);} while (0);
}
static inline __attribute__((always_inline)) void sk_change_net(struct sock *sk, struct net *net)
{
put_net(sock_net(sk));
sock_net_set(sk, hold_net(net));
}
static inline __attribute__((always_inline)) struct sock *skb_steal_sock(struct sk_buff *skb)
{
if (__builtin_expect(!!(skb->sk), 0)) {
struct sock *sk = skb->sk;
skb->destructor = ((void *)0);
skb->sk = ((void *)0);
return sk;
}
return ((void *)0);
}
extern void sock_enable_timestamp(struct sock *sk, int flag);
extern int sock_get_timestamp(struct sock *, struct timeval *);
extern int sock_get_timestampns(struct sock *, struct timespec *);
extern int net_msg_warn;
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;
extern void sk_init(void);
extern int sysctl_optmem_max;
extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;
# 210 "include/linux/tcp.h" 2
# 1 "include/net/inet_connection_sock.h" 1
# 23 "include/net/inet_connection_sock.h"
# 1 "include/net/inet_sock.h" 1
# 23 "include/net/inet_sock.h"
# 1 "include/linux/jhash.h" 1
# 44 "include/linux/jhash.h"
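/*
 * jhash/jhash2/jhash_3words are Bob Jenkins' lookup hash: the 96-bit
 * internal state (a, b, c) is seeded with the golden-ratio constant
 * 0x9e3779b9 plus the caller's initval, mixed 12 bytes (or 3 words) at
 * a time, and the final value of c is returned.  inet_ehashfn() below
 * folds addresses and ports through jhash_3words() in exactly this way.
 */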
static inline __attribute__((always_inline)) u32 jhash(const void *key, u32 length, u32 initval)
{
u32 a, b, c, len;
const u8 *k = key;
len = length;
a = b = 0x9e3779b9;
c = initval;
while (len >= 12) {
a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24));
b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24));
c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24));
{ a -= b; a -= c; a ^= (c>>13); b -= c; b -= a; b ^= (a<<8); c -= a; c -= b; c ^= (b>>13); a -= b; a -= c; a ^= (c>>12); b -= c; b -= a; b ^= (a<<16); c -= a; c -= b; c ^= (b>>5); a -= b; a -= c; a ^= (c>>3); b -= c; b -= a; b ^= (a<<10); c -= a; c -= b; c ^= (b>>15); };
k += 12;
len -= 12;
}
c += length;
switch (len) {
case 11: c += ((u32)k[10]<<24);
case 10: c += ((u32)k[9]<<16);
case 9 : c += ((u32)k[8]<<8);
case 8 : b += ((u32)k[7]<<24);
case 7 : b += ((u32)k[6]<<16);
case 6 : b += ((u32)k[5]<<8);
case 5 : b += k[4];
case 4 : a += ((u32)k[3]<<24);
case 3 : a += ((u32)k[2]<<16);
case 2 : a += ((u32)k[1]<<8);
case 1 : a += k[0];
};
{ a -= b; a -= c; a ^= (c>>13); b -= c; b -= a; b ^= (a<<8); c -= a; c -= b; c ^= (b>>13); a -= b; a -= c; a ^= (c>>12); b -= c; b -= a; b ^= (a<<16); c -= a; c -= b; c ^= (b>>5); a -= b; a -= c; a ^= (c>>3); b -= c; b -= a; b ^= (a<<10); c -= a; c -= b; c ^= (b>>15); };
return c;
}
static inline __attribute__((always_inline)) u32 jhash2(const u32 *k, u32 length, u32 initval)
{
u32 a, b, c, len;
a = b = 0x9e3779b9;
c = initval;
len = length;
while (len >= 3) {
a += k[0];
b += k[1];
c += k[2];
{ a -= b; a -= c; a ^= (c>>13); b -= c; b -= a; b ^= (a<<8); c -= a; c -= b; c ^= (b>>13); a -= b; a -= c; a ^= (c>>12); b -= c; b -= a; b ^= (a<<16); c -= a; c -= b; c ^= (b>>5); a -= b; a -= c; a ^= (c>>3); b -= c; b -= a; b ^= (a<<10); c -= a; c -= b; c ^= (b>>15); };
k += 3; len -= 3;
}
c += length * 4;
switch (len) {
case 2 : b += k[1];
case 1 : a += k[0];
};
{ a -= b; a -= c; a ^= (c>>13); b -= c; b -= a; b ^= (a<<8); c -= a; c -= b; c ^= (b>>13); a -= b; a -= c; a ^= (c>>12); b -= c; b -= a; b ^= (a<<16); c -= a; c -= b; c ^= (b>>5); a -= b; a -= c; a ^= (c>>3); b -= c; b -= a; b ^= (a<<10); c -= a; c -= b; c ^= (b>>15); };
return c;
}
# 122 "include/linux/jhash.h"
static inline __attribute__((always_inline)) u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
{
a += 0x9e3779b9;
b += 0x9e3779b9;
c += initval;
{ a -= b; a -= c; a ^= (c>>13); b -= c; b -= a; b ^= (a<<8); c -= a; c -= b; c ^= (b>>13); a -= b; a -= c; a ^= (c>>12); b -= c; b -= a; b ^= (a<<16); c -= a; c -= b; c ^= (b>>5); a -= b; a -= c; a ^= (c>>3); b -= c; b -= a; b ^= (a<<10); c -= a; c -= b; c ^= (b>>15); };
return c;
}
static inline __attribute__((always_inline)) u32 jhash_2words(u32 a, u32 b, u32 initval)
{
return jhash_3words(a, b, 0, initval);
}
static inline __attribute__((always_inline)) u32 jhash_1word(u32 a, u32 initval)
{
return jhash_3words(a, 0, 0, initval);
}
# 24 "include/net/inet_sock.h" 2
# 1 "include/net/request_sock.h" 1
# 25 "include/net/request_sock.h"
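/*
 * A request_sock is the small "mini socket" representing a connection
 * request (e.g. a received TCP SYN) before a full struct sock is
 * created at accept time; request_sock_ops supplies the per-protocol
 * SYN-ACK retransmit, ACK/RST send and destructor callbacks for it.
 */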
struct request_sock;
struct sk_buff;
struct dst_entry;
struct proto;
struct request_values {
};
struct request_sock_ops {
int family;
int obj_size;
struct kmem_cache *slab;
char *slab_name;
int (*rtx_syn_ack)(struct sock *sk,
struct request_sock *req,
struct request_values *rvp);
void (*send_ack)(struct sock *sk, struct sk_buff *skb,
struct request_sock *req);
void (*send_reset)(struct sock *sk,
struct sk_buff *skb);
void (*destructor)(struct request_sock *req);
void (*syn_ack_timeout)(struct sock *sk,
struct request_sock *req);
};
struct request_sock {
struct request_sock *dl_next;
u16 mss;
u8 retrans;
u8 cookie_ts;
u32 window_clamp;
u32 rcv_wnd;
u32 ts_recent;
unsigned long expires;
const struct request_sock_ops *rsk_ops;
struct sock *sk;
u32 secid;
u32 peer_secid;
};
static inline __attribute__((always_inline)) struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
{
struct request_sock *req = kmem_cache_alloc(ops->slab, ((( gfp_t)0x20u)));
if (req != ((void *)0))
req->rsk_ops = ops;
return req;
}
static inline __attribute__((always_inline)) void __reqsk_free(struct request_sock *req)
{
kmem_cache_free(req->rsk_ops->slab, req);
}
static inline __attribute__((always_inline)) void reqsk_free(struct request_sock *req)
{
req->rsk_ops->destructor(req);
__reqsk_free(req);
}
extern int sysctl_max_syn_backlog;
struct listen_sock {
u8 max_qlen_log;
int qlen;
int qlen_young;
int clock_hand;
u32 hash_rnd;
u32 nr_table_entries;
struct request_sock *syn_table[0];
};
# 124 "include/net/request_sock.h"
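/*
 * request_sock_queue holds a listener's pending requests: listen_opt
 * points to the SYN hash table (syn_table[], sized by nr_table_entries)
 * of not-yet-completed handshakes, while rskq_accept_head/tail form the
 * FIFO of established children waiting to be returned by accept();
 * reqsk_queue_add()/reqsk_queue_remove() manipulate that FIFO.
 */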
struct request_sock_queue {
struct request_sock *rskq_accept_head;
struct request_sock *rskq_accept_tail;
rwlock_t syn_wait_lock;
u8 rskq_defer_accept;
struct listen_sock *listen_opt;
};
extern int reqsk_queue_alloc(struct request_sock_queue *queue,
unsigned int nr_table_entries);
extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
extern void reqsk_queue_destroy(struct request_sock_queue *queue);
static inline __attribute__((always_inline)) struct request_sock *
reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
{
struct request_sock *req = queue->rskq_accept_head;
queue->rskq_accept_head = ((void *)0);
return req;
}
static inline __attribute__((always_inline)) int reqsk_queue_empty(struct request_sock_queue *queue)
{
return queue->rskq_accept_head == ((void *)0);
}
static inline __attribute__((always_inline)) void reqsk_queue_unlink(struct request_sock_queue *queue,
struct request_sock *req,
struct request_sock **prev_req)
{
_raw_write_lock(&queue->syn_wait_lock);
*prev_req = req->dl_next;
__raw_write_unlock(&queue->syn_wait_lock);
}
static inline __attribute__((always_inline)) void reqsk_queue_add(struct request_sock_queue *queue,
struct request_sock *req,
struct sock *parent,
struct sock *child)
{
req->sk = child;
sk_acceptq_added(parent);
if (queue->rskq_accept_head == ((void *)0))
queue->rskq_accept_head = req;
else
queue->rskq_accept_tail->dl_next = req;
queue->rskq_accept_tail = req;
req->dl_next = ((void *)0);
}
static inline __attribute__((always_inline)) struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
{
struct request_sock *req = queue->rskq_accept_head;
({ int __ret_warn_on = !!(req == ((void *)0)); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_slowpath_null("include/net/request_sock.h", 183); __builtin_expect(!!(__ret_warn_on), 0); });
queue->rskq_accept_head = req->dl_next;
if (queue->rskq_accept_head == ((void *)0))
queue->rskq_accept_tail = ((void *)0);
return req;
}
static inline __attribute__((always_inline)) struct sock *reqsk_queue_get_child(struct request_sock_queue *queue,
struct sock *parent)
{
struct request_sock *req = reqsk_queue_remove(queue);
struct sock *child = req->sk;
({ int __ret_warn_on = !!(child == ((void *)0)); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_slowpath_null("include/net/request_sock.h", 198); __builtin_expect(!!(__ret_warn_on), 0); });
sk_acceptq_removed(parent);
__reqsk_free(req);
return child;
}
static inline __attribute__((always_inline)) int reqsk_queue_removed(struct request_sock_queue *queue,
struct request_sock *req)
{
struct listen_sock *lopt = queue->listen_opt;
if (req->retrans == 0)
--lopt->qlen_young;
return --lopt->qlen;
}
static inline __attribute__((always_inline)) int reqsk_queue_added(struct request_sock_queue *queue)
{
struct listen_sock *lopt = queue->listen_opt;
const int prev_qlen = lopt->qlen;
lopt->qlen_young++;
lopt->qlen++;
return prev_qlen;
}
static inline __attribute__((always_inline)) int reqsk_queue_len(const struct request_sock_queue *queue)
{
return queue->listen_opt != ((void *)0) ? queue->listen_opt->qlen : 0;
}
static inline __attribute__((always_inline)) int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
return queue->listen_opt->qlen_young;
}
static inline __attribute__((always_inline)) int reqsk_queue_is_full(const struct request_sock_queue *queue)
{
return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
}
static inline __attribute__((always_inline)) void reqsk_queue_hash_req(struct request_sock_queue *queue,
u32 hash, struct request_sock *req,
unsigned long timeout)
{
struct listen_sock *lopt = queue->listen_opt;
req->expires = jiffies + timeout;
req->retrans = 0;
req->sk = ((void *)0);
req->dl_next = lopt->syn_table[hash];
_raw_write_lock(&queue->syn_wait_lock);
lopt->syn_table[hash] = req;
__raw_write_unlock(&queue->syn_wait_lock);
}
# 29 "include/net/inet_sock.h" 2
# 1 "include/net/netns/hash.h" 1
struct net;
static inline __attribute__((always_inline)) unsigned net_hash_mix(struct net *net)
{
# 18 "include/net/netns/hash.h"
return 0;
}
# 30 "include/net/inet_sock.h" 2
# 42 "include/net/inet_sock.h"
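/*
 * ip_options records any parsed IPv4 options for a socket or request
 * (source routing, record route, timestamps), and inet_sock extends
 * struct sock with the IPv4-specific state: bound and destination
 * addresses/ports, TTL/TOS, multicast settings, and the pending "cork"
 * data used by ip_append_data() when building packets piecewise.
 */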
struct ip_options {
__be32 faddr;
unsigned char optlen;
unsigned char srr;
unsigned char rr;
unsigned char ts;
unsigned char is_strictroute:1,
srr_is_hit:1,
is_changed:1,
rr_needaddr:1,
ts_needtime:1,
ts_needaddr:1;
unsigned char router_alert;
unsigned char cipso;
unsigned char __pad2;
unsigned char __data[0];
};
struct inet_request_sock {
struct request_sock req;
u16 inet6_rsk_offset;
__be16 loc_port;
__be32 loc_addr;
__be32 rmt_addr;
__be16 rmt_port;
;
u16 snd_wscale : 4,
rcv_wscale : 4,
tstamp_ok : 1,
sack_ok : 1,
wscale_ok : 1,
ecn_ok : 1,
acked : 1,
no_srccheck: 1;
;
struct ip_options *opt;
};
static inline __attribute__((always_inline)) struct inet_request_sock *inet_rsk(const struct request_sock *sk)
{
return (struct inet_request_sock *)sk;
}
struct ip_mc_socklist;
struct ipv6_pinfo;
struct rtable;
# 112 "include/net/inet_sock.h"
struct inet_sock {
struct sock sk;
struct ipv6_pinfo *pinet6;
__be32 inet_daddr;
__be32 inet_rcv_saddr;
__be16 inet_dport;
__u16 inet_num;
__be32 inet_saddr;
__s16 uc_ttl;
__u16 cmsg_flags;
__be16 inet_sport;
__u16 inet_id;
struct ip_options *opt;
__u8 tos;
__u8 min_ttl;
__u8 mc_ttl;
__u8 pmtudisc;
__u8 recverr:1,
is_icsk:1,
freebind:1,
hdrincl:1,
mc_loop:1,
transparent:1,
mc_all:1,
nodefrag:1;
int mc_index;
__be32 mc_addr;
struct ip_mc_socklist *mc_list;
struct {
unsigned int flags;
unsigned int fragsize;
struct ip_options *opt;
struct dst_entry *dst;
int length;
__be32 addr;
struct flowi fl;
} cork;
};
static inline __attribute__((always_inline)) struct inet_sock *inet_sk(const struct sock *sk)
{
return (struct inet_sock *)sk;
}
static inline __attribute__((always_inline)) void __inet_sk_copy_descendant(struct sock *sk_to,
const struct sock *sk_from,
const int ancestor_size)
{
memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1,
sk_from->__sk_common.skc_prot->obj_size - ancestor_size);
}
# 179 "include/net/inet_sock.h"
extern int inet_sk_rebuild_header(struct sock *sk);
extern u32 inet_ehash_secret;
extern void build_ehash_secret(void);
static inline __attribute__((always_inline)) unsigned int inet_ehashfn(struct net *net,
const __be32 laddr, const __u16 lport,
const __be32 faddr, const __be16 fport)
{
return jhash_3words(( __u32) laddr,
( __u32) faddr,
((__u32) lport) << 16 | ( __u32)fport,
inet_ehash_secret + net_hash_mix(net));
}
static inline __attribute__((always_inline)) int inet_sk_ehashfn(const struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
const __be32 laddr = inet->inet_rcv_saddr;
const __u16 lport = inet->inet_num;
const __be32 faddr = inet->inet_daddr;
const __be16 fport = inet->inet_dport;
struct net *net = sock_net(sk);
return inet_ehashfn(net, laddr, lport, faddr, fport);
}
static inline __attribute__((always_inline)) struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops)
{
struct request_sock *req = reqsk_alloc(ops);
struct inet_request_sock *ireq = inet_rsk(req);
if (req != ((void *)0)) {
do { } while (0);
ireq->opt = ((void *)0);
}
return req;
}
static inline __attribute__((always_inline)) __u8 inet_sk_flowi_flags(const struct sock *sk)
{
return inet_sk(sk)->transparent ? 0x01 : 0;
}
# 24 "include/net/inet_connection_sock.h" 2
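/*
 * inet_connection_sock layers connection-oriented (TCP/DCCP) state on
 * top of inet_sock: the accept queue, bind bucket, retransmit and
 * delayed-ACK timers, path-MTU probing state (icsk_mtup), and the
 * congestion-control hooks (icsk_ca_ops, with icsk_ca_priv[] as private
 * scratch space).  inet_connection_sock_af_ops abstracts the
 * IPv4-versus-IPv6 specifics of building and transmitting headers.
 */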
struct inet_bind_bucket;
struct tcp_congestion_ops;
struct inet_connection_sock_af_ops {
int (*queue_xmit)(struct sk_buff *skb);
void (*send_check)(struct sock *sk, struct sk_buff *skb);
int (*rebuild_header)(struct sock *sk);
int (*conn_request)(struct sock *sk, struct sk_buff *skb);
struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst);
int (*remember_stamp)(struct sock *sk);
u16 net_header_len;
u16 sockaddr_len;
int (*setsockopt)(struct sock *sk, int level, int optname,
char *optval, unsigned int optlen);
int (*getsockopt)(struct sock *sk, int level, int optname,
char *optval, int *optlen);
# 61 "include/net/inet_connection_sock.h"
void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
int (*bind_conflict)(const struct sock *sk,
const struct inet_bind_bucket *tb);
};
# 86 "include/net/inet_connection_sock.h"
struct inet_connection_sock {
struct inet_sock icsk_inet;
struct request_sock_queue icsk_accept_queue;
struct inet_bind_bucket *icsk_bind_hash;
unsigned long icsk_timeout;
struct timer_list icsk_retransmit_timer;
struct timer_list icsk_delack_timer;
__u32 icsk_rto;
__u32 icsk_pmtu_cookie;
const struct tcp_congestion_ops *icsk_ca_ops;
const struct inet_connection_sock_af_ops *icsk_af_ops;
unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
__u8 icsk_ca_state;
__u8 icsk_retransmits;
__u8 icsk_pending;
__u8 icsk_backoff;
__u8 icsk_syn_retries;
__u8 icsk_probes_out;
__u16 icsk_ext_hdr_len;
struct {
__u8 pending;
__u8 quick;
__u8 pingpong;
__u8 blocked;
__u32 ato;
unsigned long timeout;
__u32 lrcvtime;
__u16 last_seg_size;
__u16 rcv_mss;
} icsk_ack;
struct {
int enabled;
int search_high;
int search_low;
int probe_size;
} icsk_mtup;
u32 icsk_ca_priv[16];
};
static inline __attribute__((always_inline)) struct inet_connection_sock *inet_csk(const struct sock *sk)
{
return (struct inet_connection_sock *)sk;
}
static inline __attribute__((always_inline)) void *inet_csk_ca(const struct sock *sk)
{
return (void *)inet_csk(sk)->icsk_ca_priv;
}
extern struct sock *inet_csk_clone(struct sock *sk,
const struct request_sock *req,
const gfp_t priority);
enum inet_csk_ack_state_t {
ICSK_ACK_SCHED = 1,
ICSK_ACK_TIMER = 2,
ICSK_ACK_PUSHED = 4,
ICSK_ACK_PUSHED2 = 8
};
extern void inet_csk_init_xmit_timers(struct sock *sk,
void (*retransmit_handler)(unsigned long),
void (*delack_handler)(unsigned long),
void (*keepalive_handler)(unsigned long));
extern void inet_csk_clear_xmit_timers(struct sock *sk);
static inline __attribute__((always_inline)) void inet_csk_schedule_ack(struct sock *sk)
{
inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
}
static inline __attribute__((always_inline)) int inet_csk_ack_scheduled(const struct sock *sk)
{
return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
}
static inline __attribute__((always_inline)) void inet_csk_delack_init(struct sock *sk)
{
memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
}
extern void inet_csk_delete_keepalive_timer(struct sock *sk);
extern void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
extern const char inet_csk_timer_bug_msg[];
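/*
 * The bare 'what' values in the two timer helpers below are the
 * pre-expansion ICSK_TIME_* constants: 1 = ICSK_TIME_RETRANS,
 * 2 = ICSK_TIME_DACK (delayed ACK), 3 = ICSK_TIME_PROBE0 (zero-window
 * probe).  Retransmit and probe share icsk_retransmit_timer, delayed
 * ACK uses icsk_delack_timer.  Illustrative call, roughly what TCP
 * does when it (re)arms the retransmission timer:
 *
 *     inet_csk_reset_xmit_timer(sk, 1, inet_csk(sk)->icsk_rto,
 *                               120 * HZ);
 */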
static inline __attribute__((always_inline)) void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
{
struct inet_connection_sock *icsk = inet_csk(sk);
if (what == 1 || what == 3) {
icsk->icsk_pending = 0;
} else if (what == 2) {
icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;
}
else {
({ if (0) printk("<7>" "%s", inet_csk_timer_bug_msg); 0; });
}
}
static inline __attribute__((always_inline)) void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
unsigned long when,
const unsigned long max_when)
{
struct inet_connection_sock *icsk = inet_csk(sk);
if (when > max_when) {
({ if (0) printk("<7>" "reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, ({ __label__ _l; _l: &&_l;})); 0; })
;
when = max_when;
}
if (what == 1 || what == 3) {
icsk->icsk_pending = what;
icsk->icsk_timeout = jiffies + when;
sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
} else if (what == 2) {
icsk->icsk_ack.pending |= ICSK_ACK_TIMER;
icsk->icsk_ack.timeout = jiffies + when;
sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
}
else {
({ if (0) printk("<7>" "%s", inet_csk_timer_bug_msg); 0; });
}
}
extern struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
extern struct request_sock *inet_csk_search_req(const struct sock *sk,
struct request_sock ***prevp,
const __be16 rport,
const __be32 raddr,
const __be32 laddr);
extern int inet_csk_bind_conflict(const struct sock *sk,
const struct inet_bind_bucket *tb);
extern int inet_csk_get_port(struct sock *sk, unsigned short snum);
extern struct dst_entry* inet_csk_route_req(struct sock *sk,
const struct request_sock *req);
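/*
 * The reqsk_queue helpers below keep the listener's SYN backlog in
 * icsk_accept_queue consistent: _added/_removed adjust the queue
 * counters and arm or cancel the listener's keepalive timer (which a
 * listening socket reuses as the SYN-ACK retransmit/prune timer),
 * _add hands a completed child to the accept queue, and _drop unlinks
 * a request, fixes the accounting and frees it.
 */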
static inline __attribute__((always_inline)) void inet_csk_reqsk_queue_add(struct sock *sk,
struct request_sock *req,
struct sock *child)
{
reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
}
extern void inet_csk_reqsk_queue_hash_add(struct sock *sk,
struct request_sock *req,
unsigned long timeout);
static inline __attribute__((always_inline)) void inet_csk_reqsk_queue_removed(struct sock *sk,
struct request_sock *req)
{
if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0)
inet_csk_delete_keepalive_timer(sk);
}
static inline __attribute__((always_inline)) void inet_csk_reqsk_queue_added(struct sock *sk,
const unsigned long timeout)
{
if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0)
inet_csk_reset_keepalive_timer(sk, timeout);
}
static inline __attribute__((always_inline)) int inet_csk_reqsk_queue_len(const struct sock *sk)
{
return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
}
static inline __attribute__((always_inline)) int inet_csk_reqsk_queue_young(const struct sock *sk)
{
return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
}
static inline __attribute__((always_inline)) int inet_csk_reqsk_queue_is_full(const struct sock *sk)
{
return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
}
static inline __attribute__((always_inline)) void inet_csk_reqsk_queue_unlink(struct sock *sk,
struct request_sock *req,
struct request_sock **prev)
{
reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req, prev);
}
static inline __attribute__((always_inline)) void inet_csk_reqsk_queue_drop(struct sock *sk,
struct request_sock *req,
struct request_sock **prev)
{
inet_csk_reqsk_queue_unlink(sk, req, prev);
inet_csk_reqsk_queue_removed(sk, req);
reqsk_free(req);
}
extern void inet_csk_reqsk_queue_prune(struct sock *parent,
const unsigned long interval,
const unsigned long timeout,
const unsigned long max_rto);
extern void inet_csk_destroy_sock(struct sock *sk);
static inline __attribute__((always_inline)) unsigned int inet_csk_listen_poll(const struct sock *sk)
{
return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?
(0x0001 | 0x0040) : 0;
}
extern int inet_csk_listen_start(struct sock *sk, const int nr_table_entries);
extern void inet_csk_listen_stop(struct sock *sk);
extern void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
extern int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
char *optval, int *optlen);
extern int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
char *optval, unsigned int optlen);
# 211 "include/linux/tcp.h" 2
# 1 "include/net/inet_timewait_sock.h" 1
# 28 "include/net/inet_timewait_sock.h"
# 1 "include/net/tcp_states.h" 1
# 16 "include/net/tcp_states.h"
enum {
TCP_ESTABLISHED = 1,
TCP_SYN_SENT,
TCP_SYN_RECV,
TCP_FIN_WAIT1,
TCP_FIN_WAIT2,
TCP_TIME_WAIT,
TCP_CLOSE,
TCP_CLOSE_WAIT,
TCP_LAST_ACK,
TCP_LISTEN,
TCP_CLOSING,
TCP_MAX_STATES
};
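/*
 * The TCPF_* values below mirror the TCP_* states as bit masks
 * (1 << state) so a socket's state can be tested against a whole set
 * at once, e.g. (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT).
 */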
enum {
TCPF_ESTABLISHED = (1 << 1),
TCPF_SYN_SENT = (1 << 2),
TCPF_SYN_RECV = (1 << 3),
TCPF_FIN_WAIT1 = (1 << 4),
TCPF_FIN_WAIT2 = (1 << 5),
TCPF_TIME_WAIT = (1 << 6),
TCPF_CLOSE = (1 << 7),
TCPF_CLOSE_WAIT = (1 << 8),
TCPF_LAST_ACK = (1 << 9),
TCPF_LISTEN = (1 << 10),
TCPF_CLOSING = (1 << 11)
};
# 29 "include/net/inet_timewait_sock.h" 2
# 1 "include/net/timewait_sock.h" 1
# 17 "include/net/timewait_sock.h"
struct timewait_sock_ops {
struct kmem_cache *twsk_slab;
char *twsk_slab_name;
unsigned int twsk_obj_size;
int (*twsk_unique)(struct sock *sk,
struct sock *sktw, void *twp);
void (*twsk_destructor)(struct sock *sk);
};
static inline __attribute__((always_inline)) int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
if (sk->__sk_common.skc_prot->twsk_prot->twsk_unique != ((void *)0))
return sk->__sk_common.skc_prot->twsk_prot->twsk_unique(sk, sktw, twp);
return 0;
}
static inline __attribute__((always_inline)) void twsk_destructor(struct sock *sk)
{
__BUG_ON((unsigned long)(sk == ((void *)0)));
__BUG_ON((unsigned long)(sk->__sk_common.skc_prot == ((void *)0)));
__BUG_ON((unsigned long)(sk->__sk_common.skc_prot->twsk_prot == ((void *)0)));
if (sk->__sk_common.skc_prot->twsk_prot->twsk_destructor != ((void *)0))
sk->__sk_common.skc_prot->twsk_prot->twsk_destructor(sk);
}
# 30 "include/net/inet_timewait_sock.h" 2
struct inet_hashinfo;
# 67 "include/net/inet_timewait_sock.h"
struct inet_timewait_death_row {
int twcal_hand;
unsigned long twcal_jiffie;
struct timer_list twcal_timer;
struct hlist_head twcal_row[(1 << 5)];
spinlock_t death_lock;
int tw_count;
int period;
u32 thread_slots;
struct work_struct twkill_work;
struct timer_list tw_timer;
int slot;
struct hlist_head cells[8];
struct inet_hashinfo *hashinfo;
int sysctl_tw_recycle;
int sysctl_max_tw_buckets;
};
extern void inet_twdr_hangman(unsigned long data);
extern void inet_twdr_twkill_work(struct work_struct *work);
extern void inet_twdr_twcal_tick(unsigned long data);
struct inet_bind_bucket;
struct inet_timewait_sock {
struct sock_common __tw_common;
# 120 "include/net/inet_timewait_sock.h"
int tw_timeout;
volatile unsigned char tw_substate;
unsigned char tw_rcv_wscale;
__be16 tw_sport;
__be32 tw_daddr __attribute__((aligned(4)));
__be32 tw_rcv_saddr;
__be16 tw_dport;
__u16 tw_num;
unsigned int tw_ipv6only : 1,
tw_transparent : 1,
tw_pad : 14,
tw_ipv6_offset : 16;
unsigned long tw_ttd;
struct inet_bind_bucket *tw_tb;
struct hlist_node tw_death_node;
};
static inline __attribute__((always_inline)) void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
struct hlist_nulls_head *list)
{
hlist_nulls_add_head_rcu(&tw->__tw_common.skc_nulls_node, list);
}
static inline __attribute__((always_inline)) void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
struct hlist_head *list)
{
hlist_add_head(&tw->__tw_common.skc_bind_node, list);
}
static inline __attribute__((always_inline)) int inet_twsk_dead_hashed(const struct inet_timewait_sock *tw)
{
return !hlist_unhashed(&tw->tw_death_node);
}
static inline __attribute__((always_inline)) void inet_twsk_dead_node_init(struct inet_timewait_sock *tw)
{
tw->tw_death_node.pprev = ((void *)0);
}
static inline __attribute__((always_inline)) void __inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
{
__hlist_del(&tw->tw_death_node);
inet_twsk_dead_node_init(tw);
}
static inline __attribute__((always_inline)) int inet_twsk_del_dead_node(struct inet_timewait_sock *tw)
{
if (inet_twsk_dead_hashed(tw)) {
__inet_twsk_del_dead_node(tw);
return 1;
}
return 0;
}
# 189 "include/net/inet_timewait_sock.h"
static inline __attribute__((always_inline)) struct inet_timewait_sock *inet_twsk(const struct sock *sk)
{
return (struct inet_timewait_sock *)sk;
}
static inline __attribute__((always_inline)) __be32 inet_rcv_saddr(const struct sock *sk)
{
return __builtin_expect(!!(sk->__sk_common.skc_state != TCP_TIME_WAIT), 1) ?
inet_sk(sk)->inet_rcv_saddr : inet_twsk(sk)->tw_rcv_saddr;
}
extern void inet_twsk_put(struct inet_timewait_sock *tw);
extern int inet_twsk_unhash(struct inet_timewait_sock *tw);
extern int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
struct inet_hashinfo *hashinfo);
extern struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
const int state);
extern void __inet_twsk_hashdance(struct inet_timewait_sock *tw,
struct sock *sk,
struct inet_hashinfo *hashinfo);
extern void inet_twsk_schedule(struct inet_timewait_sock *tw,
struct inet_timewait_death_row *twdr,
const int timeo, const int timewait_len);
extern void inet_twsk_deschedule(struct inet_timewait_sock *tw,
struct inet_timewait_death_row *twdr);
extern void inet_twsk_purge(struct inet_hashinfo *hashinfo,
struct inet_timewait_death_row *twdr, int family);
static inline __attribute__((always_inline))
struct net *twsk_net(const struct inet_timewait_sock *twsk)
{
return &init_net;
}
static inline __attribute__((always_inline))
void twsk_net_set(struct inet_timewait_sock *twsk, struct net *net)
{
}
# 212 "include/linux/tcp.h" 2
static inline __attribute__((always_inline)) struct tcphdr *tcp_hdr(const struct sk_buff *skb)
{
return (struct tcphdr *)skb_transport_header(skb);
}
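/*
 * tcp_hdr()->doff counts 32-bit words, so the header length is doff * 4
 * (20..60 bytes) and the options occupy (doff - 5) * 4 bytes.  For
 * example, a header with doff == 8 is 32 bytes long and carries 12
 * bytes of options.
 */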
static inline __attribute__((always_inline)) unsigned int tcp_hdrlen(const struct sk_buff *skb)
{
return tcp_hdr(skb)->doff * 4;
}
static inline __attribute__((always_inline)) unsigned int tcp_optlen(const struct sk_buff *skb)
{
return (tcp_hdr(skb)->doff - 5) * 4;
}
struct tcp_sack_block_wire {
__be32 start_seq;
__be32 end_seq;
};
struct tcp_sack_block {
u32 start_seq;
u32 end_seq;
};
struct tcp_options_received {
long ts_recent_stamp;
u32 ts_recent;
u32 rcv_tsval;
u32 rcv_tsecr;
u16 saw_tstamp : 1,
tstamp_ok : 1,
dsack : 1,
wscale_ok : 1,
sack_ok : 4,
snd_wscale : 4,
rcv_wscale : 4;
u8 cookie_plus:6,
cookie_out_never:1,
cookie_in_always:1;
u8 num_sacks;
u16 user_mss;
u16 mss_clamp;
};
static inline __attribute__((always_inline)) void tcp_clear_options(struct tcp_options_received *rx_opt)
{
rx_opt->tstamp_ok = rx_opt->sack_ok = 0;
rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
rx_opt->cookie_plus = 0;
}
struct tcp_cookie_values;
struct tcp_request_sock_ops;
struct tcp_request_sock {
struct inet_request_sock req;
const struct tcp_request_sock_ops *af_specific;
u32 rcv_isn;
u32 snt_isn;
};
static inline __attribute__((always_inline)) struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
{
return (struct tcp_request_sock *)req;
}
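/*
 * struct tcp_sock begins with inet_connection_sock, which begins with
 * inet_sock, which begins with struct sock, so tcp_sk(), inet_csk()
 * and inet_sk() are all casts of the same object viewed at different
 * layers of the protocol stack.
 */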
struct tcp_sock {
struct inet_connection_sock inet_conn;
u16 tcp_header_len;
u16 xmit_size_goal_segs;
__be32 pred_flags;
u32 rcv_nxt;
u32 copied_seq;
u32 rcv_wup;
u32 snd_nxt;
u32 snd_una;
u32 snd_sml;
u32 rcv_tstamp;
u32 lsndtime;
struct {
struct sk_buff_head prequeue;
struct task_struct *task;
struct iovec *iov;
int memory;
int len;
} ucopy;
u32 snd_wl1;
u32 snd_wnd;
u32 max_window;
u32 mss_cache;
u32 window_clamp;
u32 rcv_ssthresh;
u32 frto_highmark;
u16 advmss;
u8 frto_counter;
u8 nonagle : 4,
thin_lto : 1,
thin_dupack : 1,
unused : 2;
u32 srtt;
u32 mdev;
u32 mdev_max;
u32 rttvar;
u32 rtt_seq;
u32 packets_out;
u32 retrans_out;
u16 urg_data;
u8 ecn_flags;
u8 reordering;
u32 snd_up;
u8 keepalive_probes;
struct tcp_options_received rx_opt;
u32 snd_ssthresh;
u32 snd_cwnd;
u32 snd_cwnd_cnt;
u32 snd_cwnd_clamp;
u32 snd_cwnd_used;
u32 snd_cwnd_stamp;
u32 rcv_wnd;
u32 write_seq;
u32 pushed_seq;
u32 lost_out;
u32 sacked_out;
u32 fackets_out;
u32 tso_deferred;
u32 bytes_acked;
struct sk_buff* lost_skb_hint;
struct sk_buff *scoreboard_skb_hint;
struct sk_buff *retransmit_skb_hint;
struct sk_buff_head out_of_order_queue;
struct tcp_sack_block duplicate_sack[1];
struct tcp_sack_block selective_acks[4];
struct tcp_sack_block recv_sack_cache[4];
struct sk_buff *highest_sack;
int lost_cnt_hint;
u32 retransmit_high;
u32 lost_retrans_low;
u32 prior_ssthresh;
u32 high_seq;
u32 retrans_stamp;
u32 undo_marker;
int undo_retrans;
u32 total_retrans;
u32 urg_seq;
unsigned int keepalive_time;
unsigned int keepalive_intvl;
int linger2;
struct {
u32 rtt;
u32 seq;
u32 time;
} rcv_rtt_est;
struct {
int space;
u32 seq;
u32 time;
} rcvq_space;
struct {
u32 probe_seq_start;
u32 probe_seq_end;
} mtu_probe;
const struct tcp_sock_af_ops *af_specific;
struct tcp_md5sig_info *md5sig_info;
struct tcp_cookie_values *cookie_values;
};
static inline __attribute__((always_inline)) struct tcp_sock *tcp_sk(const struct sock *sk)
{
return (struct tcp_sock *)sk;
}
struct tcp_timewait_sock {
struct inet_timewait_sock tw_sk;
u32 tw_rcv_nxt;
u32 tw_snd_nxt;
u32 tw_rcv_wnd;
u32 tw_ts_recent;
long tw_ts_recent_stamp;
u16 tw_md5_keylen;
u8 tw_md5_key[80];
struct tcp_cookie_values *tw_cookie_values;
};
static inline __attribute__((always_inline)) struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
{
return (struct tcp_timewait_sock *)sk;
}
# 52 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/linux/udp.h" 1
# 22 "include/linux/udp.h"
struct udphdr {
__be16 source;
__be16 dest;
__be16 len;
__sum16 check;
};
# 43 "include/linux/udp.h"
static inline __attribute__((always_inline)) struct udphdr *udp_hdr(const struct sk_buff *skb)
{
return (struct udphdr *)skb_transport_header(skb);
}
static inline __attribute__((always_inline)) int udp_hashfn(struct net *net, unsigned num, unsigned mask)
{
return (num + net_hash_mix(net)) & mask;
}
struct udp_sock {
struct inet_sock inet;
int pending;
unsigned int corkflag;
__u16 encap_type;
__u16 len;
__u16 pcslen;
__u16 pcrlen;
__u8 pcflag;
__u8 unused[3];
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
};
static inline __attribute__((always_inline)) struct udp_sock *udp_sk(const struct sock *sk)
{
return (struct udp_sock *)sk;
}
# 53 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/linux/icmp.h" 1
# 68 "include/linux/icmp.h"
struct icmphdr {
__u8 type;
__u8 code;
__sum16 checksum;
union {
struct {
__be16 id;
__be16 sequence;
} echo;
__be32 gateway;
struct {
__be16 __unused;
__be16 mtu;
} frag;
} un;
};
static inline __attribute__((always_inline)) struct icmphdr *icmp_hdr(const struct sk_buff *skb)
{
return (struct icmphdr *)skb_transport_header(skb);
}
# 100 "include/linux/icmp.h"
struct icmp_filter {
__u32 data;
};
# 54 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/linux/if_arp.h" 1
# 109 "include/linux/if_arp.h"
struct arpreq {
struct sockaddr arp_pa;
struct sockaddr arp_ha;
int arp_flags;
struct sockaddr arp_netmask;
char arp_dev[16];
};
struct arpreq_old {
struct sockaddr arp_pa;
struct sockaddr arp_ha;
int arp_flags;
struct sockaddr arp_netmask;
};
# 137 "include/linux/if_arp.h"
struct arphdr {
__be16 ar_hrd;
__be16 ar_pro;
unsigned char ar_hln;
unsigned char ar_pln;
__be16 ar_op;
# 154 "include/linux/if_arp.h"
};
static inline __attribute__((always_inline)) struct arphdr *arp_hdr(const struct sk_buff *skb)
{
return (struct arphdr *)skb_network_header(skb);
}
static inline __attribute__((always_inline)) int arp_hdr_len(struct net_device *dev)
{
return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2;
}
# 57 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/linux/if_vlan.h" 1
# 38 "include/linux/if_vlan.h"
struct vlan_hdr {
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
};
# 51 "include/linux/if_vlan.h"
struct vlan_ethhdr {
unsigned char h_dest[6];
unsigned char h_source[6];
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
};
static inline __attribute__((always_inline)) struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
{
return (struct vlan_ethhdr *)skb_mac_header(skb);
}
# 73 "include/linux/if_vlan.h"
extern void vlan_ioctl_set(int (*hook)(struct net *, void *));
# 83 "include/linux/if_vlan.h"
struct vlan_group {
struct net_device *real_dev;
unsigned int nr_vlans;
int killall;
struct hlist_node hlist;
struct net_device **vlan_devices_arrays[8];
struct rcu_head rcu;
};
static inline __attribute__((always_inline)) struct net_device *vlan_group_get_device(struct vlan_group *vg,
u16 vlan_id)
{
struct net_device **array;
array = vg->vlan_devices_arrays[vlan_id / (4096/8)];
return array ? array[vlan_id % (4096/8)] : ((void *)0);
}
static inline __attribute__((always_inline)) void vlan_group_set_device(struct vlan_group *vg,
u16 vlan_id,
struct net_device *dev)
{
struct net_device **array;
if (!vg)
return;
array = vg->vlan_devices_arrays[vlan_id / (4096/8)];
array[vlan_id % (4096/8)] = dev;
}
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
u16 vlan_tci, int polling);
extern int vlan_hwaccel_do_receive(struct sk_buff *skb);
extern gro_result_t
vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
unsigned int vlan_tci, struct sk_buff *skb);
extern gro_result_t
vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
unsigned int vlan_tci);
# 176 "include/linux/if_vlan.h"
static inline __attribute__((always_inline)) int vlan_hwaccel_rx(struct sk_buff *skb,
struct vlan_group *grp,
u16 vlan_tci)
{
return __vlan_hwaccel_rx(skb, grp, vlan_tci, 0);
}
static inline __attribute__((always_inline)) int vlan_hwaccel_receive_skb(struct sk_buff *skb,
struct vlan_group *grp,
u16 vlan_tci)
{
return __vlan_hwaccel_rx(skb, grp, vlan_tci, 1);
}
# 207 "include/linux/if_vlan.h"
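/*
 * __vlan_put_tag() inserts an 802.1Q header into the packet itself: it
 * makes 4 bytes of headroom, pushes the skb, moves the 12-byte
 * destination + source MAC addresses down and writes TPID 0x8100 and
 * the given TCI.  On failure the skb is freed and NULL is returned, so
 * callers must always use the return value, e.g.
 *
 *     skb = __vlan_put_tag(skb, tci);
 *     if (!skb)
 *             return NETDEV_TX_OK;
 */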
static inline __attribute__((always_inline)) struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
{
struct vlan_ethhdr *veth;
if (skb_cow_head(skb, 4) < 0) {
kfree_skb(skb);
return ((void *)0);
}
veth = (struct vlan_ethhdr *)skb_push(skb, 4);
memmove(skb->data, skb->data + 4, 2 * 6);
skb->mac_header -= 4;
veth->h_vlan_proto = (( __be16)(__u16)(0x8100));
veth->h_vlan_TCI = (( __be16)(__u16)(vlan_tci));
skb->protocol = (( __be16)(__u16)(0x8100));
return skb;
}
extern int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type* ptype, struct net_device *orig_dev);
static inline __attribute__((always_inline)) struct sk_buff *__pon_vlan_put_tag(struct sk_buff *skb, u16 tpid,unsigned short vlan_tci)
{
struct vlan_ethhdr *veth;
if (skb_cow_head(skb, 4) < 0) {
kfree_skb(skb);
return ((void *)0);
}
veth = (struct vlan_ethhdr *)skb_push(skb, 4);
memmove(skb->data, skb->data + 4, 2 * 6);
skb->mac_header -= 4;
veth->h_vlan_proto = (( __be16)(__u16)(tpid));
veth->h_vlan_TCI = (( __be16)(__u16)(vlan_tci));
skb->protocol = (( __be16)(__u16)(tpid));
return skb;
}
# 273 "include/linux/if_vlan.h"
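/*
 * The hardware-accelerated variant below only records the tag in
 * skb->vlan_tci for the NIC to insert on transmit; 0x1000 is the
 * pre-expansion VLAN_TAG_PRESENT marker in this kernel.  vlan_put_tag()
 * further down chooses between the two based on feature bit 128
 * (NETIF_F_HW_VLAN_TX).
 */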
static inline __attribute__((always_inline)) struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb,
u16 vlan_tci)
{
skb->vlan_tci = 0x1000 | vlan_tci;
return skb;
}
# 290 "include/linux/if_vlan.h"
static inline __attribute__((always_inline)) struct sk_buff *vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
{
if (skb->dev->features & 128) {
return __vlan_hwaccel_put_tag(skb, vlan_tci);
} else {
return __vlan_put_tag(skb, vlan_tci);
}
}
# 306 "include/linux/if_vlan.h"
static inline __attribute__((always_inline)) int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
if (veth->h_vlan_proto != (( __be16)(__u16)(0x8100))) {
return -22;
}
*vlan_tci = (( __u16)(__be16)(veth->h_vlan_TCI));
return 0;
}
# 325 "include/linux/if_vlan.h"
static inline __attribute__((always_inline)) int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
u16 *vlan_tci)
{
if (((skb)->vlan_tci & 0x1000)) {
*vlan_tci = ((skb)->vlan_tci & ~0x1000);
return 0;
} else {
*vlan_tci = 0;
return -22;
}
}
# 346 "include/linux/if_vlan.h"
static inline __attribute__((always_inline)) int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
if (skb->dev->features & 128) {
return __vlan_hwaccel_get_tag(skb, vlan_tci);
} else {
return __vlan_get_tag(skb, vlan_tci);
}
}
enum vlan_ioctl_cmds {
ADD_VLAN_CMD,
DEL_VLAN_CMD,
SET_VLAN_INGRESS_PRIORITY_CMD,
SET_VLAN_EGRESS_PRIORITY_CMD,
GET_VLAN_INGRESS_PRIORITY_CMD,
GET_VLAN_EGRESS_PRIORITY_CMD,
SET_VLAN_NAME_TYPE_CMD,
SET_VLAN_FLAG_CMD,
GET_VLAN_REALDEV_NAME_CMD,
GET_VLAN_VID_CMD
};
enum vlan_flags {
VLAN_FLAG_REORDER_HDR = 0x1,
VLAN_FLAG_GVRP = 0x2,
VLAN_FLAG_LOOSE_BINDING = 0x4,
};
enum vlan_name_types {
VLAN_NAME_TYPE_PLUS_VID,
VLAN_NAME_TYPE_RAW_PLUS_VID,
VLAN_NAME_TYPE_PLUS_VID_NO_PAD,
VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD,
VLAN_NAME_TYPE_HIGHEST
};
struct vlan_ioctl_args {
int cmd;
char device1[24];
union {
char device2[24];
int VID;
unsigned int skb_priority;
unsigned int name_type;
unsigned int bind_type;
unsigned int flag;
} u;
short vlan_qos;
};
# 59 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/linux/if_pppox.h" 1
# 27 "include/linux/if_pppox.h"
# 1 "include/linux/ppp_channel.h" 1
# 27 "include/linux/ppp_channel.h"
struct ppp_channel;
struct ppp_channel_ops {
int (*start_xmit)(struct ppp_channel *, struct sk_buff *);
int (*ioctl)(struct ppp_channel *, unsigned int, unsigned long);
};
struct ppp_channel {
void *private;
const struct ppp_channel_ops *ops;
int mtu;
int hdrlen;
void *ppp;
int speed;
int latency;
};
extern void ppp_output_wakeup(struct ppp_channel *);
extern void ppp_input(struct ppp_channel *, struct sk_buff *);
extern void ppp_input_error(struct ppp_channel *, int code);
extern int ppp_register_net_channel(struct net *, struct ppp_channel *);
extern int ppp_register_channel(struct ppp_channel *);
extern void ppp_unregister_channel(struct ppp_channel *);
extern int ppp_channel_index(struct ppp_channel *);
extern int ppp_unit_number(struct ppp_channel *);
extern char *ppp_dev_name(struct ppp_channel *);
# 28 "include/linux/if_pppox.h" 2
# 1 "include/linux/if_pppol2tp.h" 1
# 27 "include/linux/if_pppol2tp.h"
struct pppol2tp_addr {
__kernel_pid_t pid;
int fd;
struct sockaddr_in addr;
__u16 s_tunnel, s_session;
__u16 d_tunnel, d_session;
};
struct pppol2tpv3_addr {
pid_t pid;
int fd;
struct sockaddr_in addr;
__u32 s_tunnel, s_session;
__u32 d_tunnel, d_session;
};
# 62 "include/linux/if_pppol2tp.h"
enum {
PPPOL2TP_SO_DEBUG = 1,
PPPOL2TP_SO_RECVSEQ = 2,
PPPOL2TP_SO_SENDSEQ = 3,
PPPOL2TP_SO_LNSMODE = 4,
PPPOL2TP_SO_REORDERTO = 5,
};
enum {
PPPOL2TP_MSG_DEBUG = (1 << 0),
PPPOL2TP_MSG_CONTROL = (1 << 1),
PPPOL2TP_MSG_SEQ = (1 << 2),
PPPOL2TP_MSG_DATA = (1 << 3),
};
# 30 "include/linux/if_pppox.h" 2
# 42 "include/linux/if_pppox.h"
typedef __be16 sid_t;
struct pppoe_addr{
sid_t sid;
unsigned char remote[6];
char dev[16];
};
# 56 "include/linux/if_pppox.h"
struct sockaddr_pppox {
sa_family_t sa_family;
unsigned int sa_protocol;
union{
struct pppoe_addr pppoe;
}sa_addr;
} __attribute__((packed));
struct sockaddr_pppol2tp {
sa_family_t sa_family;
unsigned int sa_protocol;
struct pppol2tp_addr pppol2tp;
} __attribute__((packed));
struct sockaddr_pppol2tpv3 {
sa_family_t sa_family;
unsigned int sa_protocol;
struct pppol2tpv3_addr pppol2tp;
} __attribute__((packed));
# 100 "include/linux/if_pppox.h"
struct pppoe_tag {
__be16 tag_type;
__be16 tag_len;
char tag_data[0];
} __attribute__ ((packed));
# 118 "include/linux/if_pppox.h"
struct pppoe_hdr {
__u8 type : 4;
__u8 ver : 4;
__u8 code;
__be16 sid;
__be16 length;
struct pppoe_tag tag[0];
} __attribute__((packed));
static inline __attribute__((always_inline)) struct pppoe_hdr *pppoe_hdr(const struct sk_buff *skb)
{
return (struct pppoe_hdr *)skb_network_header(skb);
}
struct pppoe_opt {
struct net_device *dev;
int ifindex;
struct pppoe_addr pa;
struct sockaddr_pppox relay;
};
struct pppox_sock {
struct sock sk;
struct ppp_channel chan;
struct pppox_sock *next;
union {
struct pppoe_opt pppoe;
} proto;
__be16 num;
};
static inline __attribute__((always_inline)) struct pppox_sock *pppox_sk(struct sock *sk)
{
return (struct pppox_sock *)sk;
}
static inline __attribute__((always_inline)) struct sock *sk_pppox(struct pppox_sock *po)
{
return (struct sock *)po;
}
struct module;
struct pppox_proto {
int (*create)(struct net *net, struct socket *sock);
int (*ioctl)(struct socket *sock, unsigned int cmd,
unsigned long arg);
struct module *owner;
};
extern int register_pppox_proto(int proto_num, struct pppox_proto *pp);
extern void unregister_pppox_proto(int proto_num);
extern void pppox_unbind_sock(struct sock *sk);
extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
enum {
PPPOX_NONE = 0,
PPPOX_CONNECTED = 1,
PPPOX_BOUND = 2,
PPPOX_RELAY = 4,
PPPOX_ZOMBIE = 8,
PPPOX_DEAD = 16
};
# 60 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/linux/ppp_defs.h" 1
# 100 "include/linux/ppp_defs.h"
# 1 "include/linux/crc-ccitt.h" 1
extern u16 const crc_ccitt_table[256];
extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len);
static inline __attribute__((always_inline)) u16 crc_ccitt_byte(u16 crc, const u8 c)
{
return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
}
# 101 "include/linux/ppp_defs.h" 2
typedef __u32 ext_accm[8];
enum NPmode {
NPMODE_PASS,
NPMODE_DROP,
NPMODE_ERROR,
NPMODE_QUEUE
};
struct pppstat {
__u32 ppp_discards;
__u32 ppp_ibytes;
__u32 ppp_ioctects;
__u32 ppp_ipackets;
__u32 ppp_ierrors;
__u32 ppp_ilqrs;
__u32 ppp_obytes;
__u32 ppp_ooctects;
__u32 ppp_opackets;
__u32 ppp_oerrors;
__u32 ppp_olqrs;
};
struct vjstat {
__u32 vjs_packets;
__u32 vjs_compressed;
__u32 vjs_searches;
__u32 vjs_misses;
__u32 vjs_uncompressedin;
__u32 vjs_compressedin;
__u32 vjs_errorin;
__u32 vjs_tossed;
};
struct compstat {
__u32 unc_bytes;
__u32 unc_packets;
__u32 comp_bytes;
__u32 comp_packets;
__u32 inc_bytes;
__u32 inc_packets;
__u32 in_count;
__u32 bytes_out;
double ratio;
};
struct ppp_stats {
struct pppstat p;
struct vjstat vj;
};
struct ppp_comp_stats {
struct compstat c;
struct compstat d;
};
struct ppp_idle {
__kernel_time_t xmit_idle;
__kernel_time_t recv_idle;
};
# 61 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/net/ip.h" 1
# 26 "include/net/ip.h"
# 1 "include/linux/ip.h" 1
# 85 "include/linux/ip.h"
struct iphdr {
__u8 version:4,
ihl:4;
__u8 tos;
__be16 tot_len;
__be16 id;
__be16 frag_off;
__u8 ttl;
__u8 protocol;
__sum16 check;
__be32 saddr;
__be32 daddr;
};
static inline __attribute__((always_inline)) struct iphdr *ip_hdr(const struct sk_buff *skb)
{
return (struct iphdr *)skb_network_header(skb);
}
static inline __attribute__((always_inline)) struct iphdr *ipip_hdr(const struct sk_buff *skb)
{
return (struct iphdr *)skb_transport_header(skb);
}
struct ip_auth_hdr {
__u8 nexthdr;
__u8 hdrlen;
__be16 reserved;
__be32 spi;
__be32 seq_no;
__u8 auth_data[0];
};
struct ip_esp_hdr {
__be32 spi;
__be32 seq_no;
__u8 enc_data[0];
};
struct ip_comp_hdr {
__u8 nexthdr;
__u8 flags;
__be16 cpi;
};
struct ip_beet_phdr {
__u8 nexthdr;
__u8 hdrlen;
__u8 padlen;
__u8 reserved;
};
# 27 "include/net/ip.h" 2
struct sock;
struct inet_skb_parm {
struct ip_options opt;
unsigned char flags;
};
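/*
 * Like TCP's doff, iphdr->ihl counts 32-bit words, so ip_hdrlen() is
 * ihl * 4: 20 bytes for a header without options, at most 60 bytes.
 */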
static inline __attribute__((always_inline)) unsigned int ip_hdrlen(const struct sk_buff *skb)
{
return ip_hdr(skb)->ihl * 4;
}
struct ipcm_cookie {
__be32 addr;
int oif;
struct ip_options *opt;
union skb_shared_tx shtx;
};
struct ip_ra_chain {
struct ip_ra_chain *next;
struct sock *sk;
union {
void (*destructor)(struct sock *);
struct sock *saved_sk;
};
struct rcu_head rcu;
};
extern struct ip_ra_chain *ip_ra_chain;
# 81 "include/net/ip.h"
struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;
extern int igmp_mc_proc_init(void);
extern int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
__be32 saddr, __be32 daddr,
struct ip_options *opt);
extern int ip_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
extern int ip_local_deliver(struct sk_buff *skb);
extern int ip_mr_input(struct sk_buff *skb);
extern int ip_output(struct sk_buff *skb);
extern int ip_mc_output(struct sk_buff *skb);
extern int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
extern int ip_do_nat(struct sk_buff *skb);
extern void ip_send_check(struct iphdr *ip);
extern int __ip_local_out(struct sk_buff *skb);
extern int ip_local_out(struct sk_buff *skb);
extern int ip_queue_xmit(struct sk_buff *skb);
extern void ip_init(void);
extern int ip_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int len, int protolen,
struct ipcm_cookie *ipc,
struct rtable **rt,
unsigned int flags);
extern int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb);
extern ssize_t ip_append_page(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
extern int ip_push_pending_frames(struct sock *sk);
extern void ip_flush_pending_frames(struct sock *sk);
extern int ip4_datagram_connect(struct sock *sk,
struct sockaddr *uaddr, int addr_len);
# 133 "include/net/ip.h"
static inline __attribute__((always_inline)) void ip_tr_mc_map(__be32 addr, char *buf)
{
buf[0]=0xC0;
buf[1]=0x00;
buf[2]=0x00;
buf[3]=0x04;
buf[4]=0x00;
buf[5]=0x00;
}
struct ip_reply_arg {
struct kvec iov[1];
int flags;
__wsum csum;
int csumoffset;
int bound_dev_if;
};
static inline __attribute__((always_inline)) __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
return (arg->flags & 1) ? 0x01 : 0;
}
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
unsigned int len);
struct ipv4_config {
int log_martians;
int no_pmtu_disc;
};
extern struct ipv4_config ipv4_config;
# 180 "include/net/ip.h"
extern unsigned long snmp_fold_field(void *mib[], int offt);
extern u64 snmp_fold_field64(void *mib[], int offt, size_t sync_off);
extern int snmp_mib_init(void *ptr[2], size_t mibsize, size_t align);
extern void snmp_mib_free(void *ptr[2]);
extern struct local_ports {
seqlock_t lock;
int range[2];
} sysctl_local_ports;
extern void inet_get_local_port_range(int *low, int *high);
extern unsigned long *sysctl_local_reserved_ports;
static inline __attribute__((always_inline)) int inet_is_reserved_local_port(int port)
{
return test_bit(port, sysctl_local_reserved_ports);
}
extern int sysctl_ip_default_ttl;
extern int sysctl_ip_nonlocal_bind;
extern struct ctl_path net_core_path[];
extern struct ctl_path net_ipv4_ctl_path[];
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;
extern int inet_peer_gc_mintime;
extern int inet_peer_gc_maxtime;
extern int sysctl_ip_dynaddr;
extern void ipfrag_init(void);
extern void ip_static_sysctl_init(void);
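/*
 * ip_decrease_ttl() updates the header checksum incrementally (RFC
 * 1141 style): decrementing the TTL lowers the 16-bit word holding
 * TTL/protocol by 0x0100, so htons(0x0100) is added to the stored
 * checksum with an end-around carry before the TTL itself is
 * decremented.
 */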
static inline __attribute__((always_inline))
int ip_decrease_ttl(struct iphdr *iph)
{
u32 check = ( u32)iph->check;
check += ( u32)(( __be16)(__u16)(0x0100));
iph->check = ( __sum16)(check + (check>=0xFFFF));
return --iph->ttl;
}
static inline __attribute__((always_inline))
int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
{
return (inet_sk(sk)->pmtudisc == 2 ||
(inet_sk(sk)->pmtudisc == 1 &&
!(dst_metric_locked(dst, RTAX_MTU))));
}
extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
static inline __attribute__((always_inline)) void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk)
{
if (iph->frag_off & (( __be16)(__u16)(0x4000))) {
iph->id = (sk && inet_sk(sk)->inet_daddr) ?
(( __be16)(__u16)(inet_sk(sk)->inet_id++)) : 0;
} else
__ip_select_ident(iph, dst, 0);
}
static inline __attribute__((always_inline)) void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst, struct sock *sk, int more)
{
if (iph->frag_off & (( __be16)(__u16)(0x4000))) {
if (sk && inet_sk(sk)->inet_daddr) {
iph->id = (( __be16)(__u16)(inet_sk(sk)->inet_id));
inet_sk(sk)->inet_id += 1 + more;
} else
iph->id = 0;
} else
__ip_select_ident(iph, dst, more);
}
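/*
 * ip_eth_mc_map() maps an IPv4 multicast address onto the 01:00:5e
 * Ethernet multicast prefix using the low 23 bits of the address,
 * e.g. 224.1.2.3 becomes 01:00:5e:01:02:03.
 */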
static inline __attribute__((always_inline)) void ip_eth_mc_map(__be32 naddr, char *buf)
{
__u32 addr=(( __u32)(__be32)(naddr));
buf[0]=0x01;
buf[1]=0x00;
buf[2]=0x5e;
buf[5]=addr&0xFF;
addr>>=8;
buf[4]=addr&0xFF;
addr>>=8;
buf[3]=addr&0x7F;
}
static inline __attribute__((always_inline)) void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
__u32 addr;
unsigned char scope = broadcast[5] & 0xF;
buf[0] = 0;
buf[1] = 0xff;
buf[2] = 0xff;
buf[3] = 0xff;
addr = (( __u32)(__be32)(naddr));
buf[4] = 0xff;
buf[5] = 0x10 | scope;
buf[6] = 0x40;
buf[7] = 0x1b;
buf[8] = broadcast[8];
buf[9] = broadcast[9];
buf[10] = 0;
buf[11] = 0;
buf[12] = 0;
buf[13] = 0;
buf[14] = 0;
buf[15] = 0;
buf[19] = addr & 0xff;
addr >>= 8;
buf[18] = addr & 0xff;
addr >>= 8;
buf[17] = addr & 0xff;
addr >>= 8;
buf[16] = addr & 0x0f;
}
# 1 "include/linux/ipv6.h" 1
# 19 "include/linux/ipv6.h"
struct in6_pktinfo {
struct in6_addr ipi6_addr;
int ipi6_ifindex;
};
struct ip6_mtuinfo {
struct sockaddr_in6 ip6m_addr;
__u32 ip6m_mtu;
};
struct in6_ifreq {
struct in6_addr ifr6_addr;
__u32 ifr6_prefixlen;
int ifr6_ifindex;
};
# 42 "include/linux/ipv6.h"
struct ipv6_rt_hdr {
__u8 nexthdr;
__u8 hdrlen;
__u8 type;
__u8 segments_left;
};
struct ipv6_opt_hdr {
__u8 nexthdr;
__u8 hdrlen;
} __attribute__((packed));
# 74 "include/linux/ipv6.h"
struct rt0_hdr {
struct ipv6_rt_hdr rt_hdr;
__u32 reserved;
struct in6_addr addr[0];
};
struct rt2_hdr {
struct ipv6_rt_hdr rt_hdr;
__u32 reserved;
struct in6_addr addr;
};
struct ipv6_destopt_hao {
__u8 type;
__u8 length;
struct in6_addr addr;
} __attribute__((packed));
# 111 "include/linux/ipv6.h"
struct ipv6hdr {
__u8 version:4,
priority:4;
__u8 flow_lbl[3];
__be16 payload_len;
__u8 nexthdr;
__u8 hop_limit;
struct in6_addr saddr;
struct in6_addr daddr;
};
struct ipv6_devconf {
__s32 forwarding;
__s32 hop_limit;
__s32 mtu6;
__s32 accept_ra;
__s32 accept_redirects;
__s32 autoconf;
__s32 dad_transmits;
__s32 rtr_solicits;
__s32 rtr_solicit_interval;
__s32 rtr_solicit_delay;
__s32 force_mld_version;
__s32 max_addresses;
__s32 accept_ra_defrtr;
__s32 accept_ra_pinfo;
__s32 proxy_ndp;
__s32 accept_source_route;
__s32 disable_ipv6;
__s32 accept_dad;
__s32 force_tllao;
char slaac_addr[64];
__s32 child_prefix_orign;
char child_prefix[64];
char parent_pd_prefix[64];
char slaac_prefix[64];
void *sysctl;
};
# 198 "include/linux/ipv6.h"
struct ipv6_params {
__s32 disable_ipv6;
__s32 autoconf;
};
extern struct ipv6_params ipv6_defaults;
enum {
DEVCONF_FORWARDING = 0,
DEVCONF_HOPLIMIT,
DEVCONF_MTU6,
DEVCONF_ACCEPT_RA,
DEVCONF_ACCEPT_REDIRECTS,
DEVCONF_AUTOCONF,
DEVCONF_DAD_TRANSMITS,
DEVCONF_RTR_SOLICITS,
DEVCONF_RTR_SOLICIT_INTERVAL,
DEVCONF_RTR_SOLICIT_DELAY,
DEVCONF_USE_TEMPADDR,
DEVCONF_TEMP_VALID_LFT,
DEVCONF_TEMP_PREFERED_LFT,
DEVCONF_REGEN_MAX_RETRY,
DEVCONF_MAX_DESYNC_FACTOR,
DEVCONF_MAX_ADDRESSES,
DEVCONF_FORCE_MLD_VERSION,
DEVCONF_ACCEPT_RA_DEFRTR,
DEVCONF_ACCEPT_RA_PINFO,
DEVCONF_ACCEPT_RA_RTR_PREF,
DEVCONF_RTR_PROBE_INTERVAL,
DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN,
DEVCONF_PROXY_NDP,
DEVCONF_OPTIMISTIC_DAD,
DEVCONF_ACCEPT_SOURCE_ROUTE,
DEVCONF_MC_FORWARDING,
DEVCONF_DISABLE_IPV6,
DEVCONF_ACCEPT_DAD,
DEVCONF_FORCE_TLLAO,
DEVCONF_MAX
};
# 1 "include/linux/icmpv6.h" 1
struct icmp6hdr {
__u8 icmp6_type;
__u8 icmp6_code;
__sum16 icmp6_cksum;
union {
__be32 un_data32[1];
__be16 un_data16[2];
__u8 un_data8[4];
struct icmpv6_echo {
__be16 identifier;
__be16 sequence;
} u_echo;
struct icmpv6_nd_advt {
__u32 router:1,
solicited:1,
override:1,
reserved:29;
} u_nd_advt;
struct icmpv6_nd_ra {
__u8 hop_limit;
# 51 "include/linux/icmpv6.h"
__u8 managed:1,
other:1,
home_agent:1,
router_pref:2,
reserved:3;
__be16 rt_lifetime;
} u_nd_ra;
} icmp6_dataun;
# 79 "include/linux/icmpv6.h"
};
static inline __attribute__((always_inline)) struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
{
return (struct icmp6hdr *)skb_transport_header(skb);
}
# 155 "include/linux/icmpv6.h"
struct icmp6_filter {
__u32 data[8];
};
# 175 "include/linux/icmpv6.h"
extern void icmpv6_send(struct sk_buff *skb,
u8 type, u8 code,
__u32 info);
extern int icmpv6_init(void);
extern int icmpv6_err_convert(u8 type, u8 code,
int *err);
extern void icmpv6_cleanup(void);
extern void icmpv6_param_prob(struct sk_buff *skb,
u8 code, int pos);
struct flowi;
struct in6_addr;
extern void icmpv6_flow_init(struct sock *sk,
struct flowi *fl,
u8 type,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
int oif);
# 241 "include/linux/ipv6.h" 2
static inline __attribute__((always_inline)) struct ipv6hdr *ipv6_hdr(const struct sk_buff *skb)
{
return (struct ipv6hdr *)skb_network_header(skb);
}
static inline __attribute__((always_inline)) struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
{
return (struct ipv6hdr *)skb_transport_header(skb);
}
struct inet6_skb_parm {
int iif;
__u16 ra;
__u16 hop;
__u16 dst0;
__u16 srcrt;
__u16 dst1;
__u16 lastopt;
__u16 nhoff;
__u16 flags;
};
static inline __attribute__((always_inline)) int inet6_iif(const struct sk_buff *skb)
{
return ((struct inet6_skb_parm*)((skb)->cb))->iif;
}
struct inet6_request_sock {
struct in6_addr loc_addr;
struct in6_addr rmt_addr;
struct sk_buff *pktopts;
int iif;
};
struct tcp6_request_sock {
struct tcp_request_sock tcp6rsk_tcp;
struct inet6_request_sock tcp6rsk_inet6;
};
struct ipv6_mc_socklist;
struct ipv6_ac_socklist;
struct ipv6_fl_socklist;
# 312 "include/linux/ipv6.h"
struct ipv6_pinfo {
struct in6_addr saddr;
struct in6_addr rcv_saddr;
struct in6_addr daddr;
struct in6_pktinfo sticky_pktinfo;
struct in6_addr *daddr_cache;
__be32 flow_label;
__u32 frag_size;
__s16 hop_limit:9;
__u16 __unused_1:7;
__s16 mcast_hops:9;
__u16 __unused_2:6,
mc_loop:1;
int mcast_oif;
union {
struct {
__u16 srcrt:1,
osrcrt:1,
rxinfo:1,
rxoinfo:1,
rxhlim:1,
rxohlim:1,
hopopts:1,
ohopopts:1,
dstopts:1,
odstopts:1,
rxflow:1,
rxtclass:1,
rxpmtu:1;
} bits;
__u16 all;
} rxopt;
__u16 recverr:1,
sndflow:1,
pmtudisc:2,
ipv6only:1,
srcprefs:3,
dontfrag:1;
__u8 min_hopcount;
__u8 tclass;
__u8 padding;
__u32 dst_cookie;
struct ipv6_mc_socklist *ipv6_mc_list;
struct ipv6_ac_socklist *ipv6_ac_list;
struct ipv6_fl_socklist *ipv6_fl_list;
struct ipv6_txoptions *opt;
struct sk_buff *pktoptions;
struct sk_buff *rxpmtu;
struct {
struct ipv6_txoptions *opt;
u8 hop_limit;
u8 tclass;
} cork;
};
struct raw6_sock {
struct inet_sock inet;
__u32 checksum;
__u32 offset;
struct icmp6_filter filter;
__u32 ip6mr_table;
struct ipv6_pinfo inet6;
};
struct udp6_sock {
struct udp_sock udp;
struct ipv6_pinfo inet6;
};
struct tcp6_sock {
struct tcp_sock tcp;
struct ipv6_pinfo inet6;
};
extern int inet6_sk_rebuild_header(struct sock *sk);
static inline __attribute__((always_inline)) struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
{
return inet_sk(__sk)->pinet6;
}
static inline __attribute__((always_inline)) struct inet6_request_sock *
inet6_rsk(const struct request_sock *rsk)
{
return (struct inet6_request_sock *)(((u8 *)rsk) +
inet_rsk(rsk)->inet6_rsk_offset);
}
static inline __attribute__((always_inline)) u32 inet6_rsk_offset(struct request_sock *rsk)
{
return rsk->rsk_ops->obj_size - sizeof(struct inet6_request_sock);
}
static inline __attribute__((always_inline)) struct request_sock *inet6_reqsk_alloc(struct request_sock_ops *ops)
{
struct request_sock *req = reqsk_alloc(ops);
if (req != ((void *)0)) {
inet_rsk(req)->inet6_rsk_offset = inet6_rsk_offset(req);
inet6_rsk(req)->pktopts = ((void *)0);
}
return req;
}
static inline __attribute__((always_inline)) struct raw6_sock *raw6_sk(const struct sock *sk)
{
return (struct raw6_sock *)sk;
}
static inline __attribute__((always_inline)) void inet_sk_copy_descendant(struct sock *sk_to,
const struct sock *sk_from)
{
int ancestor_size = sizeof(struct inet_sock);
if (sk_from->__sk_common.skc_family == 10)
ancestor_size += sizeof(struct ipv6_pinfo);
__inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
}
struct inet6_timewait_sock {
struct in6_addr tw_v6_daddr;
struct in6_addr tw_v6_rcv_saddr;
};
struct tcp6_timewait_sock {
struct tcp_timewait_sock tcp6tw_tcp;
struct inet6_timewait_sock tcp6tw_inet6;
};
static inline __attribute__((always_inline)) u16 inet6_tw_offset(const struct proto *prot)
{
return prot->twsk_prot->twsk_obj_size -
sizeof(struct inet6_timewait_sock);
}
static inline __attribute__((always_inline)) struct inet6_timewait_sock *inet6_twsk(const struct sock *sk)
{
return (struct inet6_timewait_sock *)(((u8 *)sk) +
inet_twsk(sk)->tw_ipv6_offset);
}
static inline __attribute__((always_inline)) struct in6_addr *__inet6_rcv_saddr(const struct sock *sk)
{
return __builtin_expect(!!(sk->__sk_common.skc_state != TCP_TIME_WAIT), 1) ?
&inet6_sk(sk)->rcv_saddr : &inet6_twsk(sk)->tw_v6_rcv_saddr;
}
static inline __attribute__((always_inline)) struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
{
return sk->__sk_common.skc_family == 10 ? __inet6_rcv_saddr(sk) : ((void *)0);
}
static inline __attribute__((always_inline)) int inet_v6_ipv6only(const struct sock *sk)
{
return __builtin_expect(!!(sk->__sk_common.skc_state != TCP_TIME_WAIT), 1) ?
((sk)->__sk_common.skc_family == 10 && (inet6_sk(sk)->ipv6only)) : inet_twsk(sk)->tw_ipv6only;
}
# 329 "include/net/ip.h" 2
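/*
 * The bare address-family numbers compared against skc_family in the
 * helpers below are the pre-expansion constants AF_INET (2) and
 * AF_INET6 (10).
 */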
static __inline__ __attribute__((always_inline)) void inet_reset_saddr(struct sock *sk)
{
inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
if (sk->__sk_common.skc_family == 10) {
struct ipv6_pinfo *np = inet6_sk(sk);
memset(&np->saddr, 0, sizeof(np->saddr));
memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr));
}
}
static inline __attribute__((always_inline)) int sk_mc_loop(struct sock *sk)
{
if (!sk)
return 1;
switch (sk->__sk_common.skc_family) {
case 2:
return inet_sk(sk)->mc_loop;
case 10:
return inet6_sk(sk)->mc_loop;
}
({ int __ret_warn_on = !!(1); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_slowpath_null("include/net/ip.h", 358); __builtin_expect(!!(__ret_warn_on), 0); });
return 1;
}
extern int ip_call_ra_chain(struct sk_buff *skb);
enum ip_defrag_users {
IP_DEFRAG_LOCAL_DELIVER,
IP_DEFRAG_CALL_RA_CHAIN,
IP_DEFRAG_CONNTRACK_IN,
__IP_DEFRAG_CONNTRACK_IN_END = IP_DEFRAG_CONNTRACK_IN + ((u16)(~0U)),
IP_DEFRAG_CONNTRACK_OUT,
__IP_DEFRAG_CONNTRACK_OUT_END = IP_DEFRAG_CONNTRACK_OUT + ((u16)(~0U)),
IP_DEFRAG_CONNTRACK_BRIDGE_IN,
__IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + ((u16)(~0U)),
IP_DEFRAG_VPN_SPEED,
IP_DEFRAG_VS_IN,
IP_DEFRAG_VS_OUT,
IP_DEFRAG_VS_FWD
};
int ip_defrag(struct sk_buff *skb, u32 user);
int ip_frag_mem(struct net *net);
int ip_frag_nqueues(struct net *net);
extern int ip_forward(struct sk_buff *skb);
extern void ip_options_build(struct sk_buff *skb, struct ip_options *opt, __be32 daddr, struct rtable *rt, int is_frag);
extern int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
extern void ip_options_fragment(struct sk_buff *skb);
extern int ip_options_compile(struct net *net,
struct ip_options *opt, struct sk_buff *skb);
extern int ip_options_get(struct net *net, struct ip_options **optp,
unsigned char *data, int optlen);
extern int ip_options_get_from_user(struct net *net, struct ip_options **optp,
unsigned char *data, int optlen);
extern void ip_options_undo(struct ip_options * opt);
extern void ip_forward_options(struct sk_buff *skb);
extern int ip_options_rcv_srr(struct sk_buff *skb);
extern int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
extern int ip_cmsg_send(struct net *net,
struct msghdr *msg, struct ipcm_cookie *ipc);
extern int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, unsigned int optlen);
extern int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen);
extern int compat_ip_setsockopt(struct sock *sk, int level,
int optname, char *optval, unsigned int optlen);
extern int compat_ip_getsockopt(struct sock *sk, int level,
int optname, char *optval, int *optlen);
extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
extern void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
__be16 port, u32 info, u8 *payload);
extern void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
u32 info);
int ipv4_doint_and_flush(ctl_table *ctl, int write,
void *buffer,
size_t *lenp, loff_t *ppos);
int ipv4_doint_and_flush_strategy(ctl_table *table,
void *oldval, size_t *oldlenp,
void *newval, size_t newlen);
extern int ip_misc_proc_init(void);
# 66 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/net/netfilter/nf_conntrack.h" 1
# 15 "include/net/netfilter/nf_conntrack.h"
# 1 "include/linux/netfilter/nf_conntrack_common.h" 1
enum ip_conntrack_info {
IP_CT_ESTABLISHED,
IP_CT_RELATED,
IP_CT_NEW,
IP_CT_IS_REPLY,
IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1
};
enum ip_conntrack_status {
IPS_EXPECTED_BIT = 0,
IPS_EXPECTED = (1 << IPS_EXPECTED_BIT),
IPS_SEEN_REPLY_BIT = 1,
IPS_SEEN_REPLY = (1 << IPS_SEEN_REPLY_BIT),
IPS_ASSURED_BIT = 2,
IPS_ASSURED = (1 << IPS_ASSURED_BIT),
IPS_CONFIRMED_BIT = 3,
IPS_CONFIRMED = (1 << IPS_CONFIRMED_BIT),
IPS_SRC_NAT_BIT = 4,
IPS_SRC_NAT = (1 << IPS_SRC_NAT_BIT),
IPS_DST_NAT_BIT = 5,
IPS_DST_NAT = (1 << IPS_DST_NAT_BIT),
IPS_NAT_MASK = (IPS_DST_NAT | IPS_SRC_NAT),
IPS_SEQ_ADJUST_BIT = 6,
IPS_SEQ_ADJUST = (1 << IPS_SEQ_ADJUST_BIT),
IPS_SRC_NAT_DONE_BIT = 7,
IPS_SRC_NAT_DONE = (1 << IPS_SRC_NAT_DONE_BIT),
IPS_DST_NAT_DONE_BIT = 8,
IPS_DST_NAT_DONE = (1 << IPS_DST_NAT_DONE_BIT),
IPS_NAT_DONE_MASK = (IPS_DST_NAT_DONE | IPS_SRC_NAT_DONE),
IPS_DYING_BIT = 9,
IPS_DYING = (1 << IPS_DYING_BIT),
IPS_FIXED_TIMEOUT_BIT = 10,
IPS_FIXED_TIMEOUT = (1 << IPS_FIXED_TIMEOUT_BIT),
IPS_TEMPLATE_BIT = 11,
IPS_TEMPLATE = (1 << IPS_TEMPLATE_BIT),
IPS_UNTRACKED_BIT = 12,
IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),
# 93 "include/linux/netfilter/nf_conntrack_common.h"
};
enum ip_conntrack_events {
IPCT_NEW,
IPCT_RELATED,
IPCT_DESTROY,
IPCT_REPLY,
IPCT_ASSURED,
IPCT_PROTOINFO,
IPCT_HELPER,
IPCT_MARK,
IPCT_NATSEQADJ,
IPCT_SECMARK,
};
enum ip_conntrack_expect_events {
IPEXP_NEW,
};
struct ip_conntrack_stat {
unsigned int searched;
unsigned int found;
unsigned int new;
unsigned int invalid;
unsigned int ignore;
unsigned int delete;
unsigned int delete_list;
unsigned int insert;
unsigned int insert_failed;
unsigned int drop;
unsigned int early_drop;
unsigned int error;
unsigned int expect_new;
unsigned int expect_create;
unsigned int expect_delete;
unsigned int search_restart;
};
extern void need_conntrack(void);
# 16 "include/net/netfilter/nf_conntrack.h" 2
# 1 "include/linux/netfilter/nf_conntrack_tcp.h" 1
enum tcp_conntrack {
TCP_CONNTRACK_NONE,
TCP_CONNTRACK_SYN_SENT,
TCP_CONNTRACK_SYN_RECV,
TCP_CONNTRACK_ESTABLISHED,
TCP_CONNTRACK_FIN_WAIT,
TCP_CONNTRACK_CLOSE_WAIT,
TCP_CONNTRACK_LAST_ACK,
TCP_CONNTRACK_TIME_WAIT,
TCP_CONNTRACK_CLOSE,
TCP_CONNTRACK_LISTEN,
TCP_CONNTRACK_MAX,
TCP_CONNTRACK_IGNORE
};
# 42 "include/linux/netfilter/nf_conntrack_tcp.h"
struct nf_ct_tcp_flags {
__u8 flags;
__u8 mask;
};
struct ip_ct_tcp_state {
u_int32_t td_end;
u_int32_t td_maxend;
u_int32_t td_maxwin;
u_int32_t td_maxack;
u_int8_t td_scale;
u_int8_t flags;
};
struct ip_ct_tcp {
struct ip_ct_tcp_state seen[2];
u_int8_t state;
u_int8_t last_dir;
u_int8_t retrans;
u_int8_t last_index;
u_int32_t last_seq;
u_int32_t last_ack;
u_int32_t last_end;
u_int16_t last_win;
u_int8_t last_wscale;
u_int8_t last_flags;
};
# 23 "include/net/netfilter/nf_conntrack.h" 2
# 1 "include/linux/netfilter/nf_conntrack_dccp.h" 1
enum ct_dccp_states {
CT_DCCP_NONE,
CT_DCCP_REQUEST,
CT_DCCP_RESPOND,
CT_DCCP_PARTOPEN,
CT_DCCP_OPEN,
CT_DCCP_CLOSEREQ,
CT_DCCP_CLOSING,
CT_DCCP_TIMEWAIT,
CT_DCCP_IGNORE,
CT_DCCP_INVALID,
__CT_DCCP_MAX
};
enum ct_dccp_roles {
CT_DCCP_ROLE_CLIENT,
CT_DCCP_ROLE_SERVER,
__CT_DCCP_ROLE_MAX
};
# 1 "include/net/netfilter/nf_conntrack_tuple.h" 1
# 13 "include/net/netfilter/nf_conntrack_tuple.h"
# 1 "include/linux/netfilter/x_tables.h" 1
# 10 "include/linux/netfilter/x_tables.h"
struct xt_entry_match {
union {
struct {
__u16 match_size;
char name[29];
__u8 revision;
} user;
struct {
__u16 match_size;
struct xt_match *match;
} kernel;
__u16 match_size;
} u;
unsigned char data[0];
};
struct xt_entry_target {
union {
struct {
__u16 target_size;
char name[29];
__u8 revision;
} user;
struct {
__u16 target_size;
struct xt_target *target;
} kernel;
__u16 target_size;
} u;
unsigned char data[0];
};
# 64 "include/linux/netfilter/x_tables.h"
struct xt_standard_target {
struct xt_entry_target target;
int verdict;
};
struct xt_get_revision {
char name[29];
__u8 revision;
};
# 87 "include/linux/netfilter/x_tables.h"
struct _xt_align {
__u8 u8;
__u16 u16;
__u32 u32;
__u64 u64;
};
# 104 "include/linux/netfilter/x_tables.h"
struct xt_counters {
__u64 pcnt, bcnt;
};
struct xt_counters_info {
char name[32];
unsigned int num_counters;
struct xt_counters counters[0];
};
# 205 "include/linux/netfilter/x_tables.h"
struct xt_action_param {
union {
const struct xt_match *match;
const struct xt_target *target;
};
union {
const void *matchinfo, *targinfo;
};
const struct net_device *in, *out;
int fragoff;
unsigned int thoff;
unsigned int hooknum;
u_int8_t family;
int hotdrop;
};
# 234 "include/linux/netfilter/x_tables.h"
struct xt_mtchk_param {
struct net *net;
const char *table;
const void *entryinfo;
const struct xt_match *match;
void *matchinfo;
unsigned int hook_mask;
u_int8_t family;
};
struct xt_mtdtor_param {
struct net *net;
const struct xt_match *match;
void *matchinfo;
u_int8_t family;
};
# 264 "include/linux/netfilter/x_tables.h"
struct xt_tgchk_param {
struct net *net;
const char *table;
const void *entryinfo;
const struct xt_target *target;
void *targinfo;
unsigned int hook_mask;
u_int8_t family;
};
struct xt_tgdtor_param {
struct net *net;
const struct xt_target *target;
void *targinfo;
u_int8_t family;
};
struct xt_match {
struct list_head list;
const char name[29];
u_int8_t revision;
int (*match)(const struct sk_buff *skb,
struct xt_action_param *);
int (*checkentry)(const struct xt_mtchk_param *);
void (*destroy)(const struct xt_mtdtor_param *);
struct module *me;
const char *table;
unsigned int matchsize;
unsigned int hooks;
unsigned short proto;
unsigned short family;
};
struct xt_target {
struct list_head list;
const char name[29];
u_int8_t revision;
unsigned int (*target)(struct sk_buff *skb,
const struct xt_action_param *);
int (*checkentry)(const struct xt_tgchk_param *);
void (*destroy)(const struct xt_tgdtor_param *);
struct module *me;
const char *table;
unsigned int targetsize;
unsigned int hooks;
unsigned short proto;
unsigned short family;
};
struct xt_table {
struct list_head list;
unsigned int valid_hooks;
struct xt_table_info *private;
struct module *me;
u_int8_t af;
int priority;
const char name[32];
};
# 1 "include/linux/netfilter_ipv4.h" 1
# 54 "include/linux/netfilter_ipv4.h"
enum nf_ip_hook_priorities {
NF_IP_PRI_FIRST = (-((int)(~0U>>1)) - 1),
NF_IP_PRI_CONNTRACK_DEFRAG = -400,
NF_IP_PRI_RAW = -300,
NF_IP_PRI_SELINUX_FIRST = -225,
NF_IP_PRI_CONNTRACK = -200,
NF_IP_PRI_MANGLE = -150,
NF_IP_PRI_NAT_DST = -100,
NF_IP_PRI_FILTER = 0,
NF_IP_PRI_SECURITY = 50,
NF_IP_PRI_NAT_SRC = 100,
NF_IP_PRI_SELINUX_LAST = 225,
NF_IP_PRI_CONNTRACK_CONFIRM = ((int)(~0U>>1)),
NF_IP_PRI_LAST = ((int)(~0U>>1)),
};
# 77 "include/linux/netfilter_ipv4.h"
extern int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
extern int ip_xfrm_me_harder(struct sk_buff *skb);
extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
# 381 "include/linux/netfilter/x_tables.h" 2
struct xt_table_info {
unsigned int size;
unsigned int number;
unsigned int initial_entries;
unsigned int hook_entry[NF_INET_NUMHOOKS];
unsigned int underflow[NF_INET_NUMHOOKS];
unsigned int stacksize;
unsigned int *stackptr;
void ***jumpstack;
void *entries[1];
};
extern int xt_register_target(struct xt_target *target);
extern void xt_unregister_target(struct xt_target *target);
extern int xt_register_targets(struct xt_target *target, unsigned int n);
extern void xt_unregister_targets(struct xt_target *target, unsigned int n);
extern int xt_register_match(struct xt_match *target);
extern void xt_unregister_match(struct xt_match *target);
extern int xt_register_matches(struct xt_match *match, unsigned int n);
extern void xt_unregister_matches(struct xt_match *match, unsigned int n);
extern int xt_check_match(struct xt_mtchk_param *,
unsigned int size, u_int8_t proto, int inv_proto);
extern int xt_check_target(struct xt_tgchk_param *,
unsigned int size, u_int8_t proto, int inv_proto);
extern struct xt_table *xt_register_table(struct net *net,
const struct xt_table *table,
struct xt_table_info *bootstrap,
struct xt_table_info *newinfo);
extern void *xt_unregister_table(struct xt_table *table);
extern struct xt_table_info *xt_replace_table(struct xt_table *table,
unsigned int num_counters,
struct xt_table_info *newinfo,
int *error);
extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
extern struct xt_match *xt_request_find_match(u8 af, const char *name,
u8 revision);
extern struct xt_target *xt_request_find_target(u8 af, const char *name,
u8 revision);
extern int xt_find_revision(u8 af, const char *name, u8 revision,
int target, int *err);
extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
const char *name);
extern void xt_table_unlock(struct xt_table *t);
extern int xt_proto_init(struct net *net, u_int8_t af);
extern void xt_proto_fini(struct net *net, u_int8_t af);
extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
extern void xt_free_table_info(struct xt_table_info *info);
# 469 "include/linux/netfilter/x_tables.h"
struct xt_info_lock {
spinlock_t lock;
unsigned char readers;
};
extern __attribute__((section(".data..percpu" ""))) __typeof__(struct xt_info_lock) xt_info_locks;
# 488 "include/linux/netfilter/x_tables.h"
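/*
 * xt_info_locks is a per-CPU, recursion-counted spinlock: packet-path
 * readers take only their own CPU's lock (xt_info_rdlock_bh), so rule
 * traversal scales across CPUs, while a writer replacing a table or
 * collecting counters must take every CPU's lock in turn with
 * xt_info_wrlock(cpu).
 */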
static inline __attribute__((always_inline)) void xt_info_rdlock_bh(void)
{
struct xt_info_lock *lock;
local_bh_disable();
lock = &(*({ do { const void *__vpp_verify = (typeof((&(xt_info_locks))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(xt_info_locks))) *)(&(xt_info_locks)))); (typeof((typeof(*(&(xt_info_locks))) *)(&(xt_info_locks)))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }));
if (__builtin_expect(!!(!lock->readers++), 1))
spin_lock(&lock->lock);
}
static inline __attribute__((always_inline)) void xt_info_rdunlock_bh(void)
{
struct xt_info_lock *lock = &(*({ do { const void *__vpp_verify = (typeof((&(xt_info_locks))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(xt_info_locks))) *)(&(xt_info_locks)))); (typeof((typeof(*(&(xt_info_locks))) *)(&(xt_info_locks)))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }));
if (__builtin_expect(!!(!--lock->readers), 1))
spin_unlock(&lock->lock);
local_bh_enable();
}
static inline __attribute__((always_inline)) void xt_info_wrlock(unsigned int cpu)
{
spin_lock(&(*({ do { const void *__vpp_verify = (typeof((&(xt_info_locks))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(xt_info_locks))) *)(&(xt_info_locks)))); (typeof((typeof(*(&(xt_info_locks))) *)(&(xt_info_locks)))) (__ptr + (((__per_cpu_offset[cpu])))); }); })).lock);
}
static inline __attribute__((always_inline)) void xt_info_wrunlock(unsigned int cpu)
{
spin_unlock(&(*({ do { const void *__vpp_verify = (typeof((&(xt_info_locks))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(xt_info_locks))) *)(&(xt_info_locks)))); (typeof((typeof(*(&(xt_info_locks))) *)(&(xt_info_locks)))) (__ptr + (((__per_cpu_offset[cpu])))); }); })).lock);
}
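/*
 * Editorial note (not part of the original header): the declaration and
 * inline helpers above are the preprocessed form of the x_tables per-CPU
 * "recursive reader" lock.  Each CPU owns one xt_info_lock (the expanded
 * DECLARE_PER_CPU above); a reader disables BH and takes only its own
 * CPU's spinlock, and only on the first nesting level tracked by
 * ->readers, while an updater that needs a stable view (e.g. counter
 * collection) takes each CPU's lock in turn via xt_info_wrlock(cpu).
 * A minimal reader sketch, with do_table_walk() as a hypothetical
 * placeholder:
 *
 *	xt_info_rdlock_bh();
 *	do_table_walk(priv);		(runs with BH off, local lock held)
 *	xt_info_rdunlock_bh();
 */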
static inline __attribute__((always_inline)) unsigned long ifname_compare_aligned(const char *_a,
const char *_b,
const char *_mask)
{
const unsigned long *a = (const unsigned long *)_a;
const unsigned long *b = (const unsigned long *)_b;
const unsigned long *mask = (const unsigned long *)_mask;
unsigned long ret;
ret = (a[0] ^ b[0]) & mask[0];
if (16 > sizeof(unsigned long))
ret |= (a[1] ^ b[1]) & mask[1];
if (16 > 2 * sizeof(unsigned long))
ret |= (a[2] ^ b[2]) & mask[2];
if (16 > 3 * sizeof(unsigned long))
ret |= (a[3] ^ b[3]) & mask[3];
((void)(sizeof(struct { int:-!!(16 > 4 * sizeof(unsigned long)); })));
return ret;
}
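/*
 * Editorial note (assumption: the literal 16 is the expanded IFNAMSIZ):
 * ifname_compare_aligned() compares two interface names one unsigned
 * long at a time under a per-byte mask and returns 0 when they match.
 * Hypothetical use against an xt rule's interface fields:
 *
 *	ret = ifname_compare_aligned(indev, info->iniface,
 *				     info->iniface_mask);
 *	(ret == 0 means the input interface matches the rule)
 */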
extern struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
extern void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
# 14 "include/net/netfilter/nf_conntrack_tuple.h" 2
# 1 "include/linux/netfilter/nf_conntrack_tuple_common.h" 1
enum ip_conntrack_dir {
IP_CT_DIR_ORIGINAL,
IP_CT_DIR_REPLY,
IP_CT_DIR_MAX
};
# 15 "include/net/netfilter/nf_conntrack_tuple.h" 2
# 29 "include/net/netfilter/nf_conntrack_tuple.h"
union nf_conntrack_man_proto {
__be16 all;
struct {
__be16 port;
} tcp;
struct {
__be16 port;
} udp;
struct {
__be16 id;
} icmp;
struct {
__be16 port;
} dccp;
struct {
__be16 port;
} sctp;
struct {
__be16 key;
} gre;
};
struct nf_conntrack_man {
union nf_inet_addr u3;
union nf_conntrack_man_proto u;
u_int16_t l3num;
};
struct nf_conntrack_tuple {
struct nf_conntrack_man src;
struct {
union nf_inet_addr u3;
union {
__be16 all;
struct {
__be16 port;
} tcp;
struct {
__be16 port;
} udp;
struct {
u_int8_t type, code;
} icmp;
struct {
__be16 port;
} dccp;
struct {
__be16 port;
} sctp;
struct {
__be16 key;
} gre;
} u;
u_int8_t protonum;
u_int8_t dir;
} dst;
};
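/*
 * Editorial note (summary, not from the original file): a conntrack
 * tuple is the lookup key for a connection.  The "manipulable" source
 * half (struct nf_conntrack_man: address, per-protocol id such as a
 * port, and the layer-3 number) is the part NAT may rewrite; the dst
 * half carries the peer address, per-protocol id, protocol number and
 * the direction in which the tuple was seen.  Each connection is hashed
 * twice, once per direction (IP_CT_DIR_ORIGINAL / IP_CT_DIR_REPLY).
 */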
struct nf_conntrack_tuple_mask {
struct {
union nf_inet_addr u3;
union nf_conntrack_man_proto u;
} src;
};
static inline __attribute__((always_inline)) void nf_ct_dump_tuple_ip(const struct nf_conntrack_tuple *t)
{
}
static inline __attribute__((always_inline)) void nf_ct_dump_tuple_ipv6(const struct nf_conntrack_tuple *t)
{
}
static inline __attribute__((always_inline)) void nf_ct_dump_tuple(const struct nf_conntrack_tuple *t)
{
switch (t->src.l3num) {
case 2:
nf_ct_dump_tuple_ip(t);
break;
case 10:
nf_ct_dump_tuple_ipv6(t);
break;
}
}
struct nf_conntrack_tuple_hash {
struct hlist_nulls_node hnnode;
struct nf_conntrack_tuple tuple;
};
static inline __attribute__((always_inline)) int __nf_ct_tuple_src_equal(const struct nf_conntrack_tuple *t1,
const struct nf_conntrack_tuple *t2)
{
return (nf_inet_addr_cmp(&t1->src.u3, &t2->src.u3) &&
t1->src.u.all == t2->src.u.all &&
t1->src.l3num == t2->src.l3num);
}
static inline __attribute__((always_inline)) int __nf_ct_tuple_dst_equal(const struct nf_conntrack_tuple *t1,
const struct nf_conntrack_tuple *t2)
{
return (nf_inet_addr_cmp(&t1->dst.u3, &t2->dst.u3) &&
t1->dst.u.all == t2->dst.u.all &&
t1->dst.protonum == t2->dst.protonum);
}
static inline __attribute__((always_inline)) int nf_ct_tuple_equal(const struct nf_conntrack_tuple *t1,
const struct nf_conntrack_tuple *t2)
{
return __nf_ct_tuple_src_equal(t1, t2) &&
__nf_ct_tuple_dst_equal(t1, t2);
}
static inline __attribute__((always_inline)) int
nf_ct_tuple_mask_equal(const struct nf_conntrack_tuple_mask *m1,
const struct nf_conntrack_tuple_mask *m2)
{
return (nf_inet_addr_cmp(&m1->src.u3, &m2->src.u3) &&
m1->src.u.all == m2->src.u.all);
}
static inline __attribute__((always_inline)) int
nf_ct_tuple_src_mask_cmp(const struct nf_conntrack_tuple *t1,
const struct nf_conntrack_tuple *t2,
const struct nf_conntrack_tuple_mask *mask)
{
int count;
for (count = 0; count < (sizeof(((union nf_inet_addr *)((void *)0))->all) / sizeof((((union nf_inet_addr *)((void *)0))->all)[0]) + (sizeof(struct { int:-!!(__builtin_types_compatible_p(typeof((((union nf_inet_addr *)((void *)0))->all)), typeof(&(((union nf_inet_addr *)((void *)0))->all)[0]))); }))); count++) {
if ((t1->src.u3.all[count] ^ t2->src.u3.all[count]) &
mask->src.u3.all[count])
return false;
}
if ((t1->src.u.all ^ t2->src.u.all) & mask->src.u.all)
return false;
if (t1->src.l3num != t2->src.l3num ||
t1->dst.protonum != t2->dst.protonum)
return false;
return true;
}
static inline __attribute__((always_inline)) int
nf_ct_tuple_mask_cmp(const struct nf_conntrack_tuple *t,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple_mask *mask)
{
return nf_ct_tuple_src_mask_cmp(t, tuple, mask) &&
__nf_ct_tuple_dst_equal(t, tuple);
}
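/*
 * Editorial note: the helpers above split tuple equality into source
 * and destination halves so expectation matching can apply a mask to
 * the source side only (nf_ct_tuple_src_mask_cmp) while still requiring
 * an exact destination match (nf_ct_tuple_mask_cmp).  Illustrative
 * sketch, assuming tuples t and templ and mask m are already built:
 *
 *	if (nf_ct_tuple_mask_cmp(&t, &templ, &m))
 *		the incoming tuple matches the expectation template
 */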
# 29 "include/linux/netfilter/nf_conntrack_dccp.h" 2
struct nf_ct_dccp {
u_int8_t role[IP_CT_DIR_MAX];
u_int8_t state;
u_int8_t last_pkt;
u_int8_t last_dir;
u_int64_t handshake_seq;
};
# 24 "include/net/netfilter/nf_conntrack.h" 2
# 1 "include/linux/netfilter/nf_conntrack_sctp.h" 1
enum sctp_conntrack {
SCTP_CONNTRACK_NONE,
SCTP_CONNTRACK_CLOSED,
SCTP_CONNTRACK_COOKIE_WAIT,
SCTP_CONNTRACK_COOKIE_ECHOED,
SCTP_CONNTRACK_ESTABLISHED,
SCTP_CONNTRACK_SHUTDOWN_SENT,
SCTP_CONNTRACK_SHUTDOWN_RECD,
SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
SCTP_CONNTRACK_MAX
};
struct ip_ct_sctp {
enum sctp_conntrack state;
__be32 vtag[IP_CT_DIR_MAX];
};
# 25 "include/net/netfilter/nf_conntrack.h" 2
# 1 "include/linux/netfilter/nf_conntrack_proto_gre.h" 1
# 28 "include/linux/netfilter/nf_conntrack_proto_gre.h"
struct gre_hdr {
# 40 "include/linux/netfilter/nf_conntrack_proto_gre.h"
__u16 csum:1,
routing:1,
key:1,
seq:1,
srr:1,
rec:3,
ack:1,
reserved:4,
version:3;
__be16 protocol;
};
struct gre_hdr_pptp {
__u8 flags;
__u8 version;
__be16 protocol;
__be16 payload_len;
__be16 call_id;
__be32 seq;
__be32 ack;
};
struct nf_ct_gre {
unsigned int stream_timeout;
unsigned int timeout;
};
struct nf_conn;
struct nf_ct_gre_keymap {
struct list_head list;
struct nf_conntrack_tuple tuple;
};
int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct nf_conntrack_tuple *t);
void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
extern void nf_ct_gre_keymap_flush(struct net *net);
extern void nf_nat_need_gre(void);
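/*
 * Editorial note (inferred from the declarations above): PPTP data
 * traffic is GRE keyed by call ID, so conntrack keeps a per-connection
 * keymap (struct nf_ct_gre_keymap) that the PPTP helper registers for
 * both directions before the GRE flow shows up, roughly:
 *
 *	nf_ct_gre_keymap_add(ct, IP_CT_DIR_ORIGINAL, &orig_tuple);
 *	nf_ct_gre_keymap_add(ct, IP_CT_DIR_REPLY, &reply_tuple);
 *	...
 *	nf_ct_gre_keymap_destroy(ct);	(on connection teardown)
 */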
# 26 "include/net/netfilter/nf_conntrack.h" 2
# 1 "include/net/netfilter/ipv6/nf_conntrack_icmpv6.h" 1
# 27 "include/net/netfilter/nf_conntrack.h" 2
union nf_conntrack_proto {
struct nf_ct_dccp dccp;
struct ip_ct_sctp sctp;
struct ip_ct_tcp tcp;
struct nf_ct_gre gre;
};
union nf_conntrack_expect_proto {
};
# 1 "include/linux/netfilter/nf_conntrack_ftp.h" 1
enum nf_ct_ftp_type {
NF_CT_FTP_PORT,
NF_CT_FTP_PASV,
NF_CT_FTP_EPRT,
NF_CT_FTP_EPSV,
};
struct nf_ct_ftp_master {
u_int32_t seq_aft_nl[IP_CT_DIR_MAX][2];
int seq_aft_nl_num[IP_CT_DIR_MAX];
};
struct nf_conntrack_expect;
extern unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
enum nf_ct_ftp_type type,
unsigned int matchoff,
unsigned int matchlen,
struct nf_conntrack_expect *exp);
# 45 "include/net/netfilter/nf_conntrack.h" 2
# 1 "include/linux/netfilter/nf_conntrack_pptp.h" 1
extern const char *const pptp_msg_name[];
enum pptp_ctrlsess_state {
PPTP_SESSION_NONE,
PPTP_SESSION_ERROR,
PPTP_SESSION_STOPREQ,
PPTP_SESSION_REQUESTED,
PPTP_SESSION_CONFIRMED,
};
enum pptp_ctrlcall_state {
PPTP_CALL_NONE,
PPTP_CALL_ERROR,
PPTP_CALL_OUT_REQ,
PPTP_CALL_OUT_CONF,
PPTP_CALL_IN_REQ,
PPTP_CALL_IN_REP,
PPTP_CALL_IN_CONF,
PPTP_CALL_CLEAR_REQ,
};
struct nf_ct_pptp_master {
enum pptp_ctrlsess_state sstate;
enum pptp_ctrlcall_state cstate;
__be16 pac_call_id;
__be16 pns_call_id;
struct nf_ct_gre_keymap *keymap[IP_CT_DIR_MAX];
};
struct nf_nat_pptp {
__be16 pns_call_id;
__be16 pac_call_id;
};
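/*
 * Editorial note: struct nf_ct_pptp_master tracks the PPTP control
 * session and call state machines plus the PAC/PNS call IDs seen on the
 * wire, while struct nf_nat_pptp (kept with the connection's NAT state)
 * remembers the call IDs as rewritten by NAT so replies can be mapped
 * back to the original values.
 */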
# 56 "include/linux/netfilter/nf_conntrack_pptp.h"
struct pptp_pkt_hdr {
__u16 packetLength;
__be16 packetType;
__be32 magicCookie;
};
# 90 "include/linux/netfilter/nf_conntrack_pptp.h"
struct PptpControlHeader {
__be16 messageType;
__u16 reserved;
};
# 103 "include/linux/netfilter/nf_conntrack_pptp.h"
struct PptpStartSessionRequest {
__be16 protocolVersion;
__u16 reserved1;
__be32 framingCapability;
__be32 bearerCapability;
__be16 maxChannels;
__be16 firmwareRevision;
__u8 hostName[64];
__u8 vendorString[64];
};
# 121 "include/linux/netfilter/nf_conntrack_pptp.h"
struct PptpStartSessionReply {
__be16 protocolVersion;
__u8 resultCode;
__u8 generalErrorCode;
__be32 framingCapability;
__be32 bearerCapability;
__be16 maxChannels;
__be16 firmwareRevision;
__u8 hostName[64];
__u8 vendorString[64];
};
struct PptpStopSessionRequest {
__u8 reason;
__u8 reserved1;
__u16 reserved2;
};
struct PptpStopSessionReply {
__u8 resultCode;
__u8 generalErrorCode;
__u16 reserved1;
};
struct PptpEchoRequest {
__be32 identNumber;
};
struct PptpEchoReply {
__be32 identNumber;
__u8 resultCode;
__u8 generalErrorCode;
__u16 reserved;
};
# 179 "include/linux/netfilter/nf_conntrack_pptp.h"
struct PptpOutCallRequest {
__be16 callID;
__be16 callSerialNumber;
__be32 minBPS;
__be32 maxBPS;
__be32 bearerType;
__be32 framingType;
__be16 packetWindow;
__be16 packetProcDelay;
__be16 phoneNumberLength;
__u16 reserved1;
__u8 phoneNumber[64];
__u8 subAddress[64];
};
# 203 "include/linux/netfilter/nf_conntrack_pptp.h"
struct PptpOutCallReply {
__be16 callID;
__be16 peersCallID;
__u8 resultCode;
__u8 generalErrorCode;
__be16 causeCode;
__be32 connectSpeed;
__be16 packetWindow;
__be16 packetProcDelay;
__be32 physChannelID;
};
struct PptpInCallRequest {
__be16 callID;
__be16 callSerialNumber;
__be32 callBearerType;
__be32 physChannelID;
__be16 dialedNumberLength;
__be16 dialingNumberLength;
__u8 dialedNumber[64];
__u8 dialingNumber[64];
__u8 subAddress[64];
};
struct PptpInCallReply {
__be16 callID;
__be16 peersCallID;
__u8 resultCode;
__u8 generalErrorCode;
__be16 packetWindow;
__be16 packetProcDelay;
__u16 reserved;
};
struct PptpInCallConnected {
__be16 peersCallID;
__u16 reserved;
__be32 connectSpeed;
__be16 packetWindow;
__be16 packetProcDelay;
__be32 callFramingType;
};
struct PptpClearCallRequest {
__be16 callID;
__u16 reserved;
};
struct PptpCallDisconnectNotify {
__be16 callID;
__u8 resultCode;
__u8 generalErrorCode;
__be16 causeCode;
__u16 reserved;
__u8 callStatistics[128];
};
struct PptpWanErrorNotify {
__be16 peersCallID;
__u16 reserved;
__be32 crcErrors;
__be32 framingErrors;
__be32 hardwareOverRuns;
__be32 bufferOverRuns;
__be32 timeoutErrors;
__be32 alignmentErrors;
};
struct PptpSetLinkInfo {
__be16 peersCallID;
__u16 reserved;
__be32 sendAccm;
__be32 recvAccm;
};
union pptp_ctrl_union {
struct PptpStartSessionRequest sreq;
struct PptpStartSessionReply srep;
struct PptpStopSessionRequest streq;
struct PptpStopSessionReply strep;
struct PptpOutCallRequest ocreq;
struct PptpOutCallReply ocack;
struct PptpInCallRequest icreq;
struct PptpInCallReply icack;
struct PptpInCallConnected iccon;
struct PptpClearCallRequest clrreq;
struct PptpCallDisconnectNotify disc;
struct PptpWanErrorNotify wanerr;
struct PptpSetLinkInfo setlink;
};
struct nf_conn;
struct nf_conntrack_expect;
extern int
(*nf_nat_pptp_hook_outbound)(struct sk_buff *skb,
struct nf_conn *ct, enum ip_conntrack_info ctinfo,
struct PptpControlHeader *ctlh,
union pptp_ctrl_union *pptpReq);
extern int
(*nf_nat_pptp_hook_inbound)(struct sk_buff *skb,
struct nf_conn *ct, enum ip_conntrack_info ctinfo,
struct PptpControlHeader *ctlh,
union pptp_ctrl_union *pptpReq);
extern void
(*nf_nat_pptp_hook_exp_gre)(struct nf_conntrack_expect *exp_orig,
struct nf_conntrack_expect *exp_reply);
extern void
(*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct,
struct nf_conntrack_expect *exp);
# 46 "include/net/netfilter/nf_conntrack.h" 2
# 1 "include/linux/netfilter/nf_conntrack_h323.h" 1
# 1 "include/linux/netfilter/nf_conntrack_h323_asn1.h" 1
# 43 "include/linux/netfilter/nf_conntrack_h323_asn1.h"
# 1 "include/linux/netfilter/nf_conntrack_h323_types.h" 1
typedef struct TransportAddress_ipAddress {
int options;
unsigned ip;
} TransportAddress_ipAddress;
typedef struct TransportAddress_ip6Address {
int options;
unsigned ip;
} TransportAddress_ip6Address;
typedef struct TransportAddress {
enum {
eTransportAddress_ipAddress,
eTransportAddress_ipSourceRoute,
eTransportAddress_ipxAddress,
eTransportAddress_ip6Address,
eTransportAddress_netBios,
eTransportAddress_nsap,
eTransportAddress_nonStandardAddress,
} choice;
union {
TransportAddress_ipAddress ipAddress;
TransportAddress_ip6Address ip6Address;
};
} TransportAddress;
typedef struct DataProtocolCapability {
enum {
eDataProtocolCapability_nonStandard,
eDataProtocolCapability_v14buffered,
eDataProtocolCapability_v42lapm,
eDataProtocolCapability_hdlcFrameTunnelling,
eDataProtocolCapability_h310SeparateVCStack,
eDataProtocolCapability_h310SingleVCStack,
eDataProtocolCapability_transparent,
eDataProtocolCapability_segmentationAndReassembly,
eDataProtocolCapability_hdlcFrameTunnelingwSAR,
eDataProtocolCapability_v120,
eDataProtocolCapability_separateLANStack,
eDataProtocolCapability_v76wCompression,
eDataProtocolCapability_tcp,
eDataProtocolCapability_udp,
} choice;
} DataProtocolCapability;
typedef struct DataApplicationCapability_application {
enum {
eDataApplicationCapability_application_nonStandard,
eDataApplicationCapability_application_t120,
eDataApplicationCapability_application_dsm_cc,
eDataApplicationCapability_application_userData,
eDataApplicationCapability_application_t84,
eDataApplicationCapability_application_t434,
eDataApplicationCapability_application_h224,
eDataApplicationCapability_application_nlpid,
eDataApplicationCapability_application_dsvdControl,
eDataApplicationCapability_application_h222DataPartitioning,
eDataApplicationCapability_application_t30fax,
eDataApplicationCapability_application_t140,
eDataApplicationCapability_application_t38fax,
eDataApplicationCapability_application_genericDataCapability,
} choice;
union {
DataProtocolCapability t120;
};
} DataApplicationCapability_application;
typedef struct DataApplicationCapability {
int options;
DataApplicationCapability_application application;
} DataApplicationCapability;
typedef struct DataType {
enum {
eDataType_nonStandard,
eDataType_nullData,
eDataType_videoData,
eDataType_audioData,
eDataType_data,
eDataType_encryptionData,
eDataType_h235Control,
eDataType_h235Media,
eDataType_multiplexedStream,
} choice;
union {
DataApplicationCapability data;
};
} DataType;
typedef struct UnicastAddress_iPAddress {
int options;
unsigned network;
} UnicastAddress_iPAddress;
typedef struct UnicastAddress_iP6Address {
int options;
unsigned network;
} UnicastAddress_iP6Address;
typedef struct UnicastAddress {
enum {
eUnicastAddress_iPAddress,
eUnicastAddress_iPXAddress,
eUnicastAddress_iP6Address,
eUnicastAddress_netBios,
eUnicastAddress_iPSourceRouteAddress,
eUnicastAddress_nsap,
eUnicastAddress_nonStandardAddress,
} choice;
union {
UnicastAddress_iPAddress iPAddress;
UnicastAddress_iP6Address iP6Address;
};
} UnicastAddress;
typedef struct H245_TransportAddress {
enum {
eH245_TransportAddress_unicastAddress,
eH245_TransportAddress_multicastAddress,
} choice;
union {
UnicastAddress unicastAddress;
};
} H245_TransportAddress;
typedef struct H2250LogicalChannelParameters {
enum {
eH2250LogicalChannelParameters_nonStandard = (1 << 31),
eH2250LogicalChannelParameters_associatedSessionID =
(1 << 30),
eH2250LogicalChannelParameters_mediaChannel = (1 << 29),
eH2250LogicalChannelParameters_mediaGuaranteedDelivery =
(1 << 28),
eH2250LogicalChannelParameters_mediaControlChannel =
(1 << 27),
eH2250LogicalChannelParameters_mediaControlGuaranteedDelivery
= (1 << 26),
eH2250LogicalChannelParameters_silenceSuppression = (1 << 25),
eH2250LogicalChannelParameters_destination = (1 << 24),
eH2250LogicalChannelParameters_dynamicRTPPayloadType =
(1 << 23),
eH2250LogicalChannelParameters_mediaPacketization = (1 << 22),
eH2250LogicalChannelParameters_transportCapability =
(1 << 21),
eH2250LogicalChannelParameters_redundancyEncoding = (1 << 20),
eH2250LogicalChannelParameters_source = (1 << 19),
} options;
H245_TransportAddress mediaChannel;
H245_TransportAddress mediaControlChannel;
} H2250LogicalChannelParameters;
typedef struct OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters {
enum {
eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h222LogicalChannelParameters,
eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h223LogicalChannelParameters,
eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_v76LogicalChannelParameters,
eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters,
eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_none,
} choice;
union {
H2250LogicalChannelParameters h2250LogicalChannelParameters;
};
} OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters;
typedef struct OpenLogicalChannel_forwardLogicalChannelParameters {
enum {
eOpenLogicalChannel_forwardLogicalChannelParameters_portNumber
= (1 << 31),
eOpenLogicalChannel_forwardLogicalChannelParameters_forwardLogicalChannelDependency
= (1 << 30),
eOpenLogicalChannel_forwardLogicalChannelParameters_replacementFor
= (1 << 29),
} options;
DataType dataType;
OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters
multiplexParameters;
} OpenLogicalChannel_forwardLogicalChannelParameters;
typedef struct OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters {
enum {
eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h223LogicalChannelParameters,
eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_v76LogicalChannelParameters,
eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters,
} choice;
union {
H2250LogicalChannelParameters h2250LogicalChannelParameters;
};
} OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters;
typedef struct OpenLogicalChannel_reverseLogicalChannelParameters {
enum {
eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters
= (1 << 31),
eOpenLogicalChannel_reverseLogicalChannelParameters_reverseLogicalChannelDependency
= (1 << 30),
eOpenLogicalChannel_reverseLogicalChannelParameters_replacementFor
= (1 << 29),
} options;
OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters
multiplexParameters;
} OpenLogicalChannel_reverseLogicalChannelParameters;
typedef struct NetworkAccessParameters_networkAddress {
enum {
eNetworkAccessParameters_networkAddress_q2931Address,
eNetworkAccessParameters_networkAddress_e164Address,
eNetworkAccessParameters_networkAddress_localAreaAddress,
} choice;
union {
H245_TransportAddress localAreaAddress;
};
} NetworkAccessParameters_networkAddress;
typedef struct NetworkAccessParameters {
enum {
eNetworkAccessParameters_distribution = (1 << 31),
eNetworkAccessParameters_externalReference = (1 << 30),
eNetworkAccessParameters_t120SetupProcedure = (1 << 29),
} options;
NetworkAccessParameters_networkAddress networkAddress;
} NetworkAccessParameters;
typedef struct OpenLogicalChannel {
enum {
eOpenLogicalChannel_reverseLogicalChannelParameters =
(1 << 31),
eOpenLogicalChannel_separateStack = (1 << 30),
eOpenLogicalChannel_encryptionSync = (1 << 29),
} options;
OpenLogicalChannel_forwardLogicalChannelParameters
forwardLogicalChannelParameters;
OpenLogicalChannel_reverseLogicalChannelParameters
reverseLogicalChannelParameters;
NetworkAccessParameters separateStack;
} OpenLogicalChannel;
typedef struct Setup_UUIE_fastStart {
int count;
OpenLogicalChannel item[30];
} Setup_UUIE_fastStart;
typedef struct Setup_UUIE {
enum {
eSetup_UUIE_h245Address = (1 << 31),
eSetup_UUIE_sourceAddress = (1 << 30),
eSetup_UUIE_destinationAddress = (1 << 29),
eSetup_UUIE_destCallSignalAddress = (1 << 28),
eSetup_UUIE_destExtraCallInfo = (1 << 27),
eSetup_UUIE_destExtraCRV = (1 << 26),
eSetup_UUIE_callServices = (1 << 25),
eSetup_UUIE_sourceCallSignalAddress = (1 << 24),
eSetup_UUIE_remoteExtensionAddress = (1 << 23),
eSetup_UUIE_callIdentifier = (1 << 22),
eSetup_UUIE_h245SecurityCapability = (1 << 21),
eSetup_UUIE_tokens = (1 << 20),
eSetup_UUIE_cryptoTokens = (1 << 19),
eSetup_UUIE_fastStart = (1 << 18),
eSetup_UUIE_mediaWaitForConnect = (1 << 17),
eSetup_UUIE_canOverlapSend = (1 << 16),
eSetup_UUIE_endpointIdentifier = (1 << 15),
eSetup_UUIE_multipleCalls = (1 << 14),
eSetup_UUIE_maintainConnection = (1 << 13),
eSetup_UUIE_connectionParameters = (1 << 12),
eSetup_UUIE_language = (1 << 11),
eSetup_UUIE_presentationIndicator = (1 << 10),
eSetup_UUIE_screeningIndicator = (1 << 9),
eSetup_UUIE_serviceControl = (1 << 8),
eSetup_UUIE_symmetricOperationRequired = (1 << 7),
eSetup_UUIE_capacity = (1 << 6),
eSetup_UUIE_circuitInfo = (1 << 5),
eSetup_UUIE_desiredProtocols = (1 << 4),
eSetup_UUIE_neededFeatures = (1 << 3),
eSetup_UUIE_desiredFeatures = (1 << 2),
eSetup_UUIE_supportedFeatures = (1 << 1),
eSetup_UUIE_parallelH245Control = (1 << 0),
} options;
TransportAddress h245Address;
TransportAddress destCallSignalAddress;
TransportAddress sourceCallSignalAddress;
Setup_UUIE_fastStart fastStart;
} Setup_UUIE;
typedef struct CallProceeding_UUIE_fastStart {
int count;
OpenLogicalChannel item[30];
} CallProceeding_UUIE_fastStart;
typedef struct CallProceeding_UUIE {
enum {
eCallProceeding_UUIE_h245Address = (1 << 31),
eCallProceeding_UUIE_callIdentifier = (1 << 30),
eCallProceeding_UUIE_h245SecurityMode = (1 << 29),
eCallProceeding_UUIE_tokens = (1 << 28),
eCallProceeding_UUIE_cryptoTokens = (1 << 27),
eCallProceeding_UUIE_fastStart = (1 << 26),
eCallProceeding_UUIE_multipleCalls = (1 << 25),
eCallProceeding_UUIE_maintainConnection = (1 << 24),
eCallProceeding_UUIE_fastConnectRefused = (1 << 23),
eCallProceeding_UUIE_featureSet = (1 << 22),
} options;
TransportAddress h245Address;
CallProceeding_UUIE_fastStart fastStart;
} CallProceeding_UUIE;
typedef struct Connect_UUIE_fastStart {
int count;
OpenLogicalChannel item[30];
} Connect_UUIE_fastStart;
typedef struct Connect_UUIE {
enum {
eConnect_UUIE_h245Address = (1 << 31),
eConnect_UUIE_callIdentifier = (1 << 30),
eConnect_UUIE_h245SecurityMode = (1 << 29),
eConnect_UUIE_tokens = (1 << 28),
eConnect_UUIE_cryptoTokens = (1 << 27),
eConnect_UUIE_fastStart = (1 << 26),
eConnect_UUIE_multipleCalls = (1 << 25),
eConnect_UUIE_maintainConnection = (1 << 24),
eConnect_UUIE_language = (1 << 23),
eConnect_UUIE_connectedAddress = (1 << 22),
eConnect_UUIE_presentationIndicator = (1 << 21),
eConnect_UUIE_screeningIndicator = (1 << 20),
eConnect_UUIE_fastConnectRefused = (1 << 19),
eConnect_UUIE_serviceControl = (1 << 18),
eConnect_UUIE_capacity = (1 << 17),
eConnect_UUIE_featureSet = (1 << 16),
} options;
TransportAddress h245Address;
Connect_UUIE_fastStart fastStart;
} Connect_UUIE;
typedef struct Alerting_UUIE_fastStart {
int count;
OpenLogicalChannel item[30];
} Alerting_UUIE_fastStart;
typedef struct Alerting_UUIE {
enum {
eAlerting_UUIE_h245Address = (1 << 31),
eAlerting_UUIE_callIdentifier = (1 << 30),
eAlerting_UUIE_h245SecurityMode = (1 << 29),
eAlerting_UUIE_tokens = (1 << 28),
eAlerting_UUIE_cryptoTokens = (1 << 27),
eAlerting_UUIE_fastStart = (1 << 26),
eAlerting_UUIE_multipleCalls = (1 << 25),
eAlerting_UUIE_maintainConnection = (1 << 24),
eAlerting_UUIE_alertingAddress = (1 << 23),
eAlerting_UUIE_presentationIndicator = (1 << 22),
eAlerting_UUIE_screeningIndicator = (1 << 21),
eAlerting_UUIE_fastConnectRefused = (1 << 20),
eAlerting_UUIE_serviceControl = (1 << 19),
eAlerting_UUIE_capacity = (1 << 18),
eAlerting_UUIE_featureSet = (1 << 17),
} options;
TransportAddress h245Address;
Alerting_UUIE_fastStart fastStart;
} Alerting_UUIE;
typedef struct FacilityReason {
enum {
eFacilityReason_routeCallToGatekeeper,
eFacilityReason_callForwarded,
eFacilityReason_routeCallToMC,
eFacilityReason_undefinedReason,
eFacilityReason_conferenceListChoice,
eFacilityReason_startH245,
eFacilityReason_noH245,
eFacilityReason_newTokens,
eFacilityReason_featureSetUpdate,
eFacilityReason_forwardedElements,
eFacilityReason_transportedInformation,
} choice;
} FacilityReason;
typedef struct Facility_UUIE_fastStart {
int count;
OpenLogicalChannel item[30];
} Facility_UUIE_fastStart;
typedef struct Facility_UUIE {
enum {
eFacility_UUIE_alternativeAddress = (1 << 31),
eFacility_UUIE_alternativeAliasAddress = (1 << 30),
eFacility_UUIE_conferenceID = (1 << 29),
eFacility_UUIE_callIdentifier = (1 << 28),
eFacility_UUIE_destExtraCallInfo = (1 << 27),
eFacility_UUIE_remoteExtensionAddress = (1 << 26),
eFacility_UUIE_tokens = (1 << 25),
eFacility_UUIE_cryptoTokens = (1 << 24),
eFacility_UUIE_conferences = (1 << 23),
eFacility_UUIE_h245Address = (1 << 22),
eFacility_UUIE_fastStart = (1 << 21),
eFacility_UUIE_multipleCalls = (1 << 20),
eFacility_UUIE_maintainConnection = (1 << 19),
eFacility_UUIE_fastConnectRefused = (1 << 18),
eFacility_UUIE_serviceControl = (1 << 17),
eFacility_UUIE_circuitInfo = (1 << 16),
eFacility_UUIE_featureSet = (1 << 15),
eFacility_UUIE_destinationInfo = (1 << 14),
eFacility_UUIE_h245SecurityMode = (1 << 13),
} options;
TransportAddress alternativeAddress;
FacilityReason reason;
TransportAddress h245Address;
Facility_UUIE_fastStart fastStart;
} Facility_UUIE;
typedef struct Progress_UUIE_fastStart {
int count;
OpenLogicalChannel item[30];
} Progress_UUIE_fastStart;
typedef struct Progress_UUIE {
enum {
eProgress_UUIE_h245Address = (1 << 31),
eProgress_UUIE_h245SecurityMode = (1 << 30),
eProgress_UUIE_tokens = (1 << 29),
eProgress_UUIE_cryptoTokens = (1 << 28),
eProgress_UUIE_fastStart = (1 << 27),
eProgress_UUIE_multipleCalls = (1 << 26),
eProgress_UUIE_maintainConnection = (1 << 25),
eProgress_UUIE_fastConnectRefused = (1 << 24),
} options;
TransportAddress h245Address;
Progress_UUIE_fastStart fastStart;
} Progress_UUIE;
typedef struct H323_UU_PDU_h323_message_body {
enum {
eH323_UU_PDU_h323_message_body_setup,
eH323_UU_PDU_h323_message_body_callProceeding,
eH323_UU_PDU_h323_message_body_connect,
eH323_UU_PDU_h323_message_body_alerting,
eH323_UU_PDU_h323_message_body_information,
eH323_UU_PDU_h323_message_body_releaseComplete,
eH323_UU_PDU_h323_message_body_facility,
eH323_UU_PDU_h323_message_body_progress,
eH323_UU_PDU_h323_message_body_empty,
eH323_UU_PDU_h323_message_body_status,
eH323_UU_PDU_h323_message_body_statusInquiry,
eH323_UU_PDU_h323_message_body_setupAcknowledge,
eH323_UU_PDU_h323_message_body_notify,
} choice;
union {
Setup_UUIE setup;
CallProceeding_UUIE callProceeding;
Connect_UUIE connect;
Alerting_UUIE alerting;
Facility_UUIE facility;
Progress_UUIE progress;
};
} H323_UU_PDU_h323_message_body;
typedef struct RequestMessage {
enum {
eRequestMessage_nonStandard,
eRequestMessage_masterSlaveDetermination,
eRequestMessage_terminalCapabilitySet,
eRequestMessage_openLogicalChannel,
eRequestMessage_closeLogicalChannel,
eRequestMessage_requestChannelClose,
eRequestMessage_multiplexEntrySend,
eRequestMessage_requestMultiplexEntry,
eRequestMessage_requestMode,
eRequestMessage_roundTripDelayRequest,
eRequestMessage_maintenanceLoopRequest,
eRequestMessage_communicationModeRequest,
eRequestMessage_conferenceRequest,
eRequestMessage_multilinkRequest,
eRequestMessage_logicalChannelRateRequest,
} choice;
union {
OpenLogicalChannel openLogicalChannel;
};
} RequestMessage;
typedef struct OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters {
enum {
eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h222LogicalChannelParameters,
eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters,
} choice;
union {
H2250LogicalChannelParameters h2250LogicalChannelParameters;
};
} OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters;
typedef struct OpenLogicalChannelAck_reverseLogicalChannelParameters {
enum {
eOpenLogicalChannelAck_reverseLogicalChannelParameters_portNumber
= (1 << 31),
eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters
= (1 << 30),
eOpenLogicalChannelAck_reverseLogicalChannelParameters_replacementFor
= (1 << 29),
} options;
OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters
multiplexParameters;
} OpenLogicalChannelAck_reverseLogicalChannelParameters;
typedef struct H2250LogicalChannelAckParameters {
enum {
eH2250LogicalChannelAckParameters_nonStandard = (1 << 31),
eH2250LogicalChannelAckParameters_sessionID = (1 << 30),
eH2250LogicalChannelAckParameters_mediaChannel = (1 << 29),
eH2250LogicalChannelAckParameters_mediaControlChannel =
(1 << 28),
eH2250LogicalChannelAckParameters_dynamicRTPPayloadType =
(1 << 27),
eH2250LogicalChannelAckParameters_flowControlToZero =
(1 << 26),
eH2250LogicalChannelAckParameters_portNumber = (1 << 25),
} options;
H245_TransportAddress mediaChannel;
H245_TransportAddress mediaControlChannel;
} H2250LogicalChannelAckParameters;
typedef struct OpenLogicalChannelAck_forwardMultiplexAckParameters {
enum {
eOpenLogicalChannelAck_forwardMultiplexAckParameters_h2250LogicalChannelAckParameters,
} choice;
union {
H2250LogicalChannelAckParameters
h2250LogicalChannelAckParameters;
};
} OpenLogicalChannelAck_forwardMultiplexAckParameters;
typedef struct OpenLogicalChannelAck {
enum {
eOpenLogicalChannelAck_reverseLogicalChannelParameters =
(1 << 31),
eOpenLogicalChannelAck_separateStack = (1 << 30),
eOpenLogicalChannelAck_forwardMultiplexAckParameters =
(1 << 29),
eOpenLogicalChannelAck_encryptionSync = (1 << 28),
} options;
OpenLogicalChannelAck_reverseLogicalChannelParameters
reverseLogicalChannelParameters;
NetworkAccessParameters separateStack;
OpenLogicalChannelAck_forwardMultiplexAckParameters
forwardMultiplexAckParameters;
} OpenLogicalChannelAck;
typedef struct ResponseMessage {
enum {
eResponseMessage_nonStandard,
eResponseMessage_masterSlaveDeterminationAck,
eResponseMessage_masterSlaveDeterminationReject,
eResponseMessage_terminalCapabilitySetAck,
eResponseMessage_terminalCapabilitySetReject,
eResponseMessage_openLogicalChannelAck,
eResponseMessage_openLogicalChannelReject,
eResponseMessage_closeLogicalChannelAck,
eResponseMessage_requestChannelCloseAck,
eResponseMessage_requestChannelCloseReject,
eResponseMessage_multiplexEntrySendAck,
eResponseMessage_multiplexEntrySendReject,
eResponseMessage_requestMultiplexEntryAck,
eResponseMessage_requestMultiplexEntryReject,
eResponseMessage_requestModeAck,
eResponseMessage_requestModeReject,
eResponseMessage_roundTripDelayResponse,
eResponseMessage_maintenanceLoopAck,
eResponseMessage_maintenanceLoopReject,
eResponseMessage_communicationModeResponse,
eResponseMessage_conferenceResponse,
eResponseMessage_multilinkResponse,
eResponseMessage_logicalChannelRateAcknowledge,
eResponseMessage_logicalChannelRateReject,
} choice;
union {
OpenLogicalChannelAck openLogicalChannelAck;
};
} ResponseMessage;
typedef struct MultimediaSystemControlMessage {
enum {
eMultimediaSystemControlMessage_request,
eMultimediaSystemControlMessage_response,
eMultimediaSystemControlMessage_command,
eMultimediaSystemControlMessage_indication,
} choice;
union {
RequestMessage request;
ResponseMessage response;
};
} MultimediaSystemControlMessage;
typedef struct H323_UU_PDU_h245Control {
int count;
MultimediaSystemControlMessage item[4];
} H323_UU_PDU_h245Control;
typedef struct H323_UU_PDU {
enum {
eH323_UU_PDU_nonStandardData = (1 << 31),
eH323_UU_PDU_h4501SupplementaryService = (1 << 30),
eH323_UU_PDU_h245Tunneling = (1 << 29),
eH323_UU_PDU_h245Control = (1 << 28),
eH323_UU_PDU_nonStandardControl = (1 << 27),
eH323_UU_PDU_callLinkage = (1 << 26),
eH323_UU_PDU_tunnelledSignallingMessage = (1 << 25),
eH323_UU_PDU_provisionalRespToH245Tunneling = (1 << 24),
eH323_UU_PDU_stimulusControl = (1 << 23),
eH323_UU_PDU_genericData = (1 << 22),
} options;
H323_UU_PDU_h323_message_body h323_message_body;
H323_UU_PDU_h245Control h245Control;
} H323_UU_PDU;
typedef struct H323_UserInformation {
enum {
eH323_UserInformation_user_data = (1 << 31),
} options;
H323_UU_PDU h323_uu_pdu;
} H323_UserInformation;
typedef struct GatekeeperRequest {
enum {
eGatekeeperRequest_nonStandardData = (1 << 31),
eGatekeeperRequest_gatekeeperIdentifier = (1 << 30),
eGatekeeperRequest_callServices = (1 << 29),
eGatekeeperRequest_endpointAlias = (1 << 28),
eGatekeeperRequest_alternateEndpoints = (1 << 27),
eGatekeeperRequest_tokens = (1 << 26),
eGatekeeperRequest_cryptoTokens = (1 << 25),
eGatekeeperRequest_authenticationCapability = (1 << 24),
eGatekeeperRequest_algorithmOIDs = (1 << 23),
eGatekeeperRequest_integrity = (1 << 22),
eGatekeeperRequest_integrityCheckValue = (1 << 21),
eGatekeeperRequest_supportsAltGK = (1 << 20),
eGatekeeperRequest_featureSet = (1 << 19),
eGatekeeperRequest_genericData = (1 << 18),
} options;
TransportAddress rasAddress;
} GatekeeperRequest;
typedef struct GatekeeperConfirm {
enum {
eGatekeeperConfirm_nonStandardData = (1 << 31),
eGatekeeperConfirm_gatekeeperIdentifier = (1 << 30),
eGatekeeperConfirm_alternateGatekeeper = (1 << 29),
eGatekeeperConfirm_authenticationMode = (1 << 28),
eGatekeeperConfirm_tokens = (1 << 27),
eGatekeeperConfirm_cryptoTokens = (1 << 26),
eGatekeeperConfirm_algorithmOID = (1 << 25),
eGatekeeperConfirm_integrity = (1 << 24),
eGatekeeperConfirm_integrityCheckValue = (1 << 23),
eGatekeeperConfirm_featureSet = (1 << 22),
eGatekeeperConfirm_genericData = (1 << 21),
} options;
TransportAddress rasAddress;
} GatekeeperConfirm;
typedef struct RegistrationRequest_callSignalAddress {
int count;
TransportAddress item[10];
} RegistrationRequest_callSignalAddress;
typedef struct RegistrationRequest_rasAddress {
int count;
TransportAddress item[10];
} RegistrationRequest_rasAddress;
typedef struct RegistrationRequest {
enum {
eRegistrationRequest_nonStandardData = (1 << 31),
eRegistrationRequest_terminalAlias = (1 << 30),
eRegistrationRequest_gatekeeperIdentifier = (1 << 29),
eRegistrationRequest_alternateEndpoints = (1 << 28),
eRegistrationRequest_timeToLive = (1 << 27),
eRegistrationRequest_tokens = (1 << 26),
eRegistrationRequest_cryptoTokens = (1 << 25),
eRegistrationRequest_integrityCheckValue = (1 << 24),
eRegistrationRequest_keepAlive = (1 << 23),
eRegistrationRequest_endpointIdentifier = (1 << 22),
eRegistrationRequest_willSupplyUUIEs = (1 << 21),
eRegistrationRequest_maintainConnection = (1 << 20),
eRegistrationRequest_alternateTransportAddresses = (1 << 19),
eRegistrationRequest_additiveRegistration = (1 << 18),
eRegistrationRequest_terminalAliasPattern = (1 << 17),
eRegistrationRequest_supportsAltGK = (1 << 16),
eRegistrationRequest_usageReportingCapability = (1 << 15),
eRegistrationRequest_multipleCalls = (1 << 14),
eRegistrationRequest_supportedH248Packages = (1 << 13),
eRegistrationRequest_callCreditCapability = (1 << 12),
eRegistrationRequest_capacityReportingCapability = (1 << 11),
eRegistrationRequest_capacity = (1 << 10),
eRegistrationRequest_featureSet = (1 << 9),
eRegistrationRequest_genericData = (1 << 8),
} options;
RegistrationRequest_callSignalAddress callSignalAddress;
RegistrationRequest_rasAddress rasAddress;
unsigned timeToLive;
} RegistrationRequest;
typedef struct RegistrationConfirm_callSignalAddress {
int count;
TransportAddress item[10];
} RegistrationConfirm_callSignalAddress;
typedef struct RegistrationConfirm {
enum {
eRegistrationConfirm_nonStandardData = (1 << 31),
eRegistrationConfirm_terminalAlias = (1 << 30),
eRegistrationConfirm_gatekeeperIdentifier = (1 << 29),
eRegistrationConfirm_alternateGatekeeper = (1 << 28),
eRegistrationConfirm_timeToLive = (1 << 27),
eRegistrationConfirm_tokens = (1 << 26),
eRegistrationConfirm_cryptoTokens = (1 << 25),
eRegistrationConfirm_integrityCheckValue = (1 << 24),
eRegistrationConfirm_willRespondToIRR = (1 << 23),
eRegistrationConfirm_preGrantedARQ = (1 << 22),
eRegistrationConfirm_maintainConnection = (1 << 21),
eRegistrationConfirm_serviceControl = (1 << 20),
eRegistrationConfirm_supportsAdditiveRegistration = (1 << 19),
eRegistrationConfirm_terminalAliasPattern = (1 << 18),
eRegistrationConfirm_supportedPrefixes = (1 << 17),
eRegistrationConfirm_usageSpec = (1 << 16),
eRegistrationConfirm_featureServerAlias = (1 << 15),
eRegistrationConfirm_capacityReportingSpec = (1 << 14),
eRegistrationConfirm_featureSet = (1 << 13),
eRegistrationConfirm_genericData = (1 << 12),
} options;
RegistrationConfirm_callSignalAddress callSignalAddress;
unsigned timeToLive;
} RegistrationConfirm;
typedef struct UnregistrationRequest_callSignalAddress {
int count;
TransportAddress item[10];
} UnregistrationRequest_callSignalAddress;
typedef struct UnregistrationRequest {
enum {
eUnregistrationRequest_endpointAlias = (1 << 31),
eUnregistrationRequest_nonStandardData = (1 << 30),
eUnregistrationRequest_endpointIdentifier = (1 << 29),
eUnregistrationRequest_alternateEndpoints = (1 << 28),
eUnregistrationRequest_gatekeeperIdentifier = (1 << 27),
eUnregistrationRequest_tokens = (1 << 26),
eUnregistrationRequest_cryptoTokens = (1 << 25),
eUnregistrationRequest_integrityCheckValue = (1 << 24),
eUnregistrationRequest_reason = (1 << 23),
eUnregistrationRequest_endpointAliasPattern = (1 << 22),
eUnregistrationRequest_supportedPrefixes = (1 << 21),
eUnregistrationRequest_alternateGatekeeper = (1 << 20),
eUnregistrationRequest_genericData = (1 << 19),
} options;
UnregistrationRequest_callSignalAddress callSignalAddress;
} UnregistrationRequest;
typedef struct AdmissionRequest {
enum {
eAdmissionRequest_callModel = (1 << 31),
eAdmissionRequest_destinationInfo = (1 << 30),
eAdmissionRequest_destCallSignalAddress = (1 << 29),
eAdmissionRequest_destExtraCallInfo = (1 << 28),
eAdmissionRequest_srcCallSignalAddress = (1 << 27),
eAdmissionRequest_nonStandardData = (1 << 26),
eAdmissionRequest_callServices = (1 << 25),
eAdmissionRequest_canMapAlias = (1 << 24),
eAdmissionRequest_callIdentifier = (1 << 23),
eAdmissionRequest_srcAlternatives = (1 << 22),
eAdmissionRequest_destAlternatives = (1 << 21),
eAdmissionRequest_gatekeeperIdentifier = (1 << 20),
eAdmissionRequest_tokens = (1 << 19),
eAdmissionRequest_cryptoTokens = (1 << 18),
eAdmissionRequest_integrityCheckValue = (1 << 17),
eAdmissionRequest_transportQOS = (1 << 16),
eAdmissionRequest_willSupplyUUIEs = (1 << 15),
eAdmissionRequest_callLinkage = (1 << 14),
eAdmissionRequest_gatewayDataRate = (1 << 13),
eAdmissionRequest_capacity = (1 << 12),
eAdmissionRequest_circuitInfo = (1 << 11),
eAdmissionRequest_desiredProtocols = (1 << 10),
eAdmissionRequest_desiredTunnelledProtocol = (1 << 9),
eAdmissionRequest_featureSet = (1 << 8),
eAdmissionRequest_genericData = (1 << 7),
} options;
TransportAddress destCallSignalAddress;
TransportAddress srcCallSignalAddress;
} AdmissionRequest;
typedef struct AdmissionConfirm {
enum {
eAdmissionConfirm_irrFrequency = (1 << 31),
eAdmissionConfirm_nonStandardData = (1 << 30),
eAdmissionConfirm_destinationInfo = (1 << 29),
eAdmissionConfirm_destExtraCallInfo = (1 << 28),
eAdmissionConfirm_destinationType = (1 << 27),
eAdmissionConfirm_remoteExtensionAddress = (1 << 26),
eAdmissionConfirm_alternateEndpoints = (1 << 25),
eAdmissionConfirm_tokens = (1 << 24),
eAdmissionConfirm_cryptoTokens = (1 << 23),
eAdmissionConfirm_integrityCheckValue = (1 << 22),
eAdmissionConfirm_transportQOS = (1 << 21),
eAdmissionConfirm_willRespondToIRR = (1 << 20),
eAdmissionConfirm_uuiesRequested = (1 << 19),
eAdmissionConfirm_language = (1 << 18),
eAdmissionConfirm_alternateTransportAddresses = (1 << 17),
eAdmissionConfirm_useSpecifiedTransport = (1 << 16),
eAdmissionConfirm_circuitInfo = (1 << 15),
eAdmissionConfirm_usageSpec = (1 << 14),
eAdmissionConfirm_supportedProtocols = (1 << 13),
eAdmissionConfirm_serviceControl = (1 << 12),
eAdmissionConfirm_multipleCalls = (1 << 11),
eAdmissionConfirm_featureSet = (1 << 10),
eAdmissionConfirm_genericData = (1 << 9),
} options;
TransportAddress destCallSignalAddress;
} AdmissionConfirm;
typedef struct LocationRequest {
enum {
eLocationRequest_endpointIdentifier = (1 << 31),
eLocationRequest_nonStandardData = (1 << 30),
eLocationRequest_sourceInfo = (1 << 29),
eLocationRequest_canMapAlias = (1 << 28),
eLocationRequest_gatekeeperIdentifier = (1 << 27),
eLocationRequest_tokens = (1 << 26),
eLocationRequest_cryptoTokens = (1 << 25),
eLocationRequest_integrityCheckValue = (1 << 24),
eLocationRequest_desiredProtocols = (1 << 23),
eLocationRequest_desiredTunnelledProtocol = (1 << 22),
eLocationRequest_featureSet = (1 << 21),
eLocationRequest_genericData = (1 << 20),
eLocationRequest_hopCount = (1 << 19),
eLocationRequest_circuitInfo = (1 << 18),
} options;
TransportAddress replyAddress;
} LocationRequest;
typedef struct LocationConfirm {
enum {
eLocationConfirm_nonStandardData = (1 << 31),
eLocationConfirm_destinationInfo = (1 << 30),
eLocationConfirm_destExtraCallInfo = (1 << 29),
eLocationConfirm_destinationType = (1 << 28),
eLocationConfirm_remoteExtensionAddress = (1 << 27),
eLocationConfirm_alternateEndpoints = (1 << 26),
eLocationConfirm_tokens = (1 << 25),
eLocationConfirm_cryptoTokens = (1 << 24),
eLocationConfirm_integrityCheckValue = (1 << 23),
eLocationConfirm_alternateTransportAddresses = (1 << 22),
eLocationConfirm_supportedProtocols = (1 << 21),
eLocationConfirm_multipleCalls = (1 << 20),
eLocationConfirm_featureSet = (1 << 19),
eLocationConfirm_genericData = (1 << 18),
eLocationConfirm_circuitInfo = (1 << 17),
eLocationConfirm_serviceControl = (1 << 16),
} options;
TransportAddress callSignalAddress;
TransportAddress rasAddress;
} LocationConfirm;
typedef struct InfoRequestResponse_callSignalAddress {
int count;
TransportAddress item[10];
} InfoRequestResponse_callSignalAddress;
typedef struct InfoRequestResponse {
enum {
eInfoRequestResponse_nonStandardData = (1 << 31),
eInfoRequestResponse_endpointAlias = (1 << 30),
eInfoRequestResponse_perCallInfo = (1 << 29),
eInfoRequestResponse_tokens = (1 << 28),
eInfoRequestResponse_cryptoTokens = (1 << 27),
eInfoRequestResponse_integrityCheckValue = (1 << 26),
eInfoRequestResponse_needResponse = (1 << 25),
eInfoRequestResponse_capacity = (1 << 24),
eInfoRequestResponse_irrStatus = (1 << 23),
eInfoRequestResponse_unsolicited = (1 << 22),
eInfoRequestResponse_genericData = (1 << 21),
} options;
TransportAddress rasAddress;
InfoRequestResponse_callSignalAddress callSignalAddress;
} InfoRequestResponse;
typedef struct RasMessage {
enum {
eRasMessage_gatekeeperRequest,
eRasMessage_gatekeeperConfirm,
eRasMessage_gatekeeperReject,
eRasMessage_registrationRequest,
eRasMessage_registrationConfirm,
eRasMessage_registrationReject,
eRasMessage_unregistrationRequest,
eRasMessage_unregistrationConfirm,
eRasMessage_unregistrationReject,
eRasMessage_admissionRequest,
eRasMessage_admissionConfirm,
eRasMessage_admissionReject,
eRasMessage_bandwidthRequest,
eRasMessage_bandwidthConfirm,
eRasMessage_bandwidthReject,
eRasMessage_disengageRequest,
eRasMessage_disengageConfirm,
eRasMessage_disengageReject,
eRasMessage_locationRequest,
eRasMessage_locationConfirm,
eRasMessage_locationReject,
eRasMessage_infoRequest,
eRasMessage_infoRequestResponse,
eRasMessage_nonStandardMessage,
eRasMessage_unknownMessageResponse,
eRasMessage_requestInProgress,
eRasMessage_resourcesAvailableIndicate,
eRasMessage_resourcesAvailableConfirm,
eRasMessage_infoRequestAck,
eRasMessage_infoRequestNak,
eRasMessage_serviceControlIndication,
eRasMessage_serviceControlResponse,
} choice;
union {
GatekeeperRequest gatekeeperRequest;
GatekeeperConfirm gatekeeperConfirm;
RegistrationRequest registrationRequest;
RegistrationConfirm registrationConfirm;
UnregistrationRequest unregistrationRequest;
AdmissionRequest admissionRequest;
AdmissionConfirm admissionConfirm;
LocationRequest locationRequest;
LocationConfirm locationConfirm;
InfoRequestResponse infoRequestResponse;
};
} RasMessage;
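/*
 * Editorial note: the H.323 typedefs above are a pruned C mapping of the
 * H.225.0/H.245 ASN.1 grammar; only the fields the conntrack/NAT helper
 * needs (transport addresses, fastStart channel lists, RAS message
 * bodies) are kept, and the per-type "options"/"choice" enums mirror the
 * ASN.1 OPTIONAL and CHOICE markers.
 */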
# 44 "include/linux/netfilter/nf_conntrack_h323_asn1.h" 2
typedef struct {
enum {
Q931_NationalEscape = 0x00,
Q931_Alerting = 0x01,
Q931_CallProceeding = 0x02,
Q931_Connect = 0x07,
Q931_ConnectAck = 0x0F,
Q931_Progress = 0x03,
Q931_Setup = 0x05,
Q931_SetupAck = 0x0D,
Q931_Resume = 0x26,
Q931_ResumeAck = 0x2E,
Q931_ResumeReject = 0x22,
Q931_Suspend = 0x25,
Q931_SuspendAck = 0x2D,
Q931_SuspendReject = 0x21,
Q931_UserInformation = 0x20,
Q931_Disconnect = 0x45,
Q931_Release = 0x4D,
Q931_ReleaseComplete = 0x5A,
Q931_Restart = 0x46,
Q931_RestartAck = 0x4E,
Q931_Segment = 0x60,
Q931_CongestionCtrl = 0x79,
Q931_Information = 0x7B,
Q931_Notify = 0x6E,
Q931_Status = 0x7D,
Q931_StatusEnquiry = 0x75,
Q931_Facility = 0x62
} MessageType;
H323_UserInformation UUIE;
} Q931;
# 92 "include/linux/netfilter/nf_conntrack_h323_asn1.h"
int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage * ras);
int DecodeQ931(unsigned char *buf, size_t sz, Q931 * q931);
int DecodeMultimediaSystemControlMessage(unsigned char *buf, size_t sz,
MultimediaSystemControlMessage *
mscm);
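/*
 * Editorial note (illustrative only): the H.323 helper feeds raw
 * signalling payloads to these ASN.1 decoders and then walks the decoded
 * structures for embedded transport addresses.  Hypothetical call,
 * assuming data/datalen describe one Q.931 message and that a negative
 * return indicates a decode error:
 *
 *	static Q931 q931;
 *	if (DecodeQ931(data, datalen, &q931) >= 0)
 *		inspect q931.UUIE.h323_uu_pdu.h323_message_body
 */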
# 7 "include/linux/netfilter/nf_conntrack_h323.h" 2
struct nf_ct_h323_master {
__be16 sig_port[IP_CT_DIR_MAX];
__be16 rtp_port[4][IP_CT_DIR_MAX];
union {
u_int32_t timeout;
u_int16_t tpkt_len[IP_CT_DIR_MAX];
};
};
struct nf_conn;
extern int get_h225_addr(struct nf_conn *ct, unsigned char *data,
TransportAddress *taddr,
union nf_inet_addr *addr, __be16 *port);
extern void nf_conntrack_h245_expect(struct nf_conn *new,
struct nf_conntrack_expect *this);
extern void nf_conntrack_q931_expect(struct nf_conn *new,
struct nf_conntrack_expect *this);
extern int (*set_h245_addr_hook) (struct sk_buff *skb,
unsigned char **data, int dataoff,
H245_TransportAddress *taddr,
union nf_inet_addr *addr,
__be16 port);
extern int (*set_h225_addr_hook) (struct sk_buff *skb,
unsigned char **data, int dataoff,
TransportAddress *taddr,
union nf_inet_addr *addr,
__be16 port);
extern int (*set_sig_addr_hook) (struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned char **data,
TransportAddress *taddr, int count);
extern int (*set_ras_addr_hook) (struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned char **data,
TransportAddress *taddr, int count);
extern int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned char **data, int dataoff,
H245_TransportAddress *taddr,
__be16 port, __be16 rtp_port,
struct nf_conntrack_expect *rtp_exp,
struct nf_conntrack_expect *rtcp_exp);
extern int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned char **data, int dataoff,
H245_TransportAddress *taddr, __be16 port,
struct nf_conntrack_expect *exp);
extern int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned char **data, int dataoff,
TransportAddress *taddr, __be16 port,
struct nf_conntrack_expect *exp);
extern int (*nat_callforwarding_hook) (struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned char **data, int dataoff,
TransportAddress *taddr,
__be16 port,
struct nf_conntrack_expect *exp);
extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned char **data, TransportAddress *taddr,
int idx, __be16 port,
struct nf_conntrack_expect *exp);
# 47 "include/net/netfilter/nf_conntrack.h" 2
# 1 "include/linux/netfilter/nf_conntrack_sane.h" 1
# 9 "include/linux/netfilter/nf_conntrack_sane.h"
enum sane_state {
SANE_STATE_NORMAL,
SANE_STATE_START_REQUESTED,
};
struct nf_ct_sane_master {
enum sane_state state;
};
# 48 "include/net/netfilter/nf_conntrack.h" 2
# 1 "include/linux/netfilter/nf_conntrack_sip.h" 1
struct nf_ct_sip_master {
unsigned int register_cseq;
unsigned int invite_cseq;
};
enum sip_expectation_classes {
SIP_EXPECT_SIGNALLING,
SIP_EXPECT_AUDIO,
SIP_EXPECT_VIDEO,
SIP_EXPECT_IMAGE,
__SIP_EXPECT_MAX
};
struct sdp_media_type {
const char *name;
unsigned int len;
enum sip_expectation_classes class;
};
# 35 "include/linux/netfilter/nf_conntrack_sip.h"
struct sip_handler {
const char *method;
unsigned int len;
int (*request)(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq);
int (*response)(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq, unsigned int code);
};
# 54 "include/linux/netfilter/nf_conntrack_sip.h"
struct sip_header {
const char *name;
const char *cname;
const char *search;
unsigned int len;
unsigned int clen;
unsigned int slen;
int (*match_len)(const struct nf_conn *ct,
const char *dptr, const char *limit,
int *shift);
};
# 83 "include/linux/netfilter/nf_conntrack_sip.h"
enum sip_header_types {
SIP_HDR_CSEQ,
SIP_HDR_FROM,
SIP_HDR_TO,
SIP_HDR_CONTACT,
SIP_HDR_VIA_UDP,
SIP_HDR_VIA_TCP,
SIP_HDR_EXPIRES,
SIP_HDR_CONTENT_LENGTH,
};
enum sdp_header_types {
SDP_HDR_UNSPEC,
SDP_HDR_VERSION,
SDP_HDR_OWNER_IP4,
SDP_HDR_CONNECTION_IP4,
SDP_HDR_OWNER_IP6,
SDP_HDR_CONNECTION_IP6,
SDP_HDR_MEDIA,
};
extern unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb,
unsigned int dataoff,
const char **dptr,
unsigned int *datalen);
extern void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, s16 off);
extern unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
unsigned int dataoff,
const char **dptr,
unsigned int *datalen,
struct nf_conntrack_expect *exp,
unsigned int matchoff,
unsigned int matchlen);
extern unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb,
unsigned int dataoff,
const char **dptr,
unsigned int *datalen,
unsigned int sdpoff,
enum sdp_header_types type,
enum sdp_header_types term,
const union nf_inet_addr *addr);
extern unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
unsigned int dataoff,
const char **dptr,
unsigned int *datalen,
unsigned int matchoff,
unsigned int matchlen,
u_int16_t port);
extern unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
unsigned int dataoff,
const char **dptr,
unsigned int *datalen,
unsigned int sdpoff,
const union nf_inet_addr *addr);
extern unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb,
unsigned int dataoff,
const char **dptr,
unsigned int *datalen,
struct nf_conntrack_expect *rtp_exp,
struct nf_conntrack_expect *rtcp_exp,
unsigned int mediaoff,
unsigned int medialen,
union nf_inet_addr *rtp_addr);
extern int ct_sip_parse_request(const struct nf_conn *ct,
const char *dptr, unsigned int datalen,
unsigned int *matchoff, unsigned int *matchlen,
union nf_inet_addr *addr, __be16 *port);
extern int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
unsigned int dataoff, unsigned int datalen,
enum sip_header_types type,
unsigned int *matchoff, unsigned int *matchlen);
extern int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
unsigned int *dataoff, unsigned int datalen,
enum sip_header_types type, int *in_header,
unsigned int *matchoff, unsigned int *matchlen,
union nf_inet_addr *addr, __be16 *port);
extern int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
unsigned int dataoff, unsigned int datalen,
const char *name,
unsigned int *matchoff, unsigned int *matchlen,
union nf_inet_addr *addr);
extern int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
unsigned int off, unsigned int datalen,
const char *name,
 unsigned int *matchoff, unsigned int *matchlen,
unsigned int *val);
extern int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
unsigned int dataoff, unsigned int datalen,
enum sdp_header_types type,
enum sdp_header_types term,
unsigned int *matchoff, unsigned int *matchlen);
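/*
 * Editorial note (illustrative only): the ct_sip_* parsers work directly
 * on the SIP payload (dptr/datalen) and report match offsets instead of
 * copying data.  A hypothetical lookup of the CSeq header, assuming a
 * positive return means "found":
 *
 *	unsigned int matchoff, matchlen;
 *	if (ct_sip_get_header(ct, dptr, 0, datalen, SIP_HDR_CSEQ,
 *			      &matchoff, &matchlen) > 0)
 *		the value spans dptr + matchoff .. + matchlen
 */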
# 49 "include/net/netfilter/nf_conntrack.h" 2
union nf_conntrack_help {
struct nf_ct_ftp_master ct_ftp_info;
struct nf_ct_pptp_master ct_pptp_info;
struct nf_ct_h323_master ct_h323_info;
struct nf_ct_sane_master ct_sane_info;
struct nf_ct_sip_master ct_sip_info;
};
# 70 "include/net/netfilter/nf_conntrack.h"
struct nf_conntrack_helper;
struct nf_conn_help {
struct nf_conntrack_helper *helper;
union nf_conntrack_help help;
struct hlist_head expectations;
u8 expecting[4];
};
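/*
 * Editorial note: union nf_conntrack_help holds the per-connection
 * private state of whichever ALG helper (FTP, PPTP, H.323, SANE, SIP) is
 * attached; struct nf_conn_help wraps it together with the helper
 * pointer, the list of pending expectations and per-class expectation
 * counts.
 */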
# 1 "include/net/netfilter/ipv4/nf_conntrack_ipv4.h" 1
# 13 "include/net/netfilter/ipv4/nf_conntrack_ipv4.h"
extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
extern int nf_conntrack_ipv4_compat_init(void);
extern void nf_conntrack_ipv4_compat_fini(void);
extern void need_ipv4_conntrack(void);
# 89 "include/net/netfilter/nf_conntrack.h" 2
# 1 "include/net/netfilter/ipv6/nf_conntrack_ipv6.h" 1
extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
extern int nf_ct_frag6_init(void);
extern void nf_ct_frag6_cleanup(void);
extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
struct net_device *in,
struct net_device *out,
int (*okfn)(struct sk_buff *));
struct inet_frags_ctl;
extern struct ctl_table nf_ct_ipv6_sysctl_table[];
# 90 "include/net/netfilter/nf_conntrack.h" 2
struct nf_conn {
struct nf_conntrack ct_general;
spinlock_t lock;
struct nf_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX];
unsigned long status;
struct nf_conn *master;
struct timer_list timeout;
u_int32_t mark;
struct {
char *app_proto;
char *app_data;
unsigned int app_data_len;
} layer7;
union nf_conntrack_proto proto;
struct nf_ct_ext *ext;
uint32_t layer7_id;
void *dpi_context;
};
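/*
 * Editorial note: struct nf_conn is the per-connection tracking entry;
 * tuplehash[] links it into the conntrack hash once per direction and
 * ->ext carries optional extensions (helper, NAT, accounting, ...).
 * The layer7 block, layer7_id and dpi_context members are not in the
 * mainline 2.6.36 structure; they appear to come from the l7-filter
 * patch and this tree's DPI support (assumption based on naming).
 */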
static inline __attribute__((always_inline)) struct nf_conn *
nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash)
{
return ({ const typeof( ((struct nf_conn *)0)->tuplehash[hash->tuple.dst.dir] ) *__mptr = (hash); (struct nf_conn *)( (char *)__mptr - __builtin_offsetof(struct nf_conn,tuplehash[hash->tuple.dst.dir]) );})
;
}
static inline __attribute__((always_inline)) u_int16_t nf_ct_l3num(const struct nf_conn *ct)
{
return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
}
static inline __attribute__((always_inline)) u_int8_t nf_ct_protonum(const struct nf_conn *ct)
{
return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
}
extern struct net init_net;
static inline __attribute__((always_inline)) struct net *nf_ct_net(const struct nf_conn *ct)
{
return (&init_net);
}
extern void
nf_conntrack_alter_reply(struct nf_conn *ct,
const struct nf_conntrack_tuple *newreply);
extern int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
const struct nf_conn *ignored_conntrack);
static inline __attribute__((always_inline)) struct nf_conn *
nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
{
*ctinfo = skb->nfctinfo;
return (struct nf_conn *)skb->nfct;
}
static inline __attribute__((always_inline)) void nf_ct_put(struct nf_conn *ct)
{
;
nf_conntrack_put(&ct->ct_general);
}
extern int nf_ct_l3proto_try_module_get(unsigned short l3proto);
extern void nf_ct_l3proto_module_put(unsigned short l3proto);
extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls);
extern void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size);
extern struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple);
extern void nf_conntrack_hash_insert(struct nf_conn *ct);
extern void nf_ct_delete_from_lists(struct nf_conn *ct);
extern void nf_ct_insert_dying_list(struct nf_conn *ct);
extern void nf_conntrack_flush_report(struct net *net, u32 pid, int report);
extern int nf_ct_get_tuplepr(const struct sk_buff *skb,
unsigned int nhoff, u_int16_t l3num,
struct nf_conntrack_tuple *tuple);
extern int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig);
extern void __nf_ct_refresh_acct(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
const struct sk_buff *skb,
unsigned long extra_jiffies,
int do_acct);
static inline __attribute__((always_inline)) void nf_ct_refresh_acct(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
const struct sk_buff *skb,
unsigned long extra_jiffies)
{
__nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, 1);
}
static inline __attribute__((always_inline)) void nf_ct_refresh(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned long extra_jiffies)
{
__nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
}
extern int __nf_ct_kill_acct(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
const struct sk_buff *skb,
int do_acct);
static inline __attribute__((always_inline)) int nf_ct_kill_acct(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
const struct sk_buff *skb)
{
return __nf_ct_kill_acct(ct, ctinfo, skb, 1);
}
static inline __attribute__((always_inline)) int nf_ct_kill(struct nf_conn *ct)
{
return __nf_ct_kill_acct(ct, 0, ((void *)0), 0);
}
extern s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
enum ip_conntrack_dir dir,
u32 seq);
extern __attribute__((section(".data..percpu" ""))) __typeof__(struct nf_conn) nf_conntrack_untracked;
static inline __attribute__((always_inline)) struct nf_conn *nf_ct_untracked_get(void)
{
return &(*({ do { const void *__vpp_verify = (typeof((&(nf_conntrack_untracked))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(nf_conntrack_untracked))) *)(&(nf_conntrack_untracked)))); (typeof((typeof(*(&(nf_conntrack_untracked))) *)(&(nf_conntrack_untracked)))) (__ptr + (((__per_cpu_offset[(__current_thread_info->cpu)])))); }); }));
}
extern void nf_ct_untracked_status_or(unsigned long bits);
extern void
nf_ct_iterate_cleanup(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data);
extern void nf_conntrack_free(struct nf_conn *ct);
extern struct nf_conn *
nf_conntrack_alloc(struct net *net, u16 zone,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_tuple *repl,
gfp_t gfp);
static inline __attribute__((always_inline)) int nf_ct_is_template(const struct nf_conn *ct)
{
return test_bit(IPS_TEMPLATE_BIT, &ct->status);
}
static inline __attribute__((always_inline)) int nf_ct_is_confirmed(struct nf_conn *ct)
{
return test_bit(IPS_CONFIRMED_BIT, &ct->status);
}
static inline __attribute__((always_inline)) int nf_ct_is_dying(struct nf_conn *ct)
{
return test_bit(IPS_DYING_BIT, &ct->status);
}
static inline __attribute__((always_inline)) int nf_ct_is_untracked(const struct nf_conn *ct)
{
return test_bit(IPS_UNTRACKED_BIT, &ct->status);
}
extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
extern unsigned int nf_conntrack_htable_size;
extern unsigned int nf_conntrack_max;
extern int nf_conntrack_ftp_enable;
extern int nf_conntrack_sip_enable;
extern int nf_conntrack_h323_enable;
extern int nf_conntrack_rtsp_enable;
extern int nf_conntrack_l2tp_enable;
extern int nf_conntrack_ipsec_enable;
extern int nf_conntrack_pptp_enable;
extern int nf_conntrack_portscan_enable;
extern int nf_conntrack_ftp_port;
extern int nf_conntrack_esp_timeout;
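/*
 * Illustrative sketch, not part of the original driver: typical use of the
 * conntrack accessors declared above from a packet path. The caller, the
 * skb argument and the return encoding are assumptions for the example only.
 */
static int __attribute__((unused)) epon_example_ct_lookup(struct sk_buff *skb)
{
 enum ip_conntrack_info ctinfo;
 struct nf_conn *ct;
 ct = nf_ct_get(skb, &ctinfo); /* may be NULL if the skb carries no conntrack entry */
 if (!ct || nf_ct_is_untracked(ct))
  return 0; /* nothing to classify */
 /* original-direction L3/L4 protocol numbers identify the flow */
 return (nf_ct_l3num(ct) << 8) | nf_ct_protonum(ct);
}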
# 67 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/net/netfilter/nf_conntrack_helper.h" 1
# 13 "include/net/netfilter/nf_conntrack_helper.h"
# 1 "include/net/netfilter/nf_conntrack_extend.h" 1
enum nf_ct_ext_id {
NF_CT_EXT_HELPER,
NF_CT_EXT_NAT,
NF_CT_EXT_ACCT,
NF_CT_EXT_ECACHE,
NF_CT_EXT_ZONE,
NF_CT_EXT_NUM,
};
# 24 "include/net/netfilter/nf_conntrack_extend.h"
struct nf_ct_ext {
struct rcu_head rcu;
u8 offset[NF_CT_EXT_NUM];
u8 len;
char data[0];
};
static inline __attribute__((always_inline)) int __nf_ct_ext_exist(const struct nf_ct_ext *ext, u8 id)
{
return !!ext->offset[id];
}
static inline __attribute__((always_inline)) int nf_ct_ext_exist(const struct nf_conn *ct, u8 id)
{
return (ct->ext && __nf_ct_ext_exist(ct->ext, id));
}
static inline __attribute__((always_inline)) void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id)
{
if (!nf_ct_ext_exist(ct, id))
return ((void *)0);
return (void *)ct->ext + ct->ext->offset[id];
}
extern void __nf_ct_ext_destroy(struct nf_conn *ct);
static inline __attribute__((always_inline)) void nf_ct_ext_destroy(struct nf_conn *ct)
{
if (ct->ext)
__nf_ct_ext_destroy(ct);
}
void __nf_ct_ext_free_rcu(struct rcu_head *head);
static inline __attribute__((always_inline)) void nf_ct_ext_free(struct nf_conn *ct)
{
if (ct->ext)
call_rcu(&ct->ext->rcu, __nf_ct_ext_free_rcu);
}
void *
__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp);
struct nf_ct_ext_type {
void (*destroy)(struct nf_conn *ct);
void (*move)(void *new, void *old);
enum nf_ct_ext_id id;
unsigned int flags;
u8 len;
u8 align;
u8 alloc_size;
};
int nf_ct_extend_register(struct nf_ct_ext_type *type);
void nf_ct_extend_unregister(struct nf_ct_ext_type *type);
# 14 "include/net/netfilter/nf_conntrack_helper.h" 2
struct module;
struct nf_conntrack_helper {
struct hlist_node hnode;
const char *name;
struct module *me;
const struct nf_conntrack_expect_policy *expect_policy;
struct nf_conntrack_tuple tuple;
int (*help)(struct sk_buff *skb,
unsigned int protoff,
struct nf_conn *ct,
enum ip_conntrack_info conntrackinfo);
void (*destroy)(struct nf_conn *ct);
int (*to_nlattr)(struct sk_buff *skb, const struct nf_conn *ct);
unsigned int expect_class_max;
};
extern struct nf_conntrack_helper *
__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum);
extern struct nf_conntrack_helper *
nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum);
extern int nf_conntrack_helper_register(struct nf_conntrack_helper *);
extern void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);
extern struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp);
extern int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
gfp_t flags);
extern void nf_ct_helper_destroy(struct nf_conn *ct);
static inline __attribute__((always_inline)) struct nf_conn_help *nfct_help(const struct nf_conn *ct)
{
return ((struct nf_conn_help *)__nf_ct_ext_find((ct), (NF_CT_EXT_HELPER)));
}
extern int nf_conntrack_helper_init(void);
extern void nf_conntrack_helper_fini(void);
# 68 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/tc3162/cmdparse.h" 1
# 19 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/tc3162/cmdparse.h"
typedef struct {
char *name;
int (*func)(int argc,char *argv[],void *p);
int flags;
int argcmin;
char *argc_errmsg;
} cmds_t;
extern int cmd_reg_add(char *cmd_name, cmds_t *cmds_p);
extern int cmd_register(cmds_t *cmds_p);
extern int cmd_unregister(char *name);
extern int subcmd(const cmds_t tab[], int argc, char *argv[], void *p);
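/*
 * Illustrative sketch, not from the original source: registering a
 * hypothetical debug command with the cmdparse facility declared above.
 * The command name, the stub handler and the assumption that the table is
 * terminated by a NULL name are examples/assumptions only.
 */
static int epon_example_cmd_func(int argc, char *argv[], void *p)
{
 return 0; /* stub handler */
}
static cmds_t epon_example_cmds[] = {
 { "eponexample", epon_example_cmd_func, 0, 0, "usage: eponexample" },
 { ((void *)0), ((void *)0), 0, 0, ((void *)0) } /* assumed terminator entry */
};
static void __attribute__((unused)) epon_example_cmd_init(void)
{
 cmd_register(epon_example_cmds); /* pair with cmd_unregister("eponexample") on exit */
}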
# 70 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon.h" 1
# 33 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon.h"
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/../common/xpondrv.h" 1
# 46 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/../common/xpondrv.h"
int xpon_prepare_params(char * cmdline, size_t * argc, char ** argv);
void schedule_fe_reset(void);
int xpon_check_emergence_state(void);
# 57 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/../common/xpondrv.h"
int atoi_temp(char *s);
# 34 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_types.h" 1
# 35 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_util.h" 1
# 35 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_util.h"
void eponDbgPrint(__u32 debugLevel, char *fmt,...);
int strToMacNum(__u8 *macstr, __u8 *macnum);
__u16 get16(__u8 *cp);
__u32 get32 (__u8 *cp);
__u8 *put32( __u8 *cp, __u32 x);
__u8 *put16( __u8 *cp, __u16 x);
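/*
 * Illustrative sketch (assumption): round-tripping values through the
 * byte-buffer helpers declared above. The exact byte order is whatever
 * put32()/put16() implement; the example only relies on get32()/get16()
 * being their inverses.
 */
static void __attribute__((unused)) epon_example_pack(void)
{
 __u8 buf[6];
 __u8 *cp = buf;
 cp = put32(cp, 0x12345678u); /* writes 4 bytes, returns advanced pointer */
 put16(cp, 0xBEEF); /* writes 2 more bytes */
 (void)get32(buf); /* expected: 0x12345678 */
 (void)get16(buf + 4); /* expected: 0xBEEF */
}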
# 36 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mpcp.h" 1
# 87 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mpcp.h"
typedef struct eponMpcpDiscFsm_s{
__u8 mpcpDiscState;
__u8 mpcpDiscMacr_RgstrAck;
__u8 mpcpReqRetryFlag;
int begin;
__u8 *dataRx;
__u8 *dataTx;
__u32 grantEndTime;
int insideDiscoveryWindow;
__u32 localTime;
__u16 opcodeRx;
__u16 opcodeTx;
__u16 pendingGrants;
int registered;
__u16 syncTime;
int timestampDrift;
__u16 rgstAgainTimeout;
}eponMpcpDiscFsm_t, *eponMpcpDiscFsm_p;
typedef struct eponMpcp_s{
eponMpcpDiscFsm_t eponDiscFsm;
}eponMpcp_t, *eponMpcp_p;
int eponMpcpDscvFsmWaitHandler(__u8 llidIndex);
void eponTimeDrftIntHandler(__u32 data);
void eponMpcpTmOutIntHandler(__u32 data);
void eponMpcpDiscvGateIntHandler(unsigned long data);
int eponMpcpLocalDergstr(__u8 llidIndex);
int eponMpcpSetDiscvRgstAck(__u8 llidIndex , __u8 rgstAckFlag);
int eponMpcpRgstReqIntHandler(void);
int eponMpcpRgstAckIntHandler(void);
int eponMpcpRgstIntHandler(__u8 llidIndex);
int eponMpcpGntOvrRunIntHandler(void);
int eponMpcpRptOvrIntvalIntHandler(void);
int max_dscv_gate_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data);
int max_dscv_gate_write_proc(struct file *file, const char *buffer,
unsigned long count, void *data);
# 37 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_ioctl.h" 1
# 35 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_ioctl.h"
# 1 "include/linux/version.h" 1
# 36 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_ioctl.h" 2
# 84 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_ioctl.h"
typedef struct eponTxCnt_s{
__u32 txFrameCnt;
__u32 txFrameLen;
__u32 txDropCnt;
__u32 txBroadcastCnt;
__u32 txMulticastCnt;
__u32 txLess64Cnt;
__u32 txMore1518Cnt;
__u32 txEq64Cnt;
__u32 txFrom65To127Cnt;
__u32 txFrom128To255Cnt;
__u32 txFrom256To511Cnt;
__u32 txFrom512To1023Cnt;
__u32 txFrom1024To1518Cnt;
}eponTxCnt_t, *eponTxCnt_p;
typedef struct eponRxCnt_s{
__u32 rxFrameCnt;
__u32 rxFrameLen;
__u32 rxDropCnt;
__u32 rxBroadcastCnt;
__u32 rxMulticastCnt;
__u32 rxCrcCnt;
__u32 rxFragFameCnt;
__u32 rxJabberFameCnt;
__u32 rxLess64Cnt;
__u32 rxMore1518Cnt;
__u32 rxEq64Cnt;
__u32 rxFrom65To127Cnt;
__u32 rxFrom128To255Cnt;
__u32 rxFrom256To511Cnt;
__u32 rxFrom512To1023Cnt;
__u32 rxFrom1024To1518Cnt;
}eponRxCnt_t, *eponRxCnt_p;
typedef struct {
__u8 llidIndex;
__u8 param0;
__u16 param1;
__u32 param2;
__u8 info[128];
} eponMacIoctl_t ;
long eponMacIoctl(struct file *file, unsigned int cmd,
unsigned long arg);
# 38 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_timer.h" 1
# 33 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_timer.h"
typedef void (*eponTimerCallback)(unsigned long);
int eponMacAddTimer(struct timer_list *eponTimer, __u32 interval, eponTimerCallback callback, unsigned long param);
int eponMacDelTimer(struct timer_list *timer);
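/*
 * Illustrative sketch (assumption): arming a one-shot EPON software timer
 * with the helpers declared above. The interval value, its unit and the
 * callback argument are placeholders for the example only.
 */
static struct timer_list epon_example_timer;
static void epon_example_timeout(unsigned long data)
{
 /* timer expired; 'data' is the value that was passed to eponMacAddTimer() */
}
static void __attribute__((unused)) epon_example_start_timer(void)
{
 eponMacAddTimer(&epon_example_timer, 100, epon_example_timeout, 0);
}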
# 39 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_reg.h" 1
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header.h" 1
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h" 1
# 1 "include/linux/mtd/rt_flash.h" 1
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/tc3162/tc3162.h" 1
# 94 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/tc3162/tc3162.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/tc3162/./tc3182_int_source.h" 1
# 20 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/tc3162/./tc3182_int_source.h"
enum
interrupt_source
{
DUMMY_INT,
UART_INT,
PTM_B0_INT,
SI_SWINT1_INT0,
SI_SWINT1_INT1,
TIMER0_INT,
TIMER1_INT,
TIMER2_INT,
SI_SWINT_INT0,
SI_SWINT_INT1,
TIMER5_INT,
GPIO_INT,
RESERVED1,
SI_PC1_INT,
SI_PC_INT,
APB_DMA0_INT,
MAC1_INT,
HSUART_INT,
IRQ_RT3XXX_USB,
DYINGGASP_INT,
DMT_INT,
USB20_INT,
MAC_INT,
SAR_INT,
USB11_INT,
PCIE_A_INT,
PCIE_SERR_INT,
PTM_B1_INT,
XSLV1_INT,
USB_INT,
SI_TIMER1_INT,
SI_TIMER_INT,
SWR_INT,
BUS_TOUT_INT,
RESERVE_A_INT,
RESERVE_B_INT,
RESERVE_C_INT,
AUTO_MANUAL_INT
};
# 95 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/tc3162/tc3162.h" 2
typedef signed long int int32;
typedef signed long int sint31;
typedef unsigned long int uint32;
typedef signed short sint15;
typedef signed short int int16;
typedef unsigned short uint16;
typedef signed char sint7;
typedef unsigned char uint8;
# 167 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/tc3162/tc3162.h"
static inline __attribute__((always_inline)) uint32 regRead32(uint32 reg)
{
return *(volatile unsigned long int *)(reg);
}
static inline __attribute__((always_inline)) void regWrite32(uint32 reg, uint32 value)
{
 *(volatile unsigned long int *)(reg) = value;
}
static inline __attribute__((always_inline)) unsigned long int regReadPhy32(uint32 reg)
{
uint32 tmp;
tmp = *(volatile unsigned long int *)(reg);
tmp = *(volatile unsigned long int *)(reg);
return tmp;
}
# 798 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/tc3162/tc3162.h"
enum
interrupt_priority
{
IPL0, IPL1, IPL2, IPL3, IPL4,
IPL5, IPL6, IPL7, IPL8, IPL9,
IPL10, IPL11, IPL12, IPL13, IPL14,
IPL15, IPL16, IPL17, IPL18, IPL19,
IPL20, IPL21, IPL22, IPL23, IPL24,
IPL25, IPL26, IPL27, IPL28, IPL29,
IPL30, IPL31
};
# 5 "include/linux/mtd/rt_flash.h" 2
extern unsigned char ReadSPIByte(unsigned long index);
extern unsigned long ReadSPIDWord(unsigned long index);
# 44 "include/linux/mtd/rt_flash.h"
extern unsigned long flash_base;
extern unsigned int (*ranand_read_byte)(unsigned long long);
extern unsigned int (*ranand_read_dword)(unsigned long long);
# 5 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_types.h" 1
# 9 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_types.h"
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_const.h" 1
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_const.h" 1
# 5 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_const.h" 2
# 1 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/xmcs_const.h" 1
# 1 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/../public/xmcs_const.h" 1
# 22 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/../public/xmcs_const.h"
typedef enum {
XPON_DISABLE = 0,
XPON_ENABLE,
XPON_POWER_DOWN,
XPON_OTHER
} XPON_Mode_t;
typedef enum {
GPON_STATE_O1 = 1,
GPON_STATE_O2,
GPON_STATE_O3,
GPON_STATE_O4,
GPON_STATE_O5,
GPON_STATE_O6,
GPON_STATE_O7
} ENUM_GponState_t;
# 5 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/xmcs_const.h" 2
# 20 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/xmcs_const.h"
typedef enum {
GPON_EQD_OFFSET_FLAG_ADD = 0,
GPON_EQD_OFFSET_FLAG_SUBTRACT,
} GPON_EQD_OFFSET_FLAG_t ;
typedef enum {
GPON_INT_PLOAMD_RECV = 0,
GPON_INT_PLOAMU_SEND,
GPON_INT_SN_REQ_RECV,
GPON_INT_SN_ONU_SEND_O3,
GPON_INT_RANGING_REQ_RECV,
GPON_INT_ONU_SEND_O4,
GPON_INT_SN_REQ_CRS,
GPON_INT_LOS_GEN_DEL,
GPON_INT_AES_KEY_SWITCH_DONE,
GPON_INT_TOD_UPDATE_DONE,
GPON_INT_TOD_1PPS,
GPON_INT_DYING_GASP_SEND,
GPON_INT_RX_ERR = 16,
GPON_INT_FIFO_ERR,
GPON_INT_BST_SGL_DIFF,
GPON_INT_TX_LATE_START,
GPON_INT_RX_EOF_ERR,
GPON_INT_RX_GEM_INTLV_ERR,
GPON_INT_BFIFO_FULL,
GPON_INT_SFIFO_FULL,
GPON_INT_O5_EQD_ADJ_DONE,
GPON_INT_OLT_DS_FEC_CHG,
GPON_INT_ONU_US_FEC_CHG,
GPON_INT_POP_UP_RECV_IN_O6,
GPON_INT_FWI,
GPON_INT_LWI,
GPON_INT_BWM_STOP_TIME_ERR,
} GPON_INTERRUPT_MASK_T ;
typedef enum {
MSG_ERR = 0x0001,
MSG_WARN = 0x0002,
MSG_INT = 0x0004,
MSG_ACT = 0x0008,
MSG_OAM = 0x0010,
MSG_OMCI = 0x0020,
MSG_TRACE = 0x0040,
MSG_CONTENT = 0x0080,
MSG_DBG = 0x0100,
MSG_EQD = 0x0200,
MSG_XMCS = 0x0400
} xPON_DebugMsg_t ;
# 6 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_const.h" 2
# 10 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_types.h" 2
# 45 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_types.h"
static uint __inline__ __attribute__((always_inline)) GetReg(uint reg)
{
return ioread32((void *)reg) ;
}
static void __inline__ __attribute__((always_inline)) SetReg(uint reg, uint value)
{
iowrite32(value, (void *)reg) ;
}
# 69 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_types.h"
static uint __inline__ __attribute__((always_inline)) GetRegMask(uint reg, uint mask, uint shift)
{
return (((ioread32((void *)reg))>>shift) & mask) ;
}
# 82 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_types.h"
static void __inline__ __attribute__((always_inline)) SetRegMask(uint reg, uint mask, uint shift, uint value)
{
uint data = ioread32((void *)reg) ;
data = (data & ~(mask<<shift)) | ((value&mask)<<shift) ;
iowrite32(data, (void *)reg) ;
}
static void __inline__ __attribute__((always_inline)) SetBits(uint reg, uint bits)
{
uint data = ioread32((void *)reg) ;
data |= bits ;
iowrite32(data, (void *)reg) ;
}
static void __inline__ __attribute__((always_inline)) ClearBits(uint reg, uint bits)
{
 uint data = ioread32((void *)reg) ;
data &= ~bits ;
iowrite32(data, (void *)reg) ;
}
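/*
 * Illustrative sketch (assumption): read-modify-write of a memory-mapped
 * register through the accessors above. The register address is supplied
 * by the caller; no real register of this driver is named here, and the
 * field position/width are placeholders.
 */
static void __attribute__((unused)) epon_example_rmw(uint reg_addr)
{
 SetBits(reg_addr, 1u << 0); /* set bit 0 */
 SetRegMask(reg_addr, 0x7, 4, 5); /* program a 3-bit field at offset 4 to the value 5 */
 (void)GetRegMask(reg_addr, 0x7, 4); /* read the same field back */
}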
# 130 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_types.h"
typedef unsigned int UINT32 ;
typedef unsigned short UINT16 ;
typedef unsigned char UINT8 ;
typedef unsigned int FIELD;
# 6 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h" 2
# 25 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
typedef union
{
struct
{
FIELD rsv_27 : 5;
FIELD sniffer_mode : 1;
FIELD txoam_favor : 1;
FIELD burst_en_dly : 1;
FIELD discv_burst_en : 1;
FIELD mpcp_fwd : 1;
FIELD bcst_llid_m1_drop : 1;
FIELD bcst_llid_m0_drop : 1;
FIELD mcst_llid_drop : 1;
FIELD all_unicast_llid_pkt_fwd : 1;
FIELD fcs_err_fwd : 1;
FIELD llid_crc8_err_fwd : 1;
FIELD rsv_14 : 2;
FIELD rxmpi_stop : 1;
FIELD txmpi_stop : 1;
FIELD phy_pwr_down : 1;
FIELD rx_nml_gate_fwd : 1;
FIELD rxmbi_stop : 1;
FIELD txmbi_stop : 1;
FIELD chk_all_gnt_mode : 1;
FIELD loc_cnt_sync_method : 1;
FIELD tx_default_rpt : 1;
FIELD epon_mac_sw_rst : 1;
FIELD epon_oam_cal_in_eth : 1;
FIELD epon_mac_lpbk_en : 1;
FIELD rpt_txpri_ctrl : 1;
FIELD mode_sel : 1;
# 86 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_glb_cfg, *PREG_e_glb_cfg;
typedef union
{
struct
{
FIELD rsv_27 : 5;
FIELD sniff_fifo_ovrun_int : 1;
FIELD reg_ack_done_int : 1;
FIELD reg_req_done_int : 1;
FIELD reorder1_gnt_int : 1;
FIELD b2b_gnt_int : 1;
FIELD hidn_gnt_int : 1;
FIELD ps_early_wakeup_int : 1;
FIELD rx_sleep_allow_int : 1;
FIELD ps_wakeup_int : 1;
FIELD ps_sleep_int : 1;
FIELD txfifo_udrun_int : 1;
FIELD rpt_overintvl_int : 1;
FIELD mpcp_timeout_int : 1;
FIELD timedrft_int : 1;
FIELD tod_1pps_int : 1;
FIELD tod_updt_int : 1;
FIELD ptp_msg_tx_int : 1;
FIELD gnt_buf_ovrrun_int : 1;
FIELD llid7_rcv_rgst_int : 1;
FIELD llid6_rcv_rgst_int : 1;
FIELD llid5_rcv_rgst_int : 1;
FIELD llid4_rcv_rgst_int : 1;
FIELD llid3_rcv_rgst_int : 1;
FIELD llid2_rcv_rgst_int : 1;
FIELD llid1_rcv_rgst_int : 1;
FIELD llid0_rcv_rgst_int : 1;
FIELD rcv_dscvry_gate_int : 1;
# 153 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_int_status, *PREG_e_int_status;
typedef union
{
struct
{
FIELD rsv_27 : 5;
FIELD sniff_fifo_ovrun_en : 1;
FIELD reg_ack_done_en : 1;
FIELD reg_req_done_en : 1;
FIELD reorder1_gnt_en : 1;
FIELD b2b_gnt_en : 1;
FIELD hidn_gnt_en : 1;
FIELD ps_early_wakeup_en : 1;
FIELD rx_sleep_allow_en : 1;
FIELD ps_wakeup_en : 1;
FIELD ps_sleep_en : 1;
FIELD txfifo_udrun_en : 1;
FIELD rpt_overintvl_en : 1;
FIELD mpcp_timeout_en : 1;
FIELD timedrft_en : 1;
FIELD tod_1pps_en : 1;
FIELD tod_updt_en : 1;
FIELD ptp_msg_tx_en : 1;
FIELD gnt_buf_ovrrun_en : 1;
FIELD llid7_rcv_rgst_en : 1;
FIELD llid6_rcv_rgst_en : 1;
FIELD llid5_rcv_rgst_en : 1;
FIELD llid4_rcv_rgst_en : 1;
FIELD llid3_rcv_rgst_en : 1;
FIELD llid2_rcv_rgst_en : 1;
FIELD llid1_rcv_rgst_en : 1;
FIELD llid0_rcv_rgst_en : 1;
FIELD rcv_dscvry_gate_en : 1;
# 220 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_int_en, *PREG_e_int_en;
typedef union
{
struct
{
FIELD rsv_16 : 16;
FIELD llid7_mpcp_tmo : 1;
FIELD llid6_mpcp_tmo : 1;
FIELD llid5_mpcp_tmo : 1;
FIELD llid4_mpcp_tmo : 1;
FIELD llid3_mpcp_tmo : 1;
FIELD llid2_mpcp_tmo : 1;
FIELD llid1_mpcp_tmo : 1;
FIELD llid0_mpcp_tmo : 1;
FIELD llid7_rpt_tmo : 1;
FIELD llid6_rpt_tmo : 1;
FIELD llid5_rpt_tmo : 1;
FIELD llid4_rpt_tmo : 1;
FIELD llid3_rpt_tmo : 1;
FIELD llid2_rpt_tmo : 1;
FIELD llid1_rpt_tmo : 1;
FIELD llid0_rpt_tmo : 1;
# 265 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_rpt_mpcp_timeout_llid_idx, *PREG_e_rpt_mpcp_timeout_llid_idx;
typedef union
{
struct
{
FIELD hw_dying_gasp_en : 1;
FIELD rsv_28 : 3;
FIELD dying_gasp_code : 8;
FIELD rsv_17 : 3;
FIELD sw_init_dying_gasp : 1;
FIELD rsv_10 : 6;
FIELD dygsp_num_of_times : 2;
FIELD rsv_7 : 1;
FIELD dying_gasp_flags : 7;
# 294 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_dyinggsp_cfg, *PREG_e_dyinggsp_cfg;
typedef union
{
struct
{
FIELD rsv_16 : 16;
FIELD echoed_pending_gnt : 8;
FIELD rsv_7 : 1;
FIELD pending_gnt_num : 7;
} Bits;
UINT32 Raw;
} REG_e_pending_gnt_num, *PREG_e_pending_gnt_num;
typedef union
{
struct
{
FIELD rsv_28 : 4;
FIELD llid3_txfec_en : 1;
FIELD llid3_dcrypt_en : 1;
FIELD llid3_dcrypt_mode : 1;
FIELD llid3_oam_lpbk_en : 1;
FIELD rsv_20 : 4;
FIELD llid2_txfec_en : 1;
FIELD llid2_dcrypt_en : 1;
FIELD llid2_dcrypt_mode : 1;
FIELD llid2_oam_lpbk_en : 1;
FIELD rsv_12 : 4;
FIELD llid1_txfec_en : 1;
FIELD llid1_dcrypt_en : 1;
FIELD llid1_dcrypt_mode : 1;
FIELD llid1_oam_lpbk_en : 1;
FIELD rsv_4 : 4;
FIELD llid0_txfec_en : 1;
FIELD llid0_dcrypt_en : 1;
FIELD llid0_dcrypt_mode : 1;
FIELD llid0_oam_lpbk_en : 1;
# 364 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_llid0_3_cfg, *PREG_e_llid0_3_cfg;
typedef union
{
struct
{
FIELD rsv_28 : 4;
FIELD llid7_txfec_en : 1;
FIELD llid7_dcrypt_en : 1;
FIELD llid7_dcrypt_mode : 1;
FIELD llid7_oam_lpbk_en : 1;
FIELD rsv_20 : 4;
FIELD llid6_txfec_en : 1;
FIELD llid6_dcrypt_en : 1;
FIELD llid6_dcrypt_mode : 1;
FIELD llid6_oam_lpbk_en : 1;
FIELD rsv_12 : 4;
FIELD llid5_txfec_en : 1;
FIELD llid5_dcrypt_en : 1;
FIELD llid5_dcrypt_mode : 1;
FIELD llid5_oam_lpbk_en : 1;
FIELD rsv_4 : 4;
FIELD llid4_txfec_en : 1;
FIELD llid4_dcrypt_en : 1;
FIELD llid4_dcrypt_mode : 1;
FIELD llid4_oam_lpbk_en : 1;
# 415 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_llid4_7_cfg, *PREG_e_llid4_7_cfg;
typedef union
{
struct
{
FIELD mpcp_cmd : 2;
FIELD rsv_17 : 13;
FIELD mpcp_cmd_done : 1;
FIELD rsv_13 : 3;
FIELD rgstr_ack_flg : 1;
FIELD rsv_9 : 3;
FIELD rgstr_req_flg : 1;
FIELD rsv_3 : 5;
FIELD tx_mpcp_llid_idx : 3;
# 444 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_llid_dscvry_ctrl, *PREG_e_llid_dscvry_ctrl;
typedef union
{
struct
{
FIELD llid0_dscvry_sts : 2;
FIELD rsv_26 : 4;
FIELD llid0_rgstr_flg_sts : 2;
FIELD rsv_17 : 7;
FIELD llid0_valid : 1;
FIELD llid0_value : 16;
# 467 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_llid0_dscvry_sts, *PREG_e_llid0_dscvry_sts;
typedef union
{
struct
{
FIELD llid1_dscvry_sts : 2;
FIELD rsv_26 : 4;
FIELD llid1_rgstr_flg_sts : 2;
FIELD rsv_17 : 7;
FIELD llid1_valid : 1;
FIELD llid1_value : 16;
# 490 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_llid1_dscvry_sts, *PREG_e_llid1_dscvry_sts;
typedef union
{
struct
{
FIELD llid2_dscvry_sts : 2;
FIELD rsv_26 : 4;
FIELD llid2_rgstr_flg_sts : 2;
FIELD rsv_17 : 7;
FIELD llid2_valid : 1;
FIELD llid2_value : 16;
# 513 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_llid2_dscvry_sts, *PREG_e_llid2_dscvry_sts;
typedef union
{
struct
{
FIELD llid3_dscvry_sts : 2;
FIELD rsv_26 : 4;
FIELD llid3_rgstr_flg_sts : 2;
FIELD rsv_17 : 7;
FIELD llid3_valid : 1;
FIELD llid3_value : 16;
# 536 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_llid3_dscvry_sts, *PREG_e_llid3_dscvry_sts;
typedef union
{
struct
{
FIELD llid4_dscvry_sts : 2;
FIELD rsv_26 : 4;
FIELD llid4_rgstr_flg_sts : 2;
FIELD rsv_17 : 7;
FIELD llid4_valid : 1;
FIELD llid4_value : 16;
# 559 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_llid4_dscvry_sts, *PREG_e_llid4_dscvry_sts;
typedef union
{
struct
{
FIELD llid5_dscvry_sts : 2;
FIELD rsv_26 : 4;
FIELD llid5_rgstr_flg_sts : 2;
FIELD rsv_17 : 7;
FIELD llid5_valid : 1;
FIELD llid5_value : 16;
# 582 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_llid5_dscvry_sts, *PREG_e_llid5_dscvry_sts;
typedef union
{
struct
{
FIELD llid6_dscvry_sts : 2;
FIELD rsv_26 : 4;
FIELD llid6_rgstr_flg_sts : 2;
FIELD rsv_17 : 7;
FIELD llid6_valid : 1;
FIELD llid6_value : 16;
# 605 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_llid6_dscvry_sts, *PREG_e_llid6_dscvry_sts;
typedef union
{
struct
{
FIELD llid7_dscvry_sts : 2;
FIELD rsv_26 : 4;
FIELD llid7_rgstr_flg_sts : 2;
FIELD rsv_17 : 7;
FIELD llid7_valid : 1;
FIELD llid7_value : 16;
# 628 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_llid7_dscvry_sts, *PREG_e_llid7_dscvry_sts;
typedef union
{
struct
{
FIELD mac_addr_rwcmd : 1;
FIELD rsv_17 : 14;
FIELD mac_addr_rwcmd_done : 1;
FIELD rsv_4 : 12;
FIELD mac_addr_llid_indx : 3;
FIELD mac_addr_dw_idx : 1;
# 651 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_mac_addr_cfg, *PREG_e_mac_addr_cfg;
typedef union
{
struct
{
FIELD mac_addr_value : 32;
} Bits;
UINT32 Raw;
} REG_e_mac_addr_value, *PREG_e_mac_addr_value;
typedef union
{
struct
{
FIELD key_rwcmd : 1;
FIELD rsv_17 : 14;
FIELD key_rwcmd_done : 1;
FIELD rsv_6 : 10;
FIELD key_llid_index : 3;
FIELD key_idx : 1;
FIELD key_dw_indx : 2;
# 685 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_security_key_cfg, *PREG_e_security_key_cfg;
typedef union
{
struct
{
FIELD rsv_24 : 8;
FIELD key_value : 24;
} Bits;
UINT32 Raw;
} REG_e_key_value, *PREG_e_key_value;
typedef union
{
struct
{
FIELD df_rpt_data : 32;
} Bits;
UINT32 Raw;
} REG_e_rpt_data, *PREG_e_rpt_data;
typedef union
{
struct
{
FIELD rsv_9 : 23;
FIELD df_rpt_fifo_clr : 1;
FIELD rsv_6 : 2;
FIELD df_rpt_dt_len : 6;
} Bits;
UINT32 Raw;
} REG_e_rpt_len, *PREG_e_rpt_len;
typedef union
{
struct
{
FIELD qsize_fec_adj : 16;
FIELD llid7_rpt_cfg : 2;
FIELD llid6_rpt_cfg : 2;
FIELD llid5_rpt_cfg : 2;
FIELD llid4_rpt_cfg : 2;
FIELD llid3_rpt_cfg : 2;
FIELD llid2_rpt_cfg : 2;
FIELD llid1_rpt_cfg : 2;
FIELD llid0_rpt_cfg : 2;
# 757 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_rpt_cfg, *PREG_e_rpt_cfg;
typedef union
{
struct
{
FIELD qthld_rwcmd : 1;
FIELD qthld_rwcmd_done : 1;
FIELD rsv_24 : 6;
FIELD qthld_value : 16;
FIELD qthld_idx : 2;
FIELD rpt_llid_idx : 3;
FIELD queue_idx : 3;
# 782 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_rpt_qthld_cfg, *PREG_e_rpt_qthld_cfg;
typedef union
{
struct
{
FIELD local_time : 32;
} Bits;
UINT32 Raw;
} REG_e_local_time, *PREG_e_local_time;
typedef union
{
struct
{
FIELD tod_sync_x : 32;
} Bits;
UINT32 Raw;
} REG_e_tod_sync_x, *PREG_e_tod_sync_x;
typedef union
{
struct
{
FIELD rsv_19 : 13;
FIELD ingrs_latency : 11;
FIELD egrs_latency : 8;
} Bits;
UINT32 Raw;
} REG_e_tod_ltncy, *PREG_e_tod_ltncy;
typedef union
{
struct
{
FIELD p2p_tx_tagging_done : 1;
FIELD rsv_12 : 19;
FIELD p2p_tx_pkt_info : 4;
FIELD p2p_tx_tag_sec_l8 : 8;
} Bits;
UINT32 Raw;
} REG_p2p_tx_tag1, *PREG_p2p_tx_tag1;
typedef union
{
struct
{
FIELD rsv_25 : 7;
FIELD p2p_tx_tag_nsec_29_5 : 25;
} Bits;
UINT32 Raw;
} REG_p2p_tx_tag2, *PREG_p2p_tx_tag2;
typedef union
{
struct
{
FIELD new_tod_p2p_offset_sec_l32 : 32;
} Bits;
UINT32 Raw;
} REG_e_new_tod_p2p_offset_sec_l32, *PREG_e_new_tod_p2p_offset_sec_l32;
typedef union
{
struct
{
FIELD new_tod_nsec : 32;
} Bits;
UINT32 Raw;
} REG_e_new_tod_p2p_tod_offset_nsec, *PREG_e_new_tod_p2p_tod_offset_nsec;
typedef union
{
struct
{
FIELD tod_p2p_sec_l32 : 32;
} Bits;
UINT32 Raw;
} REG_e_tod_p2p_tod_sec_l32, *PREG_e_tod_p2p_tod_sec_l32;
typedef union
{
struct
{
FIELD tod_p2p_nsec : 32;
} Bits;
UINT32 Raw;
} REG_e_tod_p2p_tod_nsec, *PREG_e_tod_p2p_tod_nsec;
typedef union
{
struct
{
FIELD rsv_8 : 24;
FIELD tod_period : 8;
} Bits;
UINT32 Raw;
} REG_e_tod_period, *PREG_e_tod_period;
typedef union
{
struct
{
FIELD rsv_30 : 2;
FIELD pwd_mode : 2;
FIELD rsv_27 : 1;
FIELD timedrift_ignore : 1;
FIELD onu_wakeup : 1;
FIELD pwr_sv_start : 1;
FIELD rsv_23 : 1;
FIELD pwd_mode_i : 3;
FIELD rsv_16 : 4;
FIELD slp_duration_max_h : 16;
# 933 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_e_pwr_sv_cfg, *PREG_e_pwr_sv_cfg;
typedef union
{
struct
{
FIELD slp_duration_max_l : 32;
} Bits;
UINT32 Raw;
} REG_e_slp_durt_max, *PREG_e_slp_durt_max;
typedef union
{
struct
{
FIELD slp_duration : 32;
} Bits;
UINT32 Raw;
} REG_e_slp_duration, *PREG_e_slp_duration;
typedef union
{
struct
{
FIELD act_duration : 32;
} Bits;
UINT32 Raw;
} REG_e_act_duration, *PREG_e_act_duration;
typedef union
{
struct
{
FIELD pwron_dly : 32;
} Bits;
UINT32 Raw;
} REG_e_pwron_dly, *PREG_e_pwron_dly;
typedef union
{
struct
{
FIELD rsv_16 : 16;
FIELD slp_duration_i : 16;
} Bits;
UINT32 Raw;
} REG_e_slp_duration_i, *PREG_e_slp_duration_i;
typedef union
{
struct
{
FIELD tx_fetch_overhead : 8;
FIELD tx_fetch_leadtime : 8;
FIELD rsv_12 : 4;
FIELD tx_dma_leadtime : 12;
} Bits;
UINT32 Raw;
} REG_e_txfetch_cfg, *PREG_e_txfetch_cfg;
typedef union
{
struct
{
FIELD rsv_17 : 15;
FIELD sync_time_updte : 1;
FIELD sync_time : 16;
} Bits;
UINT32 Raw;
} REG_e_sync_time, *PREG_e_sync_time;
typedef union
{
struct
{
FIELD dscvr_gnt_len : 8;
FIELD fec_tail_grd : 8;
FIELD tail_grd : 8;
FIELD rsv_6 : 2;
FIELD default_ovrhd : 6;
} Bits;
UINT32 Raw;
} REG_e_tx_cal_cnst, *PREG_e_tx_cal_cnst;
typedef union
{
struct
{
FIELD rsv_16 : 16;
FIELD laser_off_time : 8;
FIELD laser_on_time : 8;
} Bits;
UINT32 Raw;
} REG_e_laser_onoff_time, *PREG_e_laser_onoff_time;
typedef union
{
struct
{
FIELD rsv_8 : 24;
FIELD guard_thrshld : 8;
} Bits;
UINT32 Raw;
} REG_e_grd_thrshld, *PREG_e_grd_thrshld;
typedef union
{
struct
{
FIELD mpcp_timeout_intvl : 32;
} Bits;
UINT32 Raw;
} REG_e_mpcp_timeout_intvl, *PREG_e_mpcp_timeout_intvl;
typedef union
{
struct
{
FIELD rsv_24 : 8;
FIELD rpt_timeout_intvl : 24;
} Bits;
UINT32 Raw;
} REG_e_rpt_timeout_intvl, *PREG_e_rpt_timeout_intvl;
typedef union
{
struct
{
FIELD max_future_gnt_time : 32;
} Bits;
UINT32 Raw;
} REG_e_max_future_gnt_time, *PREG_e_max_future_gnt_time;
typedef union
{
struct
{
FIELD rsv_16 : 16;
FIELD min_proc_time : 16;
} Bits;
UINT32 Raw;
} REG_e_min_proc_time, *PREG_e_min_proc_time;
typedef union
{
struct
{
FIELD tx_tmstp_adj : 16;
FIELD tx_stm_adj : 16;
} Bits;
UINT32 Raw;
} REG_e_trx_adjust_time1, *PREG_e_trx_adjust_time1;
typedef union
{
struct
{
FIELD tx_len_adj : 16;
FIELD rx_tmstp_adj : 16;
} Bits;
UINT32 Raw;
} REG_e_trx_adjust_time2, *PREG_e_trx_adjust_time2;
typedef union
{
struct
{
FIELD rsv_12 : 20;
FIELD probe_bit0_sel : 4;
FIELD rsv_5 : 3;
FIELD probe_sel : 5;
} Bits;
UINT32 Raw;
} REG_e_dbg_prb_sel, *PREG_e_dbg_prb_sel;
typedef union
{
struct
{
FIELD probe_h32 : 32;
} Bits;
UINT32 Raw;
} REG_e_dbg_prb_h32, *PREG_e_dbg_prb_h32;
typedef union
{
struct
{
FIELD probe_l32 : 32;
} Bits;
UINT32 Raw;
} REG_e_dbg_prb_l32, *PREG_e_dbg_prb_l32;
typedef union
{
struct
{
FIELD rxmbi_eth_cnt : 32;
} Bits;
UINT32 Raw;
} REG_e_rxmbi_eth_cnt, *PREG_e_rxmbi_eth_cnt;
typedef union
{
struct
{
FIELD rxmpi_eth_cnt : 32;
} Bits;
UINT32 Raw;
} REG_e_rxmpi_eth_cnt, *PREG_e_rxmpi_eth_cnt;
typedef union
{
struct
{
FIELD txmbi_eth_cnt : 32;
} Bits;
UINT32 Raw;
} REG_e_txmbi_eth_cnt, *PREG_e_txmbi_eth_cnt;
typedef union
{
struct
{
FIELD txmpi_eth_cnt : 32;
} Bits;
UINT32 Raw;
} REG_e_txmpi_eth_cnt, *PREG_e_txmpi_eth_cnt;
typedef union
{
struct
{
FIELD rx_oam_cnt : 16;
FIELD tx_oam_cnt : 16;
} Bits;
UINT32 Raw;
} REG_e_oam_stat, *PREG_e_oam_stat;
typedef union
{
struct
{
FIELD mpcp_err_cnt : 8;
FIELD mpcp_rgst_cnt : 8;
FIELD mpcp_dscv_gate_cnt : 16;
} Bits;
UINT32 Raw;
} REG_e_mpcp_stat, *PREG_e_mpcp_stat;
typedef union
{
struct
{
FIELD rsv_16 : 16;
FIELD mpcp_rgst_req_cnt : 8;
FIELD mpcp_rgst_ack_cnt : 8;
} Bits;
UINT32 Raw;
} REG_e_mpcp_rgst_stat, *PREG_e_mpcp_rgst_stat;
typedef union
{
struct
{
FIELD rsv_23 : 9;
FIELD max_gnt_pending_cnt : 7;
FIELD rsv_7 : 9;
FIELD cur_gnt_pending_cnt : 7;
} Bits;
UINT32 Raw;
} REG_e_gnt_pending_stat, *PREG_e_gnt_pending_stat;
typedef union
{
struct
{
FIELD max_gnt_length : 16;
FIELD min_gnt_length : 16;
} Bits;
UINT32 Raw;
} REG_e_gnt_length_stat, *PREG_e_gnt_length_stat;
typedef union
{
struct
{
FIELD b2b_gnt_cnt : 16;
FIELD hdn_gnt_cnt : 16;
} Bits;
UINT32 Raw;
} REG_e_gnt_type_stat, *PREG_e_gnt_type_stat;
typedef union
{
struct
{
FIELD rsv_16 : 16;
FIELD cur_time_drift : 8;
FIELD max_time_drift : 8;
} Bits;
UINT32 Raw;
} REG_e_time_drft_stat, *PREG_e_time_drft_stat;
typedef union
{
struct
{
FIELD llid0_gnt_cnt : 32;
} Bits;
UINT32 Raw;
} REG_e_llid0_gnt_stat, *PREG_e_llid0_gnt_stat;
typedef union
{
struct
{
FIELD llid1_gnt_cnt : 32;
} Bits;
UINT32 Raw;
} REG_e_llid1_gnt_stat, *PREG_e_llid1_gnt_stat;
typedef union
{
struct
{
FIELD llid2_gnt_cnt : 32;
} Bits;
UINT32 Raw;
} REG_e_llid2_gnt_stat, *PREG_e_llid2_gnt_stat;
typedef union
{
struct
{
FIELD llid3_gnt_cnt : 32;
} Bits;
UINT32 Raw;
} REG_e_llid3_gnt_stat, *PREG_e_llid3_gnt_stat;
typedef union
{
struct
{
FIELD llid4_gnt_cnt : 32;
} Bits;
UINT32 Raw;
} REG_e_llid4_gnt_stat, *PREG_e_llid4_gnt_stat;
typedef union
{
struct
{
FIELD llid5_gnt_cnt : 32;
} Bits;
UINT32 Raw;
} REG_e_llid5_gnt_stat, *PREG_e_llid5_gnt_stat;
typedef union
{
struct
{
FIELD llid6_gnt_cnt : 32;
} Bits;
UINT32 Raw;
} REG_e_llid6_gnt_stat, *PREG_e_llid6_gnt_stat;
typedef union
{
struct
{
FIELD llid7_gnt_cnt : 32;
} Bits;
UINT32 Raw;
} REG_e_llid7_gnt_stat, *PREG_e_llid7_gnt_stat;
typedef union
{
struct
{
FIELD snf_mpcp_cap_en : 1;
FIELD snf_oam_cap_en : 1;
FIELD snf_mpcp_oam_cnt_set : 30;
} Bits;
UINT32 Raw;
} REG_e_snf_mpcp_oam_ctl, *PREG_e_snf_mpcp_oam_ctl;
typedef union
{
struct
{
FIELD rsv_25 : 7;
FIELD rpt_bitmap_ctrl : 1;
FIELD rpt_bitmap_set : 8;
FIELD rpt_qsize_fecoff_adj : 16;
} Bits;
UINT32 Raw;
} REG_e_rpt_adj, *PREG_e_rpt_adj;
typedef union
{
struct
{
FIELD rsv_16 : 16;
FIELD rpt_cnt : 16;
} Bits;
UINT32 Raw;
} REG_e_rpt_cnt, *PREG_e_rpt_cnt;
typedef union
{
struct
{
FIELD dyinggsp_w1_len_type : 32;
} Bits;
UINT32 Raw;
} REG_e_dyinggsp_w1, *PREG_e_dyinggsp_w1;
typedef union
{
struct
{
FIELD dyinggsp_w2 : 32;
} Bits;
UINT32 Raw;
} REG_e_dyinggsp_w2, *PREG_e_dyinggsp_w2;
typedef union
{
struct
{
FIELD dyinggsp_w3 : 32;
} Bits;
UINT32 Raw;
} REG_e_dyinggsp_w3, *PREG_e_dyinggsp_w3;
typedef union
{
struct
{
FIELD dyinggsp_w4 : 32;
} Bits;
UINT32 Raw;
} REG_e_dyinggsp_w4, *PREG_e_dyinggsp_w4;
typedef union
{
struct
{
FIELD dyinggsp_w5 : 32;
} Bits;
UINT32 Raw;
} REG_e_dyinggsp_w5, *PREG_e_dyinggsp_w5;
typedef union
{
struct
{
FIELD dyinggsp_w6 : 32;
} Bits;
UINT32 Raw;
} REG_e_dyinggsp_w6, *PREG_e_dyinggsp_w6;
typedef union
{
struct
{
FIELD dyinggsp_w7 : 32;
} Bits;
UINT32 Raw;
} REG_e_dyinggsp_w7, *PREG_e_dyinggsp_w7;
typedef union
{
struct
{
FIELD dyinggsp_w8 : 32;
} Bits;
UINT32 Raw;
} REG_e_dyinggsp_w8, *PREG_e_dyinggsp_w8;
typedef union
{
struct
{
FIELD dyinggsp_w9 : 32;
} Bits;
UINT32 Raw;
} REG_e_dyinggsp_w9, *PREG_e_dyinggsp_w9;
typedef union
{
struct
{
FIELD dyinggsp_w10 : 32;
} Bits;
UINT32 Raw;
} REG_e_dyinggsp_w10, *PREG_e_dyinggsp_w10;
typedef union
{
struct
{
FIELD dyinggsp_w11 : 32;
} Bits;
UINT32 Raw;
} REG_e_dyinggsp_w11, *PREG_e_dyinggsp_w11;
typedef union
{
struct
{
FIELD dyinggsp_w12 : 32;
} Bits;
UINT32 Raw;
} REG_e_dyinggsp_w12, *PREG_e_dyinggsp_w12;
typedef union
{
struct
{
FIELD oam_kpalv_w1 : 32;
} Bits;
UINT32 Raw;
} REG_e_oam_kpalv_w1, *PREG_e_oam_kpalv_w1;
typedef union
{
struct
{
FIELD oam_kpalv_w2 : 32;
} Bits;
UINT32 Raw;
} REG_e_oam_kpalv_w2, *PREG_e_oam_kpalv_w2;
typedef union
{
struct
{
FIELD oam_kpalv_w3 : 32;
} Bits;
UINT32 Raw;
} REG_e_oam_kpalv_w3, *PREG_e_oam_kpalv_w3;
typedef union
{
struct
{
FIELD oam_kpalv_w4 : 32;
} Bits;
UINT32 Raw;
} REG_e_oam_kpalv_w4, *PREG_e_oam_kpalv_w4;
typedef union
{
struct
{
FIELD oam_kpalv_w5 : 32;
} Bits;
UINT32 Raw;
} REG_e_oam_kpalv_w5, *PREG_e_oam_kpalv_w5;
typedef union
{
struct
{
FIELD oam_kpalv_w6 : 32;
} Bits;
UINT32 Raw;
} REG_e_oam_kpalv_w6, *PREG_e_oam_kpalv_w6;
typedef union
{
struct
{
FIELD oam_kpalv_w7 : 32;
} Bits;
UINT32 Raw;
} REG_e_oam_kpalv_w7, *PREG_e_oam_kpalv_w7;
typedef union
{
struct
{
FIELD oam_kpalv_w8 : 32;
} Bits;
UINT32 Raw;
} REG_e_oam_kpalv_w8, *PREG_e_oam_kpalv_w8;
typedef union
{
struct
{
FIELD oam_kpalv_w9 : 32;
} Bits;
UINT32 Raw;
} REG_e_oam_kpalv_w9, *PREG_e_oam_kpalv_w9;
typedef union
{
struct
{
FIELD oam_kpalv_w10 : 32;
} Bits;
UINT32 Raw;
} REG_e_oam_kpalv_w10, *PREG_e_oam_kpalv_w10;
typedef union
{
struct
{
FIELD oam_kpalv_w11 : 32;
} Bits;
UINT32 Raw;
} REG_e_oam_kpalv_w11, *PREG_e_oam_kpalv_w11;
typedef union
{
struct
{
FIELD oam_kpalv_w12 : 32;
} Bits;
UINT32 Raw;
} REG_e_oam_kpalv_w12, *PREG_e_oam_kpalv_w12;
typedef union
{
struct
{
FIELD oam_kpalv_interval : 24;
FIELD rsv_3 : 5;
FIELD oam_kpalv_sw_trig : 1;
FIELD oam_kpalv_sw_cfg : 1;
FIELD oam_kpalv_en : 1;
} Bits;
UINT32 Raw;
} REG_e_oam_kpalv_ctrl, *PREG_e_oam_kpalv_ctrl;
typedef union
{
struct
{
FIELD tod_1pps_width_ctrl : 32;
} Bits;
UINT32 Raw;
} REG_e_tod_1pps_ctrl, *PREG_e_tod_1pps_ctrl;
typedef union
{
struct
{
FIELD rsv_8 : 24;
FIELD short_pkt_size_ctrl : 8;
} Bits;
UINT32 Raw;
} REG_e_short_pkt_ctrl, *PREG_e_short_pkt_ctrl;
typedef union
{
struct
{
FIELD sniffer_sp_tag : 32;
} Bits;
UINT32 Raw;
} REG_e_sniff_sp_tag, *PREG_e_sniff_sp_tag;
typedef volatile struct
{
UINT32 rsv_0000[6144];
REG_e_glb_cfg e_glb_cfg;
REG_e_int_status e_int_status;
REG_e_int_en e_int_en;
REG_e_rpt_mpcp_timeout_llid_idx e_rpt_mpcp_timeout_llid_idx;
REG_e_dyinggsp_cfg e_dyinggsp_cfg;
REG_e_pending_gnt_num e_pending_gnt_num;
UINT32 rsv_6018[2];
REG_e_llid0_3_cfg e_llid0_3_cfg;
REG_e_llid4_7_cfg e_llid4_7_cfg;
REG_e_llid_dscvry_ctrl e_llid_dscvry_ctrl;
REG_e_llid0_dscvry_sts e_llid0_dscvry_sts;
REG_e_llid1_dscvry_sts e_llid1_dscvry_sts;
REG_e_llid2_dscvry_sts e_llid2_dscvry_sts;
REG_e_llid3_dscvry_sts e_llid3_dscvry_sts;
REG_e_llid4_dscvry_sts e_llid4_dscvry_sts;
REG_e_llid5_dscvry_sts e_llid5_dscvry_sts;
REG_e_llid6_dscvry_sts e_llid6_dscvry_sts;
REG_e_llid7_dscvry_sts e_llid7_dscvry_sts;
UINT32 rsv_604C;
REG_e_mac_addr_cfg e_mac_addr_cfg;
REG_e_mac_addr_value e_mac_addr_value;
REG_e_security_key_cfg e_security_key_cfg;
REG_e_key_value e_key_value;
REG_e_rpt_data e_rpt_data;
REG_e_rpt_len e_rpt_len;
REG_e_rpt_cfg e_rpt_cfg;
REG_e_rpt_qthld_cfg e_rpt_qthld_cfg;
UINT32 rsv_6070[4];
REG_e_local_time e_local_time;
REG_e_tod_sync_x e_tod_sync_x;
REG_e_tod_ltncy e_tod_ltncy;
REG_p2p_tx_tag1 p2p_tx_tag1;
REG_p2p_tx_tag2 p2p_tx_tag2;
REG_e_new_tod_p2p_offset_sec_l32 e_new_tod_p2p_offset_sec_l32;
REG_e_new_tod_p2p_tod_offset_nsec e_new_tod_p2p_tod_offset_nsec;
REG_e_tod_p2p_tod_sec_l32 e_tod_p2p_tod_sec_l32;
REG_e_tod_p2p_tod_nsec e_tod_p2p_tod_nsec;
REG_e_tod_period e_tod_period;
UINT32 rsv_60A8[2];
REG_e_pwr_sv_cfg e_pwr_sv_cfg;
REG_e_slp_durt_max e_slp_durt_max;
REG_e_slp_duration e_slp_duration;
REG_e_act_duration e_act_duration;
REG_e_pwron_dly e_pwron_dly;
REG_e_slp_duration_i e_slp_duration_i;
UINT32 rsv_60C8[2];
REG_e_txfetch_cfg e_txfetch_cfg;
REG_e_sync_time e_sync_time;
REG_e_tx_cal_cnst e_tx_cal_cnst;
REG_e_laser_onoff_time e_laser_onoff_time;
REG_e_grd_thrshld e_grd_thrshld;
REG_e_mpcp_timeout_intvl e_mpcp_timeout_intvl;
REG_e_rpt_timeout_intvl e_rpt_timeout_intvl;
REG_e_max_future_gnt_time e_max_future_gnt_time;
REG_e_min_proc_time e_min_proc_time;
REG_e_trx_adjust_time1 e_trx_adjust_time1;
REG_e_trx_adjust_time2 e_trx_adjust_time2;
UINT32 rsv_60FC;
REG_e_dbg_prb_sel e_dbg_prb_sel;
REG_e_dbg_prb_h32 e_dbg_prb_h32;
REG_e_dbg_prb_l32 e_dbg_prb_l32;
REG_e_rxmbi_eth_cnt e_rxmbi_eth_cnt;
REG_e_rxmpi_eth_cnt e_rxmpi_eth_cnt;
REG_e_txmbi_eth_cnt e_txmbi_eth_cnt;
REG_e_txmpi_eth_cnt e_txmpi_eth_cnt;
REG_e_oam_stat e_oam_stat;
REG_e_mpcp_stat e_mpcp_stat;
REG_e_mpcp_rgst_stat e_mpcp_rgst_stat;
REG_e_gnt_pending_stat e_gnt_pending_stat;
REG_e_gnt_length_stat e_gnt_length_stat;
REG_e_gnt_type_stat e_gnt_type_stat;
REG_e_time_drft_stat e_time_drft_stat;
REG_e_llid0_gnt_stat e_llid0_gnt_stat;
REG_e_llid1_gnt_stat e_llid1_gnt_stat;
REG_e_llid2_gnt_stat e_llid2_gnt_stat;
REG_e_llid3_gnt_stat e_llid3_gnt_stat;
REG_e_llid4_gnt_stat e_llid4_gnt_stat;
REG_e_llid5_gnt_stat e_llid5_gnt_stat;
REG_e_llid6_gnt_stat e_llid6_gnt_stat;
REG_e_llid7_gnt_stat e_llid7_gnt_stat;
UINT32 rsv_6158[2];
REG_e_snf_mpcp_oam_ctl e_snf_mpcp_oam_ctl;
REG_e_rpt_adj e_rpt_adj;
REG_e_rpt_cnt e_rpt_cnt;
REG_e_dyinggsp_w1 e_dyinggsp_w1;
REG_e_dyinggsp_w2 e_dyinggsp_w2;
REG_e_dyinggsp_w3 e_dyinggsp_w3;
REG_e_dyinggsp_w4 e_dyinggsp_w4;
REG_e_dyinggsp_w5 e_dyinggsp_w5;
REG_e_dyinggsp_w6 e_dyinggsp_w6;
REG_e_dyinggsp_w7 e_dyinggsp_w7;
REG_e_dyinggsp_w8 e_dyinggsp_w8;
REG_e_dyinggsp_w9 e_dyinggsp_w9;
REG_e_dyinggsp_w10 e_dyinggsp_w10;
REG_e_dyinggsp_w11 e_dyinggsp_w11;
REG_e_dyinggsp_w12 e_dyinggsp_w12;
REG_e_oam_kpalv_w1 e_oam_kpalv_w1;
REG_e_oam_kpalv_w2 e_oam_kpalv_w2;
REG_e_oam_kpalv_w3 e_oam_kpalv_w3;
REG_e_oam_kpalv_w4 e_oam_kpalv_w4;
REG_e_oam_kpalv_w5 e_oam_kpalv_w5;
REG_e_oam_kpalv_w6 e_oam_kpalv_w6;
REG_e_oam_kpalv_w7 e_oam_kpalv_w7;
REG_e_oam_kpalv_w8 e_oam_kpalv_w8;
REG_e_oam_kpalv_w9 e_oam_kpalv_w9;
REG_e_oam_kpalv_w10 e_oam_kpalv_w10;
REG_e_oam_kpalv_w11 e_oam_kpalv_w11;
REG_e_oam_kpalv_w12 e_oam_kpalv_w12;
REG_e_oam_kpalv_ctrl e_oam_kpalv_ctrl;
REG_e_tod_1pps_ctrl e_tod_1pps_ctrl;
REG_e_short_pkt_ctrl e_short_pkt_ctrl;
REG_e_sniff_sp_tag e_sniff_sp_tag;
UINT32 rsv_61DC[8072];
UINT8 rsv_DFFC;
UINT16 rsv_DFFD;
UINT8 rsv_E0FF;
UINT32 rsv_E100[959];
UINT8 rsv_EFFC;
UINT16 rsv_EFFD;
}EPON_MAC_REGS, *PEPON_MAC_REGS;
extern PEPON_MAC_REGS g_EPON_MAC_BASE;
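/*
 * Illustrative sketch (assumption): polling the EPON MAC interrupt status
 * through the register map declared above. g_EPON_MAC_BASE is assumed to
 * have been mapped by the driver before this runs.
 */
static int __attribute__((unused)) epon_example_discovery_gate_pending(void)
{
 REG_e_int_status sts;
 sts.Raw = g_EPON_MAC_BASE->e_int_status.Raw; /* single 32-bit read of the status register */
 return sts.Bits.rcv_dscvry_gate_int != 0; /* discovery GATE interrupt flagged? */
}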
# 6 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_mac_reg_c_header.h" 2
# 5 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_reg.h" 2
# 13 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_reg.h"
typedef struct eponMacHwtestReg_s{
__u32 addr;
__u32 def_value;
__u32 rwmask;
}eponMacHwtestReg_t;
# 79 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_reg.h"
typedef union
{
struct
{
__u32 llidDscvrySts: 2;
__u32 reserved: 4;
__u32 rgstrFlgSts:2;
__u32 reserved1:7;
__u32 llidValid:1;
__u32 llidValue:16;
} Bits;
__u32 Raw;
} eponLlidDscvStsReg_t;
typedef union
{
struct
{
__u8 reserve1;
__u8 reserve2;
__u8 mpcpTmoutLlid;
__u8 rptOverIntvlLlid;
} Bits;
__u32 Raw;
} eponRptMpcpLlidReg_t;
typedef struct {
__u8 channel ;
__u8 queue ;
__u8 thrIdx ;
__u16 value ;
} eponQueueThreshold_t ;
# 40 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_sdi.h" 1
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_phy.h" 1
# 1 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/xmcs_phy.h" 1
# 1 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/../public/xmcs_phy.h" 1
# 1 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/../public/xmcs_const.h" 1
# 5 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/../public/xmcs_phy.h" 2
typedef struct XMCS_PhyTransParams_S {
__u16 temperature ;
__u16 voltage ;
__u16 txCurrent ;
__u16 txPower ;
__u16 rxPower ;
} PHY_PARAMS_t;
typedef struct XMCS_PhyTxRxFecStatus_S{
__u32 rx_status;
__u32 tx_status;
} PHY_FECSTATUS_t;
# 5 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/xmcs_phy.h" 2
# 1 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/xmcs_const.h" 1
# 6 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/xmcs_phy.h" 2
typedef enum {
XMCS_PHY_BURST_MODE = 0,
XMCS_PHY_CONTINUOUS_MODE
} XMCSPHY_TxBurstMode_t ;
struct XMCS_PhyRxFecConfig_S {
XPON_Mode_t fecMode ;
XPON_Mode_t fecStatus ;
XPON_Mode_t fecCntReset ;
struct {
__u32 corrBytes ;
__u32 corrCodeWords ;
__u32 unCorrCodeWords ;
__u32 totalRxCodeWords ;
__u32 fecSeconds ;
} fecCounter ;
} ;
struct XMCS_PhyFrameCount_S {
XPON_Mode_t frameCntReset ;
struct {
__u32 low ;
__u32 high ;
__u32 lof ;
} frameCounter ;
} ;
struct XMCS_PhyTransSetting_S {
XPON_Mode_t txSdInverse ;
XPON_Mode_t txFaultInverse ;
XPON_Mode_t txBurstEnInverse ;
XPON_Mode_t rxSdInverse ;
} ;
struct XMCS_PhyTxBurstCfg_S {
XMCSPHY_TxBurstMode_t burstMode ;
} ;
struct XMCS_PHY_I2cCtrl_S
{
__u8 u1CHannelID;
__u16 u2ClkDiv;
__u8 u1DevAddr;
__u8 u1WordAddrNum;
__u32 u4WordAddr;
__u8 * pu1Buf;
__u16 u2ByteCnt;
};
# 5 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_phy.h" 2
int phy_cmd_proc(uint cmd, ulong arg) ;
void xmcs_detect_phy_trans_alarm(uint alarm) ;
# 5 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_sdi.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_epon.h" 1
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_const.h" 1
# 6 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_epon.h" 2
# 16 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_epon.h"
int epon_cmd_proc(uint cmd, ulong arg) ;
# 8 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_sdi.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_gpon.h" 1
# 1 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/xmcs_gpon.h" 1
# 1 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/../public/xmcs_gpon.h" 1
typedef enum {
GPON_SW = 0,
GPON_HW,
GPON_SW_HW,
} GPON_SW_HW_SELECT_T, GPON_SW_HW_SELECT_t ;
typedef struct XMCS_GponOnuInfo_S {
__u8 onuId ;
__u8 state ;
__u8 sn[(8)] ;
__u8 PasswdLength;
__u8 hexFlag;
__u8 passwd[(10)] ;
__u8 keyIdx ;
__u8 key[(24)] ;
__u32 actTo1Timer ;
__u32 actTo2Timer ;
__u16 omcc ;
__u8 EmergencyState ;
} GPON_ONU_INFO_t;
typedef struct XMCS_GponGemCounter_S {
__u16 gemPortId ;
__u32 rxGemFrameH ;
__u32 rxGemFrameL ;
__u32 rxGemPayloadH ;
__u32 rxGemPayloadL ;
__u32 txGemFrameH ;
__u32 txGemFrameL ;
__u32 txGemPayloadH ;
__u32 txGemPayloadL ;
} GEM_STATISTIC_t;
# 5 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/xmcs_gpon.h" 2
typedef enum {
XMCS_GPON_TRTCM_SCALE_1B = 0,
XMCS_GPON_TRTCM_SCALE_2B,
XMCS_GPON_TRTCM_SCALE_4B,
XMCS_GPON_TRTCM_SCALE_8B,
XMCS_GPON_TRTCM_SCALE_16B,
XMCS_GPON_TRTCM_SCALE_32B,
XMCS_GPON_TRTCM_SCALE_64B,
XMCS_GPON_TRTCM_SCALE_128B,
XMCS_GPON_TRTCM_SCALE_256B,
XMCS_GPON_TRTCM_SCALE_512B,
XMCS_GPON_TRTCM_SCALE_1K,
XMCS_GPON_TRTCM_SCALE_2K,
XMCS_GPON_TRTCM_SCALE_4K,
XMCS_GPON_TRTCM_SCALE_8K,
XMCS_GPON_TRTCM_SCALE_16K,
XMCS_GPON_TRTCM_SCALE_32K,
XMCS_GPON_TRTCM_SCALE_ITEMS
} XMCSGPON_TrtcmScale_t;
typedef enum {
GPON_COUNTER_TYPE_GEM = 0,
GPON_COUNTER_TYPE_ETHERNET,
} GPON_COUNTER_TYPE_t ;
typedef enum {
GPON_BURST_MODE_OVERHEAD_LEN_DEFAULT = 0,
GPON_BURST_MODE_OVERHEAD_LEN_MORE_THAN_128,
} GPON_BURST_MODE_OVERHEAD_LEN_T ;
typedef enum {
Doze = 0,
Sleep,
WSleep,
} GPON_PLOAMu_SLEEP_MODE_t ;
struct XMCS_GponSnPasswd_S {
__u8 sn[(8)] ;
__u8 passwd[(10)] ;
__u8 EmergencyState ;
__u8 PasswdLength;
__u8 hexFlag;
} ;
struct XMCS_GponActTimer_S {
__u32 to1Timer ;
__u32 to2Timer ;
} ;
struct XMCS_GponTrtcmConfig_S {
XPON_Mode_t trtcmMode ;
XMCSGPON_TrtcmScale_t trtcmScale ;
} ;
struct XMCS_GponTrtcmParams_S {
__u8 channel ;
__u16 cirValue ;
__u16 cbsUnit ;
__u16 pirValue ;
__u16 pbsUnit ;
} ;
struct XMCS_GponTodCfg_S {
__u32 superframe ;
__u32 sec ;
__u32 nanosec ;
} ;
struct XMCS_EqdOffset_S {
unsigned char O4 ;
unsigned char O5 ;
unsigned char eqdOffsetFlag;
} ;
struct XMCS_GponGetCounter_S {
__u32 gponGetCounter_table[16];
};
typedef struct {
__u32 aesSpf;
__u32 activeKey[4];
__u32 shadowKey[4];
} GPON_DEV_ENCRYPT_KEY_INFO_T;
typedef struct {
XPON_Mode_t omci;
XPON_Mode_t data;
} GPON_DEV_UP_TRAFFIC_T;
typedef enum {
SNIFFER_MODE_LAN0 = 0x8001,
SNIFFER_MODE_LAN1 = 0x8002,
SNIFFER_MODE_LAN2 = 0x8004,
SNIFFER_MODE_LAN3 = 0x8008,
} GPON_DEV_SNIFFER_MODE_LAN_PORT_T ;
typedef struct {
GPON_DEV_SNIFFER_MODE_LAN_PORT_T lan_port;
__u16 tx_da;
__u16 tx_sa;
__u16 tx_ethertype;
__u16 tx_vid;
__u16 tx_tpid;
__u16 rx_da;
__u16 rx_sa;
__u16 rx_ethertype;
__u16 rx_vid;
__u16 rx_tpid;
XPON_Mode_t packet_padding;
XPON_Mode_t enable;
} GPON_DEV_SNIFFER_MODE_T;
typedef struct {
__u32 dba_backdoor_total_buf;
__u32 dba_backdoor_green_buf;
__u32 dba_backdoor_yellow_buf;
XPON_Mode_t enable;
} GPON_DEV_DBA_BACKDOOR_T;
typedef struct {
__u16 dba_slight_modify_total_buf;
__u16 dba_slight_modify_green_buf;
__u16 dba_slight_modify_yellow_buf;
XPON_Mode_t enable;
} GPON_DEV_SLIGHT_MODIFY_T;
typedef enum {
SEND_PLOAMU_BEFORE = 0,
SEND_PLOAMU_AFTER,
} GPON_DEV_SEND_PLOAMU_WAIT_MODE_T ;
# 6 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_gpon.h" 2
int gpon_cmd_proc(uint cmd, ulong arg) ;
# 12 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_sdi.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_fdet.h" 1
# 1 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/xmcs_fdet.h" 1
# 1 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/../public/xmcs_fdet.h" 1
typedef enum {
XMCS_EVENT_TYPE_GPON = 1,
XMCS_EVENT_TYPE_EPON
} XMCS_EventType_t ;
typedef struct {
XMCS_EventType_t type ;
unsigned int id ;
unsigned int value ;
} XPON_EVENT_t;
typedef enum {
XMCS_EVENT_GPON_LOS = 0,
XMCS_EVENT_GPON_PHY_READY,
XMCS_EVNET_GPON_DEACTIVATE,
XMCS_EVENT_GPON_ACTIVATE,
XMCS_EVENT_GPON_TCONT_ALLOCED,
XMCS_EVENT_GPON_MODE_CHANGE,
XMCS_EVENT_GPON_PHY_TRANS_ALARM,
XMCS_EVENT_GPON_MAX_ID,
XMCS_EVENT_GPON_DISABLE,
XMCS_EVENT_GPON_ENABLE,
XMCS_EVENT_GPON_STATE_CHANGE,
XMCS_EVENT_GPON_MAX
} XMCS_GponEventId_t ;
typedef enum {
XMCS_EVENT_EPON_LOS = 0,
XMCS_EVENT_EPON_PHY_READY,
XMCS_EVENT_EPON_REGISTER,
XMCS_EVENT_EPON_DEREGISTER,
XMCS_EVENT_EPON_DENIED,
XMCS_EVENT_EPON_MODE_CHANGE,
XMCS_EVENT_EPON_PHY_TRANS_ALARM,
XMCS_EVENT_EPON_TX_POWER_OFF,
XMCS_EVENT_EPON_MAX_ID
} XMCS_EponEventId_t ;
typedef enum {
XMCS_PHY_ALARM_TX_POWER_HIGH = (1<<0),
XMCS_PHY_ALARM_TX_POWER_LOW = (1<<1),
XMCS_PHY_ALARM_TX_CURRENT_HIGH = (1<<2),
XMCS_PHY_ALARM_TX_CURRENT_LOW = (1<<3),
XMCS_PHY_ALARM_RX_POWER_HIGH = (1<<4),
XMCS_PHY_ALARM_RX_POWER_LOW = (1<<5),
} XMCSPHY_PhyTransAlarm_t ;
# 5 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/xmcs_fdet.h" 2
struct XMCS_PonEventStatus_S {
XPON_EVENT_t event[32] ;
unsigned int items ;
};
typedef struct{
__u32 report_init_O1:1;
__u32 reserve:31;
}Event_ctrlFlag_t;
# 5 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_fdet.h" 2
void xmcs_report_event(XMCS_EventType_t type, uint id, uint value) ;
int fdet_cmd_proc(uint cmd, ulong arg) ;
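/*
 * Illustrative sketch (assumption): reporting an EPON registration event via
 * the helper declared above. The id follows XMCS_EponEventId_t; the value of
 * 1 is an example payload only.
 */
static void __attribute__((unused)) epon_example_report_register(void)
{
 xmcs_report_event(XMCS_EVENT_TYPE_EPON, XMCS_EVENT_EPON_REGISTER, 1);
}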
# 15 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_sdi.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_if.h" 1
# 1 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/xmcs_if.h" 1
# 1 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/../public/xmcs_if.h" 1
# 9 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/../public/xmcs_if.h"
typedef struct XMCS_WanCntStats_S {
__u32 mask;
__u32 txFrameCnt;
__u32 txFrameLen;
__u32 txDropCnt;
__u32 txBroadcastCnt;
__u32 txMulticastCnt;
__u32 txLess64Cnt;
__u32 txMore1518Cnt;
__u32 tx64Cnt;
__u32 tx65To127Cnt;
__u32 tx128To255Cnt;
__u32 tx256To511Cnt;
__u32 tx512To1023Cnt;
__u32 tx1024To1518Cnt;
__u32 rxFrameCnt;
__u32 rxFrameLen;
__u32 rxDropCnt;
__u32 rxBroadcastCnt;
__u32 rxMulticastCnt;
__u32 rxCrcCnt;
__u32 rxFragFameCnt;
__u32 rxJabberFameCnt;
__u32 rxLess64Cnt;
__u32 rxMore1518Cnt;
__u32 rx64Cnt;
__u32 rx65To127Cnt;
__u32 rx128To255Cnt;
__u32 rx256To511Cnt;
__u32 rx512To1023Cnt;
__u32 rx1024To1518Cnt;
__u32 rxHecErrorCnt;
__u32 rxFecErrorCnt;
}WAN_STATISTIC_t;
typedef enum {
GPON_UNICAST_GEM = 0,
GPON_MULTICAST_GEM,
} GPON_GemType_t ;
typedef enum {
EPON_RX_FORWARDING = 0,
EPON_RX_DISCARD,
EPON_RX_LOOPBACK
} EPON_RxMode_t ;
typedef enum {
EPON_TX_FORWARDING = 0,
EPON_TX_DISCARD,
} EPON_TxMode_t ;
struct XMCS_EponLlidCreate_S {
__u8 idx ;
__u16 llid ;
} ;
struct XMCS_EponRxConfig_S {
__u8 idx ;
EPON_RxMode_t rxMode ;
} ;
struct XMCS_EponTxConfig_S {
__u8 idx ;
EPON_TxMode_t txMode ;
} ;
struct XMCS_EponLlidInfo_S {
struct {
__u8 idx ;
__u16 llid ;
__u8 channel ;
EPON_RxMode_t rxMode ;
EPON_TxMode_t txMode ;
} info[8] ;
__u16 entryNum ;
} ;
# 6 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/xmcs_if.h" 2
typedef enum {
XMCS_IF_WEIGHT_TYPE_PACKET = 0,
XMCS_IF_WEIGHT_TYPE_BYTE
} XMCSIF_QoSWeightType_t ;
typedef enum {
XMCS_IF_WEIGHT_SCALE_64B = 0,
XMCS_IF_WEIGHT_SCALE_16B
} XMCSIF_QoSWeightScale_t ;
typedef enum {
XMCS_IF_QOS_TYPE_WRR = 0,
XMCS_IF_QOS_TYPE_SP,
XMCS_IF_QOS_TYPE_SPWRR7,
XMCS_IF_QOS_TYPE_SPWRR6,
XMCS_IF_QOS_TYPE_SPWRR5,
XMCS_IF_QOS_TYPE_SPWRR4,
XMCS_IF_QOS_TYPE_SPWRR3,
XMCS_IF_QOS_TYPE_SPWRR2,
} XMCSIF_QosType_t ;
typedef enum {
XMCS_IF_CONGESTIOM_SCALE_2 = 0,
XMCS_IF_CONGESTIOM_SCALE_4,
XMCS_IF_CONGESTIOM_SCALE_8,
XMCS_IF_CONGESTIOM_SCALE_16,
} XMCSIF_CongestionScale_t ;
typedef enum {
XMCS_IF_TRTCM_SCALE_1B = 0,
XMCS_IF_TRTCM_SCALE_2B,
XMCS_IF_TRTCM_SCALE_4B,
XMCS_IF_TRTCM_SCALE_8B,
XMCS_IF_TRTCM_SCALE_16B,
XMCS_IF_TRTCM_SCALE_32B,
XMCS_IF_TRTCM_SCALE_64B,
XMCS_IF_TRTCM_SCALE_128B,
XMCS_IF_TRTCM_SCALE_256B,
XMCS_IF_TRTCM_SCALE_512B,
XMCS_IF_TRTCM_SCALE_1K,
XMCS_IF_TRTCM_SCALE_2K,
XMCS_IF_TRTCM_SCALE_4K,
XMCS_IF_TRTCM_SCALE_8K,
XMCS_IF_TRTCM_SCALE_16K,
XMCS_IF_TRTCM_SCALE_32K,
XMCS_IF_TRTCM_SCALE_ITEMS
} XMCSIF_TrtcmScale_t ;
typedef enum {
XMCS_IF_PCP_TYPE_CDM_TX = 0,
XMCS_IF_PCP_TYPE_CDM_RX,
XMCS_IF_PCP_TYPE_GDM_RX
} XMCSIF_PcpType_t ;
typedef enum {
XMCS_IF_PCP_MODE_DISABLE = 0,
XMCS_IF_PCP_MODE_8B0D,
XMCS_IF_PCP_MODE_7B1D,
XMCS_IF_PCP_MODE_6B2D,
XMCS_IF_PCP_MODE_5B3D
} XMCSIF_PcpMode_t ;
typedef enum {
XMCS_IF_WAN_DETECT_MODE_AUTO = 0,
XMCS_IF_WAN_DETECT_MODE_GPON,
XMCS_IF_WAN_DETECT_MODE_EPON
} XMCSIF_WanDetectionMode_t ;
typedef enum {
XMCS_IF_WAN_LINK_OFF = 0,
XMCS_IF_WAN_LINK_GPON,
XMCS_IF_WAN_LINK_EPON
} XMCSIF_WanLinkStatus_t ;
typedef enum {
XMCS_IF_ONU_TYPE_UNKNOWN = 0,
XMCS_IF_ONU_TYPE_SFU,
XMCS_IF_ONU_TYPE_HGU
} XMCSIF_OnuType_t;
typedef enum {
XMCS_IF_CLEAR_TYPE_XPON = 0,
} XMCSIF_ClearType_t;
struct XMCS_DebugLevel_S {
xPON_DebugMsg_t mask;
XPON_Mode_t enable;
};
struct XMCS_QoSWeightConfig_S {
XMCSIF_QoSWeightType_t weightType ;
XMCSIF_QoSWeightScale_t weightScale ;
};
struct XMCS_ChannelQoS_S {
__u8 channel ;
XMCSIF_QosType_t qosType ;
struct {
__u8 weight ;
} queue[8];
};
struct XMCS_TxQueueCongestion_S {
struct {
XPON_Mode_t trtcm ;
XPON_Mode_t dei ;
XPON_Mode_t threshold ;
} congestMode ;
struct {
XMCSIF_CongestionScale_t max ;
XMCSIF_CongestionScale_t min ;
} scale ;
struct {
__u8 green ;
__u8 yellow ;
} dropProbability ;
struct {
__u8 queueIdx ;
__u8 greenMax ;
__u8 greenMin ;
__u8 yellowMax ;
__u8 yellowMin ;
} queueThreshold[8] ;
};
struct XMCS_TxTrtcmScale_S {
XMCSIF_TrtcmScale_t trtcmScale ;
};
struct XMCS_TxQueueTrtcm_S {
__u8 tsIdx ;
__u16 cirValue ;
__u16 cbsUnit ;
__u16 pirValue ;
__u16 pbsUnit ;
};
struct XMCS_TxPCPConfig_S {
XMCSIF_PcpType_t pcpType ;
XMCSIF_PcpMode_t pcpMode ;
};
struct XMCS_ClearConfig_S {
XMCSIF_ClearType_t clearType ;
};
struct XMCS_WanLinkConfig_S {
XPON_Mode_t linkStart ;
XMCSIF_WanDetectionMode_t detectMode ;
XMCSIF_WanLinkStatus_t linkStatus ;
};
struct XMCS_StormCtrlConfig_S {
__u32 mask;
__u32 threld ;
__u32 timer ;
};
# 187 "/opt/tclinux_phoenix/global_inc/modules/xpon_global/private/xmcs_if.h"
struct XMCS_GemPortAssign_S {
struct {
__u16 id ;
__u16 ani ;
} gemPort[256] ;
__u16 entryNum ;
};
struct XMCS_GemPortCreate_S {
GPON_GemType_t gemType ;
__u16 gemPortId ;
__u16 allocId ;
};
struct XMCS_GemPortLoopback_S {
__u16 gemPortId ;
XPON_Mode_t loopback ;
};
struct XMCS_GemPortInfo_S {
struct {
GPON_GemType_t gemType ;
__u16 gemPortId ;
__u16 allocId ;
__u16 aniIdx ;
XPON_Mode_t lbMode ;
XPON_Mode_t enMode ;
} info[256] ;
__u16 entryNum ;
} ;
struct XMCS_TcontInfo_S {
struct {
__u16 allocId ;
__u8 channel ;
} info[32] ;
__u16 entryNum ;
} ;
struct XMCS_TcontCfg_S {
__u16 allocId ;
__u8 tcontId ;
__u8 tcontPolicy ;
} ;
struct XMCS_OMCC_Info_S {
__u16 allocId;
__u32 gemportid;
} ;
struct XMCS_TcontTrtcmScale {
XMCSIF_TrtcmScale_t trtcmScale ;
} ;
struct FH_VLAN_INFO_S {
__u16 srcVlan;
__u16 dstVlan;
};
struct XMCS_VLAN_CFG_INFO_S {
__u8 vlanCount;
struct FH_VLAN_INFO_S vlan[8];
} ;
struct XMCS_TcontTrtcm_S {
struct {
__u8 channel ;
__u32 cirValue ;
__u32 cbsUnit ;
__u32 pirValue ;
__u32 pbsUnit ;
} trtcmParam[32] ;
__u8 entryNum ;
} ;
# 5 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_if.h" 2
# 18 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_if.h"
struct XMCS_VLAN_CFG_OPERATE_S {
unchar vlanOperate;
struct FH_VLAN_INFO_S vlan;
} ;
# 31 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_if.h"
int xmcs_get_wan_link_status(struct XMCS_WanLinkConfig_S *pSysLinkStatus) ;
int xmcs_get_onu_type(XMCSIF_OnuType_t *type);
# 44 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_if.h"
int xmcs_create_gem_port(struct XMCS_GemPortCreate_S *pGemCreate) ;
int xmcs_remove_gem_port(ushort gemPortId) ;
int xmcs_remove_all_gem_port(void) ;
# 68 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_if.h"
int xmcs_get_gem_port_info(struct XMCS_GemPortInfo_S *pGemInfo) ;
# 77 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_if.h"
int xmcs_get_tcont_info(struct XMCS_TcontInfo_S *pTcontInfo) ;
int xmcs_create_tcont_info(struct XMCS_TcontCfg_S *pTcontInfo) ;
int xmcs_remove_tcont_info(ushort allocId) ;
int xmcs_set_vlan_info(struct XMCS_VLAN_CFG_OPERATE_S *pVlaninfo);
void showTransVlanInfo(void);
void assignGemportId(ushort gemPortId);
int xmcs_get_omcc_info(struct XMCS_OMCC_Info_S *ptOmccInfo);
# 110 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_if.h"
int xmcs_create_llid(struct XMCS_EponLlidCreate_S *pLlidCreate) ;
int xmcs_remove_llid(unchar idx) ;
int if_cmd_proc(uint cmd, ulong arg) ;
int xmcs_set_connection_start(XPON_Mode_t mode);
int xmcs_set_epon_llid_config(uint index);
int xpon_set_qdma_qos(int enable);
int xpon_reset_qdma_tx_buf(void );
void prepare_epon(void);
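/*
 * Illustrative sketch only (not part of the original headers): querying the
 * ONU type through the xmcs interface declared above.  The assumption that a
 * non-zero return code means failure is the example's, not the header's.
 */
static XMCSIF_OnuType_t example_query_onu_type(void)
{
 XMCSIF_OnuType_t type = XMCS_IF_ONU_TYPE_UNKNOWN;

 /* xmcs_get_onu_type() reports SFU / HGU / UNKNOWN; assume non-zero means failure. */
 if (xmcs_get_onu_type(&type) != 0)
  return XMCS_IF_ONU_TYPE_UNKNOWN;
 return type;
}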
# 16 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_sdi.h" 2
# 41 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon.h" 2
# 63 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon.h"
void dbgtoMem(__u32 debugLevel,char * fmt,...);
typedef struct {
__u8 snifferModeEnable;
__u8 snifferModeLanMask;
} EPON_SNIFFER_MODE_T;
typedef struct {
__u32 earlyWakeupTimer;
__u8 earlyWakeupFlag;
__u16 earlyWakeupCount;
__u8 txOamFavorMode;
EPON_SNIFFER_MODE_T snifferModeConfig;
} EPON_Config_T ;
typedef struct {
struct timer_list early_wakeup_timer;
EPON_Config_T eponCfg ;
__u8 mpcpInterruptMode;
} EPON_GlbPriv_T ;
# 119 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon.h"
typedef struct eponLlid_s{
__u8 llidIndex;
__u8 enableFlag;
__u16 llid;
__u8 macAddr[6];
__u8 oui[3];
__u8 vendorSpecInfo[4];
eponMpcp_t eponMpcp;
}eponLlid_t, *eponLlid_p;
typedef struct epon_s{
__u32 llidMask;
__u8 hldoverEnable;
__u16 hldOverTime;
__u8 typeBOnGoing;
__u32 timeDrftEq255Cnt;
__u32 timeDrftFrom8To16Cnt;
eponLlid_t eponLlidEntry[8];
__u32 eponTxPowerFlag;
}epon_t, *epon_p;
extern int cmd_register(cmds_t *);
extern int cmd_unregister(char *name);
extern int subcmd(const cmds_t tab[], int argc, char *argv[], void *p);
int eponLlidEnable(__u8 llidIndex);
int eponLlidDisable(__u8 llidIndex);
int eponMacRestart(void);
int eponWanResetWithChannelRetire(void);
int eponMacDumpAllReg(void);
int eponMacGetRegTblSize(void);
void eponStart(unsigned long);
void eponStop(void);
int eponInit(void);
void eponExit(void);
int epon_set_queue_threshold(eponQueueThreshold_t *pEponQThr) ;
int epon_get_queue_threshold(eponQueueThreshold_t *pEponQThr) ;
int eponSetLlidThrshldNum(__u8 llidIndex, __u8 num);
int eponGetLlidThrshldNum(__u8 llidIndex, __u8 *num);
int eponHwDygaspCtrl(__u8 flag);
int eponDevGetDyingGaspMode(__u8 *mode);
int eponDevSetDyingGaspNum(__u32 num);
int eponDevGetDyingGaspNum(__u32 *num);
void eponDetectPhyReady(void);
void eponDetectPhyLosLof(void);
int eponSetSnifferModeConfig(__u8 enable, __u16 mask);
# 72 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/../../inc/common/xpondrv.h" 1
# 73 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/../../inc/common/xpon_led.h" 1
# 44 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/../../inc/common/xpon_led.h"
typedef enum XPON_ALARM_LED_Status_e{
ALARM_LED_OFF ,
ALARM_LED_ON ,
ALARM_LED_FLICKER
}XPON_ALARM_LED_Status_t;
void change_alarm_led_status(XPON_ALARM_LED_Status_t los_led_status);
int xpon_los_led_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data);
int xpon_los_led_write_proc(struct file *file, const char *buffer,
unsigned long count, void *data);
# 74 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/ecnt_hook/ecnt_hook_qdma.h" 1
# 45 "include/ecnt_hook/ecnt_hook_qdma.h"
# 1 "include/ecnt_hook/ecnt_hook.h" 1
# 77 "include/ecnt_hook/ecnt_hook.h"
typedef enum {
ECNT_HOOK_ERROR = -1,
ECNT_RETURN_DROP= 0,
ECNT_CONTINUE ,
ECNT_RETURN,
}ecnt_ret_val;
enum ecnt_maintype{
ECNT_NET_CORE_DEV,
ECNT_NET_VLAN_DEV,
ECNT_NET_BR_FDB,
ECNT_NET_BR_FORWARD,
ECNT_NET_BR_INPUT,
ECNT_NET_PPP_GENERIC,
ECNT_NET_UDP,
ECNT_NET_UDPV6,
ECNT_NET_AF,
ECNT_NET_SOCK,
ECNT_NET_IP_OUTPUT,
ECNT_NF_BR,
ECNT_NF_ARP,
ECNT_NF_IPV4,
ECNT_NF_IPV6,
ECNT_NF_TRACK_CORE,
ECNT_QDMA_WAN,
ECNT_QDMA_LAN,
ECNT_FE,
ECNT_PPE,
ECNT_ATM,
ECNT_PTM,
ECNT_ETHER_SWITCH,
ECNT_ETHER_PHY,
ECNT_XPON_MAC,
ECNT_XPON_PHY,
ECNT_QDMA_7510_20,
ECNT_PCIE,
ECNT_MULTICAST,
ECNT_L2TP,
ECNT_TRAFFIC_CLASSIFY,
ECNT_SMUX,
ECNT_VOIP,
ECNT_NUM_MAINTYPE
};
struct ecnt_data;
typedef ecnt_ret_val ecnt_hookfn(struct ecnt_data *in_data);
struct net_info_s{
};
struct nf_info_s{
};
struct ecnt_ops_info{
unsigned int drop_num;
union{
struct nf_info_s nf_info;
struct net_info_s net_info;
};
};
struct ecnt_hook_ops {
struct list_head list;
unsigned int hook_id;
struct ecnt_ops_info info;
const char *name;
unsigned int is_execute;
ecnt_hookfn *hookfn;
unsigned int maintype;
unsigned int subtype;
int priority;
};
extern struct list_head ecnt_hooks[ECNT_NUM_MAINTYPE][8];
extern void ecnt_hook_init(void);
extern int __ECNT_HOOK(unsigned int maintype, unsigned int subtype,struct ecnt_data *in_data);
extern int ecnt_register_hook(struct ecnt_hook_ops *reg);
extern void ecnt_unregister_hook(struct ecnt_hook_ops *reg);
extern int show_all_ecnt_hookfn(void);
extern int set_ecnt_hookfn_execute_or_not(unsigned int maintype, unsigned int subtype, unsigned int hook_id, unsigned int is_execute);
extern int ecnt_ops_unregister(unsigned int maintype, unsigned int subtype, unsigned int hook_id);
extern int get_ecnt_hookfn(unsigned int maintype, unsigned int subtype);
extern int ecnt_register_hooks(struct ecnt_hook_ops *reg, unsigned int n);
extern void ecnt_unregister_hooks(struct ecnt_hook_ops *reg, unsigned int n);
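/*
 * Illustrative sketch only (not from the original source): how a driver can
 * attach to the ECNT dispatch table declared above.  The callback name,
 * subtype 0 and priority 0 are assumptions made for this example; only
 * struct ecnt_hook_ops, ecnt_ret_val, ECNT_XPON_MAC and ecnt_register_hook()
 * come from the declarations above.
 */
static ecnt_ret_val example_xpon_mac_hook(struct ecnt_data *in_data)
{
 /* Inspect or modify in_data here; ECNT_CONTINUE lets the remaining hooks run. */
 return ECNT_CONTINUE;
}
static struct ecnt_hook_ops example_xpon_mac_ops = {
 .name = "example_xpon_mac_hook",
 .is_execute = 1,
 .hookfn = example_xpon_mac_hook,
 .maintype = ECNT_XPON_MAC,
 .subtype = 0,
 .priority = 0,
};
/* A module init path would then call: ecnt_register_hook(&example_xpon_mac_ops); */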
# 46 "include/ecnt_hook/ecnt_hook_qdma.h" 2
# 1 "include/ecnt_hook/ecnt_hook_qdma_type.h" 1
# 113 "include/ecnt_hook/ecnt_hook_qdma_type.h"
struct port_info;
typedef enum {
DBG_ERR ,
DBG_ST ,
DBG_WARN ,
DBG_MSG ,
DBG_LEVEL_MAX
} QDMA_DebugLevel_t ;
typedef enum {
QDMA_CALLBACK_RX_PACKET,
QDMA_CALLBACK_EVENT_HANDLER,
QDMA_CALLBACK_RECYCLE_PACKET,
QDMA_CALLBACK_GPON_MAC_HANDLER,
QDMA_CALLBACK_EPON_MAC_HANDLER,
QDMA_CALLBACK_XPON_PHY_HANDLER,
} QDMA_CbType_t ;
typedef enum {
QDMA_EVENT_RECV_PKTS = 0 ,
QDMA_EVENT_NO_RX_BUFFER ,
QDMA_EVENT_TX_CROWDED
} QDMA_EventType_t ;
typedef enum {
QDMA_LOOPBACK_DISABLE = 0 ,
QDMA_LOOPBACK_QDMA ,
QDMA_LOOPBACK_UMAC
} QDMA_LoopbackMode_t ;
typedef enum {
QDMA_TX_POLLING = 0 ,
QDMA_TX_INTERRUPT ,
} QDMA_RecycleMode_t ;
typedef enum {
QDMA_RX_POLLING = 0 ,
QDMA_RX_INTERRUPT ,
QDMA_RX_NAPI
} QDMA_RecvMode_t ;
typedef enum {
QDMA_DISABLE = 0 ,
QDMA_ENABLE
} QDMA_Mode_t ;
typedef enum {
QDMA_WAN_TYPE_GPON = 0,
QDMA_WAN_TYPE_EPON,
QDMA_WAN_TYPE_PTM,
QDMA_WAN_TYPE_SAR
} QDMA_WanType_t ;
typedef enum {
QDMA_TXQOS_WEIGHT_BY_PACKET = 0,
QDMA_TXQOS_WEIGHT_BY_BYTE,
QDMA_TXQOS_WEIGHT_MAX,
} QDMA_TxQosWeightType_t ;
typedef enum {
QDMA_TXQOS_WEIGHT_SCALE_64B = 0,
QDMA_TXQOS_WEIGHT_SCALE_16B,
QDMA_TXQOS_WEIGHT_SCALE_MAX,
} QDMA_TxQosWeightScale_t ;
typedef enum {
QDMA_TXQOS_TYPE_WRR = 0,
QDMA_TXQOS_TYPE_SP,
QDMA_TXQOS_TYPE_SPWRR7,
QDMA_TXQOS_TYPE_SPWRR6,
QDMA_TXQOS_TYPE_SPWRR5,
QDMA_TXQOS_TYPE_SPWRR4,
QDMA_TXQOS_TYPE_SPWRR3,
QDMA_TXQOS_TYPE_SPWRR2,
QDMA_TXQOS_TYPE_NUMS
} QDMA_TxQosType_t ;
typedef enum {
QDMA_VCHNL_TXQOS_TYPE_WRR = 0,
QDMA_VCHNL_TXQOS_TYPE_SP,
QDMA_VCHNL_TXQOS_TYPE_SPWRR3,
QDMA_VCHNL_TXQOS_TYPE_SPWRR2,
QDMA_VCHNL_TXQOS_TYPE_NUMS
} QDMA_VirChnlQosType_t ;
typedef enum {
QDMA_TRTCM_SCALE_1BYTE = 0,
QDMA_TRTCM_SCALE_2BYTE,
QDMA_TRTCM_SCALE_4BYTE,
QDMA_TRTCM_SCALE_8BYTE,
QDMA_TRTCM_SCALE_16BYTE,
QDMA_TRTCM_SCALE_32BYTE,
QDMA_TRTCM_SCALE_64BYTE,
QDMA_TRTCM_SCALE_128BYTE,
QDMA_TRTCM_SCALE_256BYTE,
QDMA_TRTCM_SCALE_512BYTE,
QDMA_TRTCM_SCALE_1KBYTE,
QDMA_TRTCM_SCALE_2KBYTE,
QDMA_TRTCM_SCALE_4KBYTE,
QDMA_TRTCM_SCALE_8KBYTE,
QDMA_TRTCM_SCALE_16KBYTE,
QDMA_TRTCM_SCALE_32KBYTE,
QDMA_TRTCM_SCALE_MAX_ITEMS
} QDMA_TrtcmScale_t ;
typedef enum {
QDMA_TRTCM_PARAM_CIR = 0,
QDMA_TRTCM_PARAM_CBS,
QDMA_TRTCM_PARAM_PIR,
QDMA_TRTCM_PARAM_PBS
} QDMA_TrtcmParamType_t ;
typedef enum {
QDMA_EPON_REPORT_WO_THRESHOLD = 0,
QDMA_EPON_REPORT_ONE_THRESHOLD,
QDMA_EPON_REPORT_TWO_THRESHOLD,
QDMA_EPON_REPORT_THREE_THRESHOLD
} QDMA_EponReportMode_t ;
typedef enum {
QDMA_TXQUEUE_SCALE_2_DSCP = 0,
QDMA_TXQUEUE_SCALE_4_DSCP,
QDMA_TXQUEUE_SCALE_8_DSCP,
QDMA_TXQUEUE_SCALE_16_DSCP,
QDMA_TXQUEUE_SCALE_ITEMS
} QDMA_TxQueueThresholdScale_t ;
typedef enum {
PSE_PCP_TYPE_CDM_TX = 0,
PSE_PCP_TYPE_CDM_RX,
PSE_PCP_TYPE_GDM_RX
} PSE_PcpType_t ;
typedef enum {
PSE_PCP_MODE_DISABLE = 0,
PSE_PCP_MODE_8B0D = 1,
PSE_PCP_MODE_7B1D = 2,
PSE_PCP_MODE_6B2D = 4,
PSE_PCP_MODE_5B3D = 8
} PSE_PcpMode_t ;
typedef enum {
QDMA_RX_RATE_LIMIT_BY_BYTE = 0,
QDMA_RX_RATE_LIMIT_BY_PACKET,
} QDMA_RxPktMode_t ;
typedef enum {
QDMA_Rx_Rate_Limit_PIR = 0,
QDMA_Rx_Rate_Limit_PBS
} QDMA_RxRateLimitType_t ;
typedef enum {
QDMA_Tx_Rate_Limit_CIR = 0,
QDMA_Tx_Rate_Limit_CBS,
QDMA_Tx_Rate_Limit_PIR,
QDMA_Tx_Rate_Limit_PBS
} QDMA_TxRateLimitType_t ;
typedef enum {
QDMA_TX_RATE_METER_TIME_DIVISOR_1 = 0,
QDMA_TX_RATE_METER_TIME_DIVISOR_2,
QDMA_TX_RATE_METER_TIME_DIVISOR_4,
QDMA_TX_RATE_METER_TIME_DIVISOR_8,
QDMA_TX_RATE_METER_TIME_DIVISOR_ITEMS
} QDMA_TxRateMeterTimeDivisor_t ;
typedef enum {
QDMA_DYNCNGST_DEI_THRH_SCALE_1_2 = 0,
QDMA_DYNCNGST_DEI_THRH_SCALE_1_4,
QDMA_DYNCNGST_DEI_THRH_SCALE_1_8,
QDMA_DYNCNGST_DEI_THRH_SCALE_1_16,
QDMA_DYNCNGST_DEI_THRH_SCALE_ITEMS
} QDMA_TxDynCngstDeiThrhScale_t ;
typedef enum {
QDMA_QUEUE_NONBLOCKING = 0 ,
QDMA_QUEUE_NORMAL
} QDMA_TxQCngstQueueMode_t ;
typedef enum {
QDMA_CHANNEL_NONBLOCKING = 0 ,
QDMA_CHANNEL_NORMAL
} QDMA_TxQCngstChannelMode_t ;
typedef enum {
QDMA_DBG_CNTR_SRC_CPU_TX = 0,
QDMA_DBG_CNTR_SRC_FWD_TX,
QDMA_DBG_CNTR_SRC_CPU_RX,
QDMA_DBG_CNTR_SRC_FWD_RX,
QDMA_DBG_CNTR_SRC_ITEMS
} QDMA_DbgCntrSrc_t ;
typedef enum {
QDMA_TXCNGST_DYNAMIC_NORMAL = 0,
QDMA_TXCNGST_DYNAMIC_PEAKRATE_MARGIN,
QDMA_TXCNGST_STATIC,
} QDMA_TxCngstMode_t ;
typedef enum {
QDMA_TXCNGST_PEEKRATE_MARGIN_0 = 0,
QDMA_TXCNGST_PEEKRATE_MARGIN_25,
QDMA_TXCNGST_PEEKRATE_MARGIN_50,
QDMA_TXCNGST_PEEKRATE_MARGIN_100,
} QDMA_PeekRateMargin_t ;
typedef enum {
QDMA_DBG_MEM_XS_MEMSEL_LOCAL_DSCP_INFO = 0,
QDMA_DBG_MEM_XS_MEMSEL_LOCAL_QUEUE_INFO,
QDMA_DBG_MEM_XS_MEMSEL_QOS_WEIGHT_COUNTER,
QDMA_DBG_MEM_XS_MEMSEL_LOCAL_DMA_INDEX,
QDMA_DBG_MEM_XS_MEMSEL_BUFFER_MONITOR,
QDMA_DBG_MEM_XS_MEMSEL_RATELIMIT_PARAM,
QDMA_DBG_MEM_XS_MEMSEL_VCHNL_QOS_WEIGHT,
QDMA_DBG_MEM_XS_MEMSEL_MAX,
} QDMA_DbgMemXsMemSel_t ;
typedef enum {
MAC_TYPE_ETHER = 0,
MAC_TYPE_XPON,
MAC_TYPE_PTM,
MAC_TYPE_ATM,
} MAC_Type_t ;
typedef enum {
QDMA_VIRTUAL_CHANNEL_16Queue = 0,
QDMA_VIRTUAL_CHANNEL_32Queue,
} QDMA_VirChnlMapMode_t ;
typedef enum {
QDMA_DBG_CNTR_CHNL_TXCPU = 0,
QDMA_DBG_CNTR_CHNL_TXFWD1,
QDMA_DBG_CNTR_CHNL_TXFWD2,
QDMA_DBG_CNTR_QUEUE,
QDMA_DBG_CNTR_MAX,
} QDMA_DbgCntrChnlGroup_t ;
typedef enum {
QDMA_FUNCTION_INIT = 0,
QDMA_FUNCTION_DEINIT ,
QDMA_FUNCTION_TX_DMA_MODE ,
QDMA_FUNCTION_RX_DMA_MODE ,
QDMA_FUNCTION_LOOPBACK_MODE ,
QDMA_FUNCTION_REGISTER ,
QDMA_FUNCTION_UNREGISTER ,
QDMA_FUNCTION_ENABLE_RXPKT_INT ,
QDMA_FUNCTION_DISABLE_RXPKT_INT ,
QDMA_FUNCTION_RECEIVE_PACKETS ,
QDMA_FUNCTION_TRANSMIT_PACKETS ,
QDMA_FUNCTION_SET_TX_QOS_WEIGHT ,
QDMA_FUNCTION_GET_TX_QOS_WEIGHT ,
QDMA_FUNCTION_SET_TX_QOS ,
QDMA_FUNCTION_GET_TX_QOS ,
QDMA_FUNCTION_SET_MAC_LIMIT_THRESHOLD ,
QDMA_FUNCTION_GET_MAC_LIMIT_THRESHOLD ,
QDMA_FUNCTION_SET_TXBUF_THRESHOLD ,
QDMA_FUNCTION_GET_TXBUF_THRESHOLD ,
QDMA_FUNCTION_SET_PREFETCH_MODE ,
QDMA_FUNCTION_SET_PKTSIZE_OVERHEAD_EN ,
QDMA_FUNCTION_GET_PKTSIZE_OVERHEAD_EN ,
QDMA_FUNCTION_SET_PKTSIZE_OVERHEAD_VALUE ,
QDMA_FUNCTION_GET_PKTSIZE_OVERHEAD_VALUE ,
QDMA_FUNCTION_SET_LMGR_LOW_THRESHOLD ,
QDMA_FUNCTION_GET_LMGR_LOW_THRESHOLD ,
QDMA_FUNCTION_GET_LMGR_STATUS ,
QDMA_FUNCTION_SET_DEBUG_LEVEL,
QDMA_FUNCTION_DUMP_DMA_BUSY,
QDMA_FUNCTION_DUMP_REG_POLLING,
QDMA_FUNCTION_SET_FORCE_RX_RING1,
QDMA_FUNCTION_SET_TX_DROP_EN,
QDMA_FUNCTION_GET_TX_DROP_EN,
QDMA_FUNCTION_SET_TX_RATEMETER,
QDMA_FUNCTION_GET_TX_RATEMETER,
QDMA_FUNCTION_ENABLE_TX_RATELIMIT,
QDMA_FUNCTION_SET_TX_RATELIMIT_CFG,
QDMA_FUNCTION_GET_TX_RATELIMIT_CFG,
QDMA_FUNCTION_SET_TX_RATELIMIT,
QDMA_FUNCTION_GET_TX_RATELIMIT,
QDMA_FUNCTION_SET_TX_DBAREPORT,
QDMA_FUNCTION_GET_TX_DBAREPORT,
QDMA_FUNCTION_SET_RX_PROTECT_EN,
QDMA_FUNCTION_GET_RX_PROTECT_EN,
QDMA_FUNCTION_SET_RX_LOW_THRESHOLD,
QDMA_FUNCTION_GET_RX_LOW_THRESHOLD,
QDMA_FUNCTION_SET_RX_RATELIMIT_EN,
QDMA_FUNCTION_SET_RX_RATELIMIT_PKT_MODE,
QDMA_FUNCTION_GET_RX_RATELIMIT_CFG,
QDMA_FUNCTION_SET_RX_RATELIMIT,
QDMA_FUNCTION_GET_RX_RATELIMIT,
QDMA_FUNCTION_SET_TXQ_DEI_DROP_MODE,
QDMA_FUNCTION_GET_TXQ_DEI_DROP_MODE,
QDMA_FUNCTION_SET_TXQ_CNGST_MODE,
QDMA_FUNCTION_GET_TXQ_CNGST_MODE,
QDMA_FUNCTION_SET_TXQ_DEI_THRH_SCALE,
QDMA_FUNCTION_GET_TXQ_DEI_THRH_SCALE,
QDMA_FUNCTION_SET_TXQ_CNGST_AUTO_CONFIG,
QDMA_FUNCTION_GET_TXQ_CNGST_AUTO_CONFIG,
QDMA_FUNCTION_SET_TXQ_CNGST_DYNAMIC_THRESHOLD,
QDMA_FUNCTION_GET_TXQ_CNGST_DYNAMIC_THRESHOLD,
QDMA_FUNCTION_SET_TXQ_CNGST_TOTAL_THRESHOLD,
QDMA_FUNCTION_GET_TXQ_CNGST_TOTAL_THRESHOLD,
QDMA_FUNCTION_SET_TXQ_CNGST_CHANNEL_THRESHOLD,
QDMA_FUNCTION_GET_TXQ_CNGST_CHANNEL_THRESHOLD,
QDMA_FUNCTION_SET_TXQ_CNGST_QUEUE_THRESHOLD,
QDMA_FUNCTION_GET_TXQ_CNGST_QUEUE_THRESHOLD,
QDMA_FUNCTION_SET_PEEKRATE_PARAMS,
QDMA_FUNCTION_GET_PEEKRATE_PARAMS,
QDMA_FUNCTION_SET_TXQ_STATIC_QUEUE_NORMAL_THRESHOLD,
QDMA_FUNCTION_SET_TXQ_STATIC_QUEUE_DEI_THRESHOLD,
QDMA_FUNCTION_GET_TXQ_CNGST_DYNAMIC_INFO,
QDMA_FUNCTION_GET_TXQ_CNGST_STATIC_INFO,
QDMA_FUNCTION_SET_TXQ_CNGST_QUEUE_NONBLOCKING,
QDMA_FUNCTION_GET_TXQ_CNGST_QUEUE_NONBLOCKING,
QDMA_FUNCTION_SET_TXQ_CNGST_CHANNEL_NONBLOCKING,
QDMA_FUNCTION_GET_TXQ_CNGST_CHANNEL_NONBLOCKING,
QDMA_FUNCTION_SET_VIRTUAL_CHANNEL_MODE,
QDMA_FUNCTION_GET_VIRTUAL_CHANNEL_MODE,
QDMA_FUNCTION_SET_VIRTUAL_CHANNEL_QOS,
QDMA_FUNCTION_GET_VIRTUAL_CHANNEL_QOS,
QDMA_FUNCTION_SET_DBGCNTR_CHANNEL,
QDMA_FUNCTION_SET_DBGCNTR_QUEUE,
QDMA_FUNCTION_CLEAR_DBGCNTR,
QDMA_FUNCTION_DUMP_DBGCNTR,
QDMA_FUNCTION_DUMP_TX_QOS,
QDMA_FUNCTION_DUMP_VIRTUAL_CHANNEL_QOS,
QDMA_FUNCTION_DUMP_TX_RATELIMIT,
QDMA_FUNCTION_DUMP_RX_RATELIMIT,
QDMA_FUNCTION_DUMP_TX_DBA_REPORT,
QDMA_FUNCTION_DUMP_TXQ_CNGST,
QDMA_FUNCTION_CLEAR_CPU_COUNTER,
QDMA_FUNCTION_DUMP_CPU_COUNTER,
QDMA_FUNCTION_DUMP_REGISTER_INFO,
QDMA_FUNCTION_DUMP_DESCRIPTOR_INFO,
QDMA_FUNCTION_DUMP_IRQ_INFO,
QDMA_FUNCTION_DUMP_HWFWD_INFO,
QDMA_FUNCTION_DUMP_INFO_ALL,
QDMA_FUNCTION_READ_VIP_INFO,
QDMA_FUNCTION_MAX_NUM ,
} QDMA_HookFunction_t ;
typedef int (*qdma_callback_recv_packet_t)(void *, uint, struct sk_buff *, uint) ;
typedef int (*qdma_callback_event_handler_t)(QDMA_EventType_t) ;
typedef void (*qdma_callback_int_handler_t)(void) ;
typedef int (*qdma_callback_recycle_packet_t)(struct sk_buff *, uint) ;
typedef struct {
MAC_Type_t macType ;
unchar txRecycleThrh ;
qdma_callback_recv_packet_t cbRecvPkts ;
qdma_callback_event_handler_t cbEventHandler ;
qdma_callback_recycle_packet_t cbRecyclePkts ;
qdma_callback_int_handler_t cbGponMacHandler ;
qdma_callback_int_handler_t cbEponMacHandler ;
qdma_callback_int_handler_t cbXponPhyHandler ;
} QDMA_InitCfg_t ;
typedef struct {
QDMA_Mode_t mode ;
unchar chnThreshold ;
unchar totalThreshold ;
} QDMA_TxBufCtrl_T ;
typedef struct {
unchar channel ;
QDMA_TxQosType_t qosType ;
struct {
unchar weight ;
} queue[8] ;
} QDMA_TxQosScheduler_T ;
typedef struct {
unchar channel ;
ushort cir ;
ushort cbs ;
ushort pir ;
ushort pbs ;
} QDMA_TrtcmParam_T ;
typedef struct {
QDMA_TxQueueThresholdScale_t maxScale ;
QDMA_TxQueueThresholdScale_t minScale ;
} QDMA_TxQueueCongestScale_T ;
typedef struct {
unchar deiScale;
struct {
unchar staticDeiThreshold ;
unchar staticNormalThreshold ;
} queue[8] ;
} QDMA_TxQueueCongestCfg_T ;
typedef struct {
uint normalThrh[8] ;
} QDMA_TxQStaticNormalCfg_T ;
typedef struct {
uint deiThrh[8] ;
} QDMA_TxQStaticDeiCfg_T ;
typedef struct {
unchar cntrIdx ;
unchar cntrEn ;
QDMA_DbgCntrSrc_t cntrSrc ;
unchar isChnlAll ;
unchar isQueueAll ;
unchar isDscpRingAll ;
unchar chnlIdx ;
unchar queueIdx ;
unchar dscpRingIdx ;
uint cntrVal ;
} QDMA_DBG_CNTR_T ;
typedef struct {
ushort txRateMeterTimeSlice ;
QDMA_TxRateMeterTimeDivisor_t txRateMeterTimeDivisor ;
} QDMA_TxRateMeter_T ;
typedef struct {
ushort txRateLimitUnit ;
QDMA_TrtcmScale_t txRateLimitBucketScale ;
} QDMA_TxRateLimitCfg_T ;
typedef struct {
unchar chnlIdx ;
unchar chnlRateLimitEn ;
uint rateLimitValue ;
} QDMA_TxRateLimitSet_T ;
typedef struct {
unchar chnlIdx ;
unchar chnlRateLimitEn ;
uint rateLimitValue ;
uint pbsParamValue ;
} QDMA_TxRateLimitGet_T ;
typedef struct {
unchar tsid ;
ushort cirParamValue ;
ushort cbsParamValue ;
ushort pirParamValue ;
ushort pbsParamValue ;
} QDMA_TxQueueTrtcm_T ;
typedef struct {
unchar channel ;
unchar queue ;
unchar thrIdx ;
ushort value ;
} QDMA_EponQueueThreshold_T ;
typedef struct {
unchar channel ;
uint cirParamValue ;
uint cbsParamValue ;
uint pirParamValue ;
uint pbsParamValue ;
} QDMA_TxDbaReport_T ;
typedef struct {
uint rxRing0LowThrh ;
uint rxRing1LowThrh ;
} QDMA_RxLowThreshold_T ;
typedef struct {
unchar rxRateLimitEn ;
QDMA_RxPktMode_t rxRateLimitPktMode ;
ushort rxRateLimitUnit ;
QDMA_TrtcmScale_t rxRateLimitBucketScale ;
} QDMA_RxRateLimitCfg_T ;
typedef struct {
unchar ringIdx ;
uint rateLimitValue ;
} QDMA_RxRateLimitSet_T ;
typedef struct {
unchar ringIdx ;
uint rateLimitValue ;
uint pbsParamValue ;
} QDMA_RxRateLimitGet_T ;
typedef struct {
unchar cngstDropEn ;
unchar cngstDeiDropEn ;
unchar dynCngstEn ;
unchar dynCngstMaxThrhTx1En ;
unchar dynCngstMinThrhTx1En ;
unchar dynCngstMaxThrhTx0En ;
unchar dynCngstMinThrhTx0En ;
unchar dynCngstModeConfigTrigEn ;
unchar dynCngstModePacketTrigEn ;
unchar dynCngstModeTimeTrigEn ;
ushort dynCngstTicksel ;
QDMA_TxDynCngstDeiThrhScale_t dynCngstDeiThrhScale ;
} QDMA_TxQCngstCfg_T ;
typedef struct {
uint dynCngstTotalMaxThrh ;
uint dynCngstTotalMinThrh ;
} QDMA_TxQDynCngstTotalThrh_T ;
typedef struct {
uint dynCngstChnlMaxThrh ;
uint dynCngstChnlMinThrh ;
} QDMA_TxQDynCngstChnlThrh_T ;
typedef struct {
uint dynCngstQueueMaxThrh ;
uint dynCngstQueueMinThrh ;
} QDMA_TxQDynCngstQueueThrh_T ;
typedef struct {
ushort dynCngstTotalMaxThrh ;
ushort dynCngstTotalMinThrh ;
ushort dynCngstChnlMaxThrh ;
ushort dynCngstChnlMinThrh ;
ushort dynCngstQueueMaxThrh ;
ushort dynCngstQueueMinThrh ;
} QDMA_TxQDynCngstThrh_T ;
typedef struct {
unchar queueIdx ;
ushort staticDeiThrh ;
ushort staticNormalThrh ;
} QDMA_TxQStaticCngstThrh_T ;
typedef struct {
QDMA_TxCngstMode_t txCngstMode ;
QDMA_PeekRateMargin_t peekRateMargin ;
uint peekRateDuration ;
} QDMA_txCngstCfg_t ;
typedef struct {
QDMA_Mode_t peekRateEn ;
QDMA_PeekRateMargin_t peekRateMargin ;
uint peekRateDuration ;
} QDMA_PeekRateCfg_t ;
typedef struct {
QDMA_TxQDynCngstTotalThrh_T *totalThrhPtr;
QDMA_TxQDynCngstChnlThrh_T *chnlThrhPtr;
QDMA_TxQDynCngstQueueThrh_T *queueThrhPtr;
QDMA_PeekRateCfg_t *peekrateCfgPtr;
} QDMA_TxQDynamicCngstInfo_T ;
typedef struct {
QDMA_TxQDynCngstTotalThrh_T *totalThrhPtr;
QDMA_TxQStaticNormalCfg_T *normThrhPtr;
QDMA_TxQStaticDeiCfg_T *deiThrhPtr;
} QDMA_TxQStaticCngstInfo_T ;
typedef struct {
QDMA_TxQCngstQueueMode_t queueMode;
uint queue;
} QDMA_TxQCngstQueueCfg_T ;
typedef struct {
QDMA_TxQCngstChannelMode_t channelMode;
uint channel;
} QDMA_TxQCngstChannelCfg_T ;
typedef struct {
QDMA_Mode_t virChnlEn;
QDMA_VirChnlMapMode_t virChnlMode;
} QDMA_VirtualChannelMode_T ;
typedef struct {
unchar phyChnl ;
QDMA_VirChnlQosType_t qosType ;
struct {
unchar weight ;
} queue[4] ;
} QDMA_VirtualChannelQoS_T ;
typedef struct {
QDMA_CbType_t type;
void *pCbFun;
} QDMA_RegisterCallBack_T ;
typedef struct {
struct sk_buff *skb;
uint msg0;
uint msg1;
struct port_info *pMacInfo;
} QDMA_Transmit_T ;
typedef struct {
QDMA_TxQosWeightType_t weightBase;
QDMA_TxQosWeightScale_t weightScale;
QDMA_TxQosScheduler_T *pTxQos;
} QDMA_TxQos_T;
typedef struct {
QDMA_Mode_t prefecthMode;
QDMA_Mode_t overDragMode;
uint overDragCnt;
} QDMA_PrefetchMode_T;
typedef struct {
uint freeLmgr;
uint usedLmgr;
uint usedBuf;
} QDMA_LmgrStatus_T;
typedef struct {
uint dbgLevel;
uint busyDuration;
uint regOffset;
uint pollingDuration;
uint forceEn;
} QDMA_OldProc_T;
typedef struct {
uint chnlThrh;
uint queueThrh;
} QDMA_MacLimitThrh_T;
struct ECNT_QDMA_Data {
QDMA_HookFunction_t function_id;
int retValue;
union {
QDMA_InitCfg_t *pInitCfg;
QDMA_Mode_t mode;
QDMA_LoopbackMode_t lbMode;
QDMA_DbgCntrChnlGroup_t cntrSrc;
int cnt;
int channel;
uint threshold;
QDMA_RegisterCallBack_T qdma_reg_cb;
QDMA_Transmit_T qdma_transmit;
QDMA_TxQos_T qdma_tx_qos;
QDMA_TxBufCtrl_T *pQdmaTxBufCtrl;
QDMA_PrefetchMode_T *qdma_prefetch;
QDMA_LmgrStatus_T *qdma_lmgr_status;
QDMA_OldProc_T qdma_old_proc;
QDMA_MacLimitThrh_T *qdma_mac_limit_thrh;
QDMA_TxRateMeter_T *txRateMeterPtr;
QDMA_TxRateLimitCfg_T *txRateLimitCfgPtr;
QDMA_TxRateLimitSet_T *txRateLimitSetPtr;
QDMA_TxRateLimitGet_T *txRateLimitGetPtr;
QDMA_TxDbaReport_T *txDbaReportPtr;
QDMA_RxLowThreshold_T *rxLowThresholdPtr;
QDMA_RxPktMode_t pktMode;
QDMA_RxRateLimitCfg_T *rxRateLimitCfgPtr;
QDMA_RxRateLimitSet_T *rxRateLimitSetPtr;
QDMA_RxRateLimitGet_T *rxRateLimitGetPtr;
QDMA_txCngstCfg_t *pTxCngstCfg;
QDMA_TxQDynCngstThrh_T *dynCngstThrhPtr;
QDMA_TxQDynCngstTotalThrh_T *totalThrhPtr;
QDMA_TxQDynCngstChnlThrh_T *chnlThrhPtr;
QDMA_TxQDynCngstQueueThrh_T *queueThrhPtr;
QDMA_PeekRateCfg_t *peekrateCfgPtr;
QDMA_TxQStaticDeiCfg_T *deiThrhPtr;
QDMA_TxQStaticNormalCfg_T *normThrhPtr;
QDMA_TxQDynamicCngstInfo_T *dynCfgPtr;
QDMA_TxQStaticCngstInfo_T *staticCfgPtr;
QDMA_VirtualChannelMode_T *virChnlModePtr;
QDMA_VirtualChannelQoS_T *virChnlQoSPtr;
QDMA_TxQCngstQueueCfg_T *txqCngstQueueCfgPtr;
QDMA_TxQCngstChannelCfg_T *txqCngstChannelCfgPtr;
} qdma_private;
};
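/*
 * Dispatch pattern used by the inline QDMA_API_* wrappers below: each wrapper
 * fills in function_id and the matching qdma_private union member, hands the
 * whole ECNT_QDMA_Data block to __ECNT_HOOK(), and returns either the hook's
 * retValue or ECNT_HOOK_ERROR when no hook handled the call.
 */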
# 47 "include/ecnt_hook/ecnt_hook_qdma.h" 2
# 66 "include/ecnt_hook/ecnt_hook_qdma.h"
static inline __attribute__((always_inline)) int QDMA_API_INIT(unsigned int mainType, QDMA_InitCfg_t *pInitCfg) {
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_INIT;
in_data.qdma_private.pInitCfg = pInitCfg;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_DEINIT(unsigned int mainType){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_DEINIT;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_TX_DMA_MODE(unsigned int mainType, QDMA_Mode_t txMode){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_TX_DMA_MODE;
in_data.qdma_private.mode = txMode ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_RX_DMA_MODE(unsigned int mainType, QDMA_Mode_t rxMode){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_RX_DMA_MODE;
in_data.qdma_private.mode = rxMode ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_LOOPBACK_MODE(unsigned int mainType, QDMA_LoopbackMode_t lbMode){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_LOOPBACK_MODE;
in_data.qdma_private.lbMode = lbMode ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_REGISTER_HOOKFUNC(unsigned int mainType, QDMA_CbType_t type, void *pCbFun){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_REGISTER;
in_data.qdma_private.qdma_reg_cb.type = type ;
in_data.qdma_private.qdma_reg_cb.pCbFun = pCbFun ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_UNREGISTER_HOOKFUNC(unsigned int mainType, QDMA_CbType_t type){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_UNREGISTER;
in_data.qdma_private.qdma_reg_cb.type = type ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_ENABLE_RXPKT_INT(unsigned int mainType){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_ENABLE_RXPKT_INT;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_DISABLE_RXPKT_INT(unsigned int mainType){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_DISABLE_RXPKT_INT;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_RECEIVE_PACKETS(unsigned int mainType, int maxPkts){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_RECEIVE_PACKETS;
in_data.qdma_private.cnt = maxPkts ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_TRANSMIT_PACKETS(unsigned int mainType, struct sk_buff *skb, uint msg0, uint msg1, struct port_info *pMacInfo){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_TRANSMIT_PACKETS;
in_data.qdma_private.qdma_transmit.skb = skb ;
in_data.qdma_private.qdma_transmit.msg0 = msg0 ;
in_data.qdma_private.qdma_transmit.msg1 = msg1 ;
in_data.qdma_private.qdma_transmit.pMacInfo = pMacInfo ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TX_QOS_WEIGHT(unsigned int mainType, QDMA_TxQosWeightType_t weightBase, QDMA_TxQosWeightScale_t weightScale){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TX_QOS_WEIGHT;
in_data.qdma_private.qdma_tx_qos.weightBase = weightBase ;
in_data.qdma_private.qdma_tx_qos.weightScale = weightScale ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_TX_QOS_WEIGHT(unsigned int mainType, QDMA_TxQosWeightType_t *pWeightBase, QDMA_TxQosWeightScale_t *pWeightScale){
 struct ECNT_QDMA_Data in_data;
 int ret=0;
 in_data.function_id = QDMA_FUNCTION_GET_TX_QOS_WEIGHT;
 ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
 if(ret != ECNT_HOOK_ERROR){
  /* Copy the weight settings back only when a hook actually handled the call. */
  *pWeightBase = in_data.qdma_private.qdma_tx_qos.weightBase ;
  *pWeightScale = in_data.qdma_private.qdma_tx_qos.weightScale ;
  return in_data.retValue;
 }
 else
  return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TX_QOS(unsigned int mainType, QDMA_TxQosScheduler_T *pTxQos){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TX_QOS;
in_data.qdma_private.qdma_tx_qos.pTxQos = pTxQos ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_TX_QOS(unsigned int mainType, QDMA_TxQosScheduler_T *pTxQos){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_TX_QOS;
in_data.qdma_private.qdma_tx_qos.pTxQos = pTxQos ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_MAC_LIMIT_THRESHOLD(unsigned int mainType, uint chnlThrh, uint queueThrh){
struct ECNT_QDMA_Data in_data;
QDMA_MacLimitThrh_T mac_limit_thrh;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_MAC_LIMIT_THRESHOLD;
mac_limit_thrh.chnlThrh = chnlThrh ;
mac_limit_thrh.queueThrh = queueThrh ;
in_data.qdma_private.qdma_mac_limit_thrh = &mac_limit_thrh ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TXBUF_THRESHOLD(unsigned int mainType, QDMA_TxBufCtrl_T *pTxBufCtrl){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TXBUF_THRESHOLD;
in_data.qdma_private.pQdmaTxBufCtrl = pTxBufCtrl ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_TXBUF_THRESHOLD(unsigned int mainType, QDMA_TxBufCtrl_T *pTxBufCtrl){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_TXBUF_THRESHOLD;
in_data.qdma_private.pQdmaTxBufCtrl = pTxBufCtrl ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_PREFETCH_MODE(unsigned int mainType, QDMA_Mode_t prefecthMode, QDMA_Mode_t overDragMode, uint overDragCnt){
struct ECNT_QDMA_Data in_data;
QDMA_PrefetchMode_T qdma_prefetch;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_PREFETCH_MODE;
qdma_prefetch.prefecthMode = prefecthMode ;
qdma_prefetch.overDragMode = overDragMode ;
qdma_prefetch.overDragCnt = overDragCnt ;
in_data.qdma_private.qdma_prefetch = &qdma_prefetch ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_PKTSIZE_OVERHEAD_EN(unsigned int mainType, QDMA_Mode_t mode){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_PKTSIZE_OVERHEAD_EN;
in_data.qdma_private.mode = mode ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) QDMA_Mode_t QDMA_API_GET_PKTSIZE_OVERHEAD_EN(unsigned int mainType){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_PKTSIZE_OVERHEAD_EN;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_PKTSIZE_OVERHEAD_VALUE(unsigned int mainType, int cnt){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_PKTSIZE_OVERHEAD_VALUE;
in_data.qdma_private.cnt = cnt ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) uint QDMA_API_GET_PKTSIZE_OVERHEAD_VALUE(unsigned int mainType){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_PKTSIZE_OVERHEAD_VALUE;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_LMGR_LOW_THRESHOLD(unsigned int mainType, uint lowThrh){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_LMGR_LOW_THRESHOLD;
in_data.qdma_private.threshold = lowThrh ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) uint QDMA_API_GET_LMGR_LOW_THRESHOLD(unsigned int mainType){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_LMGR_LOW_THRESHOLD;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TX_DROP_EN(unsigned int mainType, QDMA_Mode_t mode){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TX_DROP_EN;
in_data.qdma_private.mode = mode ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_TX_DROP_EN(unsigned int mainType){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_TX_DROP_EN;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TX_RATEMETER(unsigned int mainType, QDMA_TxRateMeter_T *txRateMeterPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TX_RATEMETER;
in_data.qdma_private.txRateMeterPtr = txRateMeterPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_TX_RATEMETER(unsigned int mainType, QDMA_TxRateMeter_T *txRateMeterPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_TX_RATEMETER;
in_data.qdma_private.txRateMeterPtr = txRateMeterPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_ENABLE_TX_RATELIMIT(unsigned int mainType){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_ENABLE_TX_RATELIMIT;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
# 489 "include/ecnt_hook/ecnt_hook_qdma.h"
static inline __attribute__((always_inline)) int QDMA_API_SET_TX_RATELIMIT(unsigned int mainType, QDMA_TxRateLimitSet_T *txRateLimitSetPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TX_RATELIMIT;
in_data.qdma_private.txRateLimitSetPtr = txRateLimitSetPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_TX_RATELIMIT(unsigned int mainType, QDMA_TxRateLimitGet_T *txRateLimitGetPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_TX_RATELIMIT;
in_data.qdma_private.txRateLimitGetPtr = txRateLimitGetPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TX_DBA_REPORT(unsigned int mainType, QDMA_TxDbaReport_T *txDbaReportPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TX_DBAREPORT;
in_data.qdma_private.txDbaReportPtr = txDbaReportPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_TX_DBA_REPORT(unsigned int mainType, QDMA_TxDbaReport_T *txDbaReportPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_TX_DBAREPORT;
in_data.qdma_private.txDbaReportPtr = txDbaReportPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_RX_PROTECT_EN(unsigned int mainType, QDMA_Mode_t mode){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_RX_PROTECT_EN;
in_data.qdma_private.mode = mode ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_RX_PROTECT_EN(unsigned int mainType){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_RX_PROTECT_EN;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_RX_LOW_THRESHOLD(unsigned int mainType, QDMA_RxLowThreshold_T *rxLowThrhPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_RX_LOW_THRESHOLD;
in_data.qdma_private.rxLowThresholdPtr = rxLowThrhPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_RX_LOW_THRESHOLD(unsigned int mainType, QDMA_RxLowThreshold_T *rxLowThrhPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_RX_LOW_THRESHOLD;
in_data.qdma_private.rxLowThresholdPtr = rxLowThrhPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_RX_RATELIMIT_EN(unsigned int mainType, QDMA_Mode_t mode){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_RX_RATELIMIT_EN;
in_data.qdma_private.mode = mode ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_RX_RATELIMIT_PKT_MODE(unsigned int mainType, QDMA_RxPktMode_t pktMode )
{
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_RX_RATELIMIT_PKT_MODE;
in_data.qdma_private.pktMode = pktMode ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_RX_RATELIMIT_CFG(unsigned int mainType, QDMA_RxRateLimitCfg_T *rxRateLimitCfgPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_RX_RATELIMIT_CFG;
in_data.qdma_private.rxRateLimitCfgPtr = rxRateLimitCfgPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_RX_RATELIMIT(unsigned int mainType, QDMA_RxRateLimitSet_T *rxRateLimitSetPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_RX_RATELIMIT;
in_data.qdma_private.rxRateLimitSetPtr = rxRateLimitSetPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_RX_RATELIMIT(unsigned int mainType, QDMA_RxRateLimitGet_T *rxRateLimitGetPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_RX_RATELIMIT;
in_data.qdma_private.rxRateLimitGetPtr = rxRateLimitGetPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TXQ_DEI_DROP_MODE(unsigned int mainType, QDMA_Mode_t deiDropMode){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TXQ_DEI_DROP_MODE;
in_data.qdma_private.mode = deiDropMode ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) QDMA_Mode_t QDMA_API_GET_TXQ_DEI_DROP_MODE(unsigned int mainType){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_TXQ_DEI_DROP_MODE;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TXQ_CNGST_MODE(unsigned int mainType, QDMA_Mode_t dynCngstEn){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TXQ_CNGST_MODE;
in_data.qdma_private.mode = dynCngstEn ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) QDMA_Mode_t QDMA_API_GET_TXQ_CNGST_MODE(unsigned int mainType){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_MODE;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TXQ_DEI_THRESHOLD_SCALE(unsigned int mainType, QDMA_TxDynCngstDeiThrhScale_t deiThrhScale){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TXQ_DEI_THRH_SCALE;
in_data.qdma_private.threshold = deiThrhScale ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) QDMA_TxDynCngstDeiThrhScale_t QDMA_API_GET_TXQ_DEI_THRESHOLD_SCALE(unsigned int mainType){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_TXQ_DEI_THRH_SCALE;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TXQ_CNGST_AUTO_CONFIG(unsigned int mainType, QDMA_txCngstCfg_t *pTxCngstCfg){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TXQ_CNGST_AUTO_CONFIG;
in_data.qdma_private.pTxCngstCfg = pTxCngstCfg ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_TXQ_CNGST_AUTO_CONFIG(unsigned int mainType, QDMA_txCngstCfg_t *pTxCngstCfg){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_AUTO_CONFIG;
in_data.qdma_private.pTxCngstCfg = pTxCngstCfg ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TXQ_CNGST_DYNAMIC_THRESHOLD(unsigned int mainType, QDMA_TxQDynCngstThrh_T *txqCngstThrhPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TXQ_CNGST_DYNAMIC_THRESHOLD;
in_data.qdma_private.dynCngstThrhPtr = txqCngstThrhPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_TXQ_CNGST_DYNAMIC_THRESHOLD(unsigned int mainType, QDMA_TxQDynCngstThrh_T *txqCngstThrhPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_DYNAMIC_THRESHOLD;
in_data.qdma_private.dynCngstThrhPtr = txqCngstThrhPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TXQ_CNGST_TOTAL_THRESHOLD(unsigned int mainType, QDMA_TxQDynCngstTotalThrh_T *totalThrhPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TXQ_CNGST_TOTAL_THRESHOLD;
in_data.qdma_private.totalThrhPtr = totalThrhPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_TXQ_CNGST_TOTAL_THRESHOLD(unsigned int mainType, QDMA_TxQDynCngstTotalThrh_T *totalThrhPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_TOTAL_THRESHOLD;
in_data.qdma_private.totalThrhPtr = totalThrhPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TXQ_CNGST_CHANNEL_THRESHOLD(unsigned int mainType, QDMA_TxQDynCngstChnlThrh_T *chnlThrhPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TXQ_CNGST_CHANNEL_THRESHOLD;
in_data.qdma_private.chnlThrhPtr = chnlThrhPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_TXQ_CNGST_CHANNEL_THRESHOLD(unsigned int mainType, QDMA_TxQDynCngstChnlThrh_T *chnlThrhPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_CHANNEL_THRESHOLD;
in_data.qdma_private.chnlThrhPtr = chnlThrhPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TXQ_CNGST_QUEUE_THRESHOLD(unsigned int mainType, QDMA_TxQDynCngstQueueThrh_T *queueThrhPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TXQ_CNGST_QUEUE_THRESHOLD;
in_data.qdma_private.queueThrhPtr = queueThrhPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_TXQ_CNGST_QUEUE_THRESHOLD(unsigned int mainType, QDMA_TxQDynCngstQueueThrh_T *queueThrhPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_QUEUE_THRESHOLD;
in_data.qdma_private.queueThrhPtr = queueThrhPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TXQ_CNGST_QUEUE_NONBLOCKING(unsigned int mainType, QDMA_TxQCngstQueueCfg_T *txqCngstQueueCfgPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TXQ_CNGST_QUEUE_NONBLOCKING;
in_data.qdma_private.txqCngstQueueCfgPtr = txqCngstQueueCfgPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_TXQ_CNGST_QUEUE_NONBLOCKING(unsigned int mainType, QDMA_TxQCngstQueueCfg_T *txqCngstQueueCfgPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_QUEUE_NONBLOCKING;
in_data.qdma_private.txqCngstQueueCfgPtr = txqCngstQueueCfgPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TXQ_CNGST_CHANNEL_NONBLOCKING(unsigned int mainType, QDMA_TxQCngstChannelCfg_T *txqCngstChannelCfgPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TXQ_CNGST_CHANNEL_NONBLOCKING;
in_data.qdma_private.txqCngstChannelCfgPtr = txqCngstChannelCfgPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_TXQ_CNGST_CHANNEL_NONBLOCKING(unsigned int mainType, QDMA_TxQCngstChannelCfg_T *txqCngstChannelCfgPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_CHANNEL_NONBLOCKING;
in_data.qdma_private.txqCngstChannelCfgPtr = txqCngstChannelCfgPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TXQ_PEEKRATE_PARAMS(unsigned int mainType, QDMA_PeekRateCfg_t *peekrateCfgPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_PEEKRATE_PARAMS;
in_data.qdma_private.peekrateCfgPtr = peekrateCfgPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_TXQ_PEEKRATE_PARAMS(unsigned int mainType, QDMA_PeekRateCfg_t *peekrateCfgPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_PEEKRATE_PARAMS;
in_data.qdma_private.peekrateCfgPtr = peekrateCfgPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TXQ_STATIC_QUEUE_NORMAL_THRESHOLD(unsigned int mainType, QDMA_TxQStaticNormalCfg_T *normThrhPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TXQ_STATIC_QUEUE_NORMAL_THRESHOLD;
in_data.qdma_private.normThrhPtr = normThrhPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_TXQ_STATIC_QUEUE_DEI_THRESHOLD(unsigned int mainType, QDMA_TxQStaticDeiCfg_T *deiThrhPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_TXQ_STATIC_QUEUE_DEI_THRESHOLD;
in_data.qdma_private.deiThrhPtr = deiThrhPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_TXQ_CNGST_DYNAMIC_INFO(unsigned int mainType, QDMA_TxQDynamicCngstInfo_T *allThrhPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_DYNAMIC_INFO;
in_data.qdma_private.dynCfgPtr = allThrhPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_TXQ_CNGST_STATIC_INFO(unsigned int mainType, QDMA_TxQStaticCngstInfo_T *staticInfoPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_TXQ_CNGST_STATIC_INFO;
in_data.qdma_private.staticCfgPtr = staticInfoPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_VIRTUAL_CHANNEL_MODE(unsigned int mainType, QDMA_VirtualChannelMode_T *virChnlModePtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_VIRTUAL_CHANNEL_MODE;
in_data.qdma_private.virChnlModePtr = virChnlModePtr;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_VIRTUAL_CHANNEL_MODE(unsigned int mainType, QDMA_VirtualChannelMode_T *virChnlModePtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_VIRTUAL_CHANNEL_MODE;
in_data.qdma_private.virChnlModePtr = virChnlModePtr;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_SET_VIRTUAL_CHANNEL_QOS(unsigned int mainType, QDMA_VirtualChannelQoS_T *virChnlQoSPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_SET_VIRTUAL_CHANNEL_QOS;
in_data.qdma_private.virChnlQoSPtr = virChnlQoSPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_GET_VIRTUAL_CHANNEL_QOS(unsigned int mainType, QDMA_VirtualChannelQoS_T *virChnlQoSPtr){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_GET_VIRTUAL_CHANNEL_QOS;
in_data.qdma_private.virChnlQoSPtr = virChnlQoSPtr ;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int QDMA_API_READ_VIP_INFO(unsigned int mainType){
struct ECNT_QDMA_Data in_data;
int ret=0;
in_data.function_id = QDMA_FUNCTION_READ_VIP_INFO;
ret = __ECNT_HOOK(mainType, 0, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
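/*
 * Illustrative sketch only (not part of the original headers): configuring one
 * WAN channel for strict-priority scheduling through the wrappers above.  The
 * use of ECNT_QDMA_WAN as maintype and the placeholder weight of 1 are
 * assumptions made for this example.
 */
static int example_set_wan_channel_sp(unchar channel)
{
 QDMA_TxQosScheduler_T txQos;
 int i;

 memset(&txQos, 0, sizeof(txQos));
 txQos.channel = channel;
 txQos.qosType = QDMA_TXQOS_TYPE_SP; /* strict priority; the weights below are placeholders */
 for (i = 0; i < 8; i++)
  txQos.queue[i].weight = 1;
 return QDMA_API_SET_TX_QOS(ECNT_QDMA_WAN, &txQos);
}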
# 78 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_bmgr.h" 1
# 47 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_bmgr.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/r4kcache.h" 1
# 15 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/r4kcache.h"
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/asm.h" 1
# 16 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/r4kcache.h" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/cacheops.h" 1
# 17 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/r4kcache.h" 2
# 53 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/r4kcache.h"
extern int mt_protiflush;
extern int mt_protdflush;
extern void mt_cflush_lockdown(void);
extern void mt_cflush_release(void);
# 143 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/r4kcache.h"
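/*
 * The flush_* / invalidate_* helpers below are the preprocessor-expanded forms
 * of the r4kcache.h cache-op macros: the I-cache and D-cache variants repeat a
 * single MIPS "cache" instruction mt_n_iflushes / mt_n_dflushes times inside
 * the MT flush-protection prologue/epilogue (mt_protiflush / mt_protdflush),
 * while the secondary-cache variants issue one cache op directly.
 */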
static inline __attribute__((always_inline)) void flush_icache_line_indexed(unsigned long addr)
{
unsigned long redundance; extern int mt_n_iflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protiflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_iflushes; redundance++) {
__asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3\n\t \n" " cache %0, %1 \n" " .set pop \n" : : "i" (0x00), "R" (*(unsigned char *)(addr)));
if(mt_protiflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } }
}
static inline __attribute__((always_inline)) void flush_dcache_line_indexed(unsigned long addr)
{
unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
__asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3\n\t \n" " cache %0, %1 \n" " .set pop \n" : : "i" (0x01), "R" (*(unsigned char *)(addr)));
if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } }
}
static inline __attribute__((always_inline)) void flush_scache_line_indexed(unsigned long addr)
{
__asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3\n\t \n" " cache %0, %1 \n" " .set pop \n" : : "i" (0x03), "R" (*(unsigned char *)(addr)));
}
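/*
 * The *_indexed helpers above issue index-type CACHE ops: the address selects
 * a cache set and way directly, so the line is written back or invalidated
 * whether or not the given address is currently cached.  The plain
 * flush_*_line / invalidate_*_line helpers that follow use hit-type ops,
 * which only act when the supplied virtual address actually hits in the
 * cache.
 */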
static inline __attribute__((always_inline)) void flush_icache_line(unsigned long addr)
{
unsigned long redundance; extern int mt_n_iflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protiflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_iflushes; redundance++) {
__asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3\n\t \n" " cache %0, %1 \n" " .set pop \n" : : "i" (0x10), "R" (*(unsigned char *)(addr)));
if(mt_protiflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } }
}
static inline __attribute__((always_inline)) void flush_dcache_line(unsigned long addr)
{
unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
__asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3\n\t \n" " cache %0, %1 \n" " .set pop \n" : : "i" (0x15), "R" (*(unsigned char *)(addr)));
if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } }
}
static inline __attribute__((always_inline)) void invalidate_dcache_line(unsigned long addr)
{
unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
__asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3\n\t \n" " cache %0, %1 \n" " .set pop \n" : : "i" (0x11), "R" (*(unsigned char *)(addr)));
if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } }
}
static inline __attribute__((always_inline)) void invalidate_scache_line(unsigned long addr)
{
__asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3\n\t \n" " cache %0, %1 \n" " .set pop \n" : : "i" (0x13), "R" (*(unsigned char *)(addr)));
}
static inline __attribute__((always_inline)) void flush_scache_line(unsigned long addr)
{
__asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3\n\t \n" " cache %0, %1 \n" " .set pop \n" : : "i" (0x17), "R" (*(unsigned char *)(addr)));
}
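/*
 * The "i" immediates in the CACHE instructions above are the standard
 * asm/cacheops.h opcodes pulled in at the top of r4kcache.h:
 *   0x00 Index_Invalidate_I      0x01 Index_Writeback_Inv_D
 *   0x03 Index_Writeback_Inv_SD  0x10 Hit_Invalidate_I
 *   0x11 Hit_Invalidate_D        0x13 Hit_Invalidate_SD
 *   0x15 Hit_Writeback_Inv_D     0x17 Hit_Writeback_Inv_SD
 * (0x16, used by invalidate_tcache_page below, is Page_Invalidate_T.)
 */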
# 209 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/r4kcache.h"
static inline __attribute__((always_inline)) void protected_flush_icache_line(unsigned long addr)
{
__asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" "1: cache %0, (%1) \n" "2: .set pop \n" " .section __ex_table,\"a\" \n" " "".word"" 1b, 2b \n" " .previous" : : "i" (0x10), "r" (addr));
}
static inline __attribute__((always_inline)) void protected_writeback_dcache_line(unsigned long addr)
{
__asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" "1: cache %0, (%1) \n" "2: .set pop \n" " .section __ex_table,\"a\" \n" " "".word"" 1b, 2b \n" " .previous" : : "i" (0x15), "r" (addr));
}
static inline __attribute__((always_inline)) void protected_writeback_scache_line(unsigned long addr)
{
__asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" "1: cache %0, (%1) \n" "2: .set pop \n" " .section __ex_table,\"a\" \n" " "".word"" 1b, 2b \n" " .previous" : : "i" (0x17), "r" (addr));
}
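/*
 * The protected_* variants above label the CACHE instruction (1:) and record
 * a fixup entry in the __ex_table section pointing at the label after it
 * (2:), so an address or TLB exception raised by the CACHE op is handled by
 * the kernel's exception-fixup path and simply skipped instead of faulting
 * the kernel.  They are intended for addresses that may legitimately not be
 * mapped at the time of the flush.
 */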
static inline __attribute__((always_inline)) void invalidate_tcache_page(unsigned long addr)
{
__asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3\n\t \n" " cache %0, %1 \n" " .set pop \n" : : "i" (0x16), "R" (*(unsigned char *)(addr)));
}
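/*
 * Everything below is the machine expansion of the blast-cache macro family
 * from r4kcache.h (upstream calls it __BUILD_BLAST_CACHE): for each line size
 * (the 16/32/64/128 suffix, in bytes), blast_<cache><lsize>() wipes the whole
 * cache way-by-way with index ops, blast_<cache><lsize>_page() cleans one
 * 4 KiB page with hit ops, and blast_<cache><lsize>_page_indexed() does the
 * same page by index.  Each unrolled asm block covers 32 cache lines per
 * iteration.
 *
 * Minimal sketch of how such line-granular helpers are typically used to
 * write a buffer back before DMA.  The 32-byte line size and the function
 * name are assumptions for illustration; real callers take the line size
 * from the per-CPU dcache descriptor instead of hard-coding it.
 */
static inline void example_dma_writeback(unsigned long addr, unsigned long size)
{
 unsigned long end = addr + size;
 for (addr &= ~31UL; addr < end; addr += 32) /* assumed 32-byte D-cache lines */
  flush_dcache_line(addr); /* Hit_Writeback_Inv_D on each line */
}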
# 396 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/r4kcache.h"
static inline __attribute__((always_inline)) void blast_dcache16(void) { unsigned long start = 0x80000000; unsigned long end = start + cpu_data[(__current_thread_info->cpu)].dcache.waysize; unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].dcache.ways << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws, addr; unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 16 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x01));; if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } } static inline __attribute__((always_inline)) void blast_dcache16_page(unsigned long page) { unsigned long start = page; unsigned long end = page + ((1UL) << 12); unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) { do { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" " cache %1, 0x1e0(%0); 
cache %1, 0x1f0(%0) \n" " .set pop \n" : : "r" (start), "i" (0x15));; start += 16 * 32; } while (start < end); if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } } static inline __attribute__((always_inline)) void blast_dcache16_page_indexed(unsigned long page) { unsigned long indexmask = cpu_data[(__current_thread_info->cpu)].dcache.waysize - 1; unsigned long start = 0x80000000 + (page & indexmask); unsigned long end = start + ((1UL) << 12); unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].dcache.ways << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws, addr; unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 16 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x01));; if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } }
static inline __attribute__((always_inline)) void blast_icache16(void) { unsigned long start = 0x80000000; unsigned long end = start + cpu_data[(__current_thread_info->cpu)].icache.waysize; unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].icache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].icache.ways << cpu_data[(__current_thread_info->cpu)].icache.waybit; unsigned long ws, addr; unsigned long redundance; extern int mt_n_iflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protiflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_iflushes; redundance++) { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 16 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x00));; if(mt_protiflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } } static inline __attribute__((always_inline)) void blast_icache16_page(unsigned long page) { unsigned long start = page; unsigned long end = page + ((1UL) << 12); unsigned long redundance; extern int mt_n_iflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protiflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_iflushes; redundance++) { do { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" " cache %1, 0x1e0(%0); 
cache %1, 0x1f0(%0) \n" " .set pop \n" : : "r" (start), "i" (0x10));; start += 16 * 32; } while (start < end); if(mt_protiflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } } static inline __attribute__((always_inline)) void blast_icache16_page_indexed(unsigned long page) { unsigned long indexmask = cpu_data[(__current_thread_info->cpu)].icache.waysize - 1; unsigned long start = 0x80000000 + (page & indexmask); unsigned long end = start + ((1UL) << 12); unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].icache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].icache.ways << cpu_data[(__current_thread_info->cpu)].icache.waybit; unsigned long ws, addr; unsigned long redundance; extern int mt_n_iflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protiflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_iflushes; redundance++) { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 16 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x00));; if(mt_protiflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } }
static inline __attribute__((always_inline)) void blast_scache16(void) { unsigned long start = 0x80000000; unsigned long end = start + cpu_data[(__current_thread_info->cpu)].scache.waysize; unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].scache.ways << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws, addr; { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 16 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x03));; } } static inline __attribute__((always_inline)) void blast_scache16_page(unsigned long page) { unsigned long start = page; unsigned long end = page + ((1UL) << 12); { do { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" " .set pop \n" : : "r" (start), "i" (0x17));; start += 16 * 32; } while (start < end); } } static inline __attribute__((always_inline)) void blast_scache16_page_indexed(unsigned long page) { unsigned long indexmask = cpu_data[(__current_thread_info->cpu)].scache.waysize - 1; unsigned long start = 0x80000000 + (page & indexmask); unsigned long end = start + ((1UL) << 12); unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].scache.ways << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws, addr; { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 16 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" " 
cache %1, 0x100(%0); cache %1, 0x110(%0) \n" " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x03));; } }
static inline __attribute__((always_inline)) void blast_dcache32(void) { unsigned long start = 0x80000000; unsigned long end = start + cpu_data[(__current_thread_info->cpu)].dcache.waysize; unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].dcache.ways << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws, addr; unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 32 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x01));; if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } } static inline __attribute__((always_inline)) void blast_dcache32_page(unsigned long page) { unsigned long start = page; unsigned long end = page + ((1UL) << 12); unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) { do { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" " cache %1, 0x3c0(%0); 
cache %1, 0x3e0(%0) \n" " .set pop \n" : : "r" (start), "i" (0x15));; start += 32 * 32; } while (start < end); if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } } static inline __attribute__((always_inline)) void blast_dcache32_page_indexed(unsigned long page) { unsigned long indexmask = cpu_data[(__current_thread_info->cpu)].dcache.waysize - 1; unsigned long start = 0x80000000 + (page & indexmask); unsigned long end = start + ((1UL) << 12); unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].dcache.ways << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws, addr; unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 32 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x01));; if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } }
static inline __attribute__((always_inline)) void blast_icache32(void) { unsigned long start = 0x80000000; unsigned long end = start + cpu_data[(__current_thread_info->cpu)].icache.waysize; unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].icache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].icache.ways << cpu_data[(__current_thread_info->cpu)].icache.waybit; unsigned long ws, addr; unsigned long redundance; extern int mt_n_iflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protiflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_iflushes; redundance++) { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 32 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x00));; if(mt_protiflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } } static inline __attribute__((always_inline)) void blast_icache32_page(unsigned long page) { unsigned long start = page; unsigned long end = page + ((1UL) << 12); unsigned long redundance; extern int mt_n_iflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protiflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_iflushes; redundance++) { do { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" " cache %1, 0x3c0(%0); 
cache %1, 0x3e0(%0) \n" " .set pop \n" : : "r" (start), "i" (0x10));; start += 32 * 32; } while (start < end); if(mt_protiflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } } static inline __attribute__((always_inline)) void blast_icache32_page_indexed(unsigned long page) { unsigned long indexmask = cpu_data[(__current_thread_info->cpu)].icache.waysize - 1; unsigned long start = 0x80000000 + (page & indexmask); unsigned long end = start + ((1UL) << 12); unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].icache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].icache.ways << cpu_data[(__current_thread_info->cpu)].icache.waybit; unsigned long ws, addr; unsigned long redundance; extern int mt_n_iflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protiflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_iflushes; redundance++) { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 32 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x00));; if(mt_protiflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } }
static inline __attribute__((always_inline)) void blast_scache32(void) { unsigned long start = 0x80000000; unsigned long end = start + cpu_data[(__current_thread_info->cpu)].scache.waysize; unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].scache.ways << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws, addr; { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 32 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x03));; } } static inline __attribute__((always_inline)) void blast_scache32_page(unsigned long page) { unsigned long start = page; unsigned long end = page + ((1UL) << 12); { do { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" " .set pop \n" : : "r" (start), "i" (0x17));; start += 32 * 32; } while (start < end); } } static inline __attribute__((always_inline)) void blast_scache32_page_indexed(unsigned long page) { unsigned long indexmask = cpu_data[(__current_thread_info->cpu)].scache.waysize - 1; unsigned long start = 0x80000000 + (page & indexmask); unsigned long end = start + ((1UL) << 12); unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].scache.ways << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws, addr; { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 32 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" " 
cache %1, 0x200(%0); cache %1, 0x220(%0) \n" " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x03));; } }
static inline __attribute__((always_inline)) void blast_dcache64(void) { unsigned long start = 0x80000000; unsigned long end = start + cpu_data[(__current_thread_info->cpu)].dcache.waysize; unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].dcache.ways << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws, addr; unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 64 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" " cache %1, 0x400(%0); cache %1, 0x440(%0) \n" " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" " cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x01));; if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } } static inline __attribute__((always_inline)) void blast_dcache64_page(unsigned long page) { unsigned long start = page; unsigned long end = page + ((1UL) << 12); unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) { do { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" " cache %1, 0x400(%0); cache %1, 0x440(%0) \n" " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" " cache %1, 0x780(%0); 
cache %1, 0x7c0(%0) \n" " .set pop \n" : : "r" (start), "i" (0x15));; start += 64 * 32; } while (start < end); if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } } static inline __attribute__((always_inline)) void blast_dcache64_page_indexed(unsigned long page) { unsigned long indexmask = cpu_data[(__current_thread_info->cpu)].dcache.waysize - 1; unsigned long start = 0x80000000 + (page & indexmask); unsigned long end = start + ((1UL) << 12); unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].dcache.ways << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws, addr; unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 64 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" " cache %1, 0x400(%0); cache %1, 0x440(%0) \n" " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" " cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x01));; if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } }
static inline __attribute__((always_inline)) void blast_icache64(void) { unsigned long start = 0x80000000; unsigned long end = start + cpu_data[(__current_thread_info->cpu)].icache.waysize; unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].icache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].icache.ways << cpu_data[(__current_thread_info->cpu)].icache.waybit; unsigned long ws, addr; unsigned long redundance; extern int mt_n_iflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protiflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_iflushes; redundance++) { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 64 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" " cache %1, 0x400(%0); cache %1, 0x440(%0) \n" " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" " cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x00));; if(mt_protiflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } } static inline __attribute__((always_inline)) void blast_icache64_page(unsigned long page) { unsigned long start = page; unsigned long end = page + ((1UL) << 12); unsigned long redundance; extern int mt_n_iflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protiflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_iflushes; redundance++) { do { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" " cache %1, 0x400(%0); cache %1, 0x440(%0) \n" " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" " cache %1, 0x780(%0); 
cache %1, 0x7c0(%0) \n" " .set pop \n" : : "r" (start), "i" (0x10));; start += 64 * 32; } while (start < end); if(mt_protiflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } } static inline __attribute__((always_inline)) void blast_icache64_page_indexed(unsigned long page) { unsigned long indexmask = cpu_data[(__current_thread_info->cpu)].icache.waysize - 1; unsigned long start = 0x80000000 + (page & indexmask); unsigned long end = start + ((1UL) << 12); unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].icache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].icache.ways << cpu_data[(__current_thread_info->cpu)].icache.waybit; unsigned long ws, addr; unsigned long redundance; extern int mt_n_iflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protiflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_iflushes; redundance++) { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 64 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" " cache %1, 0x400(%0); cache %1, 0x440(%0) \n" " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" " cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x00));; if(mt_protiflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } }
static inline __attribute__((always_inline)) void blast_scache64(void) { unsigned long start = 0x80000000; unsigned long end = start + cpu_data[(__current_thread_info->cpu)].scache.waysize; unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].scache.ways << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws, addr; { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 64 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" " cache %1, 0x400(%0); cache %1, 0x440(%0) \n" " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" " cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x03));; } } static inline __attribute__((always_inline)) void blast_scache64_page(unsigned long page) { unsigned long start = page; unsigned long end = page + ((1UL) << 12); { do { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" " cache %1, 0x400(%0); cache %1, 0x440(%0) \n" " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" " cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" " .set pop \n" : : "r" (start), "i" (0x17));; start += 64 * 32; } while (start < end); } } static inline __attribute__((always_inline)) void blast_scache64_page_indexed(unsigned long page) { unsigned long indexmask = cpu_data[(__current_thread_info->cpu)].scache.waysize - 1; unsigned long start = 0x80000000 + (page & indexmask); unsigned long end = start + ((1UL) << 12); unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].scache.ways << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws, addr; { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 64 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" " 
cache %1, 0x400(%0); cache %1, 0x440(%0) \n" " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" " cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x03));; } }
static inline __attribute__((always_inline)) void blast_scache128(void) { unsigned long start = 0x80000000; unsigned long end = start + cpu_data[(__current_thread_info->cpu)].scache.waysize; unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].scache.ways << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws, addr; { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 128 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x080(%0) \n" " cache %1, 0x100(%0); cache %1, 0x180(%0) \n" " cache %1, 0x200(%0); cache %1, 0x280(%0) \n" " cache %1, 0x300(%0); cache %1, 0x380(%0) \n" " cache %1, 0x400(%0); cache %1, 0x480(%0) \n" " cache %1, 0x500(%0); cache %1, 0x580(%0) \n" " cache %1, 0x600(%0); cache %1, 0x680(%0) \n" " cache %1, 0x700(%0); cache %1, 0x780(%0) \n" " cache %1, 0x800(%0); cache %1, 0x880(%0) \n" " cache %1, 0x900(%0); cache %1, 0x980(%0) \n" " cache %1, 0xa00(%0); cache %1, 0xa80(%0) \n" " cache %1, 0xb00(%0); cache %1, 0xb80(%0) \n" " cache %1, 0xc00(%0); cache %1, 0xc80(%0) \n" " cache %1, 0xd00(%0); cache %1, 0xd80(%0) \n" " cache %1, 0xe00(%0); cache %1, 0xe80(%0) \n" " cache %1, 0xf00(%0); cache %1, 0xf80(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x03));; } } static inline __attribute__((always_inline)) void blast_scache128_page(unsigned long page) { unsigned long start = page; unsigned long end = page + ((1UL) << 12); { do { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x080(%0) \n" " cache %1, 0x100(%0); cache %1, 0x180(%0) \n" " cache %1, 0x200(%0); cache %1, 0x280(%0) \n" " cache %1, 0x300(%0); cache %1, 0x380(%0) \n" " cache %1, 0x400(%0); cache %1, 0x480(%0) \n" " cache %1, 0x500(%0); cache %1, 0x580(%0) \n" " cache %1, 0x600(%0); cache %1, 0x680(%0) \n" " cache %1, 0x700(%0); cache %1, 0x780(%0) \n" " cache %1, 0x800(%0); cache %1, 0x880(%0) \n" " cache %1, 0x900(%0); cache %1, 0x980(%0) \n" " cache %1, 0xa00(%0); cache %1, 0xa80(%0) \n" " cache %1, 0xb00(%0); cache %1, 0xb80(%0) \n" " cache %1, 0xc00(%0); cache %1, 0xc80(%0) \n" " cache %1, 0xd00(%0); cache %1, 0xd80(%0) \n" " cache %1, 0xe00(%0); cache %1, 0xe80(%0) \n" " cache %1, 0xf00(%0); cache %1, 0xf80(%0) \n" " .set pop \n" : : "r" (start), "i" (0x17));; start += 128 * 32; } while (start < end); } } static inline __attribute__((always_inline)) void blast_scache128_page_indexed(unsigned long page) { unsigned long indexmask = cpu_data[(__current_thread_info->cpu)].scache.waysize - 1; unsigned long start = 0x80000000 + (page & indexmask); unsigned long end = start + ((1UL) << 12); unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].scache.ways << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws, addr; { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 128 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x080(%0) \n" " cache %1, 0x100(%0); cache %1, 0x180(%0) \n" " cache %1, 0x200(%0); cache %1, 0x280(%0) \n" " cache %1, 0x300(%0); cache %1, 0x380(%0) \n" " cache %1, 0x400(%0); cache %1, 0x480(%0) \n" " cache %1, 0x500(%0); cache %1, 0x580(%0) \n" " cache %1, 0x600(%0); cache %1, 0x680(%0) \n" " cache %1, 0x700(%0); cache %1, 0x780(%0) 
\n" " cache %1, 0x800(%0); cache %1, 0x880(%0) \n" " cache %1, 0x900(%0); cache %1, 0x980(%0) \n" " cache %1, 0xa00(%0); cache %1, 0xa80(%0) \n" " cache %1, 0xb00(%0); cache %1, 0xb80(%0) \n" " cache %1, 0xc00(%0); cache %1, 0xc80(%0) \n" " cache %1, 0xd00(%0); cache %1, 0xd80(%0) \n" " cache %1, 0xe00(%0); cache %1, 0xe80(%0) \n" " cache %1, 0xf00(%0); cache %1, 0xf80(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x03));; } }
static inline __attribute__((always_inline)) void blast_inv_dcache16(void) { unsigned long start = 0x80000000; unsigned long end = start + cpu_data[(__current_thread_info->cpu)].dcache.waysize; unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].dcache.ways << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws, addr; unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 16 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x01));; if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } } static inline __attribute__((always_inline)) void blast_inv_dcache16_page(unsigned long page) { unsigned long start = page; unsigned long end = page + ((1UL) << 12); unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) { do { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" " cache %1, 
0x1e0(%0); cache %1, 0x1f0(%0) \n" " .set pop \n" : : "r" (start), "i" (0x11));; start += 16 * 32; } while (start < end); if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } } static inline __attribute__((always_inline)) void blast_inv_dcache16_page_indexed(unsigned long page) { unsigned long indexmask = cpu_data[(__current_thread_info->cpu)].dcache.waysize - 1; unsigned long start = 0x80000000 + (page & indexmask); unsigned long end = start + ((1UL) << 12); unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].dcache.ways << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws, addr; unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 16 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x01));; if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } }
static inline __attribute__((always_inline)) void blast_inv_dcache32(void) { unsigned long start = 0x80000000; unsigned long end = start + cpu_data[(__current_thread_info->cpu)].dcache.waysize; unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].dcache.ways << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws, addr; unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 32 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x01));; if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } } static inline __attribute__((always_inline)) void blast_inv_dcache32_page(unsigned long page) { unsigned long start = page; unsigned long end = page + ((1UL) << 12); unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) { do { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" " cache %1, 
0x3c0(%0); cache %1, 0x3e0(%0) \n" " .set pop \n" : : "r" (start), "i" (0x11));; start += 32 * 32; } while (start < end); if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } } static inline __attribute__((always_inline)) void blast_inv_dcache32_page_indexed(unsigned long page) { unsigned long indexmask = cpu_data[(__current_thread_info->cpu)].dcache.waysize - 1; unsigned long start = 0x80000000 + (page & indexmask); unsigned long end = start + ((1UL) << 12); unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].dcache.ways << cpu_data[(__current_thread_info->cpu)].dcache.waybit; unsigned long ws, addr; unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 32 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x01));; if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } }
static inline __attribute__((always_inline)) void blast_inv_scache16(void) { unsigned long start = 0x80000000; unsigned long end = start + cpu_data[(__current_thread_info->cpu)].scache.waysize; unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].scache.ways << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws, addr; { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 16 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x03));; } } static inline __attribute__((always_inline)) void blast_inv_scache16_page(unsigned long page) { unsigned long start = page; unsigned long end = page + ((1UL) << 12); { do { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" " .set pop \n" : : "r" (start), "i" (0x13));; start += 16 * 32; } while (start < end); } } static inline __attribute__((always_inline)) void blast_inv_scache16_page_indexed(unsigned long page) { unsigned long indexmask = cpu_data[(__current_thread_info->cpu)].scache.waysize - 1; unsigned long start = 0x80000000 + (page & indexmask); unsigned long end = start + ((1UL) << 12); unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].scache.ways << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws, addr; { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 16 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" " cache %1, 0x0e0(%0); cache %1, 
0x0f0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x03));; } }
static inline __attribute__((always_inline)) void blast_inv_scache32(void) { unsigned long start = 0x80000000; unsigned long end = start + cpu_data[(__current_thread_info->cpu)].scache.waysize; unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].scache.ways << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws, addr; { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 32 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x03));; } } static inline __attribute__((always_inline)) void blast_inv_scache32_page(unsigned long page) { unsigned long start = page; unsigned long end = page + ((1UL) << 12); { do { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" " .set pop \n" : : "r" (start), "i" (0x13));; start += 32 * 32; } while (start < end); } } static inline __attribute__((always_inline)) void blast_inv_scache32_page_indexed(unsigned long page) { unsigned long indexmask = cpu_data[(__current_thread_info->cpu)].scache.waysize - 1; unsigned long start = 0x80000000 + (page & indexmask); unsigned long end = start + ((1UL) << 12); unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].scache.ways << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws, addr; { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 32 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" " cache %1, 0x1c0(%0); cache %1, 
0x1e0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x03));; } }
static inline __attribute__((always_inline)) void blast_inv_scache64(void) { unsigned long start = 0x80000000; unsigned long end = start + cpu_data[(__current_thread_info->cpu)].scache.waysize; unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].scache.ways << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws, addr; { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 64 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" " cache %1, 0x400(%0); cache %1, 0x440(%0) \n" " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" " cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x03));; } } static inline __attribute__((always_inline)) void blast_inv_scache64_page(unsigned long page) { unsigned long start = page; unsigned long end = page + ((1UL) << 12); { do { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" " cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" " cache %1, 0x400(%0); cache %1, 0x440(%0) \n" " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" " cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" " .set pop \n" : : "r" (start), "i" (0x13));; start += 64 * 32; } while (start < end); } } static inline __attribute__((always_inline)) void blast_inv_scache64_page_indexed(unsigned long page) { unsigned long indexmask = cpu_data[(__current_thread_info->cpu)].scache.waysize - 1; unsigned long start = 0x80000000 + (page & indexmask); unsigned long end = start + ((1UL) << 12); unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].scache.ways << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws, addr; { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 64 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" " cache %1, 0x380(%0); cache %1, 
0x3c0(%0) \n" " cache %1, 0x400(%0); cache %1, 0x440(%0) \n" " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" " cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x03));; } }
static inline __attribute__((always_inline)) void blast_inv_scache128(void) { unsigned long start = 0x80000000; unsigned long end = start + cpu_data[(__current_thread_info->cpu)].scache.waysize; unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].scache.ways << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws, addr; { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 128 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x080(%0) \n" " cache %1, 0x100(%0); cache %1, 0x180(%0) \n" " cache %1, 0x200(%0); cache %1, 0x280(%0) \n" " cache %1, 0x300(%0); cache %1, 0x380(%0) \n" " cache %1, 0x400(%0); cache %1, 0x480(%0) \n" " cache %1, 0x500(%0); cache %1, 0x580(%0) \n" " cache %1, 0x600(%0); cache %1, 0x680(%0) \n" " cache %1, 0x700(%0); cache %1, 0x780(%0) \n" " cache %1, 0x800(%0); cache %1, 0x880(%0) \n" " cache %1, 0x900(%0); cache %1, 0x980(%0) \n" " cache %1, 0xa00(%0); cache %1, 0xa80(%0) \n" " cache %1, 0xb00(%0); cache %1, 0xb80(%0) \n" " cache %1, 0xc00(%0); cache %1, 0xc80(%0) \n" " cache %1, 0xd00(%0); cache %1, 0xd80(%0) \n" " cache %1, 0xe00(%0); cache %1, 0xe80(%0) \n" " cache %1, 0xf00(%0); cache %1, 0xf80(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x03));; } } static inline __attribute__((always_inline)) void blast_inv_scache128_page(unsigned long page) { unsigned long start = page; unsigned long end = page + ((1UL) << 12); { do { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x080(%0) \n" " cache %1, 0x100(%0); cache %1, 0x180(%0) \n" " cache %1, 0x200(%0); cache %1, 0x280(%0) \n" " cache %1, 0x300(%0); cache %1, 0x380(%0) \n" " cache %1, 0x400(%0); cache %1, 0x480(%0) \n" " cache %1, 0x500(%0); cache %1, 0x580(%0) \n" " cache %1, 0x600(%0); cache %1, 0x680(%0) \n" " cache %1, 0x700(%0); cache %1, 0x780(%0) \n" " cache %1, 0x800(%0); cache %1, 0x880(%0) \n" " cache %1, 0x900(%0); cache %1, 0x980(%0) \n" " cache %1, 0xa00(%0); cache %1, 0xa80(%0) \n" " cache %1, 0xb00(%0); cache %1, 0xb80(%0) \n" " cache %1, 0xc00(%0); cache %1, 0xc80(%0) \n" " cache %1, 0xd00(%0); cache %1, 0xd80(%0) \n" " cache %1, 0xe00(%0); cache %1, 0xe80(%0) \n" " cache %1, 0xf00(%0); cache %1, 0xf80(%0) \n" " .set pop \n" : : "r" (start), "i" (0x13));; start += 128 * 32; } while (start < end); } } static inline __attribute__((always_inline)) void blast_inv_scache128_page_indexed(unsigned long page) { unsigned long indexmask = cpu_data[(__current_thread_info->cpu)].scache.waysize - 1; unsigned long start = 0x80000000 + (page & indexmask); unsigned long end = start + ((1UL) << 12); unsigned long ws_inc = 1UL << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws_end = cpu_data[(__current_thread_info->cpu)].scache.ways << cpu_data[(__current_thread_info->cpu)].scache.waybit; unsigned long ws, addr; { for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 128 * 32) __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" " cache %1, 0x000(%0); cache %1, 0x080(%0) \n" " cache %1, 0x100(%0); cache %1, 0x180(%0) \n" " cache %1, 0x200(%0); cache %1, 0x280(%0) \n" " cache %1, 0x300(%0); cache %1, 0x380(%0) \n" " cache %1, 0x400(%0); cache %1, 0x480(%0) \n" " cache %1, 0x500(%0); cache %1, 0x580(%0) \n" " cache %1, 0x600(%0); cache %1, 0x680(%0) \n" " cache %1, 0x700(%0); cache %1, 
0x780(%0) \n" " cache %1, 0x800(%0); cache %1, 0x880(%0) \n" " cache %1, 0x900(%0); cache %1, 0x980(%0) \n" " cache %1, 0xa00(%0); cache %1, 0xa80(%0) \n" " cache %1, 0xb00(%0); cache %1, 0xb80(%0) \n" " cache %1, 0xc00(%0); cache %1, 0xc80(%0) \n" " cache %1, 0xd00(%0); cache %1, 0xd80(%0) \n" " cache %1, 0xe00(%0); cache %1, 0xe80(%0) \n" " cache %1, 0xf00(%0); cache %1, 0xf80(%0) \n" " .set pop \n" : : "r" (addr|ws), "i" (0x03));; } }
# 435 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/r4kcache.h"
static inline __attribute__((always_inline)) void protected_blast_dcache_range(unsigned long start, unsigned long end) { unsigned long lsize = cpu_data[0].dcache.linesz; unsigned long addr = start & ~(lsize - 1); unsigned long aend = (end - 1) & ~(lsize - 1); unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) { while (1) { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" "1: cache %0, (%1) \n" "2: .set pop \n" " .section __ex_table,\"a\" \n" " "".word"" 1b, 2b \n" " .previous" : : "i" (0x15), "r" (addr)); if (addr == aend) break; addr += lsize; } if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } }
static inline __attribute__((always_inline)) void protected_blast_scache_range(unsigned long start, unsigned long end) { unsigned long lsize = cpu_data[0].scache.linesz; unsigned long addr = start & ~(lsize - 1); unsigned long aend = (end - 1) & ~(lsize - 1); { while (1) { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" "1: cache %0, (%1) \n" "2: .set pop \n" " .section __ex_table,\"a\" \n" " "".word"" 1b, 2b \n" " .previous" : : "i" (0x17), "r" (addr)); if (addr == aend) break; addr += lsize; } } }
static inline __attribute__((always_inline)) void protected_blast_icache_range(unsigned long start, unsigned long end) { unsigned long lsize = cpu_data[0].icache.linesz; unsigned long addr = start & ~(lsize - 1); unsigned long aend = (end - 1) & ~(lsize - 1); unsigned long redundance; extern int mt_n_iflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protiflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_iflushes; redundance++) { while (1) { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3 \n" "1: cache %0, (%1) \n" "2: .set pop \n" " .section __ex_table,\"a\" \n" " "".word"" 1b, 2b \n" " .previous" : : "i" (0x10), "r" (addr)); if (addr == aend) break; addr += lsize; } if(mt_protiflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } }
static inline __attribute__((always_inline)) void blast_dcache_range(unsigned long start, unsigned long end) { unsigned long lsize = cpu_data[0].dcache.linesz; unsigned long addr = start & ~(lsize - 1); unsigned long aend = (end - 1) & ~(lsize - 1); unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) { while (1) { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3\n\t \n" " cache %0, %1 \n" " .set pop \n" : : "i" (0x15), "R" (*(unsigned char *)(addr))); if (addr == aend) break; addr += lsize; } if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } }
static inline __attribute__((always_inline)) void blast_scache_range(unsigned long start, unsigned long end) { unsigned long lsize = cpu_data[0].scache.linesz; unsigned long addr = start & ~(lsize - 1); unsigned long aend = (end - 1) & ~(lsize - 1); { while (1) { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3\n\t \n" " cache %0, %1 \n" " .set pop \n" : : "i" (0x17), "R" (*(unsigned char *)(addr))); if (addr == aend) break; addr += lsize; } } }
static inline __attribute__((always_inline)) void blast_inv_dcache_range(unsigned long start, unsigned long end) { unsigned long lsize = cpu_data[0].dcache.linesz; unsigned long addr = start & ~(lsize - 1); unsigned long aend = (end - 1) & ~(lsize - 1); unsigned long redundance; extern int mt_n_dflushes; unsigned long flags = 0; unsigned long mtflags = 0; if(mt_protdflush) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); __asm__ __volatile__( "raw_local_irq_save\t%0" : "=r" (flags) : : "memory"); do { } while (0); } while (0); ehb(); mtflags = dvpe(); mt_cflush_lockdown(); } for (redundance = 0; redundance < mt_n_dflushes; redundance++) { while (1) { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3\n\t \n" " cache %0, %1 \n" " .set pop \n" : : "i" (0x11), "R" (*(unsigned char *)(addr))); if (addr == aend) break; addr += lsize; } if(mt_protdflush) { mt_cflush_release(); evpe(mtflags); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); if (raw_irqs_disabled_flags(flags)) { raw_local_irq_restore(flags); do { } while (0); } else { do { } while (0); raw_local_irq_restore(flags); } } while (0); } } }
static inline __attribute__((always_inline)) void blast_inv_scache_range(unsigned long start, unsigned long end) { unsigned long lsize = cpu_data[0].scache.linesz; unsigned long addr = start & ~(lsize - 1); unsigned long aend = (end - 1) & ~(lsize - 1); { while (1) { __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set mips3\n\t \n" " cache %0, %1 \n" " .set pop \n" : : "i" (0x13), "R" (*(unsigned char *)(addr))); if (addr == aend) break; addr += lsize; } } }
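/*
 * Illustrative sketch (added; not part of the original preprocessed source):
 * the blast_*_range() helpers above walk an address range one cache line at
 * a time (cpu_data[0].dcache.linesz / .scache.linesz). blast_dcache_range()
 * issues Hit_Writeback_Inv_D (0x15) and blast_inv_dcache_range() issues
 * Hit_Invalidate_D (0x11), so a hypothetical caller syncing a DMA buffer
 * could use them as below; example_dma_cache_sync() is an assumed name, not
 * an API from this file.
 */
static inline void example_dma_cache_sync(unsigned long buf, unsigned long len,
                                          int to_device)
{
	if (to_device)
		/* write dirty lines back to memory before the device reads the buffer */
		blast_dcache_range(buf, buf + len);
	else
		/* discard stale lines before the CPU reads data the device wrote */
		blast_inv_dcache_range(buf, buf + len);
}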
# 48 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_bmgr.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_dev.h" 1
# 47 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_dev.h"
# 1 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_reg.h" 1
# 48 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_dev.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_glb.h" 1
# 49 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_dev.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h" 1
# 86 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_init(struct ECNT_QDMA_Data *qdma_data) ;
# 95 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_deinit(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_tx_dma_mode(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_rx_dma_mode(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_loopback_mode(struct ECNT_QDMA_Data *qdma_data) ;
# 126 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_register_callback_function(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_unregister_callback_function(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_enable_rxpkt_int(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_disable_rxpkt_int(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_receive_packets(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_transmit_packet(struct ECNT_QDMA_Data *qdma_data) ;
# 172 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_set_tx_qos_weight(struct ECNT_QDMA_Data *qdma_data) ;
# 182 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_get_tx_qos_weight(struct ECNT_QDMA_Data *qdma_data) ;
# 193 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_set_tx_qos(struct ECNT_QDMA_Data *qdma_data) ;
# 204 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_get_tx_qos(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_mac_limit_threshold(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_mac_limit_threshold(struct ECNT_QDMA_Data *qdma_data) ;
# 234 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_set_txbuf_threshold(struct ECNT_QDMA_Data *qdma_data) ;
# 244 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_get_txbuf_threshold(struct ECNT_QDMA_Data *qdma_data) ;
# 256 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_set_prefetch_mode(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_pktsize_overhead_en(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_pktsize_overhead_en(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_pktsize_overhead_value(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_pktsize_overhead_value(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_lmgr_low_threshold(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_lmgr_low_threshold(struct ECNT_QDMA_Data *qdma_data) ;
# 309 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_get_lmgr_status(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_clear_cpu_counters(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_dump_cpu_counters(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_dump_register_value(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_dump_descriptor_info(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_dump_irq_info(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_dump_hwfwd_info(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_dbg_level(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_dump_dma_busy(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_dump_reg_polling(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_force_receive_rx_ring1(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_tx_drop_en(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_tx_drop_en(struct ECNT_QDMA_Data *qdma_data) ;
# 402 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_set_tx_ratemeter(struct ECNT_QDMA_Data *qdma_data) ;
# 412 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_get_tx_ratemeter(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_tx_channel_datarate(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_enable_tx_ratelimit(struct ECNT_QDMA_Data *qdma_data) ;
# 436 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_set_tx_ratelimit_cfg(struct ECNT_QDMA_Data *qdma_data) ;
# 445 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_get_tx_ratelimit_cfg(struct ECNT_QDMA_Data *qdma_data) ;
# 455 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_set_tx_ratelimit(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_tx_ratelimit(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_tx_dba_report(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_tx_dba_report(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_rx_protect_en(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_rx_protect_en(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_rx_low_threshold(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_rx_low_threshold(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_rx_ratelimit_en(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_rx_ratelimit_pkt_mode(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_rx_ratelimit_cfg(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_rx_ratelimit(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_rx_ratelimit(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_txq_dei_drop_mode(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_txq_dei_drop_mode(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_txq_cngst_mode(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_txq_cngst_mode(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_txq_dei_threshold_scale(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_txq_dei_threshold_scale(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_txq_cngst_auto_config(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_txq_cngst_auto_config(struct ECNT_QDMA_Data *qdma_data) ;
# 599 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_set_txq_cngst_dynamic_threshold(struct ECNT_QDMA_Data *qdma_data) ;
# 608 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_get_txq_cngst_dynamic_threshold(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_txq_cngst_total_threshold(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_txq_cngst_total_threshold(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_txq_cngst_channel_threshold(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_txq_cngst_channel_threshold(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_txq_cngst_queue_threshold(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_txq_cngst_queue_threshold(struct ECNT_QDMA_Data *qdma_data) ;
# 660 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_set_txq_peekrate_params(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_txq_peekrate_params(struct ECNT_QDMA_Data *qdma_data) ;
# 676 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_set_txq_cngst_static_queue_normal_threshold(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_txq_cngst_static_queue_dei_threshold(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_txq_cngst_dynamic_info(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_txq_cngst_static_info(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_txq_cngst_queue_nonblocking(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_txq_cngst_queue_nonblocking(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_set_txq_cngst_channel_nonblocking(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_txq_cngst_channel_nonblocking(struct ECNT_QDMA_Data *qdma_data) ;
# 736 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_set_virtual_channel_mode(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_virtual_channel_mode(struct ECNT_QDMA_Data *qdma_data) ;
# 753 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_set_virtual_channel_qos(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_get_virtual_channel_qos(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_clear_dbg_cntr_value_all(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_clear_and_set_dbg_cntr_channel_group(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_clear_and_set_dbg_cntr_queue_group(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_dump_dbg_cntr_value(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_dump_tx_qos(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_dump_virtual_channel_qos(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_dump_tx_ratelimit(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_dump_rx_ratelimit(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_dump_tx_dba_report(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_dump_txq_cngst(struct ECNT_QDMA_Data *qdma_data) ;
int qdma_dump_info_all(struct ECNT_QDMA_Data *qdma_data) ;
# 802 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_set_dbg_cntr_info(QDMA_DBG_CNTR_T *dbgCntrPtr) ;
int qdma_clear_and_enable_dbg_cntr_info(QDMA_DBG_CNTR_T *dbgCntrPtr) ;
# 820 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_get_dbg_cntr_info(QDMA_DBG_CNTR_T *dbgCntrPtr) ;
# 830 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_set_txq_cngst_cfg(QDMA_TxQCngstCfg_T *txqCngstCfgPtr) ;
# 840 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_get_txq_cngst_cfg(QDMA_TxQCngstCfg_T *txqCngstCfgPtr) ;
# 865 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
void qdma_set_txqueue_dei_mode(QDMA_Mode_t deiDropMode) ;
QDMA_Mode_t qdma_get_txqueue_dei_mode(void) ;
void qdma_set_txqueue_threshold_mode(QDMA_Mode_t thrsldDropMode) ;
QDMA_Mode_t qdma_get_txqueue_threshold_mode(void) ;
# 899 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_api.h"
int qdma_set_congest_threshold(QDMA_TxQStaticCngstThrh_T *pThreshold) ;
int qdma_get_congest_config(QDMA_TxQueueCongestCfg_T *pCongest) ;
void qdmaSetDbgCntrCfg_init(void);
void qdmaGetDbgCntrCfg_Val(uint *cpuTxCnt, uint *cpuRxCnt, uint *fwdTxCnt, uint *fwdRxCnt);
int qdma_set_tx_cngst_mode(struct ECNT_QDMA_Data *qdma_data);
int qdma_get_tx_cngst_mode(struct ECNT_QDMA_Data *qdma_data);
int qdma_read_vip_info(void);
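/*
 * Note (added for readability; not in the original qdma_api.h): most of the
 * qdma_* entry points declared above take a single struct ECNT_QDMA_Data *
 * dispatch argument and carry their per-call parameters inside that
 * structure; only the last few helpers (qdma_set_dbg_cntr_info(),
 * qdma_set_txq_cngst_cfg(), qdma_set_congest_threshold(), ...) take their
 * specific configuration structures directly.
 */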
# 50 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_dev.h" 2
# 353 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_dev.h"
typedef struct {
uint resv1 ;
struct {
uint done : 1 ;
uint overflow : 1 ;
uint resv2 : 14 ;
uint pkt_len : 16 ;
} ctrl ;
uint pkt_addr ;
uint resv3 : 20 ;
uint next_idx : 12 ;
uint msg[4] ;
} QDMA_DMA_DSCP_T ;
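/*
 * Illustrative sketch (added; not in the original qdma_dev.h): given the bit
 * layout of QDMA_DMA_DSCP_T above, code consuming a completed descriptor
 * would typically check the done/overflow flags and pkt_len in the control
 * word, then follow the 12-bit next_idx to the next descriptor in the ring;
 * handle_rx_packet() here is a hypothetical helper, not a function from this
 * source.
 *
 *     QDMA_DMA_DSCP_T *dscp = ...;                // descriptor to inspect
 *     if (dscp->ctrl.done && !dscp->ctrl.overflow)
 *         handle_rx_packet(dscp->pkt_addr, dscp->ctrl.pkt_len);
 *     next = dscp->next_idx;                      // 12-bit ring index
 */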
typedef struct {
uint pkt_addr ;
struct {
uint ctx : 1 ;
uint resv : 2 ;
uint ctx_ring : 1 ;
uint ctx_idx : 12 ;
uint pkt_len : 16 ;
} ctrl ;
uint msg[2] ;
} QDMA_HWFWD_DMA_DSCP_T ;
typedef union
{
struct
{
uint reserve1 : 7;
uint dei : 1;
uint reserve2 : 12;
uint oam : 1;
uint channel : 8;
uint queue : 3;
}bits;
uint word;
} txMsgWord0_t;
typedef struct
{
uint word;
} txMsgWord1_t;
typedef struct txMsgWord_s{
txMsgWord0_t *pTxMsgW0;
txMsgWord1_t *pTxMsgW1;
} txMsgWord_t;
typedef struct {
struct cdev *pQdmaDev ;
} QDMA_Dev_T ;
# 449 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_dev.h"
int qdmaSetTxQosScheduler(unchar channel, unchar mode, unchar weight[8]) ;
int qdmaGetTxQosScheduler(unchar channel, unchar *pMode, unchar weight[8]) ;
int qdmaSetRxRateLimitConfig(unchar ringIdx, unchar type, ushort value) ;
int qdmaGetRxRateLimitConfig(unchar ringIdx, unchar type) ;
int qdmaSetTxRateLimitConfig(unchar chnlIdx, unchar type, ushort value) ;
int qdmaGetTxRateLimitConfig(unchar chnlIdx, unchar type) ;
int qdmaSetVirtualChannelQos(unchar phyChnl, unchar virChnlMax, unchar mode, unchar weight[4]) ;
int qdmaGetVirtualChannelQos(unchar phyChnl, unchar virChnlMax, unchar *pMode, unchar weight[4]) ;
int qdmaGetLimitRateMax(void) ;
int qdmaUpdateAllTxRateLimitValue(ushort curUnit, ushort newUnit) ;
int qdmaUpdateAllRxRateLimitValue(ushort curUnit, ushort newUnit) ;
int qdma_clear_and_set_dbg_cntr_info(QDMA_DBG_CNTR_T *dbgCntrPtr) ;
int qdma_get_dbg_cntr_info(QDMA_DBG_CNTR_T *dbgCntrPtr) ;
int qdma_set_dbg_cntr_default_config(void) ;
int qdmaSetDbgMemXsConfig(QDMA_DbgMemXsMemSel_t memSel, unchar byteSel, ushort addr, uint valueLow, uint valueHigh) ;
int qdmaGetDbgMemXsConfig(QDMA_DbgMemXsMemSel_t memSel, unchar byteSel, ushort addr, uint *value) ;
int qdma_get_ringIdx(struct sk_buff *skb, struct port_info *pMacInfo) ;
int qdma_qos_mark(struct sk_buff *skb) ;
int macResourceLimit(struct sk_buff *skb, uint channel, uint queue) ;
int qdmaEnableInt(uint base, uint bit) ;
int qdmaDisableInt(uint base, uint bit) ;
int qdmaSetIntMask(uint base, uint value) ;
int qdmaGetIntMask(uint base) ;
void qdma_dma_busy_timer(unsigned long arg) ;
void qdma_reg_polling_timer(unsigned long arg) ;
void qdma_dev_destroy(void) ;
int qdma_dev_init(void) ;
# 49 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_bmgr.h" 2
# 180 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_bmgr.h"
typedef int (*qdma_api_op_t)(struct ECNT_QDMA_Data *qdma_data);
struct QDMA_DscpInfo_S {
QDMA_DMA_DSCP_T *dscpPtr ;
uint dscpIdx ;
struct sk_buff *skb ;
struct QDMA_DscpInfo_S *next ;
} ;
typedef struct {
struct {
unchar isTxPolling :1 ;
unchar isRxPolling :1 ;
unchar isRxNapi :1 ;
unchar isIsrRequest :1 ;
unchar resv1 :4 ;
} flags ;
unchar txRecycleThreshold ;
unchar macType;
# 224 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_bmgr.h"
int (*bmRecvCallbackFunction)(void *, uint, struct sk_buff *, uint) ;
int (*bmEventCallbackFunction)(QDMA_EventType_t) ;
int (*bmRecyPktCallbackFunction)(struct sk_buff *, uint) ;
void (*bmGponMacIntHandler)(void) ;
void (*bmEponMacIntHandler)(void) ;
void (*bmXponPhyIntHandler)(void) ;
} BM_DevConfig_T ;
typedef struct {
ushort IrqQueueAsynchronous ;
ushort txIrqQueueIdxErrs ;
uint rxAllocFailErrs ;
ushort intIrqFull ;
ushort IrqErr ;
ushort intNoLinkDscp ;
ushort intLowLinkDscp ;
ushort txPktsFreeCounts ;
ushort txPktsFreeErrs ;
ushort txMacLimitDropCnt ;
uint txCounts[(2)] ;
uint txRecycleCounts[(2)] ;
uint rxCounts[(2)] ;
ushort txDscpIncorrect[(2)] ;
ushort txDscpDoneErrs[(2)] ;
ushort rxDscpIncorrect[(2)] ;
ushort rxPktErrs[(2)] ;
ushort noTxDscps[(2)] ;
ushort noRxCbErrs[(2)] ;
ushort noRxDscps[(2)] ;
ushort rxAllocFailDropErrs[(2)] ;
ushort intRxCoherent[(2)] ;
ushort intTxCoherent[(2)] ;
ushort intNoRxBuff[(2)] ;
ushort intNoTxBuff[(2)] ;
ushort intNoRxDscp[(2)] ;
ushort intNoTxDscp[(2)] ;
ushort rxBroadcastCounts[(2)] ;
ushort rxUnknownUnicastCounts[(2)] ;
ushort rxMulticastCounts[(2)] ;
uint intRxDone[(2)] ;
uint intTxDone[(2)] ;
} BM_Counters_T ;
typedef struct {
uint csrBaseAddr ;
spinlock_t txLock[(2)] ;
spinlock_t irqLock ;
spinlock_t txDoneLock ;
ushort txDscpNum[(2)] ;
ushort rxDscpNum[(2)] ;
ushort hwFwdDscpNum ;
ushort irqDepth ;
ushort hwPktSize ;
uint dscpInfoAddr ;
uint txBaseAddr[(2)] ;
uint rxBaseAddr[(2)] ;
uint irqQueueAddr ;
uint hwFwdBaseAddr ;
uint hwFwdBuffAddr ;
uint hwFwdPayloadSize ;
struct QDMA_DscpInfo_S *txHeadPtr[(2)] ;
struct QDMA_DscpInfo_S *txTailPtr[(2)] ;
struct QDMA_DscpInfo_S *txUsingPtr[(2)] ;
struct QDMA_DscpInfo_S *rxStartPtr[(2)] ;
struct QDMA_DscpInfo_S *rxEndPtr[(2)] ;
struct QDMA_DscpInfo_S *rxUsingPtr[(2)] ;
struct tasklet_struct task ;
BM_DevConfig_T devCfg ;
BM_Counters_T counters ;
struct timer_list fwdCfg_timer ;
uint slmPhySize ;
uint slmPhyAddr ;
QDMA_Dev_T qdmaDev ;
QDMA_DbgCntrChnlGroup_t dbgCntrType;
} QDMA_Private_T ;
typedef struct {
ushort txDscpNum[(2)] ;
ushort rxDscpNum[(2)] ;
ushort hwDscpNum ;
ushort irqDepth ;
ushort hwFwdPktLen ;
} QDMA_Init_T ;
typedef struct {
uint headIdx;
uint tailIdx;
uint cntr;
} QDMA_Lmgr_queueInfo_T ;
typedef struct {
ushort sport ;
ushort dport ;
unchar sport_en : 1 ;
unchar dport_en : 1 ;
unchar swap_en : 1 ;
unchar resv : 4 ;
unchar ip_type ;
} QDMA_VIP_INFO_T ;
extern QDMA_Private_T *gpQdmaPriv ;
# 345 "/opt/tclinux_phoenix/modules/private/qdma/EN7512/qdma_bmgr.h"
int inline __attribute__((always_inline)) qdma_kfree_skb(struct sk_buff *skb, uint msg0) ;
int qdma_set_tx_delay(unchar txIrqThreshold, ushort txIrqPtime);
int qdma_set_rx_delay(unchar rxMaxInt, unchar rxMaxPtime, int ringIdx);
int qdma_receive_packet_mode(QDMA_RecvMode_t rxMode);
int qdma_recycle_packet_mode(QDMA_RecycleMode_t txMode, unchar txThrh);
struct sk_buff * allocate_skb_buffer(void);
int qdma_prepare_rx_buffer(int ringIdx);
int qdma_bm_receive_packets(uint maxPkts, int ringIdx) ;
int qdma_bm_hook_receive_buffer(struct sk_buff *skb, int ringIdx) ;
int qdma_bm_recycle_receive_buffer(int ringIdx) ;
int qdma_bm_transmit_packet(struct sk_buff *skb, int ringIdx, uint msg0, uint msg1) ;
int qdma_bm_transmit_done(int amount) ;
int qdma_bm_recycle_transmit_buffer(int ringIdx) ;
int qdma_bm_tx_polling_mode(QDMA_Mode_t txMode, unchar txThreshold) ;
# 79 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "include/ecnt_hook/ecnt_hook_fe.h" 1
# 46 "include/ecnt_hook/ecnt_hook_fe.h"
# 1 "include/ecnt_hook/ecnt_hook_fe_type.h" 1
# 74 "include/ecnt_hook/ecnt_hook_fe_type.h"
typedef enum {
ECNT_FE_API,
}FE_Api_SubType_t;
typedef enum {
FE_GDM_SEL_GDMA1 = 0 ,
FE_GDM_SEL_GDMA2
} FE_Gdma_Sel_t;
typedef enum {
FE_CDM_SEL_CDMA1 = 0 ,
FE_CDM_SEL_CDMA2
} FE_Cdma_Sel_t;
typedef enum {
FE_RED_DROP_Q0 = 0 ,
FE_RED_DROP_Q1L,
FE_RED_DROP_Q1H
} FE_RedDropQ_Sel_t;
typedef enum {
FE_RATE_OR_FULL_DROP = 0 ,
FE_RATE_OR_THRL_DROP,
FE_RATE_AND_FULL_DROP,
FE_RATE_AND_THRL_DROP
} FE_RedDropMode_Sel_t;
typedef enum {
FE_GDM_SEL_TX = 0 ,
FE_GDM_SEL_RX
} FE_TXRX_Sel_t;
typedef enum {
FE_DISABLE = 0 ,
FE_ENABLE
} FE_Enable_t;
typedef enum {
FE_NOT_PPPOE = 0 ,
FE_IS_PPPOE
} FE_PPPOE_t;
typedef enum {
ETH_TYPE = 0 ,
PPPOE_TYPE,
IP_TYPE,
TCP_TYPE,
UDP_TYPE,
} FE_Patn_type;
typedef enum {
FE_KEY0 = 0 ,
FE_KEY1
} FE_L2U_KEY_t;
typedef enum {
FE_LINKUP = 0,
FE_LINKDOWN
} FE_Linkup_t;
typedef enum {
FE_PCP_MODE_DISABLE = 0,
FE_PCP_MODE_8B0D = 1,
FE_PCP_MODE_7B1D = 2,
FE_PCP_MODE_6B2D = 4,
FE_PCP_MODE_5B3D = 8
} FE_PcpMode_t;
typedef enum {
FE_DP_QDMA1_CPU = 0,
FE_DP_GDM1,
FE_DP_GDM2,
FE_DP_QDMA1_HWF,
FE_DP_PPE,
FE_DP_QDMA2_CPU,
FE_DP_QDMA2_HWF,
FE_DP_DISCARD
} FE_Frame_dp_t;
typedef enum {
FE_TYPE_OC = 0,
FE_TYPE_MC,
FE_TYPE_BC,
FE_TYPE_UC,
} FE_Frame_type_t ;
typedef enum {
FE_ERR_CRC = 0,
FE_ERR_LONG,
FE_ERR_RUNT,
} FE_Err_type_t ;
typedef enum {
FE_SET_PACKET_LENGTH = 0,
FE_SET_CHANNEL_ENABLE,
FE_SET_MAC_ADDR,
FE_SET_HWFWD_CHANNEL,
FE_SET_CHANNEL_RETIRE,
FE_SET_CRC_STRIP,
FE_SET_PADDING,
FE_GET_EXT_TPID,
FE_SET_EXT_TPID,
FE_GET_FW_CFG,
FE_SET_FW_CFG,
FE_SET_DROP_UDP_CHKSUM_ERR_ENABLE,
FE_SET_DROP_TCP_CHKSUM_ERR_ENABLE,
FE_SET_DROP_IP_CHKSUM_ERR_ENABLE,
FE_SET_DROP_CRC_ERR_ENABLE,
FE_SET_DROP_RUNT_ENABLE,
FE_SET_DROP_LONG_ENABLE,
FE_SET_VLAN_CHECK,
FE_GET_OK_CNT,
FE_GET_RX_CRC_ERR_CNT,
FE_GET_RX_DROP_FIFO_CNT,
FE_GET_RX_DROP_ERR_CNT,
FE_GET_OK_BYTE_CNT,
FE_GET_TX_GET_CNT,
FE_GET_TX_DROP_CNT,
FE_GET_TIEM_STAMP,
FE_SET_TIEM_STAMP,
FE_SET_INS_VLAN_TPID,
FE_SET_VLAN_ENABLE,
FE_SET_BLACK_LIST,
FE_SET_ETHER_TYEP,
FE_SET_L2U_KEY,
FE_GET_AC_GROUP_PKT_CNT,
FE_GET_AC_GROUP_BYTE_CNT,
FE_SET_METER_GROUP,
FE_GET_METER_GROUP,
FE_SET_GDM_PCP_CODING,
FE_SET_CDM_PCP_CODING,
FE_SET_VIP_ENABLE,
FE_GET_ETH_RX_CNT,
FE_GET_ETH_TX_CNT,
FE_GET_ETH_FRAME_CNT,
FE_GET_ETH_ERR_CNT,
FE_SET_CLEAR_MIB,
FE_SET_CDM_RX_RED_DROP,
FE_GET_CDM_RX_RED_DROP,
FE_SET_CHANNEL_RETIRE_ALL,
FE_SET_CHANNEL_RETIRE_ONE,
FE_SET_TX_RATE,
FE_SET_RXUC_RATE,
FE_SET_RXBC_RATE,
FE_SET_RXMC_RATE,
FE_SET_RXOC_RATE,
FE_ADD_VIP_ETHER,
FE_ADD_VIP_PPP,
FE_ADD_VIP_IP,
FE_ADD_VIP_TCP,
FE_ADD_VIP_UDP,
FE_DEL_VIP_ETHER,
FE_DEL_VIP_PPP,
FE_DEL_VIP_IP,
FE_DEL_VIP_TCP,
FE_DEL_VIP_UDP,
FE_ADD_L2LU_VLAN_DSCP,
FE_ADD_L2LU_VLAN_TRFC,
FE_DEL_L2LU_VLAN_DSCP,
FE_DEL_L2LU_VLAN_TRFC,
FE_SET_TX_FAVOR_OAM_ENABLE,
FE_FUNCTION_MAX_NUM,
} FE_HookFunctionID_t ;
typedef struct FE_TxCnt_s{
uint txFrameCnt;
uint txFrameLen;
uint txDropCnt;
uint txBroadcastCnt;
uint txMulticastCnt;
uint txLess64Cnt;
uint txMore1518Cnt;
uint txEq64Cnt;
uint txFrom65To127Cnt;
uint txFrom128To255Cnt;
uint txFrom256To511Cnt;
uint txFrom512To1023Cnt;
uint txFrom1024To1518Cnt;
}FE_TxCnt_t;
typedef struct FE_RxCnt_s{
uint rxFrameCnt;
uint rxFrameLen;
uint rxDropCnt;
uint rxBroadcastCnt;
uint rxMulticastCnt;
uint rxCrcCnt;
uint rxFragFameCnt;
uint rxJabberFameCnt;
uint rxLess64Cnt;
uint rxMore1518Cnt;
uint rxEq64Cnt;
uint rxFrom65To127Cnt;
uint rxFrom128To255Cnt;
uint rxFrom256To511Cnt;
uint rxFrom512To1023Cnt;
uint rxFrom1024To1518Cnt;
}FE_RxCnt_t;
struct ecnt_fe_data {
FE_HookFunctionID_t function_id;
FE_Gdma_Sel_t gdm_sel;
FE_Cdma_Sel_t cdm_sel;
FE_RedDropQ_Sel_t dropQ_sel;
FE_RedDropMode_Sel_t dropMode_sel;
FE_TXRX_Sel_t txrx_sel;
FE_Err_type_t err_type;
uint channel;
uint index;
uint reg_val;
uint cnt;
uint cnt_hi;
uint timeStamp;
union {
FE_Enable_t enable;
FE_Linkup_t link_mode;
FE_PcpMode_t coding_mode;
uint meter_rate;
struct {
uint byteCnt_L;
uint byteCnt_H;
}byteCnt;
struct {
uint length_long;
uint length_short;
} pkt_len;
struct {
unchar *mac;
ushort mask;
} mac_addr;
struct {
FE_Frame_type_t dp_sel;
FE_Frame_dp_t dp_val;
} fw_cfg;
struct {
FE_Enable_t enable;
FE_PPPOE_t is_pppoe;
uint value;
} eth_cfg;
struct {
FE_L2U_KEY_t key_sel;
uint key0;
uint key1;
} l2u_key;
struct {
FE_Enable_t enable;
FE_Patn_type patten_type;
uint patten;
} vip_cfg;
struct {
uint rate;
uint mode;
uint maxBkSize;
uint tick;
}rate_cfg;
struct {
ushort type;
ushort src;
ushort dst;
unchar mode;
}vip;
struct {
unchar mask;
unchar dscp;
ushort svlan;
ushort cvlan;
}l2lu;
FE_RxCnt_t FE_RxCnt;
FE_TxCnt_t FE_TxCnt;
} api_data;
int retValue;
};
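/*
 * Note (added for readability; not in the original ecnt_hook_fe.h):
 * function_id selects which member of the api_data union above is
 * meaningful for a given call, and the hook handler writes its result into
 * retValue, which the FE_API_* wrappers below return to the caller.
 */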
# 47 "include/ecnt_hook/ecnt_hook_fe.h" 2
# 73 "include/ecnt_hook/ecnt_hook_fe.h"
static inline __attribute__((always_inline)) int FE_API_SET_PACKET_LENGTH(FE_Gdma_Sel_t _gdm_sel, uint _length_long, uint _length_short) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_PACKET_LENGTH;
in_data.gdm_sel = _gdm_sel;
in_data.api_data.pkt_len.length_long = _length_long;
in_data.api_data.pkt_len.length_short = _length_short;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_CHANNEL_ENABLE(FE_Gdma_Sel_t _gdm_sel, FE_TXRX_Sel_t _txrx_sel, unchar _channel, FE_Enable_t _enable) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_CHANNEL_ENABLE;
in_data.gdm_sel = _gdm_sel;
in_data.txrx_sel = _txrx_sel;
in_data.channel = _channel;
in_data.api_data.enable = _enable;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_MAC_ADDR(FE_Gdma_Sel_t _gdm_sel, unchar *_mac, ushort _mask) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_MAC_ADDR;
in_data.gdm_sel = _gdm_sel;
in_data.api_data.mac_addr.mac = _mac;
in_data.api_data.mac_addr.mask = _mask;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_HWFWD_CHANNEL(FE_Cdma_Sel_t _cdm_sel, unchar _channel, FE_Enable_t _enable) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_HWFWD_CHANNEL;
in_data.cdm_sel = _cdm_sel;
in_data.channel = _channel;
in_data.api_data.enable = _enable;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_CHANNEL_RETIRE(FE_Gdma_Sel_t _gdm_sel, unchar _channel, FE_Linkup_t _mode) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_CHANNEL_RETIRE;
in_data.gdm_sel = _gdm_sel;
in_data.channel = _channel;
in_data.api_data.link_mode = _mode;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_CRC_STRIP(FE_Gdma_Sel_t _gdm_sel, FE_Enable_t _enable) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_CRC_STRIP;
in_data.gdm_sel = _gdm_sel;
in_data.api_data.enable = _enable;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_PADDING(FE_Enable_t _enable) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_PADDING;
in_data.api_data.enable = _enable;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_EXT_TPID(uint _tpid) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_EXT_TPID;
in_data.reg_val = _tpid;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_GET_EXT_TPID(uint *_tpid) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_GET_EXT_TPID;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
*_tpid = in_data.reg_val;
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_FW_CFG(FE_Gdma_Sel_t _gdm_sel, FE_Frame_type_t _dp_sel, FE_Frame_dp_t _dp_type) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_FW_CFG;
in_data.gdm_sel = _gdm_sel;
in_data.api_data.fw_cfg.dp_sel = _dp_sel;
in_data.api_data.fw_cfg.dp_val = _dp_type;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_GET_FW_CFG(FE_Gdma_Sel_t _gdm_sel, uint* pval) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_GET_FW_CFG;
in_data.gdm_sel = _gdm_sel;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
*pval = in_data.reg_val;
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_DROP_UDP_CHKSUM_ERR_ENABLE(FE_Gdma_Sel_t _gdm_sel, FE_Enable_t _enable) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_DROP_UDP_CHKSUM_ERR_ENABLE;
in_data.gdm_sel = _gdm_sel;
in_data.api_data.enable = _enable;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_DROP_TCP_CHKSUM_ERR_ENABLE(FE_Gdma_Sel_t _gdm_sel, FE_Enable_t _enable) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_DROP_TCP_CHKSUM_ERR_ENABLE;
in_data.gdm_sel = _gdm_sel;
in_data.api_data.enable = _enable;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_DROP_IP_CHKSUM_ERR_ENABLE(FE_Gdma_Sel_t _gdm_sel, FE_Enable_t _enable) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_DROP_IP_CHKSUM_ERR_ENABLE;
in_data.gdm_sel = _gdm_sel;
in_data.api_data.enable = _enable;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_DROP_CRC_ERR_ENABLE(FE_Gdma_Sel_t _gdm_sel, FE_Enable_t _enable) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_DROP_CRC_ERR_ENABLE;
in_data.gdm_sel = _gdm_sel;
in_data.api_data.enable = _enable;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_DROP_RUNT_ENABLE(FE_Gdma_Sel_t _gdm_sel, FE_Enable_t _enable) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_DROP_RUNT_ENABLE;
in_data.gdm_sel = _gdm_sel;
in_data.api_data.enable = _enable;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_DROP_LONG_ENABLE(FE_Gdma_Sel_t _gdm_sel, FE_Enable_t _enable) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_DROP_LONG_ENABLE;
in_data.gdm_sel = _gdm_sel;
in_data.api_data.enable = _enable;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_VLAN_CHECK(FE_Gdma_Sel_t _gdm_sel, FE_Enable_t _enable) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_VLAN_CHECK;
in_data.gdm_sel = _gdm_sel;
in_data.api_data.enable = _enable;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_GET_OK_CNT(FE_Gdma_Sel_t _gdm_sel, FE_TXRX_Sel_t _txrx_sel, uint *_cnt) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_GET_OK_CNT;
in_data.gdm_sel = _gdm_sel;
in_data.txrx_sel = _txrx_sel;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
*_cnt = in_data.cnt;
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_GET_RX_CRC_ERR_CNT(uint *_cnt) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_GET_RX_CRC_ERR_CNT;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
*_cnt = in_data.cnt;
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_GET_RX_DROP_FIFO_CNT(FE_Gdma_Sel_t _gdm_sel, uint *_cnt) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_GET_RX_DROP_FIFO_CNT;
in_data.gdm_sel = _gdm_sel;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
*_cnt = in_data.cnt;
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_GET_RX_DROP_ERR_CNT(FE_Gdma_Sel_t _gdm_sel, uint *_cnt) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_GET_RX_DROP_ERR_CNT;
in_data.gdm_sel = _gdm_sel;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
*_cnt = in_data.cnt;
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_GET_OK_BYTE_CNT(FE_Gdma_Sel_t _gdm_sel, FE_TXRX_Sel_t _txrx_sel, uint *_cnt) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_GET_OK_BYTE_CNT;
in_data.gdm_sel = _gdm_sel;
in_data.txrx_sel = _txrx_sel;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
*_cnt = in_data.cnt;
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_GET_TX_GET_CNT(FE_Gdma_Sel_t _gdm_sel, uint *_cnt) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_GET_TX_GET_CNT;
in_data.gdm_sel = _gdm_sel;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
*_cnt = in_data.cnt;
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_GET_TX_DROP_CNT(FE_Gdma_Sel_t _gdm_sel, uint *_cnt) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_GET_TX_DROP_CNT;
in_data.gdm_sel = _gdm_sel;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
*_cnt = in_data.cnt;
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_GET_TIEM_STAMP(uint *_cnt) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_GET_TIEM_STAMP;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
*_cnt = in_data.cnt;
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_TIME_STAMP(uint ts) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_TIEM_STAMP;
in_data.timeStamp = ts & 0xffff;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_INS_VLAN_TPID(FE_Gdma_Sel_t _gdm_sel, uint _tpid) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_INS_VLAN_TPID;
in_data.gdm_sel = _gdm_sel;
in_data.reg_val = _tpid;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_VLAN_ENABLE(FE_Gdma_Sel_t _gdm_sel, FE_Enable_t _enable) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_VLAN_ENABLE;
in_data.gdm_sel = _gdm_sel;
in_data.api_data.enable = _enable;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_BLACK_LIST(FE_Enable_t _enable) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_BLACK_LIST;
in_data.api_data.enable = _enable;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_ETHER_TYEP(uint _index, FE_Enable_t _enable, FE_PPPOE_t _is_pppoe, uint _value) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_ETHER_TYEP;
in_data.index = _index;
in_data.api_data.eth_cfg.enable = _enable;
in_data.api_data.eth_cfg.is_pppoe = _is_pppoe;
in_data.api_data.eth_cfg.value = _value;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_L2U_KEY(uint _index, FE_L2U_KEY_t _key_sel, uint _key0, uint _key1) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_L2U_KEY;
in_data.index = _index;
in_data.api_data.l2u_key.key_sel = _key_sel;
in_data.api_data.l2u_key.key0 = _key0;
in_data.api_data.l2u_key.key1 = _key1;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_GET_AC_GROUP_PKT_CNT(uint _index, uint *_cnt) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_GET_AC_GROUP_PKT_CNT;
in_data.index = _index;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
*_cnt = in_data.cnt;
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_GET_AC_GROUP_BYTE_CNT(uint _index, uint *_cnt, uint *_cnt_hi) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_GET_AC_GROUP_BYTE_CNT;
in_data.index = _index;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
*_cnt = in_data.cnt;
*_cnt_hi = in_data.cnt_hi;
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_METER_GROUP(uint _index, uint _value) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_METER_GROUP;
in_data.index = _index;
in_data.reg_val = _value;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_GDM_PCP_CODING(FE_Gdma_Sel_t _gdm_sel, FE_TXRX_Sel_t _txrx_sel, FE_PcpMode_t mode) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_GDM_PCP_CODING;
in_data.gdm_sel = _gdm_sel;
in_data.txrx_sel = _txrx_sel;
in_data.api_data.coding_mode = mode;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_CDM_PCP_CODING(FE_Gdma_Sel_t _gdm_sel, FE_TXRX_Sel_t _txrx_sel, FE_PcpMode_t mode) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_CDM_PCP_CODING;
in_data.gdm_sel = _gdm_sel;
in_data.txrx_sel = _txrx_sel;
in_data.api_data.coding_mode = mode;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_VIP_ENABLE(uint _index, FE_Enable_t _enable, FE_Patn_type _patten_type, uint _patten) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_VIP_ENABLE;
in_data.index = _index;
in_data.api_data.vip_cfg.enable = _enable;
in_data.api_data.vip_cfg.patten_type = _patten_type;
in_data.api_data.vip_cfg.patten = _patten;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_GET_ETH_RX_CNT(FE_RxCnt_t *_rxCnt) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_GET_ETH_RX_CNT;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
*_rxCnt = in_data.api_data.FE_RxCnt;
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_GET_ETH_TX_CNT(FE_TxCnt_t *_txCnt) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_GET_ETH_TX_CNT;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
*_txCnt = in_data.api_data.FE_TxCnt;
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_GET_ETH_FRAME_CNT(FE_TXRX_Sel_t _txrx_sel, uint *_cnt) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_GET_ETH_FRAME_CNT;
in_data.txrx_sel = _txrx_sel;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
*_cnt = in_data.cnt;
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_GET_ETH_ERR_CNT(FE_Err_type_t _type, uint *_cnt) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_GET_ETH_ERR_CNT;
in_data.err_type = _type;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
*_cnt = in_data.cnt;
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_CLEAR_MIB(FE_Gdma_Sel_t _gdm_sel, FE_TXRX_Sel_t _txrx_sel) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_CLEAR_MIB;
in_data.gdm_sel = _gdm_sel;
in_data.txrx_sel = _txrx_sel;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_CDM_RX_RED_DROP(FE_Cdma_Sel_t _cdm_sel, FE_RedDropQ_Sel_t _dropQ_sel, FE_RedDropMode_Sel_t _dropMode_sel) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_CDM_RX_RED_DROP;
in_data.cdm_sel = _cdm_sel;
in_data.dropQ_sel = _dropQ_sel;
in_data.dropMode_sel = _dropMode_sel;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_GET_CDM_RX_RED_DROP(FE_Cdma_Sel_t _cdm_sel, FE_RedDropQ_Sel_t _dropQ_sel, FE_RedDropMode_Sel_t *_dropMode_sel_p) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_GET_CDM_RX_RED_DROP;
in_data.cdm_sel = _cdm_sel;
in_data.dropQ_sel = _dropQ_sel;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
*_dropMode_sel_p = in_data.dropMode_sel;
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_CHANNEL_RETIRE_ALL(FE_Gdma_Sel_t gdm_sel, unchar channel) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_CHANNEL_RETIRE_ALL;
in_data.gdm_sel = gdm_sel;
in_data.channel = channel;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_CHANNEL_RETIRE_ONE(FE_Gdma_Sel_t gdm_sel, unchar channel) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_CHANNEL_RETIRE_ONE;
in_data.gdm_sel = gdm_sel;
in_data.channel = channel;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_TX_RATE(uint rate,uint mode,uint maxBkSize,uint tick)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_TX_RATE;
in_data.api_data.rate_cfg.rate = rate;
in_data.api_data.rate_cfg.mode = mode;
in_data.api_data.rate_cfg.maxBkSize = maxBkSize;
in_data.api_data.rate_cfg.tick = tick;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_RXUC_RATE(uint rate,uint mode)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_RXUC_RATE;
in_data.api_data.rate_cfg.rate = rate;
in_data.api_data.rate_cfg.mode = mode;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_RXBC_RATE(uint rate,uint mode)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_RXBC_RATE;
in_data.api_data.rate_cfg.rate = rate;
in_data.api_data.rate_cfg.mode = mode;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_RXMC_RATE(uint rate,uint mode)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_RXMC_RATE;
in_data.api_data.rate_cfg.rate = rate;
in_data.api_data.rate_cfg.mode = mode;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_RXOC_RATE(uint rate,uint mode)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_RXOC_RATE;
in_data.api_data.rate_cfg.rate = rate;
in_data.api_data.rate_cfg.mode = mode;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_ADD_VIP_ETHER(ushort type)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_ADD_VIP_ETHER;
in_data.api_data.vip.type = type;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_ADD_VIP_PPP(ushort type)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_ADD_VIP_PPP;
in_data.api_data.vip.type = type;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_ADD_VIP_IP(ushort type)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_ADD_VIP_IP;
in_data.api_data.vip.type = type;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_ADD_VIP_TCP(ushort src,ushort dst)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_ADD_VIP_TCP;
in_data.api_data.vip.src = src;
in_data.api_data.vip.dst = dst;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_ADD_VIP_UDP(ushort src,ushort dst)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_ADD_VIP_UDP;
in_data.api_data.vip.src = src;
in_data.api_data.vip.dst = dst;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_DEL_VIP_ETHER(ushort type)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_DEL_VIP_ETHER;
in_data.api_data.vip.type = type;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_DEL_VIP_PPP(ushort type)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_DEL_VIP_PPP;
in_data.api_data.vip.type = type;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_DEL_VIP_IP(ushort type)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_DEL_VIP_IP;
in_data.api_data.vip.type = type;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_DEL_VIP_TCP(ushort src,ushort dst)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_DEL_VIP_TCP;
in_data.api_data.vip.src = src;
in_data.api_data.vip.dst = dst;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_DEL_VIP_UDP(ushort src,ushort dst)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_DEL_VIP_UDP;
in_data.api_data.vip.src = src;
in_data.api_data.vip.dst = dst;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_ADD_L2LU_VLAN_DSCP(ushort svlan,ushort cvlan,unchar dscp,unchar mask)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_ADD_L2LU_VLAN_DSCP;
in_data.api_data.l2lu.svlan = svlan;
in_data.api_data.l2lu.cvlan = cvlan;
in_data.api_data.l2lu.dscp = dscp;
in_data.api_data.l2lu.mask = mask;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_ADD_L2LU_VLAN_TRFC(ushort svlan,ushort cvlan,unchar trfc,unchar mask)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_ADD_L2LU_VLAN_TRFC;
in_data.api_data.l2lu.svlan = svlan;
in_data.api_data.l2lu.cvlan = cvlan;
in_data.api_data.l2lu.dscp = trfc;
in_data.api_data.l2lu.mask = mask;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_DEL_L2LU_VLAN_DSCP(ushort svlan,ushort cvlan,unchar dscp,unchar mask)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_DEL_L2LU_VLAN_DSCP;
in_data.api_data.l2lu.svlan = svlan;
in_data.api_data.l2lu.cvlan = cvlan;
in_data.api_data.l2lu.dscp = dscp;
in_data.api_data.l2lu.mask = mask;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_DEL_L2LU_VLAN_TRFC(ushort svlan,ushort cvlan,unchar trfc,unchar mask)
{
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_DEL_L2LU_VLAN_TRFC;
in_data.api_data.l2lu.svlan = svlan;
in_data.api_data.l2lu.cvlan = cvlan;
in_data.api_data.l2lu.dscp = trfc;
in_data.api_data.l2lu.mask = mask;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
static inline __attribute__((always_inline)) int FE_API_SET_TX_FAVOR_OAM_ENABLE(FE_Enable_t _enable) {
struct ecnt_fe_data in_data;
int ret = 0;
in_data.function_id = FE_SET_TX_FAVOR_OAM_ENABLE;
in_data.api_data.enable = _enable;
ret = __ECNT_HOOK(ECNT_FE, ECNT_FE_API, (struct ecnt_data *)&in_data);
if(ret != ECNT_HOOK_ERROR)
return in_data.retValue;
else
return ECNT_HOOK_ERROR;
}
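/*
 * Usage sketch, added for illustration only (fe_example_configure_port is
 * hypothetical, not part of the original header). Every FE_API_* helper
 * above follows the same pattern: fill a stack struct ecnt_fe_data, select
 * the operation with function_id, dispatch through
 * __ECNT_HOOK(ECNT_FE, ECNT_FE_API, ...), and return either the hook's
 * retValue or ECNT_HOOK_ERROR. The FE_Enable_t value is taken from the
 * caller because its enumerators are defined outside this excerpt.
 */
static inline __attribute__((always_inline)) int fe_example_configure_port(FE_Gdma_Sel_t _gdm_sel, uint _max_len, uint _min_len, FE_Enable_t _crc_strip) {
int ret = 0;
/* program the long/short packet-length limits for this GDMA port */
ret = FE_API_SET_PACKET_LENGTH(_gdm_sel, _max_len, _min_len);
if(ret == ECNT_HOOK_ERROR)
return ret;
/* then apply the caller-supplied CRC-strip setting on the same port */
return FE_API_SET_CRC_STRIP(_gdm_sel, _crc_strip);
}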
# 80 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_hwtest.h" 1
# 36 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/epon_hwtest.h"
int eponMacRegTest(__u32 times);
int eponRegPatternTest(__u32 pattern, eponMacHwtestReg_t *regTable_p, __u32 maxIndex);
int eponRegDefCheck(eponMacHwtestReg_t *regTable_p, __u32 maxIndex);
# 86 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_global.h" 1
# 1 "include/linux/cdev.h" 1
struct file_operations;
struct inode;
struct module;
struct cdev {
struct kobject kobj;
struct module *owner;
const struct file_operations *ops;
struct list_head list;
dev_t dev;
unsigned int count;
};
void cdev_init(struct cdev *, const struct file_operations *);
struct cdev *cdev_alloc(void);
void cdev_put(struct cdev *p);
int cdev_add(struct cdev *, dev_t, unsigned);
void cdev_del(struct cdev *);
int cdev_index(struct inode *inode);
void cd_forget(struct inode *);
extern struct backing_dev_info directly_mappable_cdev_bdi;
# 8 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_global.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_mci.h" 1
# 12 "/opt/tclinux_phoenix/modules/private/xpon/inc/xmcs/xmcs_mci.h"
typedef struct {
struct cdev *pPonMciDev ;
wait_queue_head_t xmcsWaitQueue ;
struct XMCS_PonEventStatus_S xmcsEventStatus ;
Event_ctrlFlag_t ctrlFlag;
spinlock_t fdetLock ;
} MCS_GlbPriv_T ;
int pon_mci_init(void) ;
void pon_mci_destroy(void) ;
# 15 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_global.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/xpon_netif.h" 1
# 1 "include/linux/version.h" 1
# 6 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/xpon_netif.h" 2
# 83 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/xpon_netif.h"
typedef enum {
PWAN_IF_OAM = 0,
PWAN_IF_OMCI,
PWAN_IF_DATA,
PWAN_IF_NUMS
} PWAN_IfType_t ;
enum QueInfo{
queue0 = 0,
queue1,
queue2,
queue3,
queue4,
queue5,
queue6,
queue7,
};
# 113 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/xpon_netif.h"
typedef union {
struct {
uint resv1 : 8 ;
uint gem : 12 ;
uint oam : 1 ;
uint channel : 8 ;
uint longf : 1 ;
uint runtf : 1 ;
uint crcer : 1 ;
# 154 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/xpon_netif.h"
uint resv2 : 3 ;
uint ipv6 : 1 ;
uint ipv4 : 1 ;
uint ipv4f : 1 ;
uint tack : 1 ;
uint l4vld : 1 ;
uint l4f : 1 ;
uint sport : 4 ;
uint crsn : 5 ;
uint ppe : 14 ;
# 178 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/xpon_netif.h"
uint resv3 : 13 ;
uint vlan : 1 ;
uint tpid : 2 ;
uint vid : 16 ;
uint timestamp ;
} raw ;
uint word[4] ;
} PWAN_FERxMsg_T ;
typedef union {
struct {
uint resv : 1 ;
uint tsid : 5 ;
uint tse : 1 ;
uint dei : 1 ;
uint gem : 12 ;
uint oam : 1 ;
uint channel : 8 ;
uint queue : 3 ;
# 242 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/xpon_netif.h"
uint ico : 1 ;
uint uco : 1 ;
uint tco : 1 ;
uint tso : 1 ;
uint pmap : 6 ;
uint fport : 3 ;
uint insv : 1 ;
uint tpid : 2 ;
uint vid : 16 ;
# 262 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/xpon_netif.h"
} raw ;
uint word[2] ;
} PWAN_FETxMsg_T ;
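/*
 * Illustration (pwan_example_init_tx_msg is hypothetical, not part of the
 * original header): PWAN_FERxMsg_T and PWAN_FETxMsg_T alias the per-packet
 * RX/TX message words through the .raw bitfields and the .word[] arrays,
 * so a sender normally clears word[] first and then writes the individual
 * fields; the field values below are placeholders supplied by the caller.
 */
static inline __attribute__((always_inline)) void pwan_example_init_tx_msg(PWAN_FETxMsg_T *pTxMsg, uint channel, uint queue) {
/* clear both 32-bit message words before touching any bitfield */
pTxMsg->word[0] = 0;
pTxMsg->word[1] = 0;
pTxMsg->raw.channel = channel;
pTxMsg->raw.queue = queue;
}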
typedef struct {
unchar netIdx ;
unchar drvLb ;
struct napi_struct napi ;
struct net_device_stats stats ;
} PWAN_NetPriv_T ;
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/gpon_wan.h" 1
# 1 "include/ecnt_hook/ecnt_hook.h" 1
# 5 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/gpon_wan.h" 2
# 33 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/gpon_wan.h"
typedef enum {
ECNT_PORT_DEVICE_LAN0 = 0,
ECNT_PORT_DEVICE_LAN1,
ECNT_PORT_DEVICE_LAN2,
ECNT_PORT_DEVICE_LAN3,
ECNT_PORT_DEVICE_RA0 = 10,
ECNT_PORT_DEVICE_PON0 = 20,
ECNT_PORT_DEVICE_USB = 30,
} ENUM_Port_Dev_Def_t ;
typedef struct {
struct sk_buff* skb;
ENUM_Port_Dev_Def_t inPort;
ENUM_Port_Dev_Def_t outPort;
} FHNet_Mapping_Vlan_Para_In_t ;
typedef struct {
unsigned short gemportId;
unsigned short allocId;
unsigned char queueId;
unsigned char dropFlag;
} FHNet_Mapping_Result_Out_t;
typedef struct {
struct sk_buff * skb;
char interfaceName[16];
}FHNet_L3Pkt_Mapping_Vlan_Para_In_t;
# 76 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/gpon_wan.h"
typedef enum {
ENUM_CFG_NETIDX = 1 ,
ENUM_CFG_CHANNEL ,
ENUM_CFG_ENCRYPTION ,
ENUM_CFG_LOOPBACK
} ENUM_GWanGemCfgType_t ;
typedef struct {
uint portId : 12 ;
uint channel : 6 ;
uint ani : 9 ;
uint rxLb : 1 ;
uint rxEncrypt : 1 ;
uint valid : 1 ;
uint resv : 2 ;
# 103 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/gpon_wan.h"
} GWAN_GemInfo_T ;
typedef struct {
ushort allocId[32] ;
ushort gemIdToIndex[(4096)] ;
struct {
GWAN_GemInfo_T info ;
struct net_device_stats stats ;
} gemPort[256] ;
uint gemNumbers ;
struct timer_list gemMibTimer ;
} GWAN_Priv_T ;
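/*
 * Layout note (inferred from the field names; the original header has no
 * comments here): GWAN_Priv_T appears to keep up to 32 T-CONT alloc IDs in
 * allocId[], map a 12-bit GEM port ID onto one of the 256 gemPort[] slots
 * through gemIdToIndex[4096], and refresh the per-GEM counters from
 * gemMibTimer (see gwan_update_gem_mib_table() declared below).
 */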
typedef enum{
GPON_TRAFFIC_UP ,
GPON_TRAFFIC_DOWN ,
}GPON_TRAFFIC_STATUS_t;
int gwan_hwnat_hook_tx(struct sk_buff *skb, struct port_info *xpon_info);
GPON_TRAFFIC_STATUS_t gwan_get_traffic_status(void);
int gwan_prepare_tx_message(PWAN_FETxMsg_T *pTxMsg, unchar netIdx, struct sk_buff *skb, int txq, struct port_info *xpon_info) ;
int gwan_process_rx_message(PWAN_FERxMsg_T *pRxMsg, struct sk_buff *skb, uint pktLens, unchar *pFlag) ;
int gwan_init(GWAN_Priv_T *pGWanPriv) ;
void gwan_update_gem_mib_table(unsigned long arg) ;
int gwan_create_new_tcont(ushort allocId) ;
int gwan_remove_tcont(ushort allocId) ;
int gwan_remove_all_tcont(void) ;
int gwan_create_new_gemport(ushort gemPortId, unchar channel, unchar encryption) ;
int gwan_config_gemport(ushort gemPortId, ENUM_GWanGemCfgType_t cfgType, uint value) ;
int gwan_remove_gemport(ushort gemPortId) ;
int gwan_remove_all_gemport(void) ;
int gwan_is_gemport_valid(ushort gemPortId) ;
int gwanCheckAllocIdExist(ushort allocId);
void fhnet_set_vlan_mapping_hook(void);
extern struct ecnt_hook_ops ecnt_FhNetDsVlanAction_op;
# 279 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/xpon_netif.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/epon_wan.h" 1
# 10 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/epon_wan.h"
typedef struct {
uint llid : 16 ;
uint channel : 4 ;
uint rxDrop : 1 ;
uint rxLb : 1 ;
uint txDrop : 1 ;
uint valid : 1 ;
uint resv : 8 ;
} EWAN_LlidInfo_T ;
typedef struct {
struct {
EWAN_LlidInfo_T info ;
struct net_device_stats stats ;
} llid[8] ;
} EWAN_Priv_T ;
int ewan_hwnat_hook_tx(struct sk_buff *skb, struct port_info *xpon_info);
int ewan_prepare_tx_message(PWAN_FETxMsg_T *pTxMsg, unchar netIdx, struct sk_buff *skb, int txq, struct port_info *xpon_info) ;
int ewan_process_rx_message(PWAN_FERxMsg_T *pRxMsg, struct sk_buff *skb, uint pktLens, unchar *pFlag) ;
int ewan_init(EWAN_Priv_T *pEWanPriv) ;
# 280 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/xpon_netif.h" 2
typedef struct {
struct {
unchar isRandomLb : 1 ;
unchar isQosUp : 1 ;
unchar isTxDropOmcc: 1 ;
unchar resv : 5 ;
} flags ;
} PWAN_Config_T ;
typedef struct {
struct net_device *pPonNetDev[PWAN_IF_NUMS] ;
uint dropUnknownPackets ;
uint dropForHookBuf;
uint activeChannelNum;
unchar greenMaxthreshold;
GWAN_Priv_T gpon ;
EWAN_Priv_T epon ;
struct timer_list pwan_timer;
struct timer_list txDropTimer;
struct XMCS_VLAN_CFG_INFO_S fhVlan;
PWAN_Config_T devCfg ;
spinlock_t rxLock ;
} PWAN_GlbPriv_T ;
void __dump_skb(struct sk_buff *skb, uint pktLen) ;
void pwan_destroy(void) ;
int pwan_init(void) ;
int pwan_net_start_xmit(struct sk_buff *skb, struct net_device *dev);
int get_pon_link_type(void);
void enable_cpu_us_traffic(void);
void disable_cpu_us_traffic(void);
int pwan_prepare_rx_buffer(PWAN_FERxMsg_T *pRxBmMsg, struct sk_buff *skb) ;
# 344 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/xpon_netif.h"
int pwan_cb_rx_packet(void *pMsg, uint msgLen, struct sk_buff *skb, uint pktLen) ;
int pwan_cb_event_handler(QDMA_EventType_t event) ;
int pwan_create_net_interface(uint idx) ;
int pwan_delete_net_interface(uint idx) ;
# 361 "/opt/tclinux_phoenix/modules/private/xpon/inc/pwan/xpon_netif.h"
int getVlanType(unsigned short TPID);
int fb_pwan_tx_vlan_proc(struct sk_buff *skb);
int fb_pwan_rx_vlan_proc(struct sk_buff *skb);
# 16 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_global.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon.h" 1
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/xpondrv.h" 1
# 6 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_ploam.h" 1
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_ploam_raw.h" 1
# 43 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_ploam_raw.h"
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar resv[10] ;
} raw ;
uint value[3] ;
} PLOAM_RAW_General_T ;
typedef PLOAM_RAW_General_T PLOAM_RAW_Deactivate_OnuID_T,
PLOAM_RAW_Request_Password_T,
PLOAM_RAW_No_Message_T,
PLOAM_RAW_Popup_T,
PLOAM_RAW_Request_Key_T,
PLOAM_RAW_PEE_T,
PLOAM_RAW_Swift_Popup_T,
PLOAM_RAW_Dying_Gasp_T ;
# 69 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_ploam_raw.h"
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar gbits ;
unchar t1_pbits ;
unchar t2_pbits ;
unchar t3_pbits ;
unchar delimiter[3] ;
unchar resv1 :2 ;
unchar delay_mode :1 ;
unchar sn_mask :1 ;
unchar sn_tran_num :2 ;
unchar tx_power :2 ;
unchar delay_time[2] ;
} raw ;
uint value[3] ;
} PLOAM_RAW_Upstream_Overhead_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar onu_id ;
unchar sn[8] ;
unchar resv ;
} raw ;
uint value[3] ;
} PLOAM_RAW_Assign_OnuID_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar resv1 :7 ;
unchar eqd_type :1 ;
unchar delay[4] ;
unchar resv2[5] ;
} raw ;
uint value[3] ;
} PLOAM_RAW_Ranging_Time_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar mode ;
unchar sn[8] ;
unchar resv ;
} raw ;
uint value[3] ;
} PLOAM_RAW_Disable_SN_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar resv1 :6 ;
unchar encrypt :2 ;
unchar port_id_m ;
unchar port_id_l :4 ;
unchar resv2 :4 ;
unchar resv3[7] ;
} raw ;
uint value[3] ;
} PLOAM_RAW_Encrypted_PortID_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar alloc_id_m ;
unchar alloc_id_l :4 ;
unchar resv1 :4 ;
unchar type ;
unchar resv2[7] ;
} raw ;
uint value[3] ;
} PLOAM_RAW_Assign_AllocID_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar resv1 :7 ;
unchar activate :1 ;
unchar port_id_m ;
unchar port_id_l :4 ;
unchar resv2 :4 ;
unchar resv3[7] ;
} raw ;
uint value[3] ;
} PLOAM_RAW_Configure_PortID_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar power_level ;
unchar resv[9] ;
} raw ;
uint value[3] ;
} PLOAM_RAW_CPL_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar line_num ;
unchar k1_ctrl ;
unchar k2_ctrl ;
unchar resv[7] ;
} raw ;
uint value[3] ;
} PLOAM_RAW_PST_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar interval[4] ;
unchar resv[6] ;
} raw ;
uint value[3] ;
} PLOAM_RAW_BER_Interval_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar counter[4] ;
unchar resv[6] ;
} raw ;
uint value[3] ;
} PLOAM_RAW_Key_Switching_Time_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar o3_t3_preamble ;
unchar o5_t3_preamble ;
unchar resv[8] ;
} raw ;
uint value[3] ;
} PLOAM_RAW_Extended_Burst_Length_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar a_bit :1 ;
unchar b_class :3 ;
unchar resv :4 ;
unchar pon_id[7] ;
unchar tol ;
} raw ;
uint value[3] ;
} PLOAM_RAW_PonID_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar resv1 :6 ;
unchar s_bit :1 ;
unchar resv2 :1 ;
unchar eqd_value[4] ;
unchar resv3[5] ;
} raw ;
uint value[3] ;
} PLOAM_RAW_Ranging_Adjustment_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar resv1 :7 ;
unchar sleep_allowed :1 ;
unchar resv2[9] ;
} raw ;
uint value[3] ;
} PLOAM_RAW_Sleep_Allow_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar vendor_id[4] ;
unchar vendor_sn[4] ;
unchar rd_m ;
unchar rd_l :4 ;
unchar a_bit :1 ;
unchar g_bit :1 ;
unchar tx_pl :2 ;
} raw ;
uint value[3] ;
} PLOAM_RAW_Serial_Number_ONU_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar passwd[10] ;
} raw ;
uint value[3] ;
} PLOAM_RAW_Password_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar key_idx ;
unchar frag_idx ;
unchar key[8] ;
} raw ;
uint value[3] ;
} PLOAM_RAW_Encryption_Key_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar counter[4] ;
unchar resv1 :4 ;
unchar seq_num :4 ;
unchar resv2[5] ;
} raw ;
uint value[3] ;
} PLOAM_RAW_REI_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar dm_id ;
unchar dm_byte[9] ;
} raw ;
uint value[3] ;
} PLOAM_RAW_Acknowledge_T ;
typedef union {
struct {
unchar dest_id ;
unchar msg_id ;
unchar resv1 :6 ;
unchar sleep_mode :2 ;
unchar resv2[9] ;
} raw ;
uint value[3] ;
} PLOAM_RAW_Sleep_Request_T ;
# 5 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_ploam.h" 2
# 17 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_ploam.h"
typedef int (*ploam_recv_handler_t)(PLOAM_RAW_General_T *pPloamMsg) ;
int ploam_init(void) ;
int ploam_parser_down_message(PLOAM_RAW_General_T *pGenPloamMsg) ;
void ploam_eqd_adjustment(uint newEqd) ;
int ploam_send_dying_gasp(void) ;
int ploam_send_pee_msg(void) ;
int ploam_send_pst_msg(unchar line_num, unchar k1_ctl, unchar k2_ctl) ;
int ploam_send_rei_msg(uint err_count, unchar *seq_num_p) ;
int ploam_send_sleep_request_msg(GPON_PLOAMu_SLEEP_MODE_t sleepMode);
# 8 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_act.h" 1
void gpon_act_deactive_onu(void) ;
void gpon_act_to1_timer_expires(unsigned long arg) ;
void gpon_act_to2_timer_expires(unsigned long arg) ;
void gpon_act_change_gpon_state(const ENUM_GponState_t state) ;
uint gpon_act_get_gpon_state(void) ;
void gpon_act_led_config(void);
int gpon_act_init(void) ;
int gpon_act_deinit(void);
# 9 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_dev.h" 1
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_reg.h" 1
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header.h" 1
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h" 1
# 20 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
typedef union
{
struct
{
FIELD rsv_16 : 16;
FIELD onu_id_vld : 1;
FIELD rsv_8 : 7;
FIELD onu_id : 8;
} Bits;
UINT32 Raw;
} REG_G_ONU_ID, *PREG_G_ONU_ID;
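/*
 * Access-pattern sketch (gpon_example_set_onu_id is hypothetical, not part
 * of the original header): REG_G_ONU_ID above and the REG_G_* / REG_DBG_*
 * unions that follow each overlay one 32-bit GPON MAC register, so the
 * usual idiom is a read-modify-write through .Raw and .Bits. The register
 * pointer is passed in because the register base and offsets are defined
 * elsewhere in this translation unit.
 */
static inline __attribute__((always_inline)) void gpon_example_set_onu_id(volatile UINT32 *reg, UINT32 onu_id) {
REG_G_ONU_ID onuId;
onuId.Raw = *reg;
onuId.Bits.onu_id = onu_id;
onuId.Bits.onu_id_vld = 1;
*reg = onuId.Raw;
}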
typedef union
{
struct
{
FIELD rsv_17 : 15;
FIELD us_fec_en : 1;
FIELD rsv_8 : 8;
FIELD sr_blk_size : 8;
} Bits;
UINT32 Raw;
} REG_G_GBL_CFG, *PREG_G_GBL_CFG;
typedef union
{
struct
{
FIELD bwm_us_fec_err_int : 1;
FIELD bwm_stop_time_err_int : 1;
FIELD lwi_int : 1;
FIELD fwi_int : 1;
FIELD pop_up_recv_in_O6_int : 1;
FIELD onu_us_fec_chg_int : 1;
FIELD olt_ds_fec_chg_int : 1;
FIELD o5_eqd_adj_done_int : 1;
FIELD sfifo_full_int : 1;
FIELD bfifo_full_int : 1;
FIELD rx_gem_intlv_err_int : 1;
FIELD rx_eof_err_int : 1;
FIELD tx_late_start_int : 1;
FIELD bst_sgl_diff_int : 1;
FIELD fifo_err_int : 1;
FIELD rx_err_int : 1;
FIELD rsv_12 : 4;
FIELD dying_gasp_send_int : 1;
FIELD tod_1pps_int : 1;
FIELD tod_update_done_int : 1;
FIELD aes_key_switch_done_int : 1;
FIELD los_gem_del_int : 1;
FIELD sn_req_crs_int : 1;
FIELD sn_onu_send_o4_int : 1;
FIELD ranging_req_recv_int : 1;
FIELD sn_onu_send_o3_int : 1;
FIELD sn_req_recv_int : 1;
FIELD ploamu_send_int : 1;
FIELD ploamd_recv_int : 1;
# 123 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_G_INT_STATUS, *PREG_G_INT_STATUS;
typedef union
{
struct
{
FIELD bwm_us_fec_err_int_en : 1;
FIELD bwm_stop_time_err_int_en : 1;
FIELD lwi_int_en : 1;
FIELD fwi_int_en : 1;
FIELD pop_up_recv_in_O6_int_en : 1;
FIELD onu_us_fec_chg_int_en : 1;
FIELD olt_ds_fec_chg_int_en : 1;
FIELD o5_eqd_adj_done_int_en : 1;
FIELD sfifo_full_int_en : 1;
FIELD bfifo_full_int_en : 1;
FIELD rx_gem_intlv_err_int_en : 1;
FIELD rx_eof_err_int_en : 1;
FIELD tx_late_start_int_en : 1;
FIELD bst_sgl_diff_int_en : 1;
FIELD fifo_err_int_en : 1;
FIELD rx_err_int_en : 1;
FIELD rsv_12 : 4;
FIELD dying_gasp_send_int_en : 1;
FIELD tod_1pps_int_en : 1;
FIELD tod_update_done_int_en : 1;
FIELD aes_key_switch_done_int_en : 1;
FIELD los_gem_del_int_en : 1;
FIELD sn_req_crs_int_en : 1;
FIELD sn_onu_send_o4_int_en : 1;
FIELD ranging_req_recv_int_en : 1;
FIELD sn_onu_send_o3_int_en : 1;
FIELD sn_req_recv_int_en : 1;
FIELD ploamu_send_int_en : 1;
FIELD ploamd_recv_int_en : 1;
# 192 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_G_INT_ENABLE, *PREG_G_INT_ENABLE;
typedef union
{
struct
{
FIELD t_cont1_vld : 1;
FIELD rsv_28 : 3;
FIELD t_cont1_id : 12;
FIELD t_cont0_vld : 1;
FIELD rsv_12 : 3;
FIELD t_cont0_id : 12;
# 215 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_G_TCONT_ID_0_1, *PREG_G_TCONT_ID_0_1;
typedef union
{
struct
{
FIELD t_cont3_vld : 1;
FIELD rsv_28 : 3;
FIELD t_cont3_id : 12;
FIELD t_cont2_vld : 1;
FIELD rsv_12 : 3;
FIELD t_cont2_id : 12;
# 238 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_G_TCONT_ID_2_3, *PREG_G_TCONT_ID_2_3;
typedef union
{
struct
{
FIELD t_cont5_vld : 1;
FIELD rsv_28 : 3;
FIELD t_cont5_id : 12;
FIELD t_cont4_vld : 1;
FIELD rsv_12 : 3;
FIELD t_cont4_id : 12;
# 261 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_G_TCONT_ID_4_5, *PREG_G_TCONT_ID_4_5;
typedef union
{
struct
{
FIELD t_cont7_vld : 1;
FIELD rsv_28 : 3;
FIELD t_cont7_id : 12;
FIELD t_cont6_vld : 1;
FIELD rsv_12 : 3;
FIELD t_cont6_id : 12;
# 284 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_G_TCONT_ID_6_7, *PREG_G_TCONT_ID_6_7;
typedef union
{
struct
{
FIELD t_cont9_vld : 1;
FIELD rsv_28 : 3;
FIELD t_cont9_id : 12;
FIELD t_cont8_vld : 1;
FIELD rsv_12 : 3;
FIELD t_cont8_id : 12;
# 307 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_G_TCONT_ID_8_9, *PREG_G_TCONT_ID_8_9;
typedef union
{
struct
{
FIELD t_cont11_vld : 1;
FIELD rsv_28 : 3;
FIELD t_cont11_id : 12;
FIELD t_cont10_vld : 1;
FIELD rsv_12 : 3;
FIELD t_cont10_id : 12;
# 330 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_G_TCONT_ID_10_11, *PREG_G_TCONT_ID_10_11;
typedef union
{
struct
{
FIELD t_cont13_vld : 1;
FIELD rsv_28 : 3;
FIELD t_cont13_id : 12;
FIELD t_cont12_vld : 1;
FIELD rsv_12 : 3;
FIELD t_cont12_id : 12;
# 353 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_G_TCONT_ID_12_13, *PREG_G_TCONT_ID_12_13;
typedef union
{
struct
{
FIELD t_cont15_vld : 1;
FIELD rsv_28 : 3;
FIELD t_cont15_id : 12;
FIELD t_cont14_vld : 1;
FIELD rsv_12 : 3;
FIELD t_cont14_id : 12;
# 376 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_G_TCONT_ID_14_15, *PREG_G_TCONT_ID_14_15;
typedef union
{
struct
{
FIELD gpid_cmd : 1;
FIELD rsv_18 : 13;
FIELD gpid_encrypt : 1;
FIELD gpid_vld : 1;
FIELD rsv_12 : 4;
FIELD gem_port_id : 12;
# 399 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_G_GEM_PORT_CFG, *PREG_G_GEM_PORT_CFG;
typedef union
{
struct
{
FIELD gpid_cmd_done : 1;
FIELD rsv_2 : 29;
FIELD gpid_encrypt_sts : 1;
FIELD gpid_vld_sts : 1;
} Bits;
UINT32 Raw;
} REG_G_GEM_PORT_STS, *PREG_G_GEM_PORT_STS;
typedef union
{
struct
{
FIELD rsv_17 : 15;
FIELD omci_port_id_vld : 1;
FIELD rsv_12 : 4;
FIELD omci_gpid : 12;
} Bits;
UINT32 Raw;
} REG_G_OMCI_ID, *PREG_G_OMCI_ID;
typedef union
{
struct
{
FIELD rsv_9 : 23;
FIELD gem_tbl_init_done : 1;
FIELD rsv_1 : 7;
FIELD gem_tbl_init_start : 1;
} Bits;
UINT32 Raw;
} REG_G_GEM_TBL_INIT, *PREG_G_GEM_TBL_INIT;
typedef union
{
struct
{
FIELD ploamu_fifo_udrn : 1;
FIELD rsv_24 : 7;
FIELD ploamu_fifo_min_avail : 8;
FIELD rsv_8 : 8;
FIELD ploamu_fifo_avail : 8;
} Bits;
UINT32 Raw;
} REG_G_PLOAMu_FIFO_STS, *PREG_G_PLOAMu_FIFO_STS;
typedef union
{
struct
{
FIELD ploamu_wdata : 32;
} Bits;
UINT32 Raw;
} REG_G_PLOAMu_WDATA, *PREG_G_PLOAMu_WDATA;
typedef union
{
struct
{
FIELD ploamd_fifo_ovrn : 1;
FIELD rsv_24 : 7;
FIELD ploamd_fifo_max_used : 8;
FIELD rsv_8 : 8;
FIELD ploamd_fifo_used : 8;
} Bits;
UINT32 Raw;
} REG_G_PLOAMd_FIFO_STS, *PREG_G_PLOAMd_FIFO_STS;
typedef union
{
struct
{
FIELD ploamd_rdata : 32;
} Bits;
UINT32 Raw;
} REG_G_PLOAMd_RDATA, *PREG_G_PLOAMd_RDATA;
typedef union
{
struct
{
FIELD rsv_30 : 2;
FIELD aes_spf_cnt : 30;
} Bits;
UINT32 Raw;
} REG_G_AES_CFG, *PREG_G_AES_CFG;
typedef union
{
struct
{
FIELD aes_active_key0 : 32;
} Bits;
UINT32 Raw;
} REG_G_AES_ACTIVE_KEY0, *PREG_G_AES_ACTIVE_KEY0;
typedef union
{
struct
{
FIELD aes_active_key1 : 32;
} Bits;
UINT32 Raw;
} REG_G_AES_ACTIVE_KEY1, *PREG_G_AES_ACTIVE_KEY1;
typedef union
{
struct
{
FIELD aes_active_key2 : 32;
} Bits;
UINT32 Raw;
} REG_G_AES_ACTIVE_KEY2, *PREG_G_AES_ACTIVE_KEY2;
typedef union
{
struct
{
FIELD aes_active_key3 : 32;
} Bits;
UINT32 Raw;
} REG_G_AES_ACTIVE_KEY3, *PREG_G_AES_ACTIVE_KEY3;
typedef union
{
struct
{
FIELD aes_shadow_key0 : 32;
} Bits;
UINT32 Raw;
} REG_G_AES_SHADOW_KEY0, *PREG_G_AES_SHADOW_KEY0;
typedef union
{
struct
{
FIELD aes_shadow_key1 : 32;
} Bits;
UINT32 Raw;
} REG_G_AES_SHADOW_KEY1, *PREG_G_AES_SHADOW_KEY1;
typedef union
{
struct
{
FIELD aes_shadow_key2 : 32;
} Bits;
UINT32 Raw;
} REG_G_AES_SHADOW_KEY2, *PREG_G_AES_SHADOW_KEY2;
typedef union
{
struct
{
FIELD aes_shadow_key3 : 32;
} Bits;
UINT32 Raw;
} REG_G_AES_SHADOW_KEY3, *PREG_G_AES_SHADOW_KEY3;
typedef union
{
struct
{
FIELD aes_key_switch_by_sw_done : 1;
FIELD rsv_1 : 30;
FIELD aes_key_switch_by_sw_cmd : 1;
} Bits;
UINT32 Raw;
} REG_G_AES_KEY_SWITCH_BY_SW, *PREG_G_AES_KEY_SWITCH_BY_SW;
typedef union
{
struct
{
FIELD rsv_8 : 24;
FIELD plou_overhead : 8;
} Bits;
UINT32 Raw;
} REG_G_PLOu_OVERHEAD, *PREG_G_PLOu_OVERHEAD;
typedef union
{
struct
{
FIELD rsv_8 : 24;
FIELD guard_bit : 8;
} Bits;
UINT32 Raw;
} REG_G_PLOu_GUARD_BIT, *PREG_G_PLOu_GUARD_BIT;
typedef union
{
struct
{
FIELD rsv_16 : 16;
FIELD prmb2_bit : 8;
FIELD prmb1_bit : 8;
} Bits;
UINT32 Raw;
} REG_G_PLOu_PRMBL_TYPE1_2, *PREG_G_PLOu_PRMBL_TYPE1_2;
typedef union
{
struct
{
FIELD rsv_25 : 7;
FIELD ebl_en : 1;
FIELD rsv_16 : 8;
FIELD ext_prmb3_o5_num : 8;
FIELD ext_prmb3_o3_o4_num : 8;
} Bits;
UINT32 Raw;
} REG_G_PLOu_PRMBL_TYPE3, *PREG_G_PLOu_PRMBL_TYPE3;
typedef union
{
struct
{
FIELD rsv_8 : 24;
FIELD delm_bit : 8;
} Bits;
UINT32 Raw;
} REG_G_PLOu_DELM_BIT, *PREG_G_PLOu_DELM_BIT;
typedef union
{
struct
{
FIELD pre_dly_en : 1;
FIELD rsv_16 : 15;
FIELD pre_dly : 16;
} Bits;
UINT32 Raw;
} REG_G_PRE_ASSIGNED_DLY, *PREG_G_PRE_ASSIGNED_DLY;
typedef union
{
struct
{
FIELD eqd : 32;
} Bits;
UINT32 Raw;
} REG_G_EQD, *PREG_G_EQD;
typedef union
{
struct
{
FIELD rsv_16 : 16;
FIELD tresp : 16;
} Bits;
UINT32 Raw;
} REG_G_RSP_TIME, *PREG_G_RSP_TIME;
typedef union
{
struct
{
FIELD vendor_id : 32;
} Bits;
UINT32 Raw;
} REG_G_VENDOR_ID, *PREG_G_VENDOR_ID;
typedef union
{
struct
{
FIELD vs_sn : 32;
} Bits;
UINT32 Raw;
} REG_G_VS_SN, *PREG_G_VS_SN;
typedef union
{
struct
{
FIELD sn_req_thr : 8;
FIELD rsv_18 : 6;
FIELD tx_power_mode : 2;
FIELD rsv_12 : 4;
FIELD rdm_dly : 12;
} Bits;
UINT32 Raw;
} REG_G_SN_MSG_CFG, *PREG_G_SN_MSG_CFG;
typedef union
{
struct
{
FIELD rsv_3 : 29;
FIELD act_st : 3;
} Bits;
UINT32 Raw;
} REG_G_ACTIVATION_ST, *PREG_G_ACTIVATION_ST;
typedef union
{
struct
{
FIELD rsv_30 : 2;
FIELD tod_spf_cnt : 30;
} Bits;
UINT32 Raw;
} REG_G_TOD_CFG, *PREG_G_TOD_CFG;
typedef union
{
struct
{
FIELD new_tod_sec_l32 : 32;
} Bits;
UINT32 Raw;
} REG_G_NEW_TOD_SEC_L32, *PREG_G_NEW_TOD_SEC_L32;
typedef union
{
struct
{
FIELD new_tod_nano_sec : 32;
} Bits;
UINT32 Raw;
} REG_G_NEW_TOD_NANO_SEC, *PREG_G_NEW_TOD_NANO_SEC;
typedef union
{
struct
{
FIELD cur_tod_sec_l32 : 32;
} Bits;
UINT32 Raw;
} REG_G_CUR_TOD_SEC_L32, *PREG_G_CUR_TOD_SEC_L32;
typedef union
{
struct
{
FIELD cur_tod_nano_sec : 32;
} Bits;
UINT32 Raw;
} REG_G_CUR_TOD_NANO_SEC, *PREG_G_CUR_TOD_NANO_SEC;
typedef union
{
struct
{
FIELD rsv_8 : 24;
FIELD tod_period : 8;
} Bits;
UINT32 Raw;
} REG_G_TOD_CLK_PERIOD, *PREG_G_TOD_CLK_PERIOD;
typedef union
{
struct
{
FIELD rsv_9 : 23;
FIELD tx_fcs_tbl_init_done : 1;
FIELD rsv_1 : 7;
FIELD tx_fcs_tbl_init_start : 1;
} Bits;
UINT32 Raw;
} REG_G_TX_FCS_TBL_INIT, *PREG_G_TX_FCS_TBL_INIT;
typedef union
{
struct
{
FIELD mib_cmd : 1;
FIELD mib_cmd_done : 1;
FIELD rsv_25 : 5;
FIELD mib_read_clr_en : 1;
FIELD rsv_19 : 5;
FIELD mib_type : 3;
FIELD rsv_12 : 4;
FIELD mib_gpid : 12;
# 910 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_G_MIB_CTRL_STS, *PREG_G_MIB_CTRL_STS;
typedef union
{
struct
{
FIELD mib_rdata_l32 : 32;
} Bits;
UINT32 Raw;
} REG_G_MIB_RDATA_L32, *PREG_G_MIB_RDATA_L32;
typedef union
{
struct
{
FIELD mib_rdata_h32 : 32;
} Bits;
UINT32 Raw;
} REG_G_MIB_RDATA_H32, *PREG_G_MIB_RDATA_H32;
typedef union
{
struct
{
FIELD mib_wdata_l32 : 32;
} Bits;
UINT32 Raw;
} REG_G_MIB_WDATA_L32, *PREG_G_MIB_WDATA_L32;
typedef union
{
struct
{
FIELD mib_wdata_h32 : 32;
} Bits;
UINT32 Raw;
} REG_G_MIB_WDATA_H32, *PREG_G_MIB_WDATA_H32;
typedef union
{
struct
{
FIELD rsv_9 : 23;
FIELD mib_tbl_init_done : 1;
FIELD rsv_1 : 7;
FIELD mib_tbl_init_start : 1;
} Bits;
UINT32 Raw;
} REG_G_MIB_TBL_INIT, *PREG_G_MIB_TBL_INIT;
typedef union
{
struct
{
FIELD gpidx_cmd : 1;
FIELD rsv_24 : 7;
FIELD gpidx_tbl_addr : 8;
FIELD rsv_9 : 7;
FIELD gpidx_tbl_wdata : 9;
} Bits;
UINT32 Raw;
} REG_G_GPIDX_TBL_CTRL, *PREG_G_GPIDX_TBL_CTRL;
typedef union
{
struct
{
FIELD gpidx_cmd_done : 1;
FIELD rsv_9 : 22;
FIELD gpidx_tbl_rdata : 9;
} Bits;
UINT32 Raw;
} REG_G_GPIDX_TBL_STS, *PREG_G_GPIDX_TBL_STS;
typedef union
{
struct
{
FIELD rsv_9 : 23;
FIELD gpidx_tbl_init_done : 1;
FIELD rsv_1 : 7;
FIELD gpidx_tbl_init_start : 1;
} Bits;
UINT32 Raw;
} REG_G_GPIDX_TBL_INIT, *PREG_G_GPIDX_TBL_INIT;
typedef union
{
struct
{
FIELD rsv_9 : 23;
FIELD mbi_tx_stop : 1;
FIELD rsv_1 : 7;
FIELD mbi_rx_stop : 1;
} Bits;
UINT32 Raw;
} REG_G_MBI_STOP, *PREG_G_MBI_STOP;
typedef union
{
struct
{
FIELD tcont_cmd : 1;
FIELD rsv_28 : 3;
FIELD wr_tcont_id_vld : 1;
FIELD rsv_20 : 7;
FIELD tcont_id_index : 4;
FIELD rsv_12 : 4;
FIELD wr_tcont_id : 12;
# 1066 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_G_TCONT_ID_16_31_CFG, *PREG_G_TCONT_ID_16_31_CFG;
typedef union
{
struct
{
FIELD tcont_cmd_done : 1;
FIELD rsv_17 : 14;
FIELD rd_tcont_id_vld : 1;
FIELD rsv_12 : 4;
FIELD rd_tcont_id : 12;
} Bits;
UINT32 Raw;
} REG_G_TCONT_ID_16_31_STS, *PREG_G_TCONT_ID_16_31_STS;
typedef union
{
struct
{
FIELD rsv_27 : 5;
FIELD mib_frame_type : 1;
FIELD gpon_mib_en : 1;
FIELD us_no_msg_int_en : 1;
FIELD rsv_17 : 7;
FIELD rpt_msg_flt : 1;
FIELD rsv_12 : 4;
FIELD max_rdm_dly : 12;
# 1114 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_DBG_CAP_SETTING, *PREG_DBG_CAP_SETTING;
typedef union
{
struct
{
FIELD sch_pref_ofs : 16;
FIELD dt_pref_ofs : 16;
} Bits;
UINT32 Raw;
} REG_DBG_US_PREF_OFS, *PREG_DBG_US_PREF_OFS;
typedef union
{
struct
{
FIELD phy_rx_dly_sel : 1;
FIELD rsv_28 : 3;
FIELD fix_phy_rx_dly : 12;
FIELD fine_int_dly : 8;
FIELD phy_tx_dly : 8;
} Bits;
UINT32 Raw;
} REG_DBG_DLY, *PREG_DBG_DLY;
typedef union
{
struct
{
FIELD rsv_16 : 16;
FIELD idle_gem_thld : 16;
} Bits;
UINT32 Raw;
} REG_DBG_IDLE_GEM_THLD, *PREG_DBG_IDLE_GEM_THLD;
typedef union
{
struct
{
FIELD us_no_msg_b3_b4 : 16;
FIELD us_no_msg_type : 8;
FIELD rsv_0 : 8;
} Bits;
UINT32 Raw;
} REG_DBG_US_NO_MSG0, *PREG_DBG_US_NO_MSG0;
typedef union
{
struct
{
FIELD us_no_msg_b5_b6 : 16;
FIELD us_no_msg_b7_b8 : 16;
} Bits;
UINT32 Raw;
} REG_DBG_US_NO_MSG1, *PREG_DBG_US_NO_MSG1;
typedef union
{
struct
{
FIELD us_no_msg_b9_b10 : 16;
FIELD us_no_msg_b11_b12 : 16;
} Bits;
UINT32 Raw;
} REG_DBG_US_NO_MSG2, *PREG_DBG_US_NO_MSG2;
typedef union
{
struct
{
FIELD rsv_28 : 4;
FIELD dying_gasp_num : 4;
FIELD rsv_17 : 7;
FIELD dying_gasp_test : 1;
FIELD rsv_9 : 7;
FIELD hw_dying_gasp_en : 1;
FIELD dying_gasp_msg_type : 8;
# 1237 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_DBG_US_DYING_GASP_CTRL, *PREG_DBG_US_DYING_GASP_CTRL;
typedef union
{
struct
{
FIELD bwm_intvl_invld : 1;
FIELD bwm_len_invld : 1;
FIELD bwm_start_order_err : 1;
FIELD rsv_19 : 10;
FIELD bwm_start_order_err_flt_en : 1;
FIELD bwm_len_vld_chk_en : 1;
FIELD min_bst_intvl_en : 1;
FIELD min_bst_intvl : 16;
# 1264 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_DBG_BWM_FILTER_CTRL, *PREG_DBG_BWM_FILTER_CTRL;
typedef union
{
struct
{
FIELD rsv_25 : 7;
FIELD bwm_sfifo_max_used : 9;
FIELD rsv_9 : 7;
FIELD bwm_sfifo_used : 9;
} Bits;
UINT32 Raw;
} REG_DBG_BWM_SFIFO_STS, *PREG_DBG_BWM_SFIFO_STS;
typedef union
{
struct
{
FIELD rsv_22 : 10;
FIELD ether_fcs_err : 1;
FIELD ploamu_wr : 1;
FIELD dbru_wr : 1;
FIELD is_gem_pl : 1;
FIELD is_fcs_only : 1;
FIELD is_inst_idle : 1;
FIELD rsv_6 : 10;
FIELD mbi_pl_fifo_ovrn : 1;
FIELD aes_egn3_busy : 1;
FIELD aes_egn2_busy : 1;
FIELD aes_egn1_busy : 1;
FIELD aes_egn0_busy : 1;
FIELD aes_rdm_ciph_fifo_ovrn : 1;
# 1322 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_DBG_GRP_0, *PREG_DBG_GRP_0;
typedef union
{
struct
{
FIELD rsv_25 : 7;
FIELD alen_neq_zero : 1;
FIELD rsv_20 : 4;
FIELD tx_align_fifo_udrn : 1;
FIELD rx_sof_sync : 1;
FIELD sync_start : 1;
FIELD tx_late_start : 1;
FIELD rsv_9 : 7;
FIELD bst_next_full : 1;
FIELD sgl_next_full : 1;
FIELD aes_cryp_cnt_fifo_ovrn : 1;
FIELD aes_ciph_txt_fifo_ovrn : 1;
FIELD mbi_hdr_fifo_ovrn : 1;
FIELD ploamd_crc_err : 1;
FIELD bwm_uc_err : 1;
FIELD blen_err : 1;
FIELD logd : 1;
# 1367 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_DBG_GRP_1, *PREG_DBG_GRP_1;
typedef union
{
struct
{
FIELD rsv_16 : 16;
FIELD tx_align_max_used : 16;
} Bits;
UINT32 Raw;
} REG_DBG_TXFIFO_MAX_USED, *PREG_DBG_TXFIFO_MAX_USED;
typedef union
{
struct
{
FIELD rsv_20 : 12;
FIELD pls_used : 1;
FIELD dba_mode2_used : 1;
FIELD dba_mode1_used : 1;
FIELD dba_mode0_used : 1;
FIELD rsv_8 : 8;
FIELD max_assigned_gnt_num : 8;
# 1407 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_DBG_BWM_GNT_STS, *PREG_DBG_BWM_GNT_STS;
typedef union
{
struct
{
FIELD rsv_25 : 7;
FIELD gpon_sub_probe_sel : 5;
FIELD probe_bit0_sel : 4;
FIELD bwm_probe_sel : 4;
FIELD tx_align_probe_dt_sel : 1;
FIELD rx_mpi_mbi_probe_dt_sel : 1;
FIELD tx_mpi_mbi_probe_dt_sel : 1;
FIELD rx_data_hl_sel : 1;
FIELD rsv_5 : 3;
FIELD probe_sel : 5;
# 1438 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_DBG_PROBE_CTRL, *PREG_DBG_PROBE_CTRL;
typedef union
{
struct
{
FIELD probe_high32 : 32;
} Bits;
UINT32 Raw;
} REG_DBG_PROBE_HIGH32, *PREG_DBG_PROBE_HIGH32;
typedef union
{
struct
{
FIELD probe_low32 : 32;
} Bits;
UINT32 Raw;
} REG_DBG_PROBE_LOW32, *PREG_DBG_PROBE_LOW32;
typedef union
{
struct
{
FIELD rsv_25 : 7;
FIELD bwm_bfifo_max_used : 9;
FIELD rsv_9 : 7;
FIELD bwm_bfifo_used : 9;
} Bits;
UINT32 Raw;
} REG_DBG_BWM_BFIFO_STS, *PREG_DBG_BWM_BFIFO_STS;
typedef union
{
struct
{
FIELD rsv_10 : 22;
FIELD bwm_fec_ctrl : 1;
FIELD ignore_atm_cell : 1;
FIELD rsv_5 : 3;
FIELD o5_eqd_adj_resync_en : 1;
FIELD tx_udrn_resync_en : 1;
FIELD tx_late_resync_en : 1;
FIELD bst_sgl_diff_resync_en : 1;
FIELD ignore_ploamd_crc : 1;
# 1504 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_DBG_ERR_CTRL, *PREG_DBG_ERR_CTRL;
typedef union
{
struct
{
FIELD rsv_9 : 23;
FIELD rdi_en : 1;
FIELD rsv_1 : 7;
FIELD sw_rdi : 1;
} Bits;
UINT32 Raw;
} REG_DBG_SW_RDI_CTRL, *PREG_DBG_SW_RDI_CTRL;
typedef union
{
struct
{
FIELD dbg_rx_gem_cnt : 32;
} Bits;
UINT32 Raw;
} REG_DBG_RX_GEM_CNT, *PREG_DBG_RX_GEM_CNT;
typedef union
{
struct
{
FIELD dbg_rx_crc_cnt : 32;
} Bits;
UINT32 Raw;
} REG_DBG_RX_CRC_ERR_CNT, *PREG_DBG_RX_CRC_ERR_CNT;
typedef union
{
struct
{
FIELD dbg_rx_gtc_cnt : 32;
} Bits;
UINT32 Raw;
} REG_DBG_RX_GTC_CNT, *PREG_DBG_RX_GTC_CNT;
typedef union
{
struct
{
FIELD dbg_tx_gem_cnt : 32;
} Bits;
UINT32 Raw;
} REG_DBG_TX_GEM_CNT, *PREG_DBG_TX_GEM_CNT;
typedef union
{
struct
{
FIELD dbg_tx_burst_cnt : 32;
} Bits;
UINT32 Raw;
} REG_DBG_TX_BST_CNT, *PREG_DBG_TX_BST_CNT;
typedef union
{
struct
{
FIELD dbg_gem_one_err_cnt : 32;
} Bits;
UINT32 Raw;
} REG_DBG_GEM_HEC_ONE_ERR_CNT, *PREG_DBG_GEM_HEC_ONE_ERR_CNT;
typedef union
{
struct
{
FIELD dbg_gem_two_err_cnt : 32;
} Bits;
UINT32 Raw;
} REG_DBG_GEM_HEC_TWO_ERR_CNT, *PREG_DBG_GEM_HEC_TWO_ERR_CNT;
typedef union
{
struct
{
FIELD dbg_gem_uc_err_cnt : 32;
} Bits;
UINT32 Raw;
} REG_DBG_GEM_HEC_UC_ERR_CNT, *PREG_DBG_GEM_HEC_UC_ERR_CNT;
typedef union
{
struct
{
FIELD dbg_sw_rpt_dba_en : 1;
FIELD rsv_28 : 3;
FIELD dbg_slight_modify_en : 1;
FIELD rsv_0 : 27;
} Bits;
UINT32 Raw;
} REG_DBG_DBA_BACK_DOOR, *PREG_DBG_DBA_BACK_DOOR;
typedef union
{
struct
{
FIELD dbg_dba_total_buf_size : 32;
} Bits;
UINT32 Raw;
} REG_DBG_DBA_TOTAL_BUF_SIZE, *PREG_DBG_DBA_TOTAL_BUF_SIZE;
typedef union
{
struct
{
FIELD dbg_dba_green_size : 32;
} Bits;
UINT32 Raw;
} REG_DBG_DBA_GREEN_SIZE, *PREG_DBG_DBA_GREEN_SIZE;
typedef union
{
struct
{
FIELD dbg_dba_yellow_size : 32;
} Bits;
UINT32 Raw;
} REG_DBG_DBA_YELLOW_SIZE, *PREG_DBG_DBA_YELLOW_SIZE;
typedef union
{
struct
{
FIELD dbg_yellow_slight_modify_size : 16;
FIELD dbg_green_slight_modify_size : 16;
} Bits;
UINT32 Raw;
} REG_DBG_SLIGHT_MODIFY_SIZE_1, *PREG_DBG_SLIGHT_MODIFY_SIZE_1;
typedef union
{
struct
{
FIELD rsv_16 : 16;
FIELD dbg_total_slight_modify_size : 16;
} Bits;
UINT32 Raw;
} REG_DBG_SLIGHT_MODIFY_SIZE_2, *PREG_DBG_SLIGHT_MODIFY_SIZE_2;
typedef union
{
struct
{
FIELD rsv_30 : 2;
FIELD dbg_ds_spf_cnt : 30;
} Bits;
UINT32 Raw;
} REG_DBG_DS_SPF_CNT, *PREG_DBG_DS_SPF_CNT;
typedef union
{
struct
{
FIELD rsv_2 : 30;
FIELD dbg_tx_sync_offset : 2;
} Bits;
UINT32 Raw;
} REG_DBG_TX_SYNC_OFFSET, *PREG_DBG_TX_SYNC_OFFSET;
typedef union
{
struct
{
FIELD rsv_9 : 23;
FIELD dbg_ext_bst_len_filter_in_o5_en : 1;
FIELD rsv_1 : 7;
FIELD dbg_us_overhead_filter_in_o5_en : 1;
} Bits;
UINT32 Raw;
} REG_DBG_PLOAMD_FILTER_IN_O5, *PREG_DBG_PLOAMD_FILTER_IN_O5;
typedef union
{
struct
{
FIELD dbg_sw_resync_en : 1;
FIELD rsv_1 : 30;
FIELD dbg_sw_resync : 1;
} Bits;
UINT32 Raw;
} REG_DBG_SW_RESYNC, *PREG_DBG_SW_RESYNC;
typedef union
{
struct
{
FIELD sniff_dbg_tx_rst : 1;
FIELD rsv_9 : 22;
FIELD sniff_pkt_padding_en : 1;
FIELD rsv_1 : 7;
FIELD dbg_gtc_eth_extr_en : 1;
} Bits;
UINT32 Raw;
} REG_DBG_GTC_ETH_EXTR, *PREG_DBG_GTC_ETH_EXTR;
typedef union
{
struct
{
FIELD dbg_extr_rx_ds_eth_type : 16;
FIELD dbg_extr_rx_ds_sp : 16;
} Bits;
UINT32 Raw;
} REG_DBG_DS_GTC_EXTR_ETH_HDR, *PREG_DBG_DS_GTC_EXTR_ETH_HDR;
typedef union
{
struct
{
FIELD dbg_extr_tx_us_eth_type : 16;
FIELD dbg_extr_tx_us_sp : 16;
} Bits;
UINT32 Raw;
} REG_DBG_US_GTC_EXTR_ETH_HDR, *PREG_DBG_US_GTC_EXTR_ETH_HDR;
typedef union
{
struct
{
FIELD dbg_extr_rx_cnt : 32;
} Bits;
UINT32 Raw;
} REG_DBG_DS_GTC_EXTR_ETH_CNT, *PREG_DBG_DS_GTC_EXTR_ETH_CNT;
typedef union
{
struct
{
FIELD dbg_extr_tx_cnt : 32;
} Bits;
UINT32 Raw;
} REG_DBG_US_GTC_EXTR_ETH_CNT, *PREG_DBG_US_GTC_EXTR_ETH_CNT;
typedef union
{
struct
{
FIELD dbg_ds_all_gem_filter : 1;
FIELD rsv_28 : 3;
FIELD dbg_ds_gem_filter_exclude_omci : 1;
FIELD rsv_16 : 11;
FIELD dbg_us_gtc_invld_gem_byte : 16;
} Bits;
UINT32 Raw;
} REG_SNIFF_GTC_GTC_INVLD_GEM_BYTE, *PREG_SNIFF_GTC_GTC_INVLD_GEM_BYTE;
typedef union
{
struct
{
FIELD rsv_17 : 15;
FIELD dbg_mbi_pl_fifo_write_conflict : 1;
FIELD rsv_9 : 7;
FIELD dbg_mbi_hdr_fifo_write_conflict : 1;
FIELD rsv_1 : 7;
FIELD dbg_us_gtc_info_fifo_ovrn : 1;
# 1850 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_DBG_SNIFF_DBG_REG, *PREG_DBG_SNIFF_DBG_REG;
typedef union
{
struct
{
FIELD rsv_7 : 25;
FIELD dbg_us_info_tx_send_depth : 7;
} Bits;
UINT32 Raw;
} REG_DBG_SNIFF_US_INFO_FIFO, *PREG_DBG_SNIFF_US_INFO_FIFO;
typedef union
{
struct
{
FIELD stop_time_of_wido_en : 1;
FIELD rsv_16 : 15;
FIELD stop_time_of_wido : 16;
} Bits;
UINT32 Raw;
} REG_SNIFF_US_HDR_WIDO, *PREG_SNIFF_US_HDR_WIDO;
typedef union
{
struct
{
FIELD tx_us_da_h16 : 16;
FIELD tx_us_sa_h16 : 16;
} Bits;
UINT32 Raw;
} REG_SNIFF_TX_DA_SA, *PREG_SNIFF_TX_DA_SA;
typedef union
{
struct
{
FIELD rx_ds_da_h16 : 16;
FIELD rx_ds_sa_h16 : 16;
} Bits;
UINT32 Raw;
} REG_SNIFF_RX_DA_SA, *PREG_SNIFF_RX_DA_SA;
typedef union
{
struct
{
FIELD rsv_28 : 4;
FIELD tx_eth_pid : 12;
FIELD rsv_12 : 4;
FIELD rx_eth_pid : 12;
} Bits;
UINT32 Raw;
} REG_SNIFF_TX_RX_PID, *PREG_SNIFF_TX_RX_PID;
typedef union
{
struct
{
FIELD rsv_1 : 31;
FIELD sniff_cnt_en : 1;
} Bits;
UINT32 Raw;
} REG_SNIFF_CNT_EN, *PREG_SNIFF_CNT_EN;
typedef union
{
struct
{
FIELD sniff_rx_tpid : 16;
FIELD sniff_tx_tpid : 16;
} Bits;
UINT32 Raw;
} REG_SNIFF_RX_TX_TPID, *PREG_SNIFF_RX_TX_TPID;
typedef union
{
struct
{
FIELD asb_tx_eth_4byte_align_en : 1;
FIELD rsv_28 : 3;
FIELD olt_ds_fec : 1;
FIELD rsv_24 : 3;
FIELD onu_us_fec : 1;
FIELD rsv_1 : 22;
FIELD gpon_mac_sw_rst_n : 1;
# 1986 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header_en7521.h"
} Bits;
UINT32 Raw;
} REG_DBG_GPON_MAC_SET, *PREG_DBG_GPON_MAC_SET;
typedef union
{
struct
{
FIELD sleep_filt_all_bwm : 1;
FIELD reload_sleep_cnt : 1;
FIELD rsv_0 : 30;
} Bits;
UINT32 Raw;
} REG_SLEEP_GLB_CFG, *PREG_SLEEP_GLB_CFG;
typedef union
{
struct
{
FIELD sleep_cnt : 32;
} Bits;
UINT32 Raw;
} REG_SLEEP_CNT, *PREG_SLEEP_CNT;
typedef union
{
struct
{
FIELD rsv_1 : 31;
FIELD tx_sync_opt : 1;
} Bits;
UINT32 Raw;
} REG_TX_SYNC_OPT, *PREG_TX_SYNC_OPT;
typedef union
{
struct
{
FIELD rsv_9 : 23;
FIELD ploamu_ind_ctrl : 1;
FIELD rsv_1 : 7;
FIELD o3_o4_ploamu_ctrl : 1;
} Bits;
UINT32 Raw;
} REG_O3_O4_PLOAMU_CTRL, *PREG_O3_O4_PLOAMU_CTRL;
typedef union
{
struct
{
FIELD tod_1pps_width_ctrl : 32;
} Bits;
UINT32 Raw;
} REG_TOD_1PPS_WD_CTRL, *PREG_TOD_1PPS_WD_CTRL;
typedef volatile struct
{
UINT32 rsv_0000[4096];
REG_G_ONU_ID G_ONU_ID;
REG_G_GBL_CFG G_GBL_CFG;
REG_G_INT_STATUS G_INT_STATUS;
REG_G_INT_ENABLE G_INT_ENABLE;
UINT32 rsv_4010[4];
REG_G_TCONT_ID_0_1 G_TCONT_ID_0_1;
REG_G_TCONT_ID_2_3 G_TCONT_ID_2_3;
REG_G_TCONT_ID_4_5 G_TCONT_ID_4_5;
REG_G_TCONT_ID_6_7 G_TCONT_ID_6_7;
REG_G_TCONT_ID_8_9 G_TCONT_ID_8_9;
REG_G_TCONT_ID_10_11 G_TCONT_ID_10_11;
REG_G_TCONT_ID_12_13 G_TCONT_ID_12_13;
REG_G_TCONT_ID_14_15 G_TCONT_ID_14_15;
REG_G_GEM_PORT_CFG G_GEM_PORT_CFG;
REG_G_GEM_PORT_STS G_GEM_PORT_STS;
REG_G_OMCI_ID G_OMCI_ID;
REG_G_GEM_TBL_INIT G_GEM_TBL_INIT;
REG_G_PLOAMu_FIFO_STS G_PLOAMu_FIFO_STS;
REG_G_PLOAMu_WDATA G_PLOAMu_WDATA;
REG_G_PLOAMd_FIFO_STS G_PLOAMd_FIFO_STS;
REG_G_PLOAMd_RDATA G_PLOAMd_RDATA;
REG_G_AES_CFG G_AES_CFG;
REG_G_AES_ACTIVE_KEY0 G_AES_ACTIVE_KEY0;
REG_G_AES_ACTIVE_KEY1 G_AES_ACTIVE_KEY1;
REG_G_AES_ACTIVE_KEY2 G_AES_ACTIVE_KEY2;
REG_G_AES_ACTIVE_KEY3 G_AES_ACTIVE_KEY3;
REG_G_AES_SHADOW_KEY0 G_AES_SHADOW_KEY0;
REG_G_AES_SHADOW_KEY1 G_AES_SHADOW_KEY1;
REG_G_AES_SHADOW_KEY2 G_AES_SHADOW_KEY2;
REG_G_AES_SHADOW_KEY3 G_AES_SHADOW_KEY3;
REG_G_AES_KEY_SWITCH_BY_SW G_AES_KEY_SWITCH_BY_SW;
UINT32 rsv_4088[2];
REG_G_PLOu_OVERHEAD G_PLOu_OVERHEAD;
REG_G_PLOu_GUARD_BIT G_PLOu_GUARD_BIT;
REG_G_PLOu_PRMBL_TYPE1_2 G_PLOu_PRMBL_TYPE1_2;
REG_G_PLOu_PRMBL_TYPE3 G_PLOu_PRMBL_TYPE3;
REG_G_PLOu_DELM_BIT G_PLOu_DELM_BIT;
REG_G_PRE_ASSIGNED_DLY G_PRE_ASSIGNED_DLY;
REG_G_EQD G_EQD;
REG_G_RSP_TIME G_RSP_TIME;
REG_G_VENDOR_ID G_VENDOR_ID;
REG_G_VS_SN G_VS_SN;
REG_G_SN_MSG_CFG G_SN_MSG_CFG;
REG_G_ACTIVATION_ST G_ACTIVATION_ST;
UINT32 rsv_40C0[4];
REG_G_TOD_CFG G_TOD_CFG;
REG_G_NEW_TOD_SEC_L32 G_NEW_TOD_SEC_L32;
REG_G_NEW_TOD_NANO_SEC G_NEW_TOD_NANO_SEC;
REG_G_CUR_TOD_SEC_L32 G_CUR_TOD_SEC_L32;
REG_G_CUR_TOD_NANO_SEC G_CUR_TOD_NANO_SEC;
REG_G_TOD_CLK_PERIOD G_TOD_CLK_PERIOD;
UINT32 rsv_40E8[6];
REG_G_TX_FCS_TBL_INIT G_TX_FCS_TBL_INIT;
UINT32 rsv_4104[7];
REG_G_MIB_CTRL_STS G_MIB_CTRL_STS;
REG_G_MIB_RDATA_L32 G_MIB_RDATA_L32;
REG_G_MIB_RDATA_H32 G_MIB_RDATA_H32;
REG_G_MIB_WDATA_L32 G_MIB_WDATA_L32;
REG_G_MIB_WDATA_H32 G_MIB_WDATA_H32;
REG_G_MIB_TBL_INIT G_MIB_TBL_INIT;
UINT32 rsv_4138[2];
REG_G_GPIDX_TBL_CTRL G_GPIDX_TBL_CTRL;
REG_G_GPIDX_TBL_STS G_GPIDX_TBL_STS;
REG_G_GPIDX_TBL_INIT G_GPIDX_TBL_INIT;
UINT32 rsv_414C[5];
REG_G_MBI_STOP G_MBI_STOP;
UINT32 rsv_4164[7];
REG_G_TCONT_ID_16_31_CFG G_TCONT_ID_16_31_CFG;
REG_G_TCONT_ID_16_31_STS G_TCONT_ID_16_31_STS;
UINT32 rsv_4188[30];
REG_DBG_CAP_SETTING DBG_CAP_SETTING;
REG_DBG_US_PREF_OFS DBG_US_PREF_OFS;
REG_DBG_DLY DBG_DLY;
REG_DBG_IDLE_GEM_THLD DBG_IDLE_GEM_THLD;
REG_DBG_US_NO_MSG0 DBG_US_NO_MSG0;
REG_DBG_US_NO_MSG1 DBG_US_NO_MSG1;
REG_DBG_US_NO_MSG2 DBG_US_NO_MSG2;
REG_DBG_US_DYING_GASP_CTRL DBG_US_DYING_GASP_CTRL;
REG_DBG_BWM_FILTER_CTRL DBG_BWM_FILTER_CTRL;
REG_DBG_BWM_SFIFO_STS DBG_BWM_SFIFO_STS;
REG_DBG_GRP_0 DBG_GRP_0;
REG_DBG_GRP_1 DBG_GRP_1;
REG_DBG_TXFIFO_MAX_USED DBG_TXFIFO_MAX_USED;
REG_DBG_BWM_GNT_STS DBG_BWM_GNT_STS;
UINT32 rsv_4238[2];
REG_DBG_PROBE_CTRL DBG_PROBE_CTRL;
REG_DBG_PROBE_HIGH32 DBG_PROBE_HIGH32;
REG_DBG_PROBE_LOW32 DBG_PROBE_LOW32;
UINT32 rsv_424C;
REG_DBG_BWM_BFIFO_STS DBG_BWM_BFIFO_STS;
UINT32 rsv_4254[3];
REG_DBG_ERR_CTRL DBG_ERR_CTRL;
REG_DBG_SW_RDI_CTRL DBG_SW_RDI_CTRL;
UINT32 rsv_4268[38];
REG_DBG_RX_GEM_CNT DBG_RX_GEM_CNT;
REG_DBG_RX_CRC_ERR_CNT DBG_RX_CRC_ERR_CNT;
REG_DBG_RX_GTC_CNT DBG_RX_GTC_CNT;
REG_DBG_TX_GEM_CNT DBG_TX_GEM_CNT;
REG_DBG_TX_BST_CNT DBG_TX_BST_CNT;
UINT32 rsv_4314[7];
REG_DBG_GEM_HEC_ONE_ERR_CNT DBG_GEM_HEC_ONE_ERR_CNT;
REG_DBG_GEM_HEC_TWO_ERR_CNT DBG_GEM_HEC_TWO_ERR_CNT;
REG_DBG_GEM_HEC_UC_ERR_CNT DBG_GEM_HEC_UC_ERR_CNT;
UINT32 rsv_433C;
REG_DBG_DBA_BACK_DOOR DBG_DBA_BACK_DOOR;
REG_DBG_DBA_TOTAL_BUF_SIZE DBG_DBA_TOTAL_BUF_SIZE;
REG_DBG_DBA_GREEN_SIZE DBG_DBA_GREEN_SIZE;
REG_DBG_DBA_YELLOW_SIZE DBG_DBA_YELLOW_SIZE;
REG_DBG_SLIGHT_MODIFY_SIZE_1 DBG_SLIGHT_MODIFY_SIZE_1;
REG_DBG_SLIGHT_MODIFY_SIZE_2 DBG_SLIGHT_MODIFY_SIZE_2;
REG_DBG_DS_SPF_CNT DBG_DS_SPF_CNT;
REG_DBG_TX_SYNC_OFFSET DBG_TX_SYNC_OFFSET;
REG_DBG_PLOAMD_FILTER_IN_O5 DBG_PLOAMD_FILTER_IN_O5;
REG_DBG_SW_RESYNC DBG_SW_RESYNC;
REG_DBG_GTC_ETH_EXTR DBG_GTC_ETH_EXTR;
REG_DBG_DS_GTC_EXTR_ETH_HDR DBG_DS_GTC_EXTR_ETH_HDR;
REG_DBG_US_GTC_EXTR_ETH_HDR DBG_US_GTC_EXTR_ETH_HDR;
REG_DBG_DS_GTC_EXTR_ETH_CNT DBG_DS_GTC_EXTR_ETH_CNT;
REG_DBG_US_GTC_EXTR_ETH_CNT DBG_US_GTC_EXTR_ETH_CNT;
REG_SNIFF_GTC_GTC_INVLD_GEM_BYTE SNIFF_GTC_GTC_INVLD_GEM_BYTE;
REG_DBG_SNIFF_DBG_REG DBG_SNIFF_DBG_REG;
REG_DBG_SNIFF_US_INFO_FIFO DBG_SNIFF_US_INFO_FIFO;
REG_SNIFF_US_HDR_WIDO SNIFF_US_HDR_WIDO;
REG_SNIFF_TX_DA_SA SNIFF_TX_DA_SA;
REG_SNIFF_RX_DA_SA SNIFF_RX_DA_SA;
REG_SNIFF_TX_RX_PID SNIFF_TX_RX_PID;
REG_SNIFF_CNT_EN SNIFF_CNT_EN;
REG_SNIFF_RX_TX_TPID SNIFF_RX_TX_TPID;
REG_DBG_GPON_MAC_SET DBG_GPON_MAC_SET;
REG_SLEEP_GLB_CFG SLEEP_GLB_CFG;
REG_SLEEP_CNT SLEEP_CNT;
UINT32 rsv_43AC[5];
REG_TX_SYNC_OPT TX_SYNC_OPT;
REG_O3_O4_PLOAMU_CTRL O3_O4_PLOAMU_CTRL;
REG_TOD_1PPS_WD_CTRL TOD_1PPS_WD_CTRL;
}gpon_mac_reg_REGS, *Pgpon_mac_reg_REGS;
extern Pgpon_mac_reg_REGS g_gpon_mac_reg_BASE;
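/*
 * NOTE: gpon_mac_reg_REGS is a volatile overlay of the GPON MAC register
 * block; the rsv_XXXX members pad reserved ranges and seem to be named after
 * the hex offset at which they start (rsv_4010, rsv_4088, ...). All register
 * access goes through the single exported base pointer. Illustrative sketch:
 *
 *   REG_G_ONU_ID onu_id;
 *   onu_id.Raw = ioread32((void *)&g_gpon_mac_reg_BASE->G_ONU_ID);
 */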
# 6 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_mac_reg_c_header.h" 2
# 5 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_reg.h" 2
# 5 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_dev.h" 2
# 1 "include/ecnt_hook/ecnt_hook_pon_phy.h" 1
# 140 "include/ecnt_hook/ecnt_hook_pon_phy.h"
enum {
XPON_PHY_API_TYPE_GET = 0,
XPON_PHY_API_TYPE_SET,
};
typedef enum XponPhy_Mode_e{
PHY_EPON_CONFIG,
PHY_GPON_CONFIG,
PHY_UNKNOWN_CONFIG,
} Xpon_Phy_Mode_t ;
typedef enum PHY_Los_Status_e{
PHY_LINK_STATUS_LOS,
PHY_LINK_STATUS_READY,
PHY_LINK_STATUS_UNKNOWN,
}PHY_Los_Status_t;
typedef enum {
PHY_CALLBACK_XPON_STATE_NOTIFY =8,
} PHY_CbType_t ;
typedef enum {
PHY_TRANS_NO_ALARM = 0x0,
PHY_TRANS_TX_HIGH_POWER_ALARM = 0x01,
PHY_TRANS_TX_LOW_POWER_ALARM = 0x02,
PHY_TRANS_TX_HIGH_CUR_ALARM = 0x04,
PHY_TRANS_TX_LOW_CUR_ALARM = 0x08,
PHY_TRANS_RX_HIGH_POWER_ALARM = 0x10,
PHY_TRANS_RX_LOW_POWER_ALARM = 0x20
} ENUM_PhyTransAlarm ;
typedef enum {
PHY_ERR_CNT_CLR = 0x01,
PHY_BIP_CNT_CLR = 0x02,
PHY_RXFRAME_CNT_CLR = 0x04,
PHY_TXFRAME_CNT_CLR = 0x08,
PHY_EPON_ERR_CNT_CLR = 0x10
} ENUM_PhyCounterClr;
typedef enum {
PHY_GUARD_BIT_NUM_EN = 0x01,
PHY_PRE_T1_NUM_EN = 0x02,
PHY_PRE_T2_NUM_EN = 0x04,
PHY_PRE_T3_PAT_EN = 0x08,
PHY_T3_O4_PRE_EN = 0x10,
PHY_T3_O5_PRE_EN = 0x20,
PHY_EXT_BUR_MODE_EN = 0x40,
PHY_OPER_RANG_EN = 0x80,
PHY_DIS_SCRAM_EN = 0x200,
PHY_EXTB_LENG_SEL_EN = 0x100
} ENUM_GponPreb_T ;
enum ECNT_XPON_PHY_SUBTYPE {
ECNT_XPON_PHY_API,
};
typedef struct
{
ushort temprature;
ushort supply_voltage;
ushort tx_current;
ushort tx_power;
ushort rx_power;
}PHY_TransParam_T, *PPHY_TransParam_T;
typedef struct{
PHY_TransParam_T params;
uint alarms;
} PHY_Trans_Status_t;
typedef struct{
Xpon_Phy_Mode_t mode ;
int txEnable;
} PHY_Mode_Cfg_t;
typedef struct
{
uint correct_bytes;
uint correct_codewords;
uint uncorrect_codewords;
uint total_rx_codewords;
uint fec_seconds;
}PHY_FecCount_T, *PPHY_FecCount_T;
typedef struct
{
uint frame_count_low;
uint frame_count_high;
uint lof_counter;
}PHY_FrameCount_T, *PPHY_FrameCount_T;
typedef struct{
uint delimiter ;
unchar guard_time ;
} PHY_GPON_Delimiter_Guard_t;
typedef struct
{
unchar trans_tx_sd_inv_status;
unchar trans_burst_en_inv_status;
unchar trans_tx_fault_inv_status;
}PHY_TransConfig_T, *PPHY_TransConfig_T;
typedef struct
{
unchar guard_bit_num;
unchar preamble_t1_num;
unchar preamble_t2_num;
unchar preamble_t3_pat;
unchar t3_O4_preamble;
unchar t3_O5_preamble;
unchar extend_burst_mode;
unchar oper_ranged_st;
unchar dis_scramble;
unchar extb_length_sel;
uint mask;
}PHY_GponPreb_T, *PPHY_GponPreb_T;
typedef struct xpon_phy_api_data_s {
int api_type ;
unsigned int cmd_id ;
int ret ;
union{
int * data ;
PHY_Mode_Cfg_t * phy_mode_cfg ;
PHY_Trans_Status_t * trasn_status ;
PHY_FecCount_T * rx_fec_cnt ;
PHY_FrameCount_T * rx_frame_cnt ;
PHY_GPON_Delimiter_Guard_t * delimiter_guard ;
PHY_TransConfig_T * tx_trans_cfg ;
PHY_GponPreb_T * gpon_preamble ;
PHY_GponPreb_T * gpon_ex_preamble;
void * raw ;
};
}xpon_phy_api_data_t;
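/*
 * NOTE: xpon_phy_api_data_t is the request envelope handed to the PON PHY
 * driver through the ECNT hook: api_type selects GET or SET, cmd_id selects
 * the operation, ret returns the driver's status, and the anonymous union
 * carries a typed pointer to the payload. The XPON_PHY_SET_API() /
 * XPON_PHY_GET_API() inlines further down fill this structure in and invoke
 * the hook.
 */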
# 9 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_dev.h" 2
typedef struct {
ushort gemPortId ;
unchar isValid ;
unchar isEncrypted ;
} GPONDEV_GemPortInfo_T ;
typedef struct {
REG_G_AES_ACTIVE_KEY0 aesActiveKey0;
REG_G_AES_ACTIVE_KEY1 aesActiveKey1;
REG_G_AES_ACTIVE_KEY2 aesActiveKey2;
REG_G_AES_ACTIVE_KEY3 aesActiveKey3;
} GPON_DEV_AES_ACTIVE_KEY_T;
typedef struct {
REG_G_AES_SHADOW_KEY0 aesShadowKey0;
REG_G_AES_SHADOW_KEY1 aesShadowKey1;
REG_G_AES_SHADOW_KEY2 aesShadowKey2;
REG_G_AES_SHADOW_KEY3 aesShadowKey3;
} GPON_DEV_AES_SHADOW_KEY_T;
typedef enum {
GPON_TCONT_INVALID = 0,
GPON_TCONT_VALID,
} GPON_TCONT_t ;
typedef enum {
GPON_TCONT_CMD_SUCCESS = 0,
GPON_TCONT_CMD_FAIL,
} GPON_TCONT_CMD_RESULT_t ;
typedef enum{
GEMPORT_RX_FRAME_CNT = 0,
GEMPORT_RX_PL_BYTE_CNT,
GEMPORT_TX_FRAME_CNT,
GEMPORT_TX_PL_BYTE_CNT,
}GPON_GEMPORT_STATS_TYPE_t;
typedef enum {
GPON_TCONT_READ = 0,
GPON_TCONT_WRITE,
} GPON_TCONT_CMD_t ;
typedef enum {
GPON_AES_KEY_SWITCH_BY_SW_DONE_PROCESSING = 0,
GPON_AES_KEY_SWITCH_BY_SW_DONE,
} GPON_AES_KEY_SWITCH_BY_SW_DONE_t ;
# 106 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_dev.h"
void select_xpon_wan(Xpon_Phy_Mode_t mode);
int gpon_dev_init(void) ;
int gponDevGetPloamMsg(PLOAM_RAW_General_T *pPloamMsg) ;
int gponDevSendPloamMsg(PLOAM_RAW_General_T *pPloamMsg, uint times) ;
int gponDevSet1PPSInt(XPON_Mode_t mode) ;
int gponDevSetNewTod(uint superframe, uint sec, uint nanosec) ;
int gponDevGetCurrentTod(uint *pSec, uint *pNanosec) ;
int gponDevGetNewTod(uint *pNewSec, uint *pNewNanoSec);
int gponDevGetTodSwitchTime(uint *spf);
int gponDevEnable1PPSInt(XPON_Mode_t mode) ;
int gponDevSetSerialNumber(unchar *sn) ;
void gponDevResetGemInfo(void) ;
int gponDevGetSuperframe(uint *counter);
int gponDevGetGemInfo(ushort gemPortId, unchar *pValid, unchar *pEncrypted) ;
int gponDevSetGemInfo(ushort gemPortId, unchar isValid, unchar isEncrypted) ;
int gponDevDumpGemInfo(void);
int gponDevGemMibTablesInit(void) ;
int gponDevUpdateGemMibIdxTable(unchar addr, ushort data) ;
int gponDevGetGemPortCounter(ushort gemPortId, GPON_GEMPORT_STATS_TYPE_t type, uint *pHData, uint *pLData) ;
void gponDevResetAllocId(void) ;
int gponDevIsChannelValid(unchar channel) ;
void gpon_dev_reset_GPON_MAC(void);
int gponDevSetSniffMode(GPON_DEV_SNIFFER_MODE_T *sniffer);
GPON_AES_KEY_SWITCH_BY_SW_DONE_t gponDevSetAesKeySwitchBySw(void);
int gponDevSwReset(void);
int gponDevSwResync(void);
int gponDevSetDbaBackdoor(XPON_Mode_t enable);
int gponDevSetDbaBackdoorTotal(uint total_buf_size);
int gponDevSetDbaBackdoorGreen(uint green_size);
int gponDevSetDbaBackdoorYellow(uint yellow_size);
int gponDevGetDbaBackdoor(GPON_DEV_DBA_BACKDOOR_T *dba_backdoor);
int gponDevSetDbaSlightModify(XPON_Mode_t enable);
int gponDevSetDbaSlightModifyTotal(ushort total_size);
int gponDevSetDbaSlightModifyGreen(ushort green_size);
int gponDevSetDbaSlightModifyYellow(ushort yellow_size);
int gponDevGetDbaSlightModify(GPON_DEV_SLIGHT_MODIFY_T *dba_slight_modify);
int gponDevSetBwmStopTimeInt(XPON_Mode_t enable) ;
int gponDevSetTx4bytesAlign(XPON_Mode_t enable);
int gponDevGetTx4bytesAlign(XPON_Mode_t *enable);
int gponDevGetTxSync(unchar *tx_sync);
int gponDevGetO3O4PloamCtrl(GPON_SW_HW_SELECT_T *sel);
int gponDevSetO3O4PloamCtrl(GPON_SW_HW_SELECT_T sel);
int gponDevSetFilterUpstreamOverheadPLOAM(XPON_Mode_t enable);
int gponDevGetFilterUpstreamOverheadPLOAM(XPON_Mode_t *enable);
int gponDevSetFilterExtBurstLengthPLOAM(XPON_Mode_t enable);
int gponDevGetFilterExtBurstLengthPLOAM(XPON_Mode_t *enable);
int gponDevSet1ppsHighWidth(uint width);
int gponDevGet1ppsHighWidth(uint *width);
int gponDevSetSendPloamuWaitMode(GPON_DEV_SEND_PLOAMU_WAIT_MODE_T mode);
int gponDevGetSendPloamuWaitMode(GPON_DEV_SEND_PLOAMU_WAIT_MODE_T *mode);
int gponDevSetTodClkPeriod(ushort period);
int gponDevSetIntMask(uint mask);
int gponDevGetIntMask(uint *mask);
GPON_TCONT_CMD_RESULT_t gponDevSetTCont(GPON_TCONT_t isValid, int tcont_index, ushort allocId);
GPON_TCONT_CMD_RESULT_t gponDevGetTCont(GPON_TCONT_t *isValid, int tcont_index, ushort *allocId);
int gponDevDisableTCont(ushort allocId) ;
int gponDevEnableTCont(ushort allocId) ;
int gponDevDumpTcontInfo(void);
int gponDevDumpCsr(void);
void gpon_dev_init_reset(void) ;
int gponDevDeactiveOnu(void) ;
int gponDevSetEncryptKey(unchar *aesKey) ;
int gponDevGetEncryptKey(GPON_DEV_ENCRYPT_KEY_INFO_T * aesKey);
int gponDevSetKeySwithTime(uint counter) ;
int gponDevSetDBABlockSize(ushort blockSize) ;
int gponDevGetDBABlockSize(ushort *blockSize);
int gponDevHardwareDyingGasp(GPON_SW_HW_SELECT_T mode) ;
int gponDevGetDyingGaspMode(GPON_SW_HW_SELECT_T *mode);
int gponDevSetDyingGaspNum(uint num);
int gponDevGetDyingGaspNum(uint *num);
int gponDevSetIdleGemThreshold(ushort idle_gem_thld);
int gponDevgetIdleGemThreshold(ushort *idle_gem_thld);
int gponDevSetCounterType(GPON_COUNTER_TYPE_t type);
int gponDevGetCounterType(GPON_COUNTER_TYPE_t *type);
int gponDevSetResponseTime(ushort time);
int gponDevGetResponseTime(ushort *time);
int gponDevSetInternalDelayFineTune(unchar delay);
int gponDevGetInternalDelayFineTune(unchar *delay);
int gponDevClearSwCounter(void);
int gponDevClearHwCounter(void);
int gponDevMbiStop(XPON_Mode_t mode) ;
int gponDevCheckTContReg(ushort channelId, ushort allocId);
# 10 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_qos.h" 1
void gpon_save_cpu_protection(void);
void gpon_enable_cpu_protection(uint pkt_limit);
void gpon_restore_cpu_protection(void);
int gpon_qos_init(void) ;
int gpon_qos_deinit(void) ;
# 11 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_recovery.h" 1
# 66 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_recovery.h"
extern struct Gpon_Recovery_S gponRecovery;
struct Gem_Recovery_Info{
ushort gemPortId ;
ushort allocId ;
unchar channel;
unchar encryption;
ushort ani;
};
struct Gpon_Recovery_S{
int allocId[32];
struct Gem_Recovery_Info gemPort[256];
int gemPortNum;
int allocIdNum;
unchar dbgPrint;
};
static inline __attribute__((always_inline)) int clear_gemport_info(struct Gem_Recovery_Info* gemPortClear)
{
gemPortClear->allocId = (0xFF);
gemPortClear->gemPortId = (0xFFFF);
do{ if(gponRecovery.dbgPrint){ printk("%s [%d]: ", (__func__), 90); printk("Delete gemport success\n"); } }while(0);
return 0;
}
static inline __attribute__((always_inline)) int * find_backup_allocId(ushort allocId)
{
int i = 0;
int * tmp = ((void *)0);
for(i = 0; i < gponRecovery.allocIdNum; i++)
{
if(gponRecovery.allocId[i] == allocId)
{
tmp = &(gponRecovery.allocId[i]);
break;
}
}
return tmp;
}
static inline __attribute__((always_inline)) struct Gem_Recovery_Info * find_backup_gemport(ushort gemPort)
{
int i = 0;
struct Gem_Recovery_Info * tmp = ((void *)0);
for(i = 0; i < gponRecovery.gemPortNum; i++)
{
if(gponRecovery.gemPort[i].gemPortId == gemPort)
{
tmp = &(gponRecovery.gemPort[i]);
break;
}
}
return tmp;
}
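/*
 * NOTE: the recovery helpers above do a linear search of the gponRecovery
 * backup tables (up to 32 Alloc-IDs and 256 GEM ports); the gpon_recover_*
 * prototypes below suggest these backups are replayed to re-create Alloc-IDs
 * and GEM ports after a GPON MAC reset.
 */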
# 136 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon_recovery.h"
int gpon_recovery_init(void);
int gpon_recover_backup_allocId(unsigned short allocId);
int gpon_recover_create_allocId(void);
int gpon_recover_delete_allocId(unsigned short allocId);
int gpon_recover_backup_gemport(struct Gem_Recovery_Info * bakGemportId);
int gpon_recover_create_gemport(void);
int gpon_recover_delete_gemport(unsigned short gemPortId);
int gpon_recover_get_real_channel(unsigned short allocId);
# 13 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon.h" 2
# 32 "/opt/tclinux_phoenix/modules/private/xpon/inc/gpon/gpon.h"
typedef struct {
REG_G_TCONT_ID_0_1 gponTCont0 ;
REG_G_TCONT_ID_2_3 gponTCont2 ;
REG_G_TCONT_ID_4_5 gponTCont4 ;
REG_G_TCONT_ID_6_7 gponTCont6 ;
REG_G_TCONT_ID_8_9 gponTCont8 ;
REG_G_TCONT_ID_10_11 gponTCont10 ;
REG_G_TCONT_ID_12_13 gponTCont12 ;
REG_G_TCONT_ID_14_15 gponTCont14 ;
} GPON_TCONT_T;
typedef struct {
unchar onu_id ;
unchar sn[(8)] ;
unchar PasswdLength;
unchar hexFlag;
unchar emergencystate;
unchar passwd[(10)] ;
unchar keyIdx ;
unchar key[(24)] ;
ushort omcc ;
unchar reiSeq ;
uint to1Timer ;
uint to2Timer ;
uint berInterval ;
uint eqd ;
uint byteDelay ;
unchar bitDelay ;
unchar eqdO4Offset ;
unchar eqdO5Offset ;
unchar t3PreambleOffset ;
unchar dvtPcpCheck ;
uint dvtPcpCounter ;
uint dvtPcpCheckErr ;
ushort onuResponseTime;
unchar internalDelayFineTune ;
unchar dis_ranging_in_o5;
ushort idle_gem_thld;
GPON_COUNTER_TYPE_t counter_type;
uint sleep_count;
uint phy_psync_to_sof_delay;
GPON_DEV_SNIFFER_MODE_T sniffer_mode;
QDMA_RxRateLimitCfg_T wan_rxRateLimitCfg;
QDMA_RxRateLimitSet_T wan_rxRateLimit;
QDMA_RxRateLimitCfg_T lan_rxRateLimitCfg;
QDMA_RxRateLimitSet_T lan_rxRateLimit;
unchar sr_blk_size;
GPON_TCONT_T tconts;
REG_G_PLOu_GUARD_BIT gponGuardBit ;
REG_G_PLOu_DELM_BIT gponDelmBit ;
REG_G_PLOu_PRMBL_TYPE1_2 gponPrmblType ;
REG_G_PLOu_PRMBL_TYPE3 gponT3Prmbl ;
REG_G_PRE_ASSIGNED_DLY gponPreAssignDly ;
struct {
unchar isRequestKey : 1 ;
unchar isTodUpdate : 1 ;
unchar dvtGponLosFlag : 1 ;
unchar eqdOffsetFlag : 1 ;
unchar preambleFlag : 1 ;
unchar sniffer : 1 ;
unchar hotplug : 1 ;
unchar isPloamFilter : 1 ;
unchar isBwmStopTimeErrInt : 1 ;
} flags ;
} GPON_Config_T ;
typedef struct {
unsigned int bip_cnt_val;
ENUM_GponState_t state ;
struct timer_list to1_timer ;
struct timer_list to2_timer ;
struct timer_list ber_timer ;
ploam_recv_handler_t ploamRecvHandler[(0x19)] ;
PLOAM_RAW_General_T prePloamMsg ;
GPON_Config_T gponCfg ;
spinlock_t act_lock ;
spinlock_t mac_reset_lock ;
spinlock_t swReset_lock ;
unchar typeBOnGoing : 1 ;
unchar disableSnFlag : 1 ;
unchar emergencystate : 1 ;
wait_queue_head_t wq_gpon_reset_done ;
struct Gpon_Recovery_S* pGponRecovery;
} GPON_GlbPriv_T ;
typedef enum {
GPON_DEV_PLAIN_RESET ,
GPON_DEV_RESET_WITH_FE_RESET ,
} GPON_DEV_RESET_TYPE_t;
int gpon_disable_with_option(GPON_DEV_RESET_TYPE_t reset_type);
static inline __attribute__((always_inline)) int gpon_disable(void)
{
return gpon_disable_with_option(GPON_DEV_PLAIN_RESET);
}
int gpon_enable(void) ;
void prepare_gpon(void);
void gpon_isr(void) ;
void gpon_detect_los_lof(void) ;
void gpon_detect_phy_ready(void) ;
void gpon_ber_interval_expires(unsigned long arg) ;
void gpon_recv_ploam_message(void);
void schedule_gpon_dev_reset(GPON_DEV_RESET_TYPE_t type);
void gpon_stop_timer(void);
# 17 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_global.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/omci_oam_monitor.h" 1
# 38 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/omci_oam_monitor.h"
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/../pwan/xpon_netif.h" 1
# 39 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/omci_oam_monitor.h" 2
typedef enum{
MONITOR_STOP ,
MONITOR_RUNNING ,
MONITOR_DONE ,
}OMCI_OAM_Monitor_State_t ;
typedef struct {
unsigned int max_diff_cnt ;
unsigned int rx_pkt_lmt ;
unsigned int timer_interval;
unsigned int max_wait_time ;
unsigned int max_timeout_cnt ;
}Omci_Oam_Monitor_Params_t;
typedef struct {
Omci_Oam_Monitor_Params_t gpon_params;
Omci_Oam_Monitor_Params_t epon_params;
unsigned char pkt_ever_arrived;
unsigned int timeout_cnt;
unsigned int diff_idx;
unsigned int pkt_cnt_diff[30];
OMCI_OAM_Monitor_State_t run_state;
unsigned long last_pkt_cnt;
unsigned long total_diff;
struct timer_list xponCntRxPkt;
} Omci_Oam_Monitor_t;
void stop_omci_oam_monitor(void) ;
void start_omci_oam_monitor(void);
void omci_oam_monitor_init(Omci_Oam_Monitor_t * monitor_data_ptr) ;
int xpon_omai_oam_monitor_read_proc(char *buf, char **start, off_t off, int count, int *eof, void *data);
int xpon_omci_oam_monitor_write_proc(struct file *file, const char *buffer, unsigned long count, void *data);
# 19 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_global.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_debug.h" 1
# 36 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_debug.h"
extern int xpon_mac_print_open;
extern int drop_print_flag;
# 20 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_global.h" 2
# 49 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_global.h"
typedef enum {
PON_LINK_STATUS_OFF,
PON_LINK_STATUS_GPON,
PON_LINK_STATUS_EPON,
} PON_WanLinkStatus_t ;
typedef enum {
PON_ONU_TYPE_UNKNOWN =0,
PON_ONU_TYPE_SFU,
PON_ONU_TYPE_HGU,
} PON_OnuType_t ;
typedef enum{
PON_OLT_FIBERHOME_551601 = 1,
PON_OLT_DASAN ,
}PON_OltType_t;
# 79 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_global.h"
typedef struct {
unchar isGponHwFlag : 1 ;
unchar isEponHwFlag : 1 ;
unchar resv : 6;
}PON_DyingGasp_t ;
typedef enum{
XPON_PHY_SYNC_STATUS_NOT_START ,
XPON_PHY_SYNC_STATUS_SYNCING ,
XPON_PHY_SYNC_STATUS_SYNCED ,
XPON_PHY_SYNC_STATUS_STOPPED ,
} XponPhy_Mode_Detect_Status_t ;
typedef enum XPON_DAEMON_Job_type_s {
XPON_DAEMON_JOB_GET_PHY_PARAM ,
XPON_DAEMON_JOB_GPON_DEV_RESET ,
XPON_DAEMON_JOB_EPON_DEV_RESET ,
XPON_DAEMON_JOB_FE_RESET ,
XPON_DAEMON_JOB_QUIT,
} XPON_DAEMON_Job_type_t ;
typedef enum XPON_DAEMON_Job_Priority_s {
XPON_DAEMON_JOB_PRIORITY_LOW ,
XPON_DAEMON_JOB_PRIORITY_HIGH ,
} XPON_DAEMON_Job_Priority_t ;
typedef struct XPON_DAEMON_Job_data_s{
XPON_DAEMON_Job_type_t id ;
XPON_DAEMON_Job_Priority_t priority ;
void * private_data ;
}XPON_DAEMON_Job_data_t;
typedef struct XPON_DAEMON_Job_s{
XPON_DAEMON_Job_data_t data ;
volatile unchar valid ;
}XPON_DAEMON_Job_t;
typedef struct XPON_DAEMON_Job_Queue_s {
XPON_DAEMON_Job_t jobs[16] ;
spinlock_t lock ;
volatile uint in_index ;
volatile uint out_index ;
uint drop_counter ;
}XPON_DAEMON_Job_Queue_t ;
typedef struct XPON_Daemon_s{
wait_queue_head_t wq ;
struct task_struct * task ;
XPON_DAEMON_Job_Queue_t job_queue ;
} XPON_Daemon_t;
typedef struct {
unchar should_detect_stop ;
struct timer_list delay_start_detect_timer ;
struct timer_list check_sync_timer ;
unchar is_fix_mode : 1;
Xpon_Phy_Mode_t detect_mode : 2;
XponPhy_Mode_Detect_Status_t detect_status : 2;
Xpon_Phy_Mode_t working_mode : 2 ;
unchar calibrating : 1 ;
PHY_Los_Status_t phy_link_status : 3 ;
unchar trans_status_refresh_pending ;
struct timer_list trans_status_refresh_timer ;
spinlock_t trans_params_lock;
PHY_TransParam_T trans_params;
} XPON_PHY_Data_t;
typedef enum {
PON_WAN_START,
PON_WAN_STOP
} PON_StartMode_t ;
typedef enum {
XPON_ROUGE_STATE_FALSE,
XPON_ROUGE_STATE_TRUE,
} PON_Rouge_Status_t ;
typedef struct {
PON_WanLinkStatus_t sysLinkStatus ;
PON_WanLinkStatus_t sysPrevLink ;
PON_StartMode_t sysStartup ;
PWAN_GlbPriv_T wanPrivData ;
MCS_GlbPriv_T mcsPrivData ;
GPON_GlbPriv_T gponPrivData ;
EPON_GlbPriv_T eponPrivData ;
PON_DyingGasp_t dyingGaspData ;
PON_OnuType_t sysOnuType ;
PON_OltType_t sysOltType ;
ushort debugLevel ;
XPON_PHY_Data_t phyCfg ;
XPON_Daemon_t xpon_daemon ;
PON_Rouge_Status_t rogue_state ;
unchar fe_reset_happened : 1 ;
unchar ponMacPhyReset : 1 ;
unchar isUpDataTraffic : 1 ;
unchar isUpOmciOamTraffic: 1 ;
Omci_Oam_Monitor_t Omci_Oam_Monitor ;
unsigned long onlineStartTime;
} PON_SysData_T ;
typedef struct {
uint rx_rgst_cnt;
uint tx_rgst_cnt;
uint dscvr_gate_cnt;
uint tx_rgst_ack_cnt;
unsigned long jiffies_val;
} epon_stat;
typedef struct {
uint wan_mode_val;
uint gpon_eqd_val;
uint epon_rx_mpcp_cnt;
uint epon_sync_time_val;
uint gpon_resp_time_val;
uint gpon_ploam_stat_val;
uint epon_tx_mpcp_rgst_cnt;
} xpon_regs;
typedef enum {
GPON_DISABLE_SN_REPORT_O7 =0,
GPON_DISABLE_SN_SET_EMERGNCE_STATE,
GPON_PHY_READY_REPORT_O7,
GPON_SET_CONNECTION_REPORT_O7,
GPON_OMCI_SET_EMERGNCE_STATE,
}GPON_Emergence_Record;
typedef struct
{
ktime_t time;
unchar isHappen;
unchar event;
unchar emergenceState;
unchar reserve;
}GPON_Emergence_Info;
typedef struct
{
int onu_type_id;
u32 chipid;
}chipInformation;
typedef enum {
MT7520S = 1,
MT7520,
MT7520G,
MT7525,
MT7525G,
EN7521S,
EN7521F,
EN7526F,
EN7526D,
EN7526G,
EN751221,
} chipId_t ;
extern PON_SysData_T *gpPonSysData ;
extern GPON_GlbPriv_T *gpGponPriv ;
extern EPON_GlbPriv_T *gpEponPriv ;
extern PWAN_GlbPriv_T *gpWanPriv ;
extern MCS_GlbPriv_T *gpMcsPriv ;
extern XPON_PHY_Data_t *gpPhyData ;
# 292 "/opt/tclinux_phoenix/modules/private/xpon/inc/common/drv_global.h"
extern struct net init_net;
void gpon_show_emergence_info(void);
void gpon_record_emergence_info(unchar event);
# 89 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/inc/epon/fe_reg.h" 1
# 92 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/../../inc/common/phy_if_wrapper.h" 1
# 44 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/../../inc/common/phy_if_wrapper.h"
# 1 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/../../inc/common/drv_global.h" 1
# 45 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/../../inc/common/phy_if_wrapper.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/../../inc/common/drv_types.h" 1
# 46 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/../../inc/common/phy_if_wrapper.h" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/../../inc/common/xpondrv.h" 1
# 47 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/../../inc/common/phy_if_wrapper.h" 2
void XPON_PHY_SET_MODE(Xpon_Phy_Mode_t mode);
void check_sync_result(unsigned long arg);
void try_set_phy_mode(unsigned long arg);
static inline __attribute__((always_inline)) void schedule_phy_mode_auto_detection(void)
{
gpPhyData->detect_status = XPON_PHY_SYNC_STATUS_NOT_START ;
{ mod_timer(&gpPhyData->delay_start_detect_timer, (jiffies + ((gpPhyData->delay_start_detect_timer.data*100)/1000))) ; };
}
static inline __attribute__((always_inline)) int should_stop_phy_mode_detect(void)
{
return ( XPON_PHY_SYNC_STATUS_SYNCED == gpPhyData->detect_status ||
XPON_PHY_SYNC_STATUS_STOPPED == gpPhyData->detect_status );
}
static inline __attribute__((always_inline)) void stop_phy_mode_detect(void)
{
if(XPON_PHY_SYNC_STATUS_SYNCING == gpPhyData->detect_status){
gpPhyData->detect_status = XPON_PHY_SYNC_STATUS_STOPPED ;
}
}
static inline __attribute__((always_inline)) int CALL_PON_PHY_ENCT_HOOK(struct xpon_phy_api_data_s * data)
{
if(ECNT_HOOK_ERROR == __ECNT_HOOK(ECNT_XPON_PHY, ECNT_XPON_PHY_API, (struct ecnt_data * )data) ){
panic("ECNT_HOOK_ERROR occur with cmd_id:0x%x\n", data->cmd_id);
}
if((-1) == data->ret){
panic("No such API with type:%d, cmd_id:0x%x\n", data->api_type, data->cmd_id);
}
return data->ret;
}
static inline __attribute__((always_inline)) int XPON_PHY_SET_API(uint cmd_id, void * in_data)
{
struct xpon_phy_api_data_s phy_data = {0};
phy_data.api_type = XPON_PHY_API_TYPE_SET;
phy_data.cmd_id = cmd_id ;
phy_data.raw = in_data ;
return CALL_PON_PHY_ENCT_HOOK(&phy_data);
}
static inline __attribute__((always_inline)) int XPON_PHY_GET_API(uint cmd_id, void * in_data)
{
struct xpon_phy_api_data_s phy_data = {0};
phy_data.api_type = XPON_PHY_API_TYPE_GET;
phy_data.cmd_id = cmd_id ;
phy_data.raw = in_data ;
return CALL_PON_PHY_ENCT_HOOK(&phy_data);
}
static inline __attribute__((always_inline)) int XPON_PHY_GET(uint cmd_id)
{
return XPON_PHY_GET_API(cmd_id, ((void *)0));
}
static inline __attribute__((always_inline)) int XPON_PHY_SET(uint cmd_id)
{
return XPON_PHY_SET_API(cmd_id, ((void *)0));
}
static inline __attribute__((always_inline)) int XPON_PHY_FW_READY_ENABLE(void)
{
int data = (1);
return XPON_PHY_SET_API(0x0005, &data);
}
static inline __attribute__((always_inline)) int XPON_PHY_FW_READY_DISABLE(void)
{
int data = (0);
return XPON_PHY_SET_API(0x0005, &data);
}
static inline __attribute__((always_inline)) void XPON_PHY_COUNTER_CLEAR(int counter_mask)
{
XPON_PHY_SET_API(0x0009, &counter_mask);
}
static inline __attribute__((always_inline)) int XPON_PHY_SET_BIT_DELAY(int bit_delay)
{
return XPON_PHY_SET_API(0x000a, &bit_delay );
}
static inline __attribute__((always_inline)) void XPON_PHY_TX_POWER_CONFIG(int enable)
{
XPON_PHY_SET_API(0x000b, &enable );
}
static inline __attribute__((always_inline)) void XPON_PHY_GET_TRANS_STATUS(PHY_Trans_Status_t * phy_status)
{
XPON_PHY_GET_API(0x801c, phy_status );
}
static inline __attribute__((always_inline)) int XPON_PHY_SET_ROGUE(int mode)
{
return XPON_PHY_SET_API(0x0013, &mode);
}
static inline __attribute__((always_inline)) int XPON_PHY_TX_ENABLE(void)
{
int data = (1);
return XPON_PHY_SET_API(0x000c, &data);
}
static inline __attribute__((always_inline)) int XPON_PHY_TX_DISABLE(void)
{
int data = (0);
return XPON_PHY_SET_API(0x000c, &data);
}
static inline __attribute__((always_inline)) int XPON_PHY_SET_RX_FEC(int enable)
{
return XPON_PHY_SET_API(0x0014, &enable);
}
static inline __attribute__((always_inline)) int XPON_PHY_SET_TX_BURST_MODE(int mode)
{
return XPON_PHY_SET_API(0x000d, &mode);
}
static inline __attribute__((always_inline)) int XPON_PHY_SET_TRANS_RX(int mode)
{
return XPON_PHY_SET_API(0x0015, &mode);
}
static inline __attribute__((always_inline)) int XPON_PHY_SET_EPON_TS_CONTINUE_MODE(int mode)
{
return XPON_PHY_SET_API(0x0017, &mode);
}
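/*
 * NOTE: the inlines above are thin wrappers around XPON_PHY_SET_API() /
 * XPON_PHY_GET_API(); the hex cmd_id constants are PHY command codes.
 * Illustrative usage sketch (the alarm handling shown is hypothetical):
 *
 *   PHY_Trans_Status_t status = {0};
 *   XPON_PHY_GET_TRANS_STATUS(&status);
 *   if (status.alarms & PHY_TRANS_TX_HIGH_POWER_ALARM)
 *       XPON_PHY_TX_DISABLE();
 *   else
 *       XPON_PHY_TX_ENABLE();
 */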
# 95 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/../../inc/xmcs/xmcs_if.h" 1
# 96 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
# 1 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/tc3162/ledcetrl.h" 1
# 74 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/tc3162/ledcetrl.h"
typedef enum e_ledbutton_ioctl
{
LEDBUTTON_IOCTL_SELECT = (((4U) << (((0 +8)+8)+13)) | (((0x99)) << (0 +8)) | (((0)) << 0) | (((((sizeof(int[2]) == sizeof(int[2][1]) && sizeof(int[2]) < (1 << 13)) ? sizeof(int[2]) : __invalid_size_argument_for_IOC))) << ((0 +8)+8))),
LEDBUTTON_MAX_IOCTL
} E_LEDBUTTON_IOCTL;
# 270 "/opt/tclinux_phoenix/linux-2.6.36/arch/mips/include/asm/tc3162/ledcetrl.h"
void ledInit(void);
void ledSysInitOn(void);
void ledSysInitOff(void);
void ledTurnOn(uint8 led_no);
void ledTurnOff(uint8 led_no);
uint8 ledGetMode(uint8 led_no);
void ledSetMode(uint8 led_no, uint8 mode);
uint8 ledGetGpio(uint8 led_no);
void led_oen(uint8 led_no);
void led_ien(uint8 led_no);
int exModeMDIOGpioConf(uint8 mdc_gpio_num,uint8 mdio_gpio_num);
void exModeMDIOGpioQuery(uint8 * mdc_gpio_num,uint8 * mdio_gpio_num);
uint32 exModeMDIORead(uint32 reg);
void exModeMDIOWrite(uint32 reg, uint32 data);
void ledMtnInit(void);
int ledTaskInit(void);
void ledTask(void);
# 98 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c" 2
extern int led_xpon_status;
PEPON_MAC_REGS g_EPON_MAC_BASE = ((void *)0);
u8 isSfu = 1;
spinlock_t epon_reg_lock;
extern void eponPhyReadyRealStart(void);
int eponReadyFlag = 0;
extern __u8 g_silence_time;
static struct timer_list gEponTypeBTimer;
# 133 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
atomic_t eponMacRestart_flag = { (0) };
# 144 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
static int eponCmdInit(void);
static void eponCmdExit(void);
static int doEponMpcp(int argc, char *argv[], void *p);
static int doEponSet(int argc, char *argv[], void *p);
static int doEponDebuglevel(int argc, char *argv[], void *p);
static int doEponDeregister(int argc, char *argv[], void *p);
int eponMacOpen(struct inode *inode, struct file *filp);
void attack_protect_set(int active, int mode);
static int doEponRegtest(int argc, char *argv[], void *p);
static int doEponHwtest(int argc, char *argv[], void *p);
static int doEponRegDefCheck(int argc, char *argv[], void *p);
static int doEponSetTmdrftflag(int argc, char *argv[], void *p);
static int doEponDumpAllReg(int argc, char *argv[], void *p);
static int doEponSetProbe(int argc, char *argv[], void *p);
static int doEponResetMac(int argc, char *argv[], void *p);
static int doEponResetWan(int argc, char *argv[], void *p);
static int doEponSetRgstAckType(int argc, char *argv[], void *p);
static int doEponSetIgnoreIntMask(int argc, char *argv[], void *p);
static int doEponSetDefLlidMask(int argc, char *argv[], void *p);
static int doEponSetLlidThrshldNum(int argc, char *argv[], void *p);
static int doEponSetLlidQueThrshld(int argc, char *argv[], void *p);
extern __u8 g_silence_time;
# 192 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
struct tasklet_struct eponMacMpcpDscvGateTask;
struct tasklet_struct eponStartTask;
static const cmds_t eponMpcpCmds[] = {
{"deregister", doEponDeregister, 0x02, 1, "<deregister>"},
{"tmdrftflag", doEponSetTmdrftflag, 0x02, 1, "<tmdrftflag>"},
{"setprobe", doEponSetProbe, 0x02, 1, "<probe>"},
{"wanreset", doEponResetWan, 0x02, 0, "<wanreset>"},
{"macreset", doEponResetMac, 0x02, 0, "<macreset>"},
{"rgstack", doEponSetRgstAckType, 0x02, 0, "<llid,ack>"},
{"ignoreIntMask", doEponSetIgnoreIntMask, 0x02, 0, "<mask(32bit)>"},
{"setDefLlidMask", doEponSetDefLlidMask, 0x02, 1, "<mask(16bit)>"},
{((void *)0), ((void *)0), 0x10, 0, ((void *)0)},
};
static const cmds_t eponSetCmds[] = {
{"llidThrshldNum", doEponSetLlidThrshldNum, 0x02, 1, "<llidIndex(0~7)> <llidThrshldNum(1~3)>"},
{"llidQueThrshld", doEponSetLlidQueThrshld, 0x02, 1, "<llidIndex(0~7)> <thrshldIndex(0~2)> <q0 value> ... <q7 value>"},
{((void *)0), ((void *)0), 0, 0, ((void *)0)},
};
static const cmds_t eponHwtestCmds[] = {
{"regtest", doEponRegtest, 0x02, 1, "<times>"},
{"regDefCheck", doEponRegDefCheck, 0x02, 0, "<regDefCheck>"},
{((void *)0), ((void *)0), 0x10, 0, ((void *)0)},
};
static const cmds_t eponCmds[] = {
{"debuglevel", doEponDebuglevel, 0x02, 0, "<debuglevel>"},
{"dumpallreg", doEponDumpAllReg, 0x02, 0, "<dumpallreg>"},
{"mpcp", doEponMpcp, 0x12, 0, ((void *)0)},
{"set", doEponSet, 0x12, 0, ((void *)0)},
{"hwtest", doEponHwtest, 0x12, 0, ((void *)0)},
{((void *)0), ((void *)0), 0x10, 0, ((void *)0)},
};
eponMacHwtestReg_t *eponMacRegTable = ((void *)0);
# 253 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
__u32 eponDebugLevel = 1;
epon_t eponDrv;
# 274 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
int eponMacGetRegTblSize(void){
int i = 0;
if(((void *)0) == eponMacRegTable)
return 0;
while(eponMacRegTable[i].addr != ((void *)0))
i++;
return i;
}
static int doEpon(int argc, char *argv[], void *p)
{
return subcmd(eponCmds, argc, argv, p);
}
static int doEponMpcp(int argc, char *argv[], void *p)
{
return subcmd(eponMpcpCmds, argc, argv, p);
}
static int doEponSet(int argc, char *argv[], void *p)
{
return subcmd(eponSetCmds, argc, argv, p);
}
static int doEponDebuglevel(int argc, char *argv[], void *p){
if(argc < 2){
printk("\r\ndebugLevel = %u\n" , eponDebugLevel);
return 0;
}
eponDebugLevel = simple_strtoul(argv[1], ((void *)0), 16);
return 0;
}
static int doEponDumpAllReg(int argc, char *argv[], void *p){
eponMacDumpAllReg();
return 0;
}
static int doEponDeregister(int argc, char *argv[], void *p){
__u32 llidIndex;
if(argc < 2){
printk("\r\nderegister <LLID Index | 0-7>\n" );
return 0;
}
llidIndex = simple_strtoul(argv[1], ((void *)0), 16);
eponMpcpLocalDergstr((__u8)llidIndex);
return 0;
}
extern __u8 eponTimeDrftHandleFlag ;
static int doEponSetTmdrftflag(int argc, char *argv[], void *p){
if(argc < 2){
printk("\r\ntmdrftflag <0 | 1>\n" );
return 0;
}
if(simple_strtoul(argv[1], ((void *)0), 16) == 1){
eponTimeDrftHandleFlag = XPON_ENABLE;
}else{
eponTimeDrftHandleFlag = XPON_DISABLE;
}
return 0;
}
__u32 eponSetProbe = 0 ;
__u32 eponDefLlidMask = 1;
__u32 ignoreIntMask = 0xffffffff;
static int doEponSetProbe(int argc, char *argv[], void *p){
if(argc < 2){
printk("\r\n set epon mac probe after reset\n" );
printk("\r\n current defalt probe :0x%x\n",eponSetProbe);
return 0;
}
eponSetProbe = simple_strtoul(argv[1], ((void *)0), 16);
printk("set probe bfb66100 :0x%x",eponSetProbe);
return 0;
}
static int doEponSetDefLlidMask(int argc, char *argv[], void *p){
if(argc < 2){
printk("\r\n set default llid mask after reset\n" );
printk("\r\n current defalt probe :0x%x\n",eponDefLlidMask);
return 0;
}
eponDefLlidMask = simple_strtoul(argv[1], ((void *)0), 16);
printk("set default llid mask :0x%x",eponDefLlidMask);
return 0;
}
static int doEponResetMac(int argc, char *argv[], void *p){
eponMacRestart();
dbgtoMem(4, "epon mac reset");
return 0;
}
static int doEponResetWan(int argc, char *argv[], void *p){
eponWanResetWithChannelRetire();
dbgtoMem(4, "epon wan reset");
return 0;
}
static int doEponSetRgstAckType(int argc, char *argv[], void *p){
__u8 llidIndex=0,ack=1;
if(argc < 3){
printk("\r\n rgstack <llid:0~7> <flag:0(nack)/1(ack)> \n" );
printk(" set status(ack/nack) of Register_ack. \n");
return 0;
}
llidIndex = simple_strtoul(argv[1], ((void *)0), 16);
ack = simple_strtoul(argv[2], ((void *)0), 16);
if(ack !=0)
ack =1;
eponMpcpSetDiscvRgstAck(llidIndex,ack);
printk( "\r\n rgstack llid=%x,ack=%x\n",llidIndex,ack);
return 0;
}
static int doEponSetIgnoreIntMask(int argc, char *argv[], void *p){
if(argc < 2){
printk("\r\n ignoreIntMask <mask>,current mask %x \n",ignoreIntMask);
return 0;
}
ignoreIntMask = simple_strtoul(argv[1], ((void *)0), 16);
printk("\r\n ignoreIntMask =%x\n",ignoreIntMask);
return 0;
}
static int doEponSetLlidThrshldNum(int argc, char *argv[], void *p)
{
__u8 llidIndex = 0;
__u8 num = 0;
if (argc < 3)
{
printk("Usage: llidThrshldNum <llidIndex(0~7)> <llidThrshldNum(1~3)>\n");
return -1;
}
llidIndex = (__u8)simple_strtoul(argv[1], ((void *)0), 10);
num = (__u8)simple_strtoul(argv[2], ((void *)0), 10);
eponSetLlidThrshldNum(llidIndex, num);
return 0;
}
static int doEponSetLlidQueThrshld(int argc, char *argv[], void *p)
{
__u8 queueIndex = 0;
__u8 llidIndex = 0;
__u8 thrshldIndex = 0;
__u16 queueThreshold[8] = {0};
__u8 i = 0;
int ret = 0;
eponQueueThreshold_t EponQThr;
if (argc < 11)
{
printk("Usage: llidQueThrshld <llidIndex(0~7)> <thrshldIndex(0~2)> <q0 value> ... <q7 value>\n");
return -1;
}
llidIndex = (__u8)simple_strtoul(argv[1], ((void *)0), 10);
thrshldIndex = (__u8)simple_strtoul(argv[2], ((void *)0), 10);
for (i =0; i<8; i++)
{
queueThreshold[i] = (__u16)simple_strtoul(argv[3+i], ((void *)0), 10);
}
for (queueIndex = 0; queueIndex< 8;queueIndex++)
{
if (queueThreshold[queueIndex] != 0)
{
EponQThr.channel = llidIndex;
EponQThr.queue = queueIndex;
EponQThr.thrIdx = thrshldIndex;
EponQThr.value = queueThreshold[queueIndex];
ret = epon_set_queue_threshold(&EponQThr) ;
if(ret < 0 ){
printk("doEponSetLlidQueThrshld error\n");
return -1;
}
}
}
return 0;
}
static int eponTod1ppsIntHandler(void){
return 0;
}
static int eponTodUpdtIntHandler(void){
return 0;
}
static int eponPtpMsgTxIntHandler(void){
return 0;
}
# 720 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
__u8 eponStaticRptEnable = 0;
extern atomic_t mpcpTmOutCnt;
# 758 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
void eponIsr( void )
{
__u32 isrValue;
__u32 isrValue2;
int ret = 0;
int index = 0;
dbgtoMem(4, "enter eponIsr");
isrValue = ioread32((void *)((&(g_EPON_MAC_BASE)->e_int_status)));
iowrite32(0xffffffff, (void *)((&(g_EPON_MAC_BASE)->e_int_status)));
if(eponDrv.hldoverEnable && (1 == eponDrv.typeBOnGoing))
return;
if(isrValue == 0){
dbgtoMem(1 , "eponIsr interrupt status is 0");
return ;
}
isrValue2 = ioread32((void *)((&(g_EPON_MAC_BASE)->e_int_status)));
if(isrValue2){
dbgtoMem(1, "eponIsr interrupt status 0x%x --> 0x%x, after clear",isrValue, isrValue2);
}
isrValue &= ignoreIntMask;
if(isrValue&(1<<13))
{
dbgtoMem(3, "EponIsr TIMEDRFT_INT isrValue=%X", isrValue);
isrValue |= (1<<13);
eponTimeDrftIntHandler(0);
}
if(isrValue&(1<<14))
{
dbgtoMem(3, "EponIsr MPCP_TIMEOUT_INT");
isrValue |= (1<<14);
eponMpcpTmOutIntHandler(0);
}
else
{
((&mpcpTmOutCnt)->counter = (0));
}
if (gpEponPriv->mpcpInterruptMode) {
if(isrValue&(1<<24))
{
dbgtoMem(3, "EponIsr REGISTER_REQ_DONE_INT");
eponMpcpRgstReqIntHandler();
}
if(isrValue&(1<<25))
{
dbgtoMem(3, "EponIsr REGISTER_ACK_DONE_INT");
eponMpcpRgstAckIntHandler();
}
}
for(index = 0 ; index < 8 ; index++)
{
if(isrValue & ((1<<1) << index) )
{
dbgtoMem(3, "EponIsr LLID%d_RCV_RGST_INT", index);
ret = eponMpcpRgstIntHandler(index);
if(ret <0){
dbgtoMem(1, "ERR: LLID%d_RCV_RGST_INT eponMpcpRgstIntHandler ret=%d", index, ret);
}
}
}
# 971 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
if(isrValue&(1<<0))
{
dbgtoMem(4, "EponIsr Receive DiscvGate Message");
isrValue |= (1<<0);
tasklet_schedule(&eponMacMpcpDscvGateTask);
}
if(isrValue&(1<<12))
{
dbgtoMem(3, "EponIsr TOD_1PPS_INT");
isrValue |= (1<<12);
ret = eponTod1ppsIntHandler();
if(ret <0){
dbgtoMem(1 , "ERR: TOD_1PPS_INT eponTod1ppsIntHandler ret=%d", ret);
}
}
if(isrValue&(1<<11))
{
dbgtoMem(3, "EponIsr TOD_UPDT_INT");
isrValue |= (1<<11);
ret = eponTodUpdtIntHandler();
if(ret <0){
dbgtoMem(1, "ERR: TOD_UPDT_INT eponTodUpdtIntHandler ret=%d", ret);
}
}
if(isrValue&(1<<10))
{
dbgtoMem(3, "EponIsr PTP_MSG_TX_INT");
isrValue |= (1<<10);
ret = eponPtpMsgTxIntHandler();
if(ret <0){
dbgtoMem(1, "ERR: PTP_MSG_TX_INT eponPtpMsgTxIntHandler ret=%d", ret);
}
}
if(isrValue&(1<<9))
{
dbgtoMem(3, "EponIsr GNT_BUF_OVRRUN_INT");
isrValue |= (1<<9);
ret = eponMpcpGntOvrRunIntHandler();
if(ret <0){
dbgtoMem(1, "ERR: GNT_BUF_OVRRUN_INT eponMpcpGntOvrRunHandler ret=%d", ret);
}
}
if(isrValue&(1<<15))
{
dbgtoMem(3, "EponIsr RPT_OVERINTVL_INT");
isrValue |= (1<<15);
ret = eponMpcpRptOvrIntvalIntHandler();
if(ret <0){
dbgtoMem(1, "ERR: RPT_OVERINTVL_INT eponMpcpRptOvrIntvalIntHandler ret=%d", ret);
}
}
return;
}
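/*
 * NOTE: eponIsr() reads e_int_status once, writes all ones back
 * (write-1-to-clear), returns early when hold-over is enabled and a type-B
 * switchover is in progress, masks the status with ignoreIntMask and then
 * dispatches per bit: time drift, MPCP timeout, per-LLID register receive,
 * register req/ack done (only in mpcpInterruptMode), discovery GATE (via the
 * eponMacMpcpDscvGateTask tasklet), TOD/PTP events, grant-buffer overrun and
 * report-over-interval. The "isrValue |= (1<<n)" statements inside branches
 * that already tested that bit appear to be redundant no-ops kept from the
 * vendor code.
 */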
static void dbgPrintInit(void);
static void stringWrite(void);
static void stringRead(void);
static void dbgPrintTimer(unsigned long data);
spinlock_t dbgPrintLock = (spinlock_t ) { { .rlock = { .raw_lock = { .lock = 0 }, } } };
unsigned long dbgPrintFlags;
struct SCREEM_PRINT
{
ktime_t time_info;
char mem[128];
}screenprint;
struct SCREEM_PRINT print_mem[256]={0};
char test [128] = {0};
static volatile struct SCREEM_PRINT *pWrite = ((void *)0);
static volatile struct SCREEM_PRINT *pRead = ((void *)0);
static char temp[128] = {0};
struct timer_list dbgPrint;
void dbgPrintQuit(void)
{
del_timer(&dbgPrint);
}
static void dbgPrintInit(void)
{
init_timer_key((&dbgPrint), ((void *)0), ((void *)0));
dbgPrint.function = dbgPrintTimer;
dbgPrint.expires = jiffies + (100 >> 1);
add_timer(&dbgPrint);
pWrite = print_mem;
pRead = print_mem;
}
static inline __attribute__((always_inline)) void print_time_information(ktime_t * time)
{
u32 nsec = time->tv.nsec % 1000;
u32 usec = (time->tv.nsec / 1000) % 1000;
u32 msec = (time->tv.nsec / 1000000) % 1000;
u32 sec = time->tv.sec % 60;
u32 min = time->tv.sec / 60;
printk("[%03dmin : %03ds : %03dms: %03dus : %03dns] ", min, sec, msec, usec, nsec);
}
static void stringWrite(void)
{
if (pWrite == (print_mem + 256)){
pWrite = print_mem;
}
if(strcmp(pWrite, &test)){
printk("Memory out.\n");
}
else
{
pWrite->time_info = ktime_get();
memcpy(pWrite->mem, temp, sizeof(temp));
pWrite++;
}
}
static void stringRead(void)
{
int i;
for (i = 0; i < 60; i++)
{
if(pRead == (print_mem + 256)){
pRead = print_mem;
}
if(strcmp(pRead->mem, test) == 0){
break;
}
print_time_information(&pRead->time_info);
printk("%s\n", pRead->mem);
__asm__ __volatile__( ".set push\n\t" ".set noreorder\n\t" ".set mips2\n\t" "sync\n\t" ".set pop" : : : "memory");
memset(pRead, 0, sizeof(screenprint));
pRead++;
}
}
static void dbgPrintTimer(unsigned long data)
{
stringRead();
mod_timer(&dbgPrint, jiffies + (100 >> 1));
}
void dbgtoMem(__u32 debugLevel, char *fmt,...)
{
 va_list wp;
if (eponDebugLevel < debugLevel){
return;
}
do { do { ({ unsigned long __dummy; typeof(dbgPrintFlags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); dbgPrintFlags = _raw_spin_lock_irqsave(spinlock_check(&dbgPrintLock)); } while (0); } while (0);
memset(temp, 0, 128);
__builtin_va_start(wp,fmt);
vsnprintf(temp, 128, fmt, wp);
stringWrite();
__builtin_va_end(wp);
spin_unlock_irqrestore(&dbgPrintLock, dbgPrintFlags);
}
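/*
 * NOTE: dbgtoMem() formats into the static "temp" buffer under dbgPrintLock
 * and stringWrite() appends the message to the 256-entry print_mem ring;
 * dbgPrintTimer() drains up to 60 entries per run and re-arms itself every
 * 50 jiffies, so console output is deferred out of the interrupt path.
 * Illustrative usage:
 *
 *   dbgtoMem(3, "EponIsr LLID%d_RCV_RGST_INT", index);
 */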
extern typeof(dbgtoMem) dbgtoMem; static const char __kstrtab_dbgtoMem[] __attribute__((section("__ksymtab_strings"), aligned(1))) = "" "dbgtoMem"; static const struct kernel_symbol __ksymtab_dbgtoMem __attribute__((__used__)) __attribute__((section("__ksymtab" ""), unused)) = { (unsigned long)&dbgtoMem, __kstrtab_dbgtoMem };
int eponIrqRegister(void)
{
int err;
err = QDMA_API_REGISTER_HOOKFUNC(ECNT_QDMA_WAN, QDMA_CALLBACK_EPON_MAC_HANDLER, eponIsr);
if(err){
dbgtoMem(1, "ERR: eponIrqInit request_irq err=%d", err);
return err;
}
return 0;
}
void eponIrqUnregister(void)
{
QDMA_API_UNREGISTER_HOOKFUNC(ECNT_QDMA_WAN, QDMA_CALLBACK_EPON_MAC_HANDLER);
dbgtoMem(4, "eponIrqDinit QDMA_CALLBACK_EPON_MAC_HANDLER");
}
int getOnuMacAddr(__u8 *macAddr){
char flashMacAddr[6];
int i;
for (i=0; i<6; i++) {
flashMacAddr[i] = ( ((*(volatile unsigned long int *)(0xBFA10114) & 0x2)) ? ((ranand_read_byte != ((void *)0)) ? ranand_read_byte((flash_base + 0xff48 + i)) : -1) : (ReadSPIByte(flash_base + 0xff48 + i)) );
}
if( (flashMacAddr[0] == 0) && (flashMacAddr[1] == 0) && (flashMacAddr[2] == 0) &&
(flashMacAddr[3] == 0) && (flashMacAddr[4] == 0) && (flashMacAddr[5] == 0) )
printk("<6>" "The MAC address in flash is null!\n");
else
memcpy(macAddr, flashMacAddr, 6);
return 0;
}
int eponMacCheckMacCfg(void){
REG_e_mac_addr_cfg eponMacAddrCfgReg;
__u32 timers = 0;
while(timers < 100){
eponMacAddrCfgReg.Raw = ioread32((void *)((&(g_EPON_MAC_BASE)->e_mac_addr_cfg)));
if(eponMacAddrCfgReg.Bits.mac_addr_rwcmd_done == 0){
break;
}
timers++;
}
if(timers == 100){
dbgtoMem(1, "ERR: eponMacSetMacAddr timers == 100");
return -1;
}
return 0;
}
int eponMacSetMacAddr(eponLlid_t *llidEntry_p)
{
REG_e_mac_addr_cfg eponMacAddrCfgReg;
dbgtoMem(4 , "enter eponMacSetMacAddr");
if(llidEntry_p == ((void *)0)){
dbgtoMem(1, "ERR: eponMacSetMacAddr llidEntry_p == NULL");
return -1;
}
if(eponMacCheckMacCfg() < 0){
dbgtoMem(1, "ERR: eponMacSetMacAddr timers == 100");
return -1;
}
eponMacAddrCfgReg.Raw = 0;
iowrite32(get32(llidEntry_p->macAddr+2), (void *)((&(g_EPON_MAC_BASE)->e_mac_addr_value)));
eponMacAddrCfgReg.Bits.mac_addr_dw_idx = 0;
eponMacAddrCfgReg.Bits.mac_addr_llid_indx = llidEntry_p->llidIndex;
eponMacAddrCfgReg.Bits.mac_addr_rwcmd = 1;
iowrite32(eponMacAddrCfgReg.Raw, (void *)((&(g_EPON_MAC_BASE)->e_mac_addr_cfg)));
if(eponMacCheckMacCfg() < 0){
dbgtoMem(1, "ERR: eponMacSetMacAddr set 1 timers == 100");
return -1;
}
eponMacAddrCfgReg.Raw = 0;
iowrite32(get16(llidEntry_p->macAddr), (void *)((&(g_EPON_MAC_BASE)->e_mac_addr_value)));
eponMacAddrCfgReg.Bits.mac_addr_dw_idx = 1;
eponMacAddrCfgReg.Bits.mac_addr_llid_indx = llidEntry_p->llidIndex;
eponMacAddrCfgReg.Bits.mac_addr_rwcmd = 1;
iowrite32(eponMacAddrCfgReg.Raw, (void *)((&(g_EPON_MAC_BASE)->e_mac_addr_cfg)));
if(eponMacCheckMacCfg() < 0){
dbgtoMem(1, "ERR: eponMacSetMacAddr set 2 timers == 100");
return -1;
}
return 0;
}
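/*
 * NOTE: the per-LLID MAC address appears to be programmed with two indirect
 * writes through e_mac_addr_value/e_mac_addr_cfg: dword index 0 carries the
 * low four bytes (get32(macAddr+2)) and dword index 1 the leading two bytes
 * (get16(macAddr)); eponMacCheckMacCfg() polls mac_addr_rwcmd_done before and
 * between the two steps. Illustrative call sketch (field values hypothetical):
 *
 *   eponLlid_t llid;
 *   memset(&llid, 0, sizeof(llid));
 *   llid.llidIndex = 0;
 *   memcpy(llid.macAddr, eponOnuMacAddr, 6);
 *   eponMacSetMacAddr(&llid);
 */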
int eponMacGetMacAddr(eponLlid_t *llidEntry_p){
REG_e_mac_addr_cfg eponMacAddrCfgReg;
__u32 macLow = 0;
__u32 macHigh = 0;
dbgtoMem(4 , "enter eponMacGetMacAddr");
if(llidEntry_p == ((void *)0)){
return -1;
}
if(eponMacCheckMacCfg() < 0){
dbgtoMem(1, "ERR: eponMacGetMacAddr timers == 100");
return -1;
}
eponMacAddrCfgReg.Raw = 0;
eponMacAddrCfgReg.Bits.mac_addr_dw_idx = 0;
eponMacAddrCfgReg.Bits.mac_addr_llid_indx = llidEntry_p->llidIndex;
eponMacAddrCfgReg.Bits.mac_addr_rwcmd = 0;
iowrite32(eponMacAddrCfgReg.Raw, (void *)((&(g_EPON_MAC_BASE)->e_mac_addr_cfg)));
if(eponMacCheckMacCfg() < 0){
dbgtoMem(1, "ERR: eponMacSetMacAddr set 1 timers == 100");
return -1;
}
macLow = ioread32((void *)((&(g_EPON_MAC_BASE)->e_mac_addr_value)));
dbgtoMem(4, "eponMacSetMacAddr set 1 macLow == %8X", macLow);
eponMacAddrCfgReg.Raw = 0;
eponMacAddrCfgReg.Bits.mac_addr_dw_idx = 1;
eponMacAddrCfgReg.Bits.mac_addr_llid_indx = llidEntry_p->llidIndex;
eponMacAddrCfgReg.Bits.mac_addr_rwcmd = 0;
iowrite32(eponMacAddrCfgReg.Raw, (void *)((&(g_EPON_MAC_BASE)->e_mac_addr_cfg)));
if(eponMacCheckMacCfg() < 0){
dbgtoMem(1, "ERR: eponMacSetMacAddr set 2 timers == 100");
return -1;
}
macHigh = ioread32((void *)((&(g_EPON_MAC_BASE)->e_mac_addr_value)));
dbgtoMem(4, "eponMacSetMacAddr set 2 macHigh == %8X", macHigh);
return 0;
}
extern __u8 eponOnuMacAddr[];
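/*
 * eponLlidEnable():
 * Bring one LLID up: derive its MAC address by adding llidIndex into the last
 * three bytes of the base ONU MAC, program it into the MAC, reset the MPCP
 * discovery FSM for that LLID, and enable the per-LLID interrupt bit
 * (llidIndex+1) in e_int_en.  eponLlidDisable() reverses this and also clears
 * the discovery status and retires the matching GDMA2 channel.
 */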
int eponLlidEnable(__u8 llidIndex){
eponLlid_t *llidEntry_p = &(eponDrv.eponLlidEntry[llidIndex]);
__u32 tmp = 0;
__u8 macAddr[6] = {0};
dbgtoMem(4, "enter eponLlidEnable");
# 1367 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
memcpy( macAddr ,eponOnuMacAddr ,6);
tmp = get32(macAddr+2);
tmp += llidIndex;
memcpy(macAddr+3 ,(__u8 *)&tmp+1 , 3 );
memcpy(llidEntry_p->macAddr , macAddr , 6);
tmp = 0;
if(eponMacSetMacAddr(llidEntry_p) < 0){
dbgtoMem(1, "ERR: eponMacSetMacAddr fail llidIndex=%d", llidIndex);
return -1;
}
if(eponDebugLevel >= 2)
eponMacGetMacAddr(llidEntry_p);
llidEntry_p->eponMpcp.eponDiscFsm.mpcpDiscState = 0;
llidEntry_p->eponMpcp.eponDiscFsm.begin = 1;
llidEntry_p->eponMpcp.eponDiscFsm.registered = 0;
eponMpcpDscvFsmWaitHandler(llidIndex);
llidEntry_p->eponMpcp.eponDiscFsm.mpcpDiscMacr_RgstrAck = 1;
llidEntry_p->eponMpcp.eponDiscFsm.mpcpDiscState = 1;
llidEntry_p->enableFlag= XPON_ENABLE;
tmp = ioread32((void *)((&(g_EPON_MAC_BASE)->e_int_en)));
tmp |= (1<<(llidIndex+1));
iowrite32(tmp, (void *)((&(g_EPON_MAC_BASE)->e_int_en)));
return 0;
}
int eponLlidDisable(__u8 llidIndex){
eponLlid_t *llidEntry_p = &(eponDrv.eponLlidEntry[llidIndex]);
__u32 tmp = 0;
eponLlidDscvStsReg_t sdcvSts;
dbgtoMem(4, "enter eponLlidDisable");
llidEntry_p->enableFlag = XPON_DISABLE;
tmp = ioread32((void *)((&(g_EPON_MAC_BASE)->e_int_en)));
tmp &= ~(1<<(llidIndex+1));
iowrite32(tmp, (void *)((&(g_EPON_MAC_BASE)->e_int_en)));
sdcvSts.Raw = ioread32((void *)((&(g_EPON_MAC_BASE)->e_llid0_dscvry_sts)+ llidIndex));
sdcvSts.Bits.llidDscvrySts = 0;
iowrite32(sdcvSts.Raw, (void *)((&(g_EPON_MAC_BASE)->e_llid0_dscvry_sts)+ llidIndex));
FE_API_SET_CHANNEL_RETIRE_ONE(FE_GDM_SEL_GDMA2, llidIndex);
xmcs_remove_llid(llidIndex);
return 0;
}
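/*
 * eponInitAllLlid():
 * Program the global interrupt-enable mask (plus the two MPCP bits when
 * mpcpInterruptMode is set), then reinitialise all eight LLID entries.  LLIDs
 * selected by eponDefLlidMask are enabled immediately; entries that were in
 * discovery state 5 (the driver's silence state) keep their remaining
 * rgstAgainTimeout so a restart does not cut the silence period short.
 */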
static int eponInitAllLlid(void){
__u8 llidIndex=0;
__u32 Raw = 0;
eponLlid_t *llidEntry_p = ((void *)0);
extern spinlock_t mpcpDscvGateLock;
extern spinlock_t mpcpDscvPendingLock;
int temp = 0;
int currentState = 0;
Raw |= ((1<<15) | (1<<14) | (1<<13) | (1<<12) | (1<<11) | (1<<9) | (1<<0));
if (gpEponPriv->mpcpInterruptMode)
Raw |= ((1<<24) | (1<<25));
iowrite32(Raw, (void *)((&(g_EPON_MAC_BASE)->e_int_en)));
eponDrv.llidMask = 1;
eponDrv.llidMask = eponDefLlidMask;
while(llidIndex < 8){
llidEntry_p = &(eponDrv.eponLlidEntry[llidIndex]);
temp = llidEntry_p->eponMpcp.eponDiscFsm.rgstAgainTimeout;
currentState = llidEntry_p->eponMpcp.eponDiscFsm.mpcpDiscState;
dbgtoMem(4,"eponInitAllLlid llid %d , Timeout %d, State %d",
llidIndex, temp, currentState);
memset(llidEntry_p, 0 , sizeof(eponLlid_t));
llidEntry_p->llidIndex = llidIndex;
do { spinlock_check(&mpcpDscvPendingLock); do { *(&(&mpcpDscvPendingLock)->rlock) = (raw_spinlock_t) { .raw_lock = { .lock = 0 }, }; } while (0); } while (0);
do { spinlock_check(&mpcpDscvGateLock); do { *(&(&mpcpDscvGateLock)->rlock) = (raw_spinlock_t) { .raw_lock = { .lock = 0 }, }; } while (0); } while (0);
if((eponDrv.llidMask)&(1<<llidIndex)){
eponLlidEnable(llidIndex);
}
if (5 == currentState)
{
if(temp > 0 && temp <= g_silence_time)
llidEntry_p->eponMpcp.eponDiscFsm.rgstAgainTimeout = temp;
llidEntry_p->eponMpcp.eponDiscFsm.mpcpDiscState = currentState;
}
llidIndex++;
}
return 0;
}
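/*
 * epon_set_qdma_qos():
 * Default QDMA WAN QoS setup: a 0x3e8 time-slice TX rate meter, automatic
 * TX-queue congestion control in dynamic peak-rate-margin mode, and minimum
 * dynamic congestion thresholds for total/channel/queue occupancy.
 */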
int epon_set_qdma_qos(void)
{
QDMA_TxRateMeter_T txRateMeter;
QDMA_TxQDynCngstThrh_T txqDynCngstThrh;
QDMA_txCngstCfg_t txCngstCfg;
memset(&txRateMeter, 0, sizeof(QDMA_TxRateMeter_T));
txRateMeter.txRateMeterTimeSlice = 0x3e8;
txRateMeter.txRateMeterTimeDivisor = 0;
QDMA_API_SET_TX_RATEMETER(ECNT_QDMA_WAN, &txRateMeter);
memset(&txCngstCfg, 0, sizeof(QDMA_txCngstCfg_t));
txCngstCfg.txCngstMode = QDMA_TXCNGST_DYNAMIC_PEAKRATE_MARGIN;
txCngstCfg.peekRateMargin = QDMA_TXCNGST_PEEKRATE_MARGIN_100;
txCngstCfg.peekRateDuration = 0xff;
QDMA_API_SET_TXQ_CNGST_AUTO_CONFIG(ECNT_QDMA_WAN, &txCngstCfg);
memset(&txqDynCngstThrh, 0, sizeof(QDMA_TxQDynCngstThrh_T));
QDMA_API_GET_TXQ_CNGST_DYNAMIC_THRESHOLD(ECNT_QDMA_WAN, &txqDynCngstThrh);
txqDynCngstThrh.dynCngstTotalMinThrh = 0x300;
txqDynCngstThrh.dynCngstChnlMinThrh = 0x30;
txqDynCngstThrh.dynCngstQueueMinThrh = 0x30;
QDMA_API_SET_TXQ_CNGST_DYNAMIC_THRESHOLD(ECNT_QDMA_WAN, &txqDynCngstThrh);
return 0;
}
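/*
 * eponTxOamFavorModeCtl():
 * Only effective on the chip revision selected by the 0xbfb00064 ID check.
 * When enabled, queue 7 of the WAN QDMA is made non-blocking, the frame
 * engine's favor-OAM TX path is turned on and bit 25 of e_glb_cfg is set;
 * disabling reverses all three.
 */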
int eponTxOamFavorModeCtl(__u8 flag)
{
__u32 Raw = 0;
QDMA_TxQCngstQueueCfg_T txqCngstQueueCfg;
if (!(((*(volatile unsigned long int *)(0xbfb00064)&0xffff0000))==0x00080000))
return 0;
memset(&txqCngstQueueCfg, 0, sizeof(QDMA_TxQCngstQueueCfg_T));
if (flag == XPON_ENABLE)
{
txqCngstQueueCfg.queueMode = QDMA_QUEUE_NONBLOCKING;
txqCngstQueueCfg.queue = 7;
QDMA_API_SET_TXQ_CNGST_QUEUE_NONBLOCKING(ECNT_QDMA_WAN, &txqCngstQueueCfg);
FE_API_SET_TX_FAVOR_OAM_ENABLE(FE_ENABLE);
Raw = ioread32((void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
Raw |= (1<<25);
iowrite32(Raw, (void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
}
else
{
txqCngstQueueCfg.queueMode = QDMA_QUEUE_NORMAL;
txqCngstQueueCfg.queue = 7;
QDMA_API_SET_TXQ_CNGST_QUEUE_NONBLOCKING(ECNT_QDMA_WAN, &txqCngstQueueCfg);
FE_API_SET_TX_FAVOR_OAM_ENABLE(FE_DISABLE);
Raw = ioread32((void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
Raw &= ~(1<<25);
iowrite32(Raw, (void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
}
return 0;
}
static int eponInitParam(void){
eponDrv.hldoverEnable = 0;
eponDrv.hldOverTime = 0;
eponDrv.timeDrftEq255Cnt = 0;
eponDrv.timeDrftFrom8To16Cnt = 0;
eponDrv.llidMask = 1;
gpWanPriv->activeChannelNum = 1;
xpon_reset_qdma_tx_buf();
epon_set_qdma_qos();
return 0;
}
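/*
 * eponHwDygaspCtrl():
 * Enable or disable hardware dying gasp by rewriting e_dyinggsp_cfg wholesale:
 * 0x80000102 on enable, 0 on disable.  eponDevGetDyingGaspMode() and
 * eponDevSetDyingGaspNum()/eponDevGetDyingGaspNum() below expose the
 * hw_dying_gasp_en and dygsp_num_of_times fields of the same register.
 */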
int eponHwDygaspCtrl(__u8 flag){
__u32 Raw;
Raw = ioread32((void *)((&(g_EPON_MAC_BASE)->e_dyinggsp_cfg)));
if(flag == XPON_ENABLE){
Raw = 0x80000102;
iowrite32(Raw, (void *)((&(g_EPON_MAC_BASE)->e_dyinggsp_cfg)));
}else if((flag == XPON_DISABLE)){
Raw = 0x00000000;
iowrite32(Raw, (void *)((&(g_EPON_MAC_BASE)->e_dyinggsp_cfg)));
}
return 0;
}
int eponDevGetDyingGaspMode(__u8 *mode)
{
REG_e_dyinggsp_cfg eponDGCtrl;
eponDGCtrl.Raw = ioread32((void *)((&(g_EPON_MAC_BASE)->e_dyinggsp_cfg)));
*mode = eponDGCtrl.Bits.hw_dying_gasp_en;
return 0 ;
}
int eponDevSetDyingGaspNum(__u32 num)
{
REG_e_dyinggsp_cfg eponDGCtrl;
eponDGCtrl.Raw = ioread32((void *)((&(g_EPON_MAC_BASE)->e_dyinggsp_cfg)));
eponDGCtrl.Bits.dygsp_num_of_times = num;
iowrite32(eponDGCtrl.Raw, (void *)((&(g_EPON_MAC_BASE)->e_dyinggsp_cfg))) ;
return 0;
}
int eponDevGetDyingGaspNum(__u32 *num)
{
REG_e_dyinggsp_cfg eponDGCtrl;
eponDGCtrl.Raw = ioread32((void *)((&(g_EPON_MAC_BASE)->e_dyinggsp_cfg)));
*num = eponDGCtrl.Bits.dygsp_num_of_times;
return 0;
}
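/*
 * eponSnifferModeCtrl():
 * Sniffer (mirror) mode, again limited to the chip revision checked via
 * 0xbfb00064.  Enabling sets bits 26/22/10 (plus bit 18 for mode 2) in
 * e_glb_cfg and writes the LAN-port mask into bits 16..21 of e_sniff_sp_tag;
 * disabling clears those bits and zeroes the tag register.
 */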
void eponSnifferModeCtrl(__u8 flag)
{
__u32 Raw = 0;
__u32 tmp = 0;
__u8 mask = gpEponPriv->eponCfg.snifferModeConfig.snifferModeLanMask;
if (!(((*(volatile unsigned long int *)(0xbfb00064)&0xffff0000))==0x00080000))
return;
Raw = ioread32((void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
if (flag != 0)
{
Raw |= ((1<<26) | (1<<22) | (1<<10));
if (flag == 2)
{
Raw |= (1<<18);
}
iowrite32(Raw, (void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
tmp = ioread32((void *)((&(g_EPON_MAC_BASE)->e_sniff_sp_tag)));
tmp &= ~((0x3F) << (16));
tmp |= ((mask & (0x3F)) << (16));
iowrite32(tmp, (void *)((&(g_EPON_MAC_BASE)->e_sniff_sp_tag)));
}
else
{
Raw &= 0xFBBBFBFF;
iowrite32(Raw, (void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
iowrite32(0, (void *)((&(g_EPON_MAC_BASE)->e_sniff_sp_tag)));
}
}
int eponSetSnifferModeConfig(__u8 enable, __u16 mask)
{
if (!(((*(volatile unsigned long int *)(0xbfb00064)&0xffff0000))==0x00080000))
return 0;
gpEponPriv->eponCfg.snifferModeConfig.snifferModeEnable= enable;
gpEponPriv->eponCfg.snifferModeConfig.snifferModeLanMask = mask;
eponSnifferModeCtrl(enable);
return 0;
}
# 1720 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
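/*
 * eponMacSwReset():
 * Software reset of the EPON MAC: pulse bit 31 of the reset register at
 * 0xBFB00834, then pulse bit 4 of e_glb_cfg, with busy-wait loops in between.
 * Afterwards the guard threshold, TRX adjust times and TX-fetch config are
 * reprogrammed, the optional static-report mode is set up when
 * eponStaticRptEnable is on, bit 23 of e_glb_cfg is set, and the PON LEDs are
 * turned off.
 */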
int eponMacSwReset(void){
__u32 Raw;
__u32 valueRst;
volatile __u32 cnt = 0;
valueRst = ioread32((void *)(0xBFB00834));
valueRst |= (1<<31);
iowrite32(valueRst, (void *)(0xBFB00834));
while(cnt <(800000)){
cnt++;
}
valueRst &= 0x7fffffff;
iowrite32(valueRst, (void *)(0xBFB00834));
cnt = 0;
Raw = ioread32((void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
Raw |= 0x10;
iowrite32(Raw, (void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
while(cnt <(800000)){
cnt++;
}
Raw &= 0xffffffef;
iowrite32(Raw, (void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
cnt = 0;
while(cnt <(800000)){
cnt++;
}
if(eponSetProbe != 0)
iowrite32(eponSetProbe, (void *)((&(g_EPON_MAC_BASE)->e_dbg_prb_sel))) ;
dbgtoMem(4,"e_int_status=%x,e_llid0_dscvry_sts=%x",
ioread32((void *)((&(g_EPON_MAC_BASE)->e_int_status))),
ioread32((void *)((&(g_EPON_MAC_BASE)->e_llid0_dscvry_sts))));
Raw |= 0x2;
iowrite32(Raw, (void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
iowrite32(0x08, (void *)((&(g_EPON_MAC_BASE)->e_grd_thrshld)));
iowrite32(0x004ffff1, (void *)((&(g_EPON_MAC_BASE)->e_trx_adjust_time1)));
iowrite32(6, (void *)((&(g_EPON_MAC_BASE)->e_trx_adjust_time2)));
iowrite32(0x202403e8, (void *)((&(g_EPON_MAC_BASE)->e_txfetch_cfg)));
SetRegMask((uint)(((&(g_EPON_MAC_BASE)->e_glb_cfg))), (0xFFFFFFFF>>(32-1)), 6, (1));
SetRegMask((uint)(((&(g_EPON_MAC_BASE)->e_glb_cfg))), (0xFFFFFFFF>>(32-1)), 1, (1));
if (eponStaticRptEnable == 1){
printk("Enable epon static report\n");
SetRegMask((uint)(((&(g_EPON_MAC_BASE)->e_rpt_len))), (0xFFFFFFFF>>(32-1)), 8, (1));
GetRegMask((uint)(((&(g_EPON_MAC_BASE)->e_rpt_len))), (0xFFFFFFFF>>(32-1)), 8);
GetRegMask((uint)(((&(g_EPON_MAC_BASE)->e_rpt_len))), (0xFFFFFFFF>>(32-1)), 8);
GetRegMask((uint)(((&(g_EPON_MAC_BASE)->e_rpt_len))), (0xFFFFFFFF>>(32-1)), 8);
GetRegMask((uint)(((&(g_EPON_MAC_BASE)->e_rpt_len))), (0xFFFFFFFF>>(32-1)), 8);
SetRegMask((uint)(((&(g_EPON_MAC_BASE)->e_rpt_len))), (0xFFFFFFFF>>(32-1)), 8, (0));
SetRegMask((uint)(((&(g_EPON_MAC_BASE)->e_rpt_len))), (0xFFFFFFFF>>(32-6)), 0, (1));
SetRegMask((uint)(((&(g_EPON_MAC_BASE)->e_rpt_data))), (0xFFFFFFFF>>(32-32)), 0, (0x0101a000));
SetRegMask((uint)(((&(g_EPON_MAC_BASE)->e_rpt_data))), (0xFFFFFFFF>>(32-32)), 0, (0));
SetRegMask((uint)(((&(g_EPON_MAC_BASE)->e_glb_cfg))), (0xFFFFFFFF>>(32-1)), 5, (1));
SetRegMask((uint)(((&(g_EPON_MAC_BASE)->e_tx_cal_cnst))), (0xFFFFFFFF>>(32-6)), 0, (0x8));
}
# 1819 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
Raw = ioread32((void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
Raw |= (1<<23);
iowrite32(Raw, (void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
printk("Raw = %x\n", Raw);
ledTurnOff((0 + 64));
ledTurnOff((0 + 65));
led_xpon_status = 0;
return 0;
}
int eponMacStop(void){
__u8 llidIndex = 0;
dbgtoMem(4, "enter eponMacStop");
eponMacSwReset();
for(llidIndex = 0; llidIndex < 8;llidIndex++){
xmcs_remove_llid(llidIndex);
}
return 0;
}
void checkEponMacRestart(void){
int cnt = 0;
while(cnt <= 1000){
if((*(volatile int *)&(&eponMacRestart_flag)->counter)) {
( (__builtin_constant_p(1) && (1)<=5) ? __udelay((1)*1000) : ({unsigned long __ms=(1); while (__ms--) __udelay(1000);}));
}else{
break;
}
cnt++;
}
}
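/*
 * eponMacRestart():
 * Full MAC restart used by the WAN reset path: flag the restart in
 * eponMacRestart_flag (checkEponMacRestart() lets other paths wait for it),
 * reset the MAC, drop every LLID from the switch, reinitialise all LLIDs and
 * reapply the dying-gasp, TX-OAM-favor and sniffer-mode configuration.
 */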
int eponMacRestart(void){
__u8 llidIndex = 0;
__u8 snifferModeEnable = 0;
dbgtoMem(4, "enter eponMacRestart");
((&eponMacRestart_flag)->counter = (1));
eponMacSwReset();
for(llidIndex = 0; llidIndex < 8;llidIndex++){
xmcs_remove_llid(llidIndex);
}
eponInitAllLlid();
if (gpPonSysData->dyingGaspData.isEponHwFlag)
eponHwDygaspCtrl(XPON_ENABLE);
else
eponHwDygaspCtrl(XPON_DISABLE);
if (gpEponPriv->eponCfg.txOamFavorMode)
eponTxOamFavorModeCtl(XPON_ENABLE);
else
eponTxOamFavorModeCtl(XPON_DISABLE);
snifferModeEnable = gpEponPriv->eponCfg.snifferModeConfig.snifferModeEnable;
eponSnifferModeCtrl(snifferModeEnable);
((&eponMacRestart_flag)->counter = (0));
return 0;
}
# 1946 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
int eponMacTxRxEnable(void){
__u32 Raw = ioread32((void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
Raw &= ~((1<<9)|(1<<8));
iowrite32(Raw, (void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
return 0;
}
int eponMacTxRxDisable(void){
__u32 Raw = ioread32((void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
Raw |= ((1<<9)|(1<<8));
iowrite32(Raw, (void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
return 0;
}
# 2009 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
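/*
 * eponWanStopWithChannelRetire():
 * Orderly shutdown under epon_reg_lock: disable MAC TX/RX, stop CPU upstream
 * traffic, disable all GDMA2 TX/RX channels and CDMA2 hardware-forward
 * channels, retire the GDMA2 channels and finally stop the MAC.
 */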
int eponWanStopWithChannelRetire(void){
__u8 chanIndex =0;
__u8 i = 0;
unsigned long flags;
do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&epon_reg_lock)); } while (0); } while (0);
eponMacTxRxDisable();
( (__builtin_constant_p(1) && (1)<=5) ? __udelay((1)*1000) : ({unsigned long __ms=(1); while (__ms--) __udelay(1000);}));
disable_cpu_us_traffic();
# 2033 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
for(i = 0; i < 32; i++) {
FE_API_SET_CHANNEL_ENABLE(FE_GDM_SEL_GDMA2, FE_GDM_SEL_TX, i, FE_DISABLE);
}
for(i = 0; i < 16; i++) {
FE_API_SET_CHANNEL_ENABLE(FE_GDM_SEL_GDMA2, FE_GDM_SEL_RX, i, FE_DISABLE);
}
for(i = 0; i < 8; i++) {
FE_API_SET_HWFWD_CHANNEL(FE_CDM_SEL_CDMA2, i, FE_DISABLE);
}
( (__builtin_constant_p(1) && (1)<=5) ? __udelay((1)*1000) : ({unsigned long __ms=(1); while (__ms--) __udelay(1000);}));
FE_API_SET_CHANNEL_RETIRE_ALL(FE_GDM_SEL_GDMA2, 8);
eponMacStop();
eponMacTxRxDisable();
spin_unlock_irqrestore(&epon_reg_lock,flags);
return 0;
}
uint g_EponLaserTime = 0;
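/*
 * eponSetlaserTime():
 * Reduce the 8-bit laser timing field held in bits 15..8 of
 * e_laser_onoff_time by g_EponLaserTime and write the register back, leaving
 * the other fields untouched.
 */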
void eponSetlaserTime(void)
{
__u32 Raw =0 ;
__u32 temp = 0;
Raw = (ioread32((void *)((&(g_EPON_MAC_BASE)->e_laser_onoff_time))));
temp = (Raw & 0xFF00 >> 8) - g_EponLaserTime;
Raw &= 0xFFFF00FF;
Raw = Raw | (temp << 8);
iowrite32(Raw, (void *)((&(g_EPON_MAC_BASE)->e_laser_onoff_time)));
}
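/*
 * eponWanResetWithChannelRetire():
 * WAN-side reset under epon_reg_lock: quiesce and reset the MAC (same register
 * sequence as eponMacSwReset with TX/RX kept disabled), disable and retire all
 * GDMA2/CDMA2 channels, restart the MAC via eponMacRestart(), re-enable
 * channels 0..7 (plus 16..23 when TX-OAM-favor mode is on), then re-enable
 * MAC TX/RX and reapply the laser-time adjustment.
 */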
int eponWanResetWithChannelRetire(void){
__u8 chanIndex =0;
__u32 Raw =0 ;
__u32 valueRst;
volatile __u32 cnt = 0;
__u8 i = 0;
unsigned long flags;
do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&epon_reg_lock)); } while (0); } while (0);
eponMacTxRxDisable();
cnt = 0;
while(cnt <(800000)){
cnt++;
}
cnt = 0;
Raw = ioread32((void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
Raw |= 0x10;
iowrite32(Raw, (void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
while(cnt <(800000)){
cnt++;
}
Raw &= 0xffffffef;
iowrite32(Raw, (void *)((&(g_EPON_MAC_BASE)->e_glb_cfg)));
cnt = 0;
while(cnt <(800000)){
cnt++;
}
valueRst = ioread32((void *)(0xBFB00834));
valueRst |= (1<<31);
iowrite32(valueRst, (void *)(0xBFB00834));
while(cnt <(800000)){
cnt++;
}
valueRst &= 0x7fffffff;
iowrite32(valueRst, (void *)(0xBFB00834));
# 2138 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
for(i = 0; i < 32; i++) {
FE_API_SET_CHANNEL_ENABLE(FE_GDM_SEL_GDMA2, FE_GDM_SEL_TX, i, FE_DISABLE);
}
for(i = 0; i < 16; i++) {
FE_API_SET_CHANNEL_ENABLE(FE_GDM_SEL_GDMA2, FE_GDM_SEL_RX, i, FE_DISABLE);
}
for(i = 0; i < 8; i++) {
FE_API_SET_HWFWD_CHANNEL(FE_CDM_SEL_CDMA2, i, FE_DISABLE);
}
( (__builtin_constant_p(1) && (1)<=5) ? __udelay((1)*1000) : ({unsigned long __ms=(1); while (__ms--) __udelay(1000);}));
FE_API_SET_CHANNEL_RETIRE_ALL(FE_GDM_SEL_GDMA2, 8);
eponMacRestart();
eponMacTxRxDisable();
for(i = 0; i < 8; i++) {
FE_API_SET_CHANNEL_ENABLE(FE_GDM_SEL_GDMA2, FE_GDM_SEL_TX, i, FE_ENABLE);
FE_API_SET_CHANNEL_ENABLE(FE_GDM_SEL_GDMA2, FE_GDM_SEL_RX, i, FE_ENABLE);
FE_API_SET_HWFWD_CHANNEL(FE_CDM_SEL_CDMA2, i, FE_ENABLE);
}
if (gpEponPriv->eponCfg.txOamFavorMode) {
for (i=16;i<24;i++) {
FE_API_SET_CHANNEL_ENABLE(FE_GDM_SEL_GDMA2, FE_GDM_SEL_TX, i, FE_ENABLE);
}
}
eponMacTxRxEnable();
eponSetlaserTime();
spin_unlock_irqrestore(&epon_reg_lock,flags);
return 0;
}
# 2208 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
void eponPhyInit(void)
{
iowrite32(0x7f000000, (void *)(0x124)) ;
iowrite32(0xCD810110, (void *)(0x108)) ;
iowrite32(0x45810110, (void *)(0x108)) ;
iowrite32(0x00086100, (void *)(0x1D0)) ;
iowrite32(0x4581E130, (void *)(0x108)) ;
iowrite32(0x400000C8, (void *)(0x1F0)) ;
iowrite32(0x00000013, (void *)(0x1D4)) ;
iowrite32(0x00000045, (void *)(0x204)) ;
}
__u8 eponOnuMacAddr[6] = {0};
extern uint8 noRxDscpInt;
extern uint8 attackEnhance;
extern unsigned long int dropThred;
extern uint8 rxDropCheckTimer;
static struct timer_list rgstIntvalTimer;
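/*
 * rgstIntvalTimerHandler():
 * Periodic housekeeping timer, rearmed every 100 jiffies.  It samples the
 * GDMA2 RX overflow-drop counter every rxDropCheckTimer ticks and raises
 * noRxDscpInt when the per-interval drop increment exceeds dropThred, counts
 * down each LLID's registration-silence timeout, and once the silence expires
 * while the PHY reports ready (XPON_PHY_GET(0x8019)) it restarts MPCP
 * discovery for LLIDs parked in state 5.
 */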
void rgstIntvalTimerHandler(unsigned long data)
{
__u8 llidIndex = 0;
static uint8 timer_count=0;
static unsigned long int oldDropCounter = 0;
static unsigned long int newDropCounter = 0;
int rgs_flag = 0;
eponLlid_t *llidEntry_p = &(eponDrv.eponLlidEntry[0]);
if(1 == gpPonSysData->ponMacPhyReset) {
goto restart_timer;
}
if(timer_count == rxDropCheckTimer)
{
FE_API_GET_RX_DROP_FIFO_CNT(FE_GDM_SEL_GDMA2, &newDropCounter);
if(newDropCounter - oldDropCounter > dropThred)
{
printk("GDMA2_RX_OVDROPCNT: newDropCounter = 0x%08lx\n",newDropCounter);
printk("GDMA2_RX_OVDROPCNT: oldDropCounter = 0x%08lx\n",oldDropCounter);
noRxDscpInt = 1;
}
else
{
noRxDscpInt = 0;
}
oldDropCounter = newDropCounter;
timer_count = 0;
}
else
{
timer_count++;
}
dbgtoMem(4, "enter rgstIntvalTimerHandler phy ready is %d", eponReadyFlag);
for(llidIndex = 0; llidIndex< 8; llidIndex++, llidEntry_p++){
dbgtoMem(4, "rgstIntvalTimerHandler start epon: llidIndex=%d state=%d rgstAgainTimeout = %d", llidIndex, llidEntry_p->eponMpcp.eponDiscFsm.mpcpDiscState, llidEntry_p->eponMpcp.eponDiscFsm.rgstAgainTimeout);
if (llidEntry_p->eponMpcp.eponDiscFsm.rgstAgainTimeout > 0) {
dbgtoMem(4, "Onu is in silent time out is %d rest, delete 1",llidEntry_p->eponMpcp.eponDiscFsm.rgstAgainTimeout );
llidEntry_p->eponMpcp.eponDiscFsm.rgstAgainTimeout--;
rgs_flag = 1;
}
if ((rgs_flag == 0) && (1 == XPON_PHY_GET(0x8019)) && (5 == llidEntry_p->eponMpcp.eponDiscFsm.mpcpDiscState))
{
eponPhyReadyRealStart();
llidEntry_p->eponMpcp.eponDiscFsm.mpcpDiscState = 1;
dbgtoMem(2, "Epon phy up and silent time is 0, begin to response MPCP");
}
# 2317 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
}
restart_timer:
mod_timer(&rgstIntvalTimer, jiffies + (100 ));
}
void eponEnable(void)
{
eponWanResetWithChannelRetire();
eponIrqRegister();
}
void eponDisable(void)
{
ledTurnOff((0 + 64));
ledTurnOff((0 + 65));
led_xpon_status = 0;
eponIrqUnregister();
eponWanStopWithChannelRetire();
}
# 2364 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
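/*
 * eponFirmwareSetMode():
 * On the board/strap combinations selected by the (macro-expanded) chip-ID
 * checks below, clear (mode 0) or set (mode 1) bit 26 of the register at
 * 0xBFAF0124; on other configurations it is a no-op.
 */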
void eponFirmwareSetMode(int mode)
{
if ( (((regRead32(0xbfb00064) & 0xF) == 0x3) && ((regRead32(0xbfb5fffc) & 0xF) >= 0x1) && ((((regRead32(0xbfb000f8)&0x3)==0x2) && (((regRead32(0xbfb00064)&0xffff0000))==0x00050000) && ((((regRead32(0xBFBF8214)&0x8)==0x8)?(((regRead32(0xBFBF8214)&0x20)==0x20)?1:0):(((regRead32(0xBFBF8214)&0x2)==0x2)?1:0))) && (!(((regRead32(0xBFBF8214)&0x8)==0x8)?((regRead32(0xBFBF8214)&0x40)==0x40):((regRead32(0xBFBF8214)&0x4)==0x4)))) || (((regRead32(0xbfb000f8)&0x3)==0x3) && (((regRead32(0xbfb00064)&0xffff0000))==0x00050000) && ((((regRead32(0xBFBF8214)&0x8)==0x8)?(((regRead32(0xBFBF8214)&0x20)==0x20)?1:0):(((regRead32(0xBFBF8214)&0x2)==0x2)?1:0)))) || (((regRead32(0xbfb000f8)&0x3)==0x2) && (((regRead32(0xbfb00064)&0xffff0000))==0x00050000) && (!(((regRead32(0xBFBF8214)&0x8)==0x8)?(((regRead32(0xBFBF8214)&0x20)==0x20)?1:0):(((regRead32(0xBFBF8214)&0x2)==0x2)?1:0))) && (!(((regRead32(0xBFBF8214)&0x8)==0x8)?((regRead32(0xBFBF8214)&0x40)==0x40):((regRead32(0xBFBF8214)&0x4)==0x4)))) || (((regRead32(0xbfb000f8)&0x3)==0x3) && (((regRead32(0xbfb00064)&0xffff0000))==0x00050000) && (!(((regRead32(0xBFBF8214)&0x8)==0x8)?(((regRead32(0xBFBF8214)&0x20)==0x20)?1:0):(((regRead32(0xBFBF8214)&0x2)==0x2)?1:0)))) || (((regRead32(0xbfb000f8)&0x3)==0x2) && (((regRead32(0xbfb00064)&0xffff0000))==0x00050000) && ((((regRead32(0xBFBF8214)&0x8)==0x8)?((regRead32(0xBFBF8214)&0x40)==0x40):((regRead32(0xBFBF8214)&0x4)==0x4)))))) )
{
if (0 == mode)
{
{ uint t = regRead32((0xBFAF0124)); regWrite32((0xBFAF0124), (t&~((1<<26)))); };
}
else if (1 == mode)
{
{ uint t = regRead32((0xBFAF0124)); regWrite32((0xBFAF0124), (t|(1<<26))); };
}
}
return;
}
void eponStart(unsigned long data)
{
dbgtoMem(4, "eponStart");
regWrite32(0xbfb00070, 1);
eponFirmwareSetMode(0);
eponInitParam();
eponInitAllLlid();
eponEnable();
return ;
}
int eponCleanAllLlidQueThrod(void ){
__u8 queueIndex = 0;
__u8 llidIndex = 0;
__u8 setIndex = 0;
eponQueueThreshold_t EponQThr;
int ret = 0;
for(llidIndex = 0 ;llidIndex < 8 ;llidIndex++ ){
for(setIndex = 0; setIndex< 3;setIndex++){
for(queueIndex = 0; queueIndex< 8;queueIndex++){
EponQThr.channel = llidIndex;
EponQThr.queue = queueIndex;
EponQThr.thrIdx = setIndex;
EponQThr.value = 0;
ret = epon_set_queue_threshold(&EponQThr) ;
if(ret < 0 ){
dbgtoMem(1, "ERR: eponSetLlidQueThrod error");
return -1;
}
}
}
}
return 0;
}
int eponDumpAllLlidQueThrod(void){
__u8 queueIndex = 0;
eponQueueThreshold_t EponQThr;
int ret = 0;
__u8 llidIndex = 0;
__u8 setIndex = 0;
printk("eponDumpAllLlidQueThrod enter\n");
for(llidIndex = 0 ;llidIndex < 8 ;llidIndex++ ){
for(setIndex = 0; setIndex< 3;setIndex++){
printk("\n ch%d,set%d : ",llidIndex,setIndex);
for(queueIndex = 0; queueIndex< 8;queueIndex++){
memset(&EponQThr, 0, sizeof(QDMA_EponQueueThreshold_T));
EponQThr.channel = llidIndex;
EponQThr.queue = queueIndex ;
EponQThr.thrIdx = setIndex;
ret = epon_get_queue_threshold(&EponQThr);
if(ret < 0){
dbgtoMem(1, "ERR: eponGetLlidQueThrod error");
return -1;
}
printk("%x ",EponQThr.value);
}
}
printk("\n");
}
return 0;
}
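/*
 * epon_set_queue_threshold()/epon_get_queue_threshold():
 * Access one per-queue report threshold through the indirect e_rpt_qthld_cfg
 * register: bit 31 issues a write, bit 30 is polled as the done flag, the
 * 16-bit value sits in bits 23..8, the threshold-set index in bits 7..6, the
 * channel (LLID) in bits 5..3 and the queue in bits 2..0.  A minimal usage
 * sketch (values below are illustrative only):
 *
 *     eponQueueThreshold_t thr = { .channel = 0, .queue = 7,
 *                                  .thrIdx = 0, .value = 0x100 };
 *     if (epon_set_queue_threshold(&thr) < 0)
 *         printk("EPON threshold write timed out\n");
 */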
int epon_set_queue_threshold(eponQueueThreshold_t *pEponQThr)
{
unsigned int eponThresholdCfg = 0 ;
int RETRY = 3 ;
if (pEponQThr->channel>=8 || pEponQThr->queue>=8 || pEponQThr->thrIdx>=3)
{
return -1 ;
}
eponThresholdCfg = ((1<<31) |
(((pEponQThr->value)<<(8))&(0xFFFF<<(8))) |
(((pEponQThr->thrIdx)<<(6))&(0x3<<(6))) |
(((pEponQThr->channel)<<(3))&(0x7<<(3))) |
(((pEponQThr->queue)<<(0))&(0x7<<(0)))) ;
iowrite32(eponThresholdCfg, (void *)((&(g_EPON_MAC_BASE)->e_rpt_qthld_cfg)));
while(RETRY--)
{
eponThresholdCfg = ioread32((void *)((&(g_EPON_MAC_BASE)->e_rpt_qthld_cfg))) ;
if(eponThresholdCfg&(1<<30))
{
return 0 ;
}
( (__builtin_constant_p(1) && (1)<=5) ? __udelay((1)*1000) : ({unsigned long __ms=(1); while (__ms--) __udelay(1000);})) ;
}
printk("Timeout for set EPON Threshold configuration.\n") ;
return -1 ;
}
int epon_get_queue_threshold(eponQueueThreshold_t *pEponQThr)
{
int ret ;
unsigned int eponThresholdCfg = 0 ;
int RETRY = 3 ;
if(pEponQThr->channel>=8 || pEponQThr->queue>=8 || pEponQThr->thrIdx>=3)
{
return -1 ;
}
eponThresholdCfg = ((((pEponQThr->thrIdx)<<(6))&(0x3<<(6))) |
(((pEponQThr->channel)<<(3))&(0x7<<(3))) |
(((pEponQThr->queue)<<(0))&(0x7<<(0)))) ;
iowrite32(eponThresholdCfg, (void *)((&(g_EPON_MAC_BASE)->e_rpt_qthld_cfg))) ;
while(RETRY--)
{
eponThresholdCfg = ioread32((void *)((&(g_EPON_MAC_BASE)->e_rpt_qthld_cfg))) ;
if(eponThresholdCfg&(1<<30))
{
ret = ((eponThresholdCfg&(0xFFFF<<(8)))>>(8)) ;
if (ret >= 0)
{
pEponQThr->value = ret ;
return 0;
}
else
return ret ;
}
( (__builtin_constant_p(1) && (1)<=5) ? __udelay((1)*1000) : ({unsigned long __ms=(1); while (__ms--) __udelay(1000);})) ;
}
printk("Timeout for get EPON Threshold configuration.\n") ;
return -1 ;
}
void eponStop(void)
{
dbgtoMem(4, "eponStop");
eponCleanAllLlidQueThrod();
eponDisable();
XPON_PHY_COUNTER_CLEAR(0x1f);
return;
}
struct file_operations eponMacFops = {
.owner = (&__this_module),
.write = ((void *)0),
.read = ((void *)0),
.unlocked_ioctl = eponMacIoctl,
.open = eponMacOpen,
.release = ((void *)0),
};
int eponMacOpen(struct inode *inode, struct file *filp)
{
dbgtoMem(4, "eponMacOpen entered");
return 0;
}
static int eponCmdInit(void)
{
cmds_t eponCmd;
int ret = -1;
memset(&eponCmd , 0 , sizeof(cmds_t));
dbgtoMem(4, "enter eponCmdInit");
eponCmd.name = "epon";
eponCmd.func = doEpon;
eponCmd.flags = 0x12;
eponCmd.argcmin = 0;
eponCmd.argc_errmsg = ((void *)0);
ret = cmd_register(&eponCmd);
if(ret <0){
dbgtoMem(1, "ERR: eponCmdInit ret=%d \n", ret);
}
return ret;
}
static void eponCmdExit(void)
{
cmd_unregister("epon");
}
# 2626 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
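/*
 * eponDetectPhyLosHandler():
 * PHY loss-of-signal path: record the LOS status, report it to xmcs and,
 * unless the LOS interrupt is masked out by ignoreIntMask, stop the EPON MAC,
 * reset the QDMA TX buffers, remember EPON as the previous link, stop the
 * OMCI/OAM monitor and schedule a frame-engine reset.  epon_typeB_phy_los()
 * defers this through gEponTypeBTimer when type-B holdover is enabled.
 */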
void eponDetectPhyLosHandler(void)
{
gpPhyData->phy_link_status = PHY_LINK_STATUS_LOS;
xmcs_report_event(XMCS_EVENT_TYPE_EPON, XMCS_EVENT_EPON_LOS, 0) ;
if ((ignoreIntMask &0x8000) == 0){
dbgtoMem(4, "ignore epon phy los");
return;
}
eponReadyFlag = 0;
dbgtoMem(3, "EponIsr Epon Phy Los");
gpPonSysData->sysLinkStatus = PON_LINK_STATUS_OFF ;
{
eponStop();
}
gpWanPriv->activeChannelNum = 1;
xpon_reset_qdma_tx_buf();
gpPonSysData->sysPrevLink = PON_LINK_STATUS_EPON ;
eponFirmwareSetMode(0);
stop_omci_oam_monitor();
schedule_fe_reset();
}
static void epon_los_timer_func(unsigned long data)
{
eponDrv.typeBOnGoing = 0;
if(eponDrv.hldoverEnable && (XPON_PHY_GET(0x8019) == (1)))
return;
eponDetectPhyLosHandler();
}
static int epon_typeB_phy_los(void)
{
eponLlid_t *llidEntry_p = &(eponDrv.eponLlidEntry[0]);
eponMpcpDiscFsm_t *mpcpDiscFsm = &(llidEntry_p->eponMpcp.eponDiscFsm);
int ret = 0;
if(eponDrv.hldoverEnable && (mpcpDiscFsm->mpcpDiscState == 8))
{
ret = 1;
gpPhyData->phy_link_status = PHY_LINK_STATUS_LOS;
dbgtoMem(3 ,"EponIsr Epon typeB Phy Los");
gEponTypeBTimer.data = eponDrv.hldOverTime;
do { if((((__current_thread_info->preempt_count) & ((((1UL << (10))-1) << ((0 + 8) + 8)) | (((1UL << (8))-1) << (0 + 8)) | (((1UL << (1))-1) << (((0 + 8) + 8) + 10)))))) { del_timer(&gEponTypeBTimer) ; } else { del_timer_sync(&gEponTypeBTimer) ; } }while(0);
{ mod_timer(&gEponTypeBTimer, (jiffies + ((gEponTypeBTimer.data*100)/1000))) ; };
eponDrv.typeBOnGoing = 1;
}
return ret;
}
void eponDetectPhyLosLof(void)
{
if(0 == epon_typeB_phy_los()){
eponDetectPhyLosHandler();
}
}
void eponPhyReadyRealStart(void)
{
if ((ignoreIntMask &0x8000) == 0){
dbgtoMem(4,"ignore epon phy ready");
return;
}
dbgtoMem(3,"EponIsr Epon Phy Ready");
if(gpPonSysData->sysPrevLink == PON_LINK_STATUS_GPON) {
xmcs_report_event(XMCS_EVENT_TYPE_GPON, XMCS_EVENT_GPON_MODE_CHANGE, 0);
}
xmcs_report_event(XMCS_EVENT_TYPE_EPON, XMCS_EVENT_EPON_PHY_READY, 0);
if(gpPonSysData->sysStartup == PON_WAN_START) {
gpPonSysData->sysLinkStatus = PON_LINK_STATUS_EPON ;
tasklet_schedule(&eponStartTask);
gpWanPriv->devCfg.flags.isTxDropOmcc = 0;
xpon_set_qdma_qos(gpWanPriv->devCfg.flags.isQosUp);
}
}
extern int isSlientBegin;
void eponDetectPhyReady(void)
{
eponReadyFlag = 1;
if ((ignoreIntMask &0x8000) == 0){
dbgtoMem(4, "ignore epon phy ready");
return;
}
change_alarm_led_status(ALARM_LED_OFF);
dbgtoMem(3, "EponIsr Epon Phy Ready");
if(0 == eponDrv.typeBOnGoing){
eponFirmwareSetMode(0);
}
if(gpPonSysData->sysPrevLink == PON_LINK_STATUS_GPON) {
xmcs_report_event(XMCS_EVENT_TYPE_GPON, XMCS_EVENT_GPON_MODE_CHANGE, 0);
}
xmcs_report_event(XMCS_EVENT_TYPE_EPON, XMCS_EVENT_EPON_PHY_READY, 0);
if(gpPonSysData->sysStartup == PON_WAN_START) {
gpPonSysData->sysLinkStatus = PON_LINK_STATUS_EPON ;
if(0 == eponDrv.typeBOnGoing){
tasklet_schedule(&eponStartTask);
gpWanPriv->devCfg.flags.isTxDropOmcc = 0;
xpon_set_qdma_qos(gpWanPriv->devCfg.flags.isQosUp);
}
}
}
# 3116 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
void eponMacTableInit(void);
# 3137 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
static int epon_init_private_data(EPON_GlbPriv_T *epon_priv_p)
{
int ret = 0 ;
epon_priv_p->mpcpInterruptMode = 0;
epon_priv_p->eponCfg.snifferModeConfig.snifferModeEnable = 0;
epon_priv_p->eponCfg.snifferModeConfig.snifferModeLanMask = 0;
epon_priv_p->eponCfg.txOamFavorMode= 0;
if ((((*(volatile unsigned long int *)(0xbfb00064)&0xffff0000))==0x00080000)) {
epon_priv_p->mpcpInterruptMode = 1;
}
return ret;
}
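/*
 * eponInit():
 * Module initialisation: map the EPON MAC register block at 0x1FB60000,
 * initialise the private data and the register table, read the ONU MAC from
 * flash, register the "epon" CLI command and the character device (major 221),
 * set up the MPCP-discovery and start tasklets, the registration interval
 * timer and the type-B holdover timer, and initialise the debug print buffer.
 * eponExit() tears all of this down.
 */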
int eponInit(void)
{
int ret = -1;
XMCSIF_OnuType_t type;
dbgtoMem(4, "eponInit");
do { spinlock_check(&epon_reg_lock); do { *(&(&epon_reg_lock)->rlock) = (raw_spinlock_t) { .raw_lock = { .lock = 0 }, }; } while (0); } while (0);
g_EPON_MAC_BASE = (PEPON_MAC_REGS)(__ioremap_mode(((0x1FB60000)), ((sizeof(EPON_MAC_REGS))), (2<<(((((0 ? (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1))) + 1 : (0 ? (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)) + 1 : (((((0 ? (0) : (0) + 1) + 1) + 1) + 1)))) + 1) + 1) + 1) + 1)))) ;
if(!g_EPON_MAC_BASE) {
dbgtoMem(1 ,"ERR: ioremap the EPON base address failed:addr=0x%x,size=0x%x",(0x1FB60000),(sizeof(EPON_MAC_REGS))) ;
return -12 ;
}
ret = epon_init_private_data(gpEponPriv);
if (ret < 0) {
dbgtoMem(1, "EPON private data initialization failed");
return ret;
}
eponMacTableInit();
getOnuMacAddr(eponOnuMacAddr);
eponCmdInit();
memset(&eponDrv , 0, sizeof(epon_t));
eponFirmwareSetMode(0);
ret = register_chrdev(221, "epon_mac", &eponMacFops);
if (ret < 0)
return ret;
tasklet_init(&eponMacMpcpDscvGateTask, eponMpcpDiscvGateIntHandler, 0);
tasklet_init(&eponStartTask, eponStart, 0);
eponMacAddTimer(&rgstIntvalTimer,1,rgstIntvalTimerHandler,0);
xmcs_get_onu_type(&type);
if (type != XMCS_IF_ONU_TYPE_HGU)
{
isSfu = 1;
}
else
{
isSfu = 0;
}
# 3225 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
init_timer_key((&gEponTypeBTimer), ((void *)0), ((void *)0)) ;
gEponTypeBTimer.data = 1000;
gEponTypeBTimer.function = epon_los_timer_func;
# 3238 "/opt/tclinux_phoenix/modules/private/xpon/src/epon/epon_main.c"
dbgPrintInit();
return ret;
}
void eponMacTableExit(void);
void eponExit(void)
{
dbgtoMem(4, "eponExit");
eponCmdExit();
unregister_chrdev(221, "epon_mac");
tasklet_kill(&eponMacMpcpDscvGateTask);
tasklet_kill(&eponStartTask);
eponMacDelTimer(&rgstIntvalTimer);
dbgPrintQuit();
if(g_EPON_MAC_BASE) {
iounmap(g_EPON_MAC_BASE) ;
g_EPON_MAC_BASE = ((void *)0) ;
}
eponMacTableExit();
do { if((((__current_thread_info->preempt_count) & ((((1UL << (10))-1) << ((0 + 8) + 8)) | (((1UL << (8))-1) << (0 + 8)) | (((1UL << (1))-1) << (((0 + 8) + 8) + 10)))))) { del_timer(&gEponTypeBTimer) ; } else { del_timer_sync(&gEponTypeBTimer) ; } }while(0);
return;
}
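/*
 * eponMacTableInit():
 * Allocate and fill the register table used by eponMacDumpAllReg() and the
 * hwtest/regtest commands: one entry per EPON MAC register with its default
 * value and read/write mask, terminated by a NULL address entry.
 */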
void eponMacTableInit(void)
{
int i = 0;
eponMacRegTable = (eponMacHwtestReg_t *)kmalloc(sizeof(eponMacHwtestReg_t)*70, ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u))) ;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_glb_cfg) ;
eponMacRegTable[i].def_value = 0x00800042;
eponMacRegTable[i].rwmask = 0x003f0007;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_int_status);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x00000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_int_en);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x00000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_rpt_mpcp_timeout_llid_idx);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x00000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_dyinggsp_cfg);
eponMacRegTable[i].def_value = 0x00000100;
eponMacRegTable[i].rwmask = 0x8000037f;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_pending_gnt_num);
eponMacRegTable[i].def_value = 0x00000040;
eponMacRegTable[i].rwmask = 0x0000007f;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_llid0_3_cfg);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x0f0f0f0f;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_llid4_7_cfg);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x0f0f0f0f;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_llid_dscvry_ctrl);
eponMacRegTable[i].def_value = 0x00010000;
eponMacRegTable[i].rwmask = 0x80001107;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_llid0_dscvry_sts);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0xc0000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_llid1_dscvry_sts);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0xc0000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_llid2_dscvry_sts);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0xc0000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_llid3_dscvry_sts);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0xc0000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_llid4_dscvry_sts);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0xc0000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_llid5_dscvry_sts);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0xc0000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_llid6_dscvry_sts);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0xc0000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_llid7_dscvry_sts);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0xc0000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_mac_addr_cfg);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x8000000f;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_mac_addr_value);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x00000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_security_key_cfg);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x8000003f;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_key_value);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x00000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_rpt_data);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0xffffffff;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_rpt_len);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x0000013f;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_rpt_cfg);
eponMacRegTable[i].def_value = 0x000a0000;
eponMacRegTable[i].rwmask = 0x00000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_rpt_qthld_cfg);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x00000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_local_time);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x00000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_pwr_sv_cfg);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x00000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_slp_durt_max);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x00000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_slp_duration);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x00000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_act_duration);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x00000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_pwron_dly);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x00000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_slp_duration_i);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x00000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_txfetch_cfg);
eponMacRegTable[i].def_value = 0x242a03e8;
eponMacRegTable[i].rwmask = 0xffff0fff;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_sync_time);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x0000ffff;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_tx_cal_cnst);
eponMacRegTable[i].def_value = 0x2612040c;
eponMacRegTable[i].rwmask = 0xffffff3f;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_laser_onoff_time);
eponMacRegTable[i].def_value = 0x00002020;
eponMacRegTable[i].rwmask = 0x0000ffff;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_grd_thrshld);
eponMacRegTable[i].def_value = 0x00000008;
eponMacRegTable[i].rwmask = 0x000000ff;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_mpcp_timeout_intvl);
eponMacRegTable[i].def_value = 0x03b9aca0;
eponMacRegTable[i].rwmask = 0xffffffff;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_rpt_timeout_intvl);
eponMacRegTable[i].def_value = 0x002faf08;
eponMacRegTable[i].rwmask = 0x00ffffff;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_max_future_gnt_time);
eponMacRegTable[i].def_value = 0x03b9aca0;
eponMacRegTable[i].rwmask = 0xffffffff;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_min_proc_time);
eponMacRegTable[i].def_value = 0x00000400;
eponMacRegTable[i].rwmask = 0x0000ffff;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_trx_adjust_time1);
eponMacRegTable[i].def_value = 0x004ffff1;
eponMacRegTable[i].rwmask = 0xffffffff;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_trx_adjust_time2);
eponMacRegTable[i].def_value = 0x00000006;
eponMacRegTable[i].rwmask = 0xffffffff;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_dbg_prb_sel);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x0000001f;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_dbg_prb_h32);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask =0x00000000 ;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_dbg_prb_l32);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x00000000;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_rxmbi_eth_cnt);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x0;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_rxmpi_eth_cnt);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x0;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_txmbi_eth_cnt);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x0;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_txmpi_eth_cnt);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x0;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_oam_stat);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x0;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_mpcp_stat);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x0;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_mpcp_rgst_stat);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x0;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_gnt_pending_stat);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x0;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_gnt_length_stat);
eponMacRegTable[i].def_value = 0x0000ffff;
eponMacRegTable[i].rwmask = 0x0;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_gnt_type_stat);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x0;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_time_drft_stat);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x0;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_snf_mpcp_oam_ctl);
eponMacRegTable[i].def_value = 0x00000000;
eponMacRegTable[i].rwmask = 0x0;
i++;
eponMacRegTable[i].addr = (&(g_EPON_MAC_BASE)->e_rpt_adj);
eponMacRegTable[i].def_value = 0x00000002;
eponMacRegTable[i].rwmask = 0x0;
i++;
eponMacRegTable[i].addr = ((void *)0);
eponMacRegTable[i].rwmask = 0x0;
}
void eponMacTableExit(void)
{
kfree(eponMacRegTable);
eponMacRegTable = ((void *)0);
}
int eponMacDumpAllReg(void){
__u32 n = 0;
__u32 Raw = 0;
if(((void *)0) == eponMacRegTable){
dbgtoMem(1, "ERR: dump epon mac register fail");
return -1;
}
dbgtoMem(4, "Dump EPON MAC REG");
n = 0;
while(eponMacRegTable[n].addr != 0){
Raw = ioread32((void *)(eponMacRegTable[n].addr));
printk("\r\n %X : %X" , eponMacRegTable[n].addr, Raw);
n++;
}
return 0;
}
static int doEponHwtest(int argc, char *argv[], void *p)
{
return subcmd(eponHwtestCmds, argc, argv, p);
}
static int doEponRegtest(int argc, char *argv[], void *p){
__u32 times =0;
if(argc < 2){
printk("\r\nregtest <times>" );
return 0;
}
times = simple_strtoul(argv[1], ((void *)0), 16);
eponMacRegTest(times);
return 0;
}
static int doEponRegDefCheck(int argc, char *argv[], void *p){
eponMacSwReset();
if (eponRegDefCheck(eponMacRegTable , eponMacGetRegTblSize()) == -1)
{
printk("EPON Register default value verifycation is failure!!\r\n");
}
else
{
printk("EPON Register default value verifycation is ok!!\r\n");
}
return 0;
}