summaryrefslogtreecommitdiff
path: root/cleopatre
diff options
context:
space:
mode:
Diffstat (limited to 'cleopatre')
-rw-r--r--cleopatre/linux-2.6.25.10-spc300/include/asm-arm/arch-spc300/vmalloc.h | 11
1 file changed, 0 insertions, 11 deletions
diff --git a/cleopatre/linux-2.6.25.10-spc300/include/asm-arm/arch-spc300/vmalloc.h b/cleopatre/linux-2.6.25.10-spc300/include/asm-arm/arch-spc300/vmalloc.h
index ff5babe8fd..3ba5ee9336 100644
--- a/cleopatre/linux-2.6.25.10-spc300/include/asm-arm/arch-spc300/vmalloc.h
+++ b/cleopatre/linux-2.6.25.10-spc300/include/asm-arm/arch-spc300/vmalloc.h
@@ -20,17 +20,6 @@
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
-/*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END (PAGE_OFFSET + SZ_64M)
#endif /* __ASM_ARCH_VMALLOC_H */