#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/config.h>
#include <asm/pgalloc.h>

#ifdef CONFIG_HIGHMEM

extern struct page *highmem_start_page;

#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);

extern struct buffer_head *create_bounce(int rw, struct buffer_head *bh_orig);
static inline char *bh_kmap(struct buffer_head *bh)
{
	return kmap(bh->b_page) + bh_offset(bh);
}

static inline void bh_kunmap(struct buffer_head *bh)
{
	kunmap(bh->b_page);
}
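
/*
 * Illustrative sketch only, not part of the original header: a hypothetical
 * caller of the bh_kmap()/bh_kunmap() pair.  kmap() may sleep, so this is
 * process-context only, and every bh_kmap() must be matched by a bh_kunmap()
 * on the same buffer_head.
 *
 *	static void example_peek_bh(struct buffer_head *bh)
 *	{
 *		char *data = bh_kmap(bh);	// maps bh->b_page and adds bh_offset()
 *
 *		consume_data(data, bh->b_size);	// hypothetical consumer
 *		bh_kunmap(bh);			// drops the kmap of bh->b_page
 *	}
 */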

/*
 * Remember the offset: both paths below return the data pointer with
 * bh_offset() already applied.  Never re-enable interrupts between a
 * bh_kmap_irq() and the matching bh_kunmap_irq().
 */
static inline char *bh_kmap_irq(struct buffer_head *bh, unsigned long *flags)
{
	unsigned long addr;

	__save_flags(*flags);

	/*
	 * the page may be in low memory: b_data is already a valid mapping
	 */
	if (!PageHighMem(bh->b_page))
		return bh->b_data;

	/*
	 * it's a highmem page: take an atomic kmap with interrupts disabled
	 */
	__cli();
	addr = (unsigned long) kmap_atomic(bh->b_page, KM_BH_IRQ);

	if (addr & ~PAGE_MASK)
		BUG();

	return (char *) addr + bh_offset(bh);
}
static inline void bh_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr, KM_BH_IRQ);
	__restore_flags(*flags);
}
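
/*
 * Illustrative sketch only, not part of the original header: a hypothetical
 * caller of the irq-safe pair.  For a highmem page bh_kmap_irq() saves the
 * caller's flags and runs with interrupts disabled, so nothing between it
 * and the matching bh_kunmap_irq() may re-enable interrupts or sleep.
 *
 *	static void example_copy_from_bh(struct buffer_head *bh, char *dst)
 *	{
 *		unsigned long flags;
 *		char *data = bh_kmap_irq(bh, &flags);	// already includes bh_offset()
 *
 *		memcpy(dst, data, bh->b_size);		// no sleeping, no sti() in here
 *		bh_kunmap_irq(data, &flags);		// unmaps and restores the saved flags
 *	}
 */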

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

static inline void *kmap(struct page *page) { return page_address(page); }

#define kunmap(page) do { } while (0)

#define kmap_atomic(page, idx)		kmap(page)
#define kunmap_atomic(page, idx)	kunmap(page)

#define bh_kmap(bh)			((bh)->b_data)
#define bh_kunmap(bh)			do { } while (0)
#define kmap_nonblock(page)		kmap(page)
#define bh_kmap_irq(bh, flags)		((bh)->b_data)
#define bh_kunmap_irq(bh, flags)	do { *(flags) = 0; } while (0)

#endif /* CONFIG_HIGHMEM */

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */

static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);

	clear_user_page(addr, vaddr);
	kunmap_atomic(addr, KM_USER0);
}

static inline void clear_highpage(struct page *page)
{
	clear_page(kmap(page));
	kunmap(page);
}

/*
 * Same idea, but clears only [offset, offset + size) and also flushes
 * aliased cache contents to RAM.
 */
static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size)
{
	char *kaddr;

	if (offset + size > PAGE_SIZE)
		out_of_line_bug();
	kaddr = kmap(page);
	memset(kaddr + offset, 0, size);
	flush_dcache_page(page);
	flush_page_to_ram(page);
	kunmap(page);
}
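
/*
 * Illustrative sketch only, not part of the original header: a typical use
 * is zeroing the unused tail of a partially valid page (e.g. past the end
 * of the file data) so that aliased user mappings see the cleared bytes.
 * 'bytes' is a hypothetical count of valid data at the start of the page.
 *
 *	memclear_highpage_flush(page, bytes, PAGE_SIZE - bytes);
 */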

static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}
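
/*
 * Illustrative sketch only, not part of the original header: a hypothetical
 * helper using the same kmap_atomic() pattern as the copy helpers above.
 * Atomic kmaps never sleep, and the mapping must be dropped with the same
 * KM_* slot before the code can schedule.
 *
 *	static inline void example_fill_highpage(struct page *page, int c)
 *	{
 *		char *kaddr = kmap_atomic(page, KM_USER0);
 *
 *		memset(kaddr, c, PAGE_SIZE);
 *		kunmap_atomic(kaddr, KM_USER0);
 *	}
 */
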
#endif /* _LINUX_HIGHMEM_H */