-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathapfs_buf.c
229 lines (185 loc) · 4.39 KB
/
apfs_buf.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022 Su Yue <[email protected]>
* All Rights Reserved.
*/
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include "ctree.h"
#include "volumes.h"
#include "apfs_buf.h"
/*
 * Finish buffer I/O: wake up anyone sleeping on bp->io_wait.
 */
static void
apfs_buf_ioend(struct apfs_buf *bp)
{
	complete(&bp->io_wait);
}
/*
 * Per-bio completion handler for buffer I/O.
 *
 * Records the first bio error seen, drops this bio's reference on
 * bp->io_remaining, and completes the buffer once only the submitter's
 * own reference (count == 1, taken in apfs_buf_submit) remains.
 */
static void
apfs_buf_bio_end_io(struct bio *bio)
{
	struct apfs_buf *bp = (struct apfs_buf *)bio->bi_private;

	/*
	 * don't overwrite existing errors - otherwise we can lose errors on
	 * buffers that require multiple bios to complete.
	 */
	if (bio->bi_status) {
		int error = blk_status_to_errno(bio->bi_status);

		cmpxchg(&bp->io_errors, 0, error);
	}

	/*
	 * The decrement and the test must be a single atomic operation.
	 * A separate atomic_dec()/atomic_read() pair is racy: with several
	 * bios in flight, two completions can both decrement first and then
	 * both observe the counter at 1, completing io_wait twice.
	 */
	if (atomic_dec_return(&bp->io_remaining) == 1) {
		if (!bp->error && bp->io_errors)
			apfs_crit_in_rcu(bp->fs_info, "buf bio errors %d",
					 bp->io_errors);
		bp->error = bp->io_errors;
		complete(&bp->io_wait);
	}
	bio_put(bio);
}
/*
 * Map the buffer's pages into one or more bios and submit them.
 *
 * Each submitted bio takes a reference on bp->io_remaining (released in
 * apfs_buf_bio_end_io).  The caller (apfs_buf_submit) holds its own
 * reference, so the buffer cannot complete underneath us while bios are
 * still being built here.  If a chunk fills up before the whole buffer is
 * mapped, another bio is built for the remainder via the next_chunk loop.
 */
static void
apfs_buf_ioapply(struct apfs_buf *bp)
{
	int offset;
	unsigned int total_nr_pages = bp->page_count;
	int page_index;
	int nr_pages;
	struct bio *bio;
	int size = bp->len;
	sector_t sector = bp->bno;
	/* skip the pages in the buffer before the start offset */
	page_index = 0;
	offset = bp->offset;
	while (offset >= PAGE_SIZE) {
		page_index++;
		offset -= PAGE_SIZE;
	}
next_chunk:
	/* one reference per bio; dropped by apfs_buf_bio_end_io */
	atomic_inc(&bp->io_remaining);
	nr_pages = bio_max_segs(total_nr_pages);
	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio_set_dev(bio, bp->fs_info->device->bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = apfs_buf_bio_end_io;
	bio->bi_private = bp;
	bio->bi_opf = bp->op;
	/* add pages until the buffer is fully mapped or the bio is full */
	for (; size && nr_pages; nr_pages--, page_index++) {
		int rbytes, nbytes = PAGE_SIZE - offset;
		if (nbytes > size)
			nbytes = size;
		rbytes = bio_add_page(bio, bp->pages[page_index], nbytes,
				      offset);
		/* bio can't take the whole page: finish this chunk */
		if (rbytes < nbytes)
			break;
		/* only the first page has a non-zero intra-page offset */
		offset = 0;
		sector += nbytes >> 9;	/* advance in 512-byte sectors */
		size -= nbytes;
		total_nr_pages--;
	}
	if (likely(bio->bi_iter.bi_size)) {
		submit_bio(bio);
		/* leftover bytes mean the bio filled up: build another */
		if (size)
			goto next_chunk;
	} else {
		/*
		 * This is guaranteed not to be the last io reference count
		 * because the caller (apfs_buf_submit) holds a count itself.
		 */
		atomic_dec(&bp->io_remaining);
		bp->error = -EIO;
		bio_put(bio);
	}
}
/*
 * Allocate the page array backing @bp, sized from bp->len.
 *
 * Write buffers (anything without ABF_READ) get zeroed pages so stale
 * data never reaches the media.  Bulk page allocation may return short;
 * we keep retrying, throttling only when a pass makes no progress at all.
 *
 * Returns 0 on success or -ENOMEM if the pointer array itself cannot be
 * allocated.
 */
int apfs_buf_alloc_pages(struct apfs_buf *bp, u32 flags)
{
	gfp_t gfp = GFP_NOFS | __GFP_NOWARN;
	long done = 0;

	/* Make sure that we have a page list */
	bp->page_count = DIV_ROUND_UP(bp->len, PAGE_SIZE);
	bp->pages = kcalloc(bp->page_count, sizeof(struct page *), gfp);
	if (!bp->pages)
		return -ENOMEM;

	/* Assure zeroed buffer for non-read cases. */
	if (!(flags & ABF_READ))
		gfp |= __GFP_ZERO;

	/*
	 * alloc_pages_bulk_array() fills as many empty slots as it can and
	 * returns the total populated so far.  A short fill is not a
	 * failure; only back off when an iteration adds nothing.
	 */
	while (done < bp->page_count) {
		long prev = done;

		done = alloc_pages_bulk_array(gfp, bp->page_count, bp->pages);
		if (done == prev)
			congestion_wait(BLK_RW_ASYNC, HZ / 50);
	}
	return 0;
}
static void
apfs_buf_free_pages(struct apfs_buf *bp)
{
int i;
for (i = 0; i < bp->page_count; i++) {
if (bp->pages[i])
__free_page(bp->pages[i]);
}
kfree(bp->pages);
}
/*
 * Free a buffer and all pages attached to it.  Safe to call with NULL,
 * matching kfree() semantics, so error paths need no guard.
 */
void apfs_buf_free(struct apfs_buf *bp)
{
	if (!bp)
		return;
	apfs_buf_free_pages(bp);
	kfree(bp);
}
/*
 * Allocate a zeroed buffer head.  Returns NULL on allocation failure.
 *
 * kzalloc (rather than kmalloc) matters here: apfs_buf_free() walks
 * bp->pages/bp->page_count, so a buffer that is allocated but freed
 * before apfs_buf_alloc_pages() runs must see NULL/0, not stack garbage.
 * It also guarantees error/io_errors start clean.
 */
struct apfs_buf *apfs_buf_alloc(void)
{
	struct apfs_buf *bp;

	bp = kzalloc(sizeof(*bp), GFP_NOFS);
	if (!bp)
		return NULL;
	init_completion(&bp->io_wait);
	return bp;
}
/*
 * Prepare @bp for an I/O of @size bytes at device byte offset @bytenr.
 *
 * The length is rounded up to a 512-byte sector multiple, the start
 * sector and intra-page offset are derived from @bytenr, and @op
 * (ABF_READ or ABF_WRITE) is translated to the block-layer request op.
 * Any other @op value is a programming error and panics.
 */
void apfs_buf_init(struct apfs_fs_info *fs_info, struct apfs_buf *bp,
		   int op, u64 bytenr, size_t size)
{
	bp->fs_info = fs_info;
	bp->offset = offset_in_page(bytenr);
	bp->len = ALIGN(size, 1 << 9);
	bp->page_count = DIV_ROUND_UP(bp->len, PAGE_SIZE);
	bp->bno = bytenr >> 9;

	if (op == ABF_READ)
		bp->op = REQ_OP_READ;
	else if (op == ABF_WRITE)
		bp->op = REQ_OP_WRITE;
	else
		BUG_ON(1);
}
/*
 * Sleep until the buffer's I/O finishes and report its outcome:
 * 0 on success, a negative errno otherwise.
 */
static int
apfs_buf_iowait(struct apfs_buf *bp)
{
	wait_for_completion(&bp->io_wait);
	return bp->error;
}
/*
 * Submit buffer I/O, optionally waiting for completion.
 *
 * Holds its own reference on io_remaining (the initial 1) so the buffer
 * cannot be completed while apfs_buf_ioapply() is still building bios.
 *
 * Returns bp->error: the final I/O status when @wait is true, otherwise
 * only errors detected synchronously during submission.
 */
int apfs_buf_submit(struct apfs_buf *bp, bool wait)
{
	/* clear the internal error state to avoid spurious errors */
	bp->error = 0;
	bp->io_errors = 0;
	atomic_set(&bp->io_remaining, 1);

	apfs_buf_ioapply(bp);

	/* use the common wait helper instead of open-coding the wait */
	if (wait)
		return apfs_buf_iowait(bp);
	return bp->error;
}