Branch data Line data Source code
1 : : /*
2 : : * JFFS2 -- Journalling Flash File System, Version 2.
3 : : *
4 : : * Copyright © 2001-2007 Red Hat, Inc.
5 : : * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
6 : : *
7 : : * Created by David Woodhouse <dwmw2@infradead.org>
8 : : *
9 : : * For licensing information, see the file 'LICENCE' in this directory.
10 : : *
11 : : */
12 : :
13 : : #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 : :
15 : : #include <linux/kernel.h>
16 : : #include <linux/sched.h>
17 : : #include <linux/slab.h>
18 : : #include <linux/vmalloc.h>
19 : : #include <linux/mtd/mtd.h>
20 : : #include "nodelist.h"
21 : :
22 : : static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *,
23 : : struct jffs2_inode_cache *, struct jffs2_full_dirent **);
24 : :
25 : : static inline struct jffs2_inode_cache *
26 : : first_inode_chain(int *i, struct jffs2_sb_info *c)
27 : : {
28 [ # # ][ # # ]: 0 : for (; *i < c->inocache_hashsize; (*i)++) {
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ][ # # ]
29 [ # # ][ # # ]: 0 : if (c->inocache_list[*i])
[ # # ][ # # ]
[ # # ][ # # ]
[ # # ][ # # ]
30 : : return c->inocache_list[*i];
31 : : }
32 : : return NULL;
33 : : }
34 : :
35 : : static inline struct jffs2_inode_cache *
36 : : next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
37 : : {
38 : : /* More in this chain? */
39 [ # # ]: 0 : if (ic->next)
[ # # # # ]
[ # # ]
40 : : return ic->next;
41 : 0 : (*i)++;
42 : : return first_inode_chain(i, c);
43 : : }
44 : :
/* Iterate over every inode cache in c's hash table. 'i' is the bucket
 * cursor and 'ic' the current entry; both are written by the macro. */
#define for_each_inode(i, c, ic) \
	for (i = 0, ic = first_inode_chain(&i, (c)); \
	     ic; \
	     ic = next_inode(&i, ic, (c)))
49 : :
50 : :
51 : 0 : static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
52 : : struct jffs2_inode_cache *ic)
53 : : {
54 : : struct jffs2_full_dirent *fd;
55 : :
56 : : dbg_fsbuild("building directory inode #%u\n", ic->ino);
57 : :
58 : : /* For each child, increase nlink */
59 [ # # ]: 0 : for(fd = ic->scan_dents; fd; fd = fd->next) {
60 : : struct jffs2_inode_cache *child_ic;
61 [ # # ]: 0 : if (!fd->ino)
62 : 0 : continue;
63 : :
64 : : /* we can get high latency here with huge directories */
65 : :
66 : 0 : child_ic = jffs2_get_ino_cache(c, fd->ino);
67 [ # # ]: 0 : if (!child_ic) {
68 : : dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
69 : : fd->name, fd->ino, ic->ino);
70 : 0 : jffs2_mark_node_obsolete(c, fd->raw);
71 : 0 : continue;
72 : : }
73 : :
74 [ # # ]: 0 : if (fd->type == DT_DIR) {
75 [ # # ]: 0 : if (child_ic->pino_nlink) {
76 : 0 : JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n",
77 : : fd->name, fd->ino, ic->ino);
78 : : /* TODO: What do we do about it? */
79 : : } else {
80 : 0 : child_ic->pino_nlink = ic->ino;
81 : : }
82 : : } else
83 : 0 : child_ic->pino_nlink++;
84 : :
85 : : dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
86 : : /* Can't free scan_dents so far. We might need them in pass 2 */
87 : : }
88 : 0 : }
89 : :
/* Scan plan:
 - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go
 - Scan directory tree from top down, setting nlink in inocaches
 - Scan inocaches for inodes with nlink==0
*/
/*
 * Build the in-core filesystem state from the raw flash contents.
 * Returns 0 on success or the error from jffs2_scan_medium(); on
 * failure all temporary dirent lists and the xattr subsystem state
 * are released before returning.
 */
static int jffs2_build_filesystem(struct jffs2_sb_info *c)
{
	int ret;
	int i;
	struct jffs2_inode_cache *ic;
	struct jffs2_full_dirent *fd;
	/* Dirents of children orphaned during pass 2, processed iteratively
	   in pass 2a to avoid recursion on deep unlinked directory trees. */
	struct jffs2_full_dirent *dead_fds = NULL;

	dbg_fsbuild("build FS data structures\n");

	/* First, scan the medium and build all the inode caches with
	   lists of physical nodes */

	c->flags |= JFFS2_SB_FLAG_SCANNING;
	ret = jffs2_scan_medium(c);
	c->flags &= ~JFFS2_SB_FLAG_SCANNING;
	if (ret)
		goto exit;

	dbg_fsbuild("scanned flash completely\n");
	jffs2_dbg_dump_block_lists_nolock(c);

	dbg_fsbuild("pass 1 starting\n");
	c->flags |= JFFS2_SB_FLAG_BUILDING;
	/* Now scan the directory tree, increasing nlink according to every dirent found. */
	for_each_inode(i, c, ic) {
		if (ic->scan_dents) {
			jffs2_build_inode_pass1(c, ic);
			cond_resched();
		}
	}

	dbg_fsbuild("pass 1 complete\n");

	/* Next, scan for inodes with nlink == 0 and remove them. If
	   they were directories, then decrement the nlink of their
	   children too, and repeat the scan. As that's going to be
	   a fairly uncommon occurrence, it's not so evil to do it this
	   way. Recursion bad. */
	dbg_fsbuild("pass 2 starting\n");

	for_each_inode(i, c, ic) {
		if (ic->pino_nlink)
			continue;

		jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
		cond_resched();
	}

	dbg_fsbuild("pass 2a starting\n");

	/* Drain the dead_fds list; removing an inode here may push more
	   of its children onto the same list, so loop until empty. */
	while (dead_fds) {
		fd = dead_fds;
		dead_fds = fd->next;

		ic = jffs2_get_ino_cache(c, fd->ino);

		if (ic)
			jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
		jffs2_free_full_dirent(fd);
	}

	dbg_fsbuild("pass 2a complete\n");
	dbg_fsbuild("freeing temporary data structures\n");

	/* Finally, we can scan again and free the dirent structs */
	for_each_inode(i, c, ic) {
		while(ic->scan_dents) {
			fd = ic->scan_dents;
			ic->scan_dents = fd->next;
			jffs2_free_full_dirent(fd);
		}
		ic->scan_dents = NULL;
		cond_resched();
	}
	jffs2_build_xattr_subsystem(c);
	c->flags &= ~JFFS2_SB_FLAG_BUILDING;

	dbg_fsbuild("FS build complete\n");

	/* Rotate the lists by some number to ensure wear levelling */
	jffs2_rotate_lists(c);

	ret = 0;

 exit:
	if (ret) {
		/* On failure, free any scan_dents collected before the error */
		for_each_inode(i, c, ic) {
			while(ic->scan_dents) {
				fd = ic->scan_dents;
				ic->scan_dents = fd->next;
				jffs2_free_full_dirent(fd);
			}
		}
		jffs2_clear_xattr_subsystem(c);
	}

	return ret;
}
194 : :
195 : 0 : static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
196 : : struct jffs2_inode_cache *ic,
197 : : struct jffs2_full_dirent **dead_fds)
198 : : {
199 : : struct jffs2_raw_node_ref *raw;
200 : : struct jffs2_full_dirent *fd;
201 : :
202 : : dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino);
203 : :
204 : 0 : raw = ic->nodes;
205 [ # # ]: 0 : while (raw != (void *)ic) {
206 : 0 : struct jffs2_raw_node_ref *next = raw->next_in_ino;
207 : : dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw));
208 : 0 : jffs2_mark_node_obsolete(c, raw);
209 : : raw = next;
210 : : }
211 : :
212 [ # # ]: 0 : if (ic->scan_dents) {
213 : : int whinged = 0;
214 : : dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino);
215 : :
216 [ # # ]: 0 : while(ic->scan_dents) {
217 : : struct jffs2_inode_cache *child_ic;
218 : :
219 : : fd = ic->scan_dents;
220 : 0 : ic->scan_dents = fd->next;
221 : :
222 [ # # ]: 0 : if (!fd->ino) {
223 : : /* It's a deletion dirent. Ignore it */
224 : : dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name);
225 : 0 : jffs2_free_full_dirent(fd);
226 : 0 : continue;
227 : : }
228 : : if (!whinged)
229 : : whinged = 1;
230 : :
231 : : dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino);
232 : :
233 : 0 : child_ic = jffs2_get_ino_cache(c, fd->ino);
234 [ # # ]: 0 : if (!child_ic) {
235 : : dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n",
236 : : fd->name, fd->ino);
237 : 0 : jffs2_free_full_dirent(fd);
238 : 0 : continue;
239 : : }
240 : :
241 : : /* Reduce nlink of the child. If it's now zero, stick it on the
242 : : dead_fds list to be cleaned up later. Else just free the fd */
243 : :
244 [ # # ]: 0 : if (fd->type == DT_DIR)
245 : 0 : child_ic->pino_nlink = 0;
246 : : else
247 : 0 : child_ic->pino_nlink--;
248 : :
249 [ # # ]: 0 : if (!child_ic->pino_nlink) {
250 : : dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
251 : : fd->ino, fd->name);
252 : 0 : fd->next = *dead_fds;
253 : 0 : *dead_fds = fd;
254 : : } else {
255 : : dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
256 : : fd->ino, fd->name, child_ic->pino_nlink);
257 : 0 : jffs2_free_full_dirent(fd);
258 : : }
259 : : }
260 : : }
261 : :
262 : : /*
263 : : We don't delete the inocache from the hash list and free it yet.
264 : : The erase code will do that, when all the nodes are completely gone.
265 : : */
266 : 0 : }
267 : :
268 : 0 : static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
269 : : {
270 : : uint32_t size;
271 : :
272 : : /* Deletion should almost _always_ be allowed. We're fairly
273 : : buggered once we stop allowing people to delete stuff
274 : : because there's not enough free space... */
275 : 0 : c->resv_blocks_deletion = 2;
276 : :
277 : : /* Be conservative about how much space we need before we allow writes.
278 : : On top of that which is required for deletia, require an extra 2%
279 : : of the medium to be available, for overhead caused by nodes being
280 : : split across blocks, etc. */
281 : :
282 : 0 : size = c->flash_size / 50; /* 2% of flash size */
283 : 0 : size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */
284 : 0 : size += c->sector_size - 1; /* ... and round up */
285 : :
286 : 0 : c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size);
287 : :
288 : : /* When do we let the GC thread run in the background */
289 : :
290 : 0 : c->resv_blocks_gctrigger = c->resv_blocks_write + 1;
291 : :
292 : : /* When do we allow garbage collection to merge nodes to make
293 : : long-term progress at the expense of short-term space exhaustion? */
294 : 0 : c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1;
295 : :
296 : : /* When do we allow garbage collection to eat from bad blocks rather
297 : : than actually making progress? */
298 : 0 : c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2;
299 : :
300 : : /* What number of 'very dirty' eraseblocks do we allow before we
301 : : trigger the GC thread even if we don't _need_ the space. When we
302 : : can't mark nodes obsolete on the medium, the old dirty nodes cause
303 : : performance problems because we have to inspect and discard them. */
304 : 0 : c->vdirty_blocks_gctrigger = c->resv_blocks_gctrigger;
305 : : if (jffs2_can_mark_obsolete(c))
306 : : c->vdirty_blocks_gctrigger *= 10;
307 : :
308 : : /* If there's less than this amount of dirty space, don't bother
309 : : trying to GC to make more space. It'll be a fruitless task */
310 : 0 : c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);
311 : :
312 : : dbg_fsbuild("trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
313 : : c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks);
314 : : dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n",
315 : : c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024);
316 : : dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n",
317 : : c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024);
318 : : dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n",
319 : : c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024);
320 : : dbg_fsbuild("Blocks required to allow GC merges: %d (%d KiB)\n",
321 : : c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024);
322 : : dbg_fsbuild("Blocks required to GC bad blocks: %d (%d KiB)\n",
323 : : c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024);
324 : : dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n",
325 : : c->nospc_dirty_size);
326 : : dbg_fsbuild("Very dirty blocks before GC triggered: %d\n",
327 : : c->vdirty_blocks_gctrigger);
328 : 0 : }
329 : :
330 : 0 : int jffs2_do_mount_fs(struct jffs2_sb_info *c)
331 : : {
332 : : int ret;
333 : : int i;
334 : : int size;
335 : :
336 : 0 : c->free_size = c->flash_size;
337 : 0 : c->nr_blocks = c->flash_size / c->sector_size;
338 : 0 : size = sizeof(struct jffs2_eraseblock) * c->nr_blocks;
339 : : #ifndef __ECOS
340 [ # # ]: 0 : if (jffs2_blocks_use_vmalloc(c))
341 : 0 : c->blocks = vzalloc(size);
342 : : else
343 : : #endif
344 : 0 : c->blocks = kzalloc(size, GFP_KERNEL);
345 [ # # ]: 0 : if (!c->blocks)
346 : : return -ENOMEM;
347 : :
348 [ # # ]: 0 : for (i=0; i<c->nr_blocks; i++) {
349 : 0 : INIT_LIST_HEAD(&c->blocks[i].list);
350 : 0 : c->blocks[i].offset = i * c->sector_size;
351 : 0 : c->blocks[i].free_size = c->sector_size;
352 : : }
353 : :
354 : 0 : INIT_LIST_HEAD(&c->clean_list);
355 : 0 : INIT_LIST_HEAD(&c->very_dirty_list);
356 : 0 : INIT_LIST_HEAD(&c->dirty_list);
357 : 0 : INIT_LIST_HEAD(&c->erasable_list);
358 : 0 : INIT_LIST_HEAD(&c->erasing_list);
359 : 0 : INIT_LIST_HEAD(&c->erase_checking_list);
360 : 0 : INIT_LIST_HEAD(&c->erase_pending_list);
361 : 0 : INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
362 : 0 : INIT_LIST_HEAD(&c->erase_complete_list);
363 : 0 : INIT_LIST_HEAD(&c->free_list);
364 : 0 : INIT_LIST_HEAD(&c->bad_list);
365 : 0 : INIT_LIST_HEAD(&c->bad_used_list);
366 : 0 : c->highest_ino = 1;
367 : 0 : c->summary = NULL;
368 : :
369 : 0 : ret = jffs2_sum_init(c);
370 [ # # ]: 0 : if (ret)
371 : : goto out_free;
372 : :
373 [ # # ]: 0 : if (jffs2_build_filesystem(c)) {
374 : : dbg_fsbuild("build_fs failed\n");
375 : 0 : jffs2_free_ino_caches(c);
376 : 0 : jffs2_free_raw_node_refs(c);
377 : : ret = -EIO;
378 : 0 : goto out_free;
379 : : }
380 : :
381 : 0 : jffs2_calc_trigger_levels(c);
382 : :
383 : 0 : return 0;
384 : :
385 : : out_free:
386 : : #ifndef __ECOS
387 [ # # ]: 0 : if (jffs2_blocks_use_vmalloc(c))
388 : 0 : vfree(c->blocks);
389 : : else
390 : : #endif
391 : 0 : kfree(c->blocks);
392 : :
393 : 0 : return ret;
394 : : }
|