/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
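
/*
 * Usage sketch (a hypothetical caller; the VFS does the equivalent of this
 * when opening a file), honouring the precondition that *ra is pre-zeroed:
 *
 *	struct file_ra_state *ra = &filp->f_ra;
 *
 *	memset(ra, 0, sizeof(*ra));
 *	file_ra_state_init(ra, filp->f_mapping);
 */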

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
		page->mapping = NULL;
		unlock_page(page);
	}
	page_cache_release(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = list_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
		     int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping,
					  page->index, GFP_KERNEL)) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		page_cache_release(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_CACHE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);
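
/*
 * Sketch of a filler callback as a filesystem might pass it (the names are
 * illustrative, not a real API): @data carries the caller's private state,
 * and the callback must read and then unlock the locked, not-uptodate page,
 * just as ->readpage() would.
 *
 *	static int myfs_filler(void *data, struct page *page)
 *	{
 *		struct myfs_context *ctx = data;
 *
 *		return myfs_read_one_page(ctx, page);
 *	}
 *
 *	err = read_cache_pages(mapping, &page_list, myfs_filler, ctx);
 */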

static int read_pages(struct address_space *mapping, struct file *filp,
		      struct list_head *pages, unsigned nr_pages)
{
	struct blk_plug plug;
	unsigned page_idx;
	int ret;

	blk_start_plug(&plug);

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					   page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
		}
		page_cache_release(page);
	}
	ret = 0;

out:
	blk_finish_plug(&plug);

	return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
 * the pages first, then submits them all for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			  pgoff_t offset, unsigned long nr_to_read,
			  unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		rcu_read_unlock();
		if (page)
			continue;

		page = page_cache_alloc_readahead(mapping);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ret++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			       pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	nr_to_read = max_sane_readahead(nr_to_read);
	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk, 0);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}
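
/*
 * Worked example (assuming 4KiB pages, so one 2MiB chunk is 512 pages, and
 * assuming max_sane_readahead() does not cap the request): a 5MiB
 * (1280 page) request is submitted as three calls of 512, 512 and 256
 * pages, so at most one chunk's worth of pages is held in the page pool
 * at any time.
 */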

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
		+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
unsigned long ra_submit(struct file_ra_state *ra,
			struct address_space *mapping, struct file *filp)
{
	int actual;

	actual = __do_page_cache_readahead(mapping, filp,
					   ra->start, ra->size, ra->async_size);

	return actual;
}

/*
 * Set the initial window size, rounding the request up to the next power
 * of 2, then scaling it: x 4 for small sizes (<= max/32), x 2 for medium
 * sizes (<= max/4), and clamped to max for anything larger.
 * For a 128k (32 page) max ra window this yields a 16k-64k initial window
 * for requests of 1-8 pages, and the full 128k above that.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}
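
/*
 * Worked example, assuming max = 32 pages (128k): a 2 page request rounds
 * up to 2, which is above max/32 = 1 but within max/4 = 8, so the initial
 * window is 2 * 2 = 4 pages; an 8 page request doubles to 16 pages; a
 * 9 page request rounds up to 16, exceeds max/4 and gets the full 32 pages.
 */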

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (cur < max / 16)
		newsize = 4 * cur;
	else
		newsize = 2 * cur;

	return min(newsize, max);
}
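
/*
 * Worked example, assuming max = 32 pages: a current window of 4 pages is
 * not below max/16 = 2, so it doubles to 8; successive ramp-ups give
 * 8 -> 16 -> 32, where min(newsize, max) pins the window at max.  Only a
 * window below max/16 is quadrupled.
 */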

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application has consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window.  Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state.  So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator.  The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads.  Note that the readahead algorithm checks loosely
 * for sequential patterns.  Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial size
 * based on I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */

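/*
 * Illustrative trace of the above (assuming one-page reads and
 * max_sane_readahead() returning 32 pages): the first read of page 0
 * triggers an initial window of start=0, size=4, async_size=3, with
 * PG_readahead set on page 1.  Reading page 1 hits the marker, so the
 * window moves to start=4, size=8, async_size=8, marking page 4; each
 * subsequent marker hit doubles the window until it saturates at 32 pages.
 */
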
/*
 * Count contiguously cached pages from @offset-1 to @offset-@max; this count
 * is a conservative estimate of
 *   - the length of the sequential read sequence, or
 *   - the thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   struct file_ra_state *ra,
				   pgoff_t offset, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
	rcu_read_unlock();

	return offset - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t offset,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, ra, offset, max);

	/*
	 * not enough history pages:
	 * it could be a random read
	 */
	if (size <= req_size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= offset)
		size *= 2;

	ra->start = offset;
	ra->size = min(size + req_size, max);
	ra->async_size = 1;

	return 1;
}
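
/*
 * Worked example: for a read at offset=100 with req_size=4, max=32, and
 * pages 80-99 already cached, count_history_pages() finds 20 history
 * pages; since 20 > 4 and 20 < 100, the window becomes start=100,
 * size=min(20 + 4, 32)=24, async_size=1.
 */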

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
		   struct file_ra_state *ra, struct file *filp,
		   bool hit_readahead_marker, pgoff_t offset,
		   unsigned long req_size)
{
	unsigned long max = max_sane_readahead(ra->ra_pages);
	pgoff_t prev_offset;

	/*
	 * start of file
	 */
	if (!offset)
		goto initial_readahead;

	/*
	 * It's the expected callback offset, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((offset == (ra->start + ra->size - ra->async_size) ||
	     offset == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size.  Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = radix_tree_next_hole(&mapping->page_tree, offset+1,max);
		rcu_read_unlock();

		if (!start || start - offset > max)
			return 0;

		ra->start = start;
		ra->size = start - offset;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 * trivial case: (offset - prev_offset) == 1
	 * unaligned reads: (offset - prev_offset) == 0
	 */
	prev_offset = (unsigned long long)ra->prev_pos >> PAGE_CACHE_SHIFT;
	if (offset - prev_offset <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	if (try_context_readahead(mapping, ra, offset, req_size, max))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);

initial_readahead:
	ra->start = offset;
	ra->size = get_init_ra_size(req_size, max);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulting next readahead window into the current one.
	 */
	if (offset == ra->start && ra->size == ra->async_size) {
		ra->async_size = get_next_ra_size(ra, max);
		ra->size += ra->async_size;
	}

	return ra_submit(ra, mapping, filp);
}

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *   pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss has
 * occurred: it will submit the read.  The readahead logic may decide to
 * piggyback more pages onto the read request if access patterns suggest it
 * will improve performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       pgoff_t offset, unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/* be dumb */
	if (filp && (filp->f_mode & FMODE_RANDOM)) {
		force_page_cache_readahead(mapping, filp, offset, req_size);
		return;
	}

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
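
/*
 * Typical call pattern, a condensed sketch of what a generic buffered read
 * path does on a cache miss:
 *
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		page_cache_sync_readahead(mapping, &filp->f_ra, filp,
 *					  index, last_index - index);
 *		page = find_get_page(mapping, index);
 *	}
 */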

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *   pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
			   struct file_ra_state *ra, struct file *filp,
			   struct page *page, pgoff_t offset,
			   unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (bdi_read_congested(mapping->backing_dev_info))
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
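
/*
 * Typical call pattern, again a condensed sketch of a generic read path:
 * when a present page still carries the PG_readahead marker, kick off the
 * next window asynchronously before using the page:
 *
 *	if (PageReadahead(page))
 *		page_cache_async_readahead(mapping, &filp->f_ra, filp,
 *					   page, index, last_index - index);
 */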

static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
	     pgoff_t index, unsigned long nr)
{
	if (!mapping || !mapping->a_ops)
		return -EINVAL;

	force_page_cache_readahead(mapping, filp, index, nr);
	return 0;
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	ssize_t ret;
	struct fd f;

	ret = -EBADF;
	f = fdget(fd);
	if (f.file) {
		if (f.file->f_mode & FMODE_READ) {
			struct address_space *mapping = f.file->f_mapping;
			pgoff_t start = offset >> PAGE_CACHE_SHIFT;
			pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
			unsigned long len = end - start + 1;
			ret = do_readahead(mapping, f.file, start, len);
		}
		fdput(f);
	}
	return ret;
}
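
/*
 * Userspace view (illustrative): the byte range is rounded out to page
 * boundaries, so this asks the kernel to pull the first megabyte of the
 * file into the page cache:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	ssize_t ret = readahead(fd, 0, 1024 * 1024);
 */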