File: /Users/paulross/dev/linux/linux-3.13/include/linux/list_lru.h


/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>

/* list_lru_walk_cb has to always return one of those */
enum lru_status {
    LRU_REMOVED,        /* item removed from list */
    LRU_ROTATE,         /* item referenced, give another pass */
    LRU_SKIP,           /* item cannot be locked, skip */
    LRU_RETRY,          /* item not freeable. May drop the lock
                           internally, but has to return locked. */
};

struct list_lru_node {
    spinlock_t              lock;
    struct list_head        list;
    /* kept as signed so we can catch imbalance bugs */
    long                    nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
    struct list_lru_node    *node;
    nodemask_t              active_nodes;
};

void list_lru_destroy(struct list_lru *lru);
int list_lru_init(struct list_lru *lru);

/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns without
 * doing anything. The caller therefore does not need to track whether the
 * element is already on a list and may update it lazily. Note, however, that
 * this holds for *a* list, not *this* list: if the caller's objects can sit
 * on more than one type of list, it is up to the caller to fully remove the
 * item from the previous list (with list_lru_del(), for instance) before
 * moving it to @lru.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);
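
/*
 * Illustrative only, not part of this header: a minimal sketch of the lazy
 * "add on last put" pattern described above, as it might appear in a user's
 * .c file. struct my_object, my_lru and the helpers are hypothetical; the
 * embedded list_head is initialised once so the list_empty() check inside
 * list_lru_add() is always well defined.
 */
#include <linux/atomic.h>
#include <linux/slab.h>

struct my_object {
    struct list_head    lru;        /* linkage used by my_lru below */
    atomic_t            refcount;
    /* ... payload ... */
};

static struct list_lru my_lru;      /* assume list_lru_init(&my_lru) ran at init */

static struct my_object *my_object_alloc(void)
{
    struct my_object *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

    if (obj) {
        INIT_LIST_HEAD(&obj->lru);      /* start off every list */
        atomic_set(&obj->refcount, 1);
    }
    return obj;
}

static void my_object_put(struct my_object *obj)
{
    /* last reference gone: park the object on the LRU for later reclaim */
    if (atomic_dec_and_test(&obj->refcount))
        list_lru_add(&my_lru, &obj->lru);   /* false if already on the list */
}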

/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments above about an element already belonging to
 * a list apply to list_lru_del() as well.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);
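
/*
 * Illustrative only: continuing the hypothetical my_object sketch above, an
 * object that becomes busy again is pulled off the LRU before it is reused.
 * list_lru_del() returning false simply means the item was not on the list.
 */
static void my_object_mark_busy(struct my_object *obj)
{
    atomic_inc(&obj->refcount);
    list_lru_del(&my_lru, &obj->lru);   /* no longer a reclaim candidate */
}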

/**
 * list_lru_count_node: return the number of objects currently held by @lru on node @nid
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 *
 * Always returns a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
unsigned long list_lru_count_node(struct list_lru *lru, int nid);
static inline unsigned long list_lru_count(struct list_lru *lru)
{
    long count = 0;
    int nid;

    for_each_node_mask(nid, lru->active_nodes)
        count += list_lru_count_node(lru, nid);

    return count;
}
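
/*
 * Illustrative only: a plausible ->count_objects() implementation for a
 * NUMA-aware shrinker (hypothetical, registered with SHRINKER_NUMA_AWARE),
 * reporting how many objects the hypothetical my_lru holds on the node being
 * shrunk. The value is only a snapshot; no lock is taken.
 */
#include <linux/shrinker.h>

static unsigned long my_cache_count_objects(struct shrinker *shrink,
                                            struct shrink_control *sc)
{
    return list_lru_count_node(&my_lru, sc->nid);
}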

typedef enum lru_status
(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);
/**
 * list_lru_walk_node: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque argument that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and the caller-provided opaque argument. The @isolate callback can
 * choose to drop the lock internally, but *must* return with the lock held.
 * The callback will return an enum lru_status telling the list_lru
 * infrastructure what to do with the object being scanned.
 *
 * Please note that @nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
                                 list_lru_walk_cb isolate, void *cb_arg,
                                 unsigned long *nr_to_walk);

static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
              void *cb_arg, unsigned long nr_to_walk)
{
    long isolated = 0;
    int nid;

    for_each_node_mask(nid, lru->active_nodes) {
        isolated += list_lru_walk_node(lru, nid, isolate,
                                       cb_arg, &nr_to_walk);
        if (nr_to_walk <= 0)
            break;
    }
    return isolated;
}
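
/*
 * Illustrative only: a sketch of an isolate callback and a pruning helper for
 * the hypothetical my_object cache above. The callback runs under the per-node
 * lru lock; it moves reclaimable items onto a private dispose list (passed via
 * @cb_arg) and lets the caller free them after the walk, outside the lock.
 * The nr_items accounting for LRU_REMOVED entries is done by the list_lru core.
 */
static enum lru_status my_object_isolate(struct list_head *item,
                                         spinlock_t *lock, void *cb_arg)
{
    struct my_object *obj = container_of(item, struct my_object, lru);
    struct list_head *dispose = cb_arg;

    if (atomic_read(&obj->refcount))
        return LRU_ROTATE;              /* referenced again, give another pass */

    list_move_tail(item, dispose);      /* off the lru, onto our private list */
    return LRU_REMOVED;
}

static unsigned long my_cache_prune(unsigned long nr_to_scan)
{
    LIST_HEAD(dispose);
    struct my_object *obj, *next;
    unsigned long freed;

    freed = list_lru_walk(&my_lru, my_object_isolate, &dispose, nr_to_scan);

    /* the isolated objects are now private to us; free them unlocked */
    list_for_each_entry_safe(obj, next, &dispose, lru)
        kfree(obj);

    return freed;
}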
#endif /* _LRU_LIST_H */