sgpool-128             3      4   3072    2    2 : tunables   24   12    8 : slabdata      2      2      0
sgpool-64              2      5   1536    5    2 : tunables   24   12    8 : slabdata      1      1      0

slab是如何分配其object的呢。创建一个slab时,每个slab占用几个page,每个slab中有几个object?这样的问题
不知道大家关注过没有。上面是我利用命令cat /proc/slabinfo得到的结果中的其中两行。从上面的结果来看,我们
知道这两个slab的名字叫做sgpool-128 sgpool-64,它们相关的信息如下
slab名字     object大小    有多少个object   占据几个page
sgpool-128    3072             2            2
sgpool-64        1536          5            2

下面的函数逐步增加page的order数目,只要下面两个退出条件中有一个满足就停止

1、
        if (gfporder >= slab_break_gfp_order)
            break;
如果order大于等于slab_break_gfp_order就退出;slab_break_gfp_order在内存比较大(超过32MB)的时候是1,否则是0
        
2、
        
        /*
         * Acceptable internal fragmentation?
         */
        if (left_over * 8 <= (PAGE_SIZE << gfporder))
            break;
如果浪费的空间没有超过总空间的1/8也是算满足了要求
        
上面我们的两个例子,都是当只使用一个page时还不能满足"剩余浪费的内存比总内存的1/8要小"这个条件,所以
果断选择了两个page的情况


  1. /**
  2.  * calculate_slab_order - calculate size (page order) of slabs
  3.  * @cachep: pointer to the cache that is being created
  4.  * @size: size of objects to be created in this cache.
  5.  * @align: required alignment for the objects.
  6.  * @flags: slab allocation flags
  7.  *
  8.  * Also calculates the number of objects per slab.
  9.  *
  10.  * This could be made much more intelligent. For now, try to avoid using
  11.  * high order pages for slabs. When the gfp() functions are more friendly
  12.  * towards high-order requests, this should be changed.
  13.  */
  14. static size_t calculate_slab_order(struct kmem_cache *cachep,
  15.             size_t size, size_t align, unsigned long flags)
  16. {
  17.     unsigned long offslab_limit;
  18.     size_t left_over = 0;
  19.     int gfporder;
  20.     
  21.     /*逐步增加oreder来估计*/
  22.     for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
  23.         unsigned int num;
  24.         size_t remainder;

  25.         cache_estimate(gfporder, size, align, flags, &remainder, &num);
  26.         if (!num)
  27.             continue;

  28.         if (flags & CFLGS_OFF_SLAB) {
  29.             /*
  30.              * Max number of objs-per-slab for caches which
  31.              * use off-slab slabs. Needed to avoid a possible
  32.              * looping condition in cache_grow().
  33.              */
  34.             offslab_limit = size - sizeof(struct slab);
  35.             offslab_limit /= sizeof(kmem_bufctl_t);

  36.              if (num > offslab_limit)
  37.                 break;
  38.         }

  39.         /* Found something acceptable - save it away */
  40.         cachep->num = num;
  41.         cachep->gfporder = gfporder;
  42.         left_over = remainder;

  43.         /*
  44.          * A VFS-reclaimable slab tends to have most allocations
  45.          * as GFP_NOFS and we really don't want to have to be allocating
  46.          * higher-order pages when we are unable to shrink dcache.
  47.          */
  48.         if (flags & SLAB_RECLAIM_ACCOUNT)
  49.             break;

  50.         /*
  51.          * Large number of objects is good, but very large slabs are
  52.          * currently bad for the gfp()s.
  53.          */
  54.         if (gfporder >= slab_break_gfp_order)
  55.             break;

  56.         /*
  57.          * Acceptable internal fragmentation?
  58.          */
  59.         if (left_over * 8 <= (PAGE_SIZE << gfporder))
  60.             break;
  61.     }
  62.     return left_over;
  63. }


  1. /*
  2.  * Calculate the number of objects and left-over bytes for a given buffer size.
  3.  */
  4. static void cache_estimate(unsigned long gfporder, size_t buffer_size,
  5.                size_t align, int flags, size_t *left_over,
  6.                unsigned int *num)
  7. {
  8.     int nr_objs;
  9.     size_t mgmt_size;
  10.     size_t slab_size = PAGE_SIZE << gfporder;

  11.     /*
  12.      * The slab management structure can be either off the slab or
  13.      * on it. For the latter case, the memory allocated for a
  14.      * slab is used for:
  15.      *
  16.      * - The struct slab
  17.      * - One kmem_bufctl_t for each object
  18.      * - Padding to respect alignment of @align
  19.      * - @buffer_size bytes for each object
  20.      *
  21.      * If the slab management structure is off the slab, then the
  22.      * alignment will already be calculated into the size. Because
  23.      * the slabs are all pages aligned, the objects will be at the
  24.      * correct alignment when allocated.
  25.      */
  26.     if (flags & CFLGS_OFF_SLAB) {
  27.         mgmt_size = 0;
  28.         nr_objs = slab_size / buffer_size;

  29.         if (nr_objs > SLAB_LIMIT)
  30.             nr_objs = SLAB_LIMIT;
  31.     } else {
  32.         /*
  33.          * Ignore padding for the initial guess. The padding
  34.          * is at most @align-1 bytes, and @buffer_size is at
  35.          * least @align. In the worst case, this result will
  36.          * be one greater than the number of objects that fit
  37.          * into the memory allocation when taking the padding
  38.          * into account.
  39.          */
  40.         nr_objs = (slab_size - sizeof(struct slab)) /
  41.               (buffer_size + sizeof(kmem_bufctl_t));

  42.         /*
  43.          * This calculated number will be either the right
  44.          * amount, or one greater than what we want.
  45.          */
  46.         if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
  47.                > slab_size)
  48.             nr_objs--;

  49.         if (nr_objs > SLAB_LIMIT)
  50.             nr_objs = SLAB_LIMIT;

  51.         mgmt_size = slab_mgmt_size(nr_objs, align);
  52.     }
  53.     *num = nr_objs;
  54.     *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
  55. }


01-04 01:45