// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <malloc.h>
#include <pthread.h>
#include <unistd.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>

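/*
 * Counters shared with the test harness: nr_allocated tracks outstanding
 * allocations so tests can check for leaks, and kmalloc_verbose makes each
 * allocation and free print a trace line.
 */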
int nr_allocated;
int preempt_count;
int kmalloc_verbose;
int test_verbose;

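/*
 * A minimal userspace stand-in for the kernel's kmem_cache.  Freed objects
 * are kept on a singly-linked free list threaded through the objects
 * themselves, reusing radix_tree_node's parent pointer as the link.
 */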
struct kmem_cache {
	pthread_mutex_t lock;
	unsigned int size;
	unsigned int align;
	int nr_objs;
	void *objs;
	void (*ctor)(void *);
};

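/*
 * Pop an object off the cache's free list if one is available; otherwise
 * fall back to posix_memalign()/malloc().  Allocations that may not sleep
 * (no __GFP_DIRECT_RECLAIM) are simply failed, which lets tests exercise
 * the radix tree's out-of-memory paths.
 */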
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t gfp)
{
	void *p;

	if (!(gfp & __GFP_DIRECT_RECLAIM))
		return NULL;

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		struct radix_tree_node *node = cachep->objs;
		cachep->nr_objs--;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
		p = node;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->align) {
			if (posix_memalign(&p, cachep->align, cachep->size))
				return NULL;
		} else {
			p = malloc(cachep->size);
			if (!p)
				return NULL;
		}
		if (cachep->ctor)
			cachep->ctor(p);
		else if (gfp & __GFP_ZERO)
			memset(p, 0, cachep->size);
	}

	uatomic_inc(&nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", p);
	return p;
}

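/*
 * Return an object to the cache.  The free list keeps only about ten
 * unaligned objects for reuse; anything beyond that, and all aligned
 * objects, are poisoned and handed back to free().
 */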
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	uatomic_dec(&nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs > 10 || cachep->align) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;
		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
	pthread_mutex_unlock(&cachep->lock);
}

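/*
 * Userspace kmalloc(): a thin wrapper around malloc() that honours the
 * same gfp conventions as kmem_cache_alloc() above.
 */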
void *kmalloc(size_t size, gfp_t gfp)
{
	void *ret;

	if (!(gfp & __GFP_DIRECT_RECLAIM))
		return NULL;

	ret = malloc(size);
	if (!ret)
		return NULL;
	uatomic_inc(&nr_allocated);
	if (kmalloc_verbose)
		printf("Allocating %p from malloc\n", ret);
	if (gfp & __GFP_ZERO)
		memset(ret, 0, size);
	return ret;
}

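/* Counterpart to kmalloc(); like the kernel's kfree(), NULL is a no-op. */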
void kfree(void *p)
{
	if (!p)
		return;
	uatomic_dec(&nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to malloc\n", p);
	free(p);
}

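/*
 * Create a cache.  The name and flags arguments are accepted for API
 * compatibility with the kernel but are otherwise unused here.
 */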
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		unsigned int flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->align = align;
	ret->nr_objs = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	return ret;
}
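
/*
 * A minimal usage sketch, not part of the test suite: build with
 * -DSLAB_SHIM_EXAMPLE to exercise the cache shim standalone.  It assumes
 * GFP_KERNEL from the harness's <linux/gfp.h> includes
 * __GFP_DIRECT_RECLAIM, so the allocations below succeed.
 */
#ifdef SLAB_SHIM_EXAMPLE
int main(void)
{
	struct kmem_cache *cache;
	void *obj;

	kmalloc_verbose = 1;
	cache = kmem_cache_create("example", sizeof(struct radix_tree_node),
				  0, 0, NULL);

	obj = kmem_cache_alloc(cache, GFP_KERNEL);	/* falls back to malloc() */
	kmem_cache_free(cache, obj);			/* cached on the free list */
	obj = kmem_cache_alloc(cache, GFP_KERNEL);	/* reused from the free list */
	kmem_cache_free(cache, obj);

	return nr_allocated;	/* 0 when allocs and frees balance */
}
#endif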