/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011 Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#ifndef _ZS_MALLOC_H_
#define _ZS_MALLOC_H_

#include <linux/types.h>

/*
 * zsmalloc mapping modes
 *
 * NOTE: These only make a difference when a mapped object spans pages.
 */
enum zs_mapmode {
	ZS_MM_RW, /* normal read-write mapping */
	ZS_MM_RO, /* read-only (no copy-out at unmap time) */
	ZS_MM_WO /* write-only (no copy-in at map time) */
	/*
	 * NOTE: ZS_MM_WO should only be used for initializing new
	 * (uninitialized) allocations. Partial writes to already
	 * initialized allocations should use ZS_MM_RW to preserve the
	 * existing data.
	 */
};

struct zs_pool_stats {
	/* How many pages were migrated (freed) */
	atomic_long_t pages_compacted;
};

struct zs_pool;

struct zs_pool *zs_create_pool(const char *name);
void zs_destroy_pool(struct zs_pool *pool);

unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
void zs_free(struct zs_pool *pool, unsigned long obj);

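/*
 * Example (an illustrative sketch, not part of this header): a minimal
 * pool lifecycle. zs_malloc() returns an opaque handle rather than a
 * dereferenceable pointer; the object must be mapped with
 * zs_map_object() before it can be accessed, and a handle of 0 means
 * the allocation failed. The "example" pool name is arbitrary.
 *
 *	struct zs_pool *pool = zs_create_pool("example");
 *	unsigned long handle;
 *
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	handle = zs_malloc(pool, 128, GFP_KERNEL);
 *	if (!handle) {
 *		zs_destroy_pool(pool);
 *		return -ENOMEM;
 *	}
 *
 *	zs_free(pool, handle);
 *	zs_destroy_pool(pool);
 */
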
size_t zs_huge_class_size(struct zs_pool *pool);
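
/*
 * Example (an illustrative sketch): zs_huge_class_size() reports the
 * smallest object size that zsmalloc stores in a page of its own
 * instead of packing it with other objects. A compressing caller (zram
 * does something similar) can use it to conclude that a barely
 * compressible buffer may as well be stored whole. comp_len below is a
 * hypothetical compressed length computed by the caller.
 *
 *	if (comp_len >= zs_huge_class_size(pool))
 *		comp_len = PAGE_SIZE;
 */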

void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
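
/*
 * Example (an illustrative sketch) of the NOTE in enum zs_mapmode:
 * filling a fresh allocation and reading it back. ZS_MM_WO skips the
 * copy-in at map time, so it is only safe because the object has never
 * been written; updating part of an existing object would need
 * ZS_MM_RW. The returned pointer is only valid until
 * zs_unmap_object(), so keep the mapped section short. src, buf and
 * len are assumed to come from the caller, with len no larger than the
 * size passed to zs_malloc().
 *
 *	void *dst = zs_map_object(pool, handle, ZS_MM_WO);
 *
 *	memcpy(dst, src, len);
 *	zs_unmap_object(pool, handle);
 *
 *	dst = zs_map_object(pool, handle, ZS_MM_RO);
 *	memcpy(buf, dst, len);
 *	zs_unmap_object(pool, handle);
 */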

unsigned long zs_get_total_pages(struct zs_pool *pool);
unsigned long zs_compact(struct zs_pool *pool);

void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
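
/*
 * Example (an illustrative sketch): triggering compaction and reading
 * the pool statistics afterwards. pages_compacted accumulates over the
 * life of the pool, so the value printed here is a running total.
 *
 *	struct zs_pool_stats stats;
 *
 *	zs_compact(pool);
 *	zs_pool_stats(pool, &stats);
 *	pr_info("zsmalloc: %lu total pages, %ld compacted\n",
 *		zs_get_total_pages(pool),
 *		atomic_long_read(&stats.pages_compacted));
 */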
#endif