added lsns plugin for linux #33

Open: wants to merge 1 commit into master
9 changes: 9 additions & 0 deletions Pasquale95/README.md
@@ -0,0 +1,9 @@
## General info

Author: Pasquale Convertini

See https://github.com/Pasquale95/ for updates and license information.

## Project list

- [lsns](https://github.com/Pasquale95/lsns)
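
## Example usage

A minimal invocation sketch, assuming the plugin directory is passed to
Volatility 2 with `--plugins` and that the command name follows the plugin
class (image path and profile name are illustrative):

    python vol.py --plugins=Pasquale95 -f memory.lime --profile=LinuxUbuntu1604x64 lsns -t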
394 changes: 394 additions & 0 deletions Pasquale95/lsns.py
@@ -0,0 +1,394 @@
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#

"""
@author: Pasquale Convertini
@license: GNU General Public License 2.0
@contact: pasqualeconvertini95@gmail.com
"""

import struct

import volatility.obj as obj
import volatility.utils as utils
import volatility.poolscan as poolscan
import volatility.debug as debug

import volatility.plugins.linux.common as linux_common
import volatility.plugins.linux.pslist as pslist


######################################################
## UTILITY ##
######################################################
class nsproxy_struct(object):
''' Store info contained in each nsproxy
and metadata about the owner process '''

def __init__(self, pid, ppid, nmps, command, arguments):
self.pid = pid
self.ppid = ppid
self.uts_ns = nmps[0]
self.ipc_ns = nmps[1]
self.mnt_ns = nmps[2]
self.pid_ns = nmps[3]
self.net_ns = nmps[4]
self.cgroup_ns = nmps[5]
self.user_ns = nmps[6]
self.command = command
self.arguments = arguments


######################################################
## UTILITY ##
######################################################
class ns_info(object):
''' Store summary info about a namespace
plus the list of owning processes '''

def __init__(self, addr, t, pid, inum=obj.NoneObject()):
self.addr = addr
self.t = t
self.pids = []
self.pids.append(pid)
self.inum = inum

def __str__(self):
return "0x{0:x} {1:6s} {2:6s} {3:5s}".format(self.addr, self.t, str(len(self.pids)), str(self.get_ppid()))

def add_pid(self, pid):
self.pids.append(pid)

    def get_ppid(self):
        # the lowest PID in the namespace is treated as its owning process
        return sorted(self.pids)[0]

def get_procs(self):
return sorted(self.pids)


######################################################
## PLUGIN CLASS ##
######################################################
class lsns(pslist.linux_pslist):
""" Scan memory for Linux namespaces """

    NUM_NS = 6          # bumped to 7 when the kernel provides cgroup namespaces (see calculate())
POINTER_DIM = 8 #bytes
namespaces = {}
ns_structs = {}


################################################
## INIT CLASS and CONFIG OPTIONS ##
def __init__(self, config, *args, **kwargs):
linux_common.AbstractLinuxCommand.__init__(self, config, *args, **kwargs)
self._config.add_option('TABLE', short_option = 't', default = None, help = 'print in tabular format', action = 'store_true')
self._config.add_option('INODE', short_option = 'i', default = None, help = 'print inode number instead of offset', action = 'store_true')
self._config.add_option('PID', short_option = 'p', default = None, help = 'Operate on these Process IDs (comma-separated)', action = 'store', type="str")
        self._config.add_option('NS', short_option = 'n', default = None, help = 'Operate on these NameSpaces (comma-separated hex offsets)', action = 'store', type="str")
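
        # Example option combinations (a sketch: image path, profile name and the
        # namespace offset are illustrative; the command name is assumed to follow
        # this class name when the plugin directory is loaded with --plugins):
        #   vol.py --plugins=Pasquale95 -f mem.lime --profile=<LinuxProfile> lsns    # classic listing, one row per namespace
        #   ... lsns -t                        # one row per process
        #   ... lsns -t -i                     # inode numbers instead of offsets
        #   ... lsns -p 1,42                   # namespaces owned by the given PIDs
        #   ... lsns -n 0xffff88003b2a1840     # process tree inside that namespace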


###############################################
## STARTING ENGINE OF THE CLASS ##
def calculate(self):
linux_common.set_plugin_members(self)

        # kernels that export init_cgroup_ns support cgroup namespaces, so track 7 of them
if self.addr_space.profile.get_symbol("init_cgroup_ns"):
self.NUM_NS = 7

        # All tasks are yielded here; the -p/-n filters are applied later in the
        # output routines (see pslist.py for how to filter by PID at this stage instead)
for task in self.allprocs():
yield task


################################################
## PRINT dim BYTES FROM addr ##
def print_data(self, addr, dim):
data = self.addr_space.read(addr, dim)
for offset, hexchars, chars in utils.Hexdump(data, self.POINTER_DIM):
print "{0:#010x} {1:<48} {2}".format(addr + offset, hexchars, ''.join(chars))


################################################
## RETURN VIRTUAL OR PHYSICAL ADDR ##
def get_address(self, el):
addr = None
if hasattr(self, "wants_physical") and el.obj_vm.base:
            addr = self.addr_space.vtop(el.obj_offset)
if addr == None:
addr = el.obj_offset
return addr


################################################
## FILL GLOBAL DICTIONARIES ##
def _get_task_values(self, task):

if task.parent.is_valid():
ppid = task.parent.pid
else:
ppid = "-"

## CHECK FOR NSPROXY EXISTENCE ##
        if not task.nsproxy:
return False
else:
# GET POINTER TO NSPROXY
nsproxy_obj = obj.Object("nsproxy", vm=self.addr_space, offset=int(task.nsproxy))

## UTS_NS ##
uts_ns = int(nsproxy_obj.uts_ns)
uts_inum = self.get_inode("uts_namespace", uts_ns)
if uts_ns in self.namespaces:
self.namespaces[uts_ns].add_pid(task.pid)
else:
self.namespaces[uts_ns] = ns_info(uts_ns, "uts", task.pid, uts_inum)

## IPC_NS ##
ipc_ns = int(nsproxy_obj.ipc_ns)
ipc_inum = self.get_inode("ipc_namespace", ipc_ns)
if ipc_ns in self.namespaces:
self.namespaces[ipc_ns].add_pid(task.pid)
else:
self.namespaces[ipc_ns] = ns_info(ipc_ns, "ipc", task.pid, ipc_inum)

## MNT_NS ##
mnt_ns = int(nsproxy_obj.mnt_ns)
mnt_inum = self.get_inode("mnt_namespace", mnt_ns)
if mnt_ns in self.namespaces:
self.namespaces[mnt_ns].add_pid(task.pid)
else:
self.namespaces[mnt_ns] = ns_info(mnt_ns, "mnt", task.pid, mnt_inum)

## PID_NS_FOR_CHILDREN ##
pid_ns = int(nsproxy_obj.pid_ns_for_children)
pid_inum = self.get_inode("pid_namespace", pid_ns)
if pid_ns in self.namespaces:
self.namespaces[pid_ns].add_pid(task.pid)
else:
self.namespaces[pid_ns] = ns_info(pid_ns, "pid", task.pid, pid_inum)

## NET_NS ##
net_ns = int(nsproxy_obj.net_ns)
net_inum = self.get_inode("net", net_ns)
if net_ns in self.namespaces:
self.namespaces[net_ns].add_pid(task.pid)
else:
self.namespaces[net_ns] = ns_info(net_ns, "net", task.pid, net_inum)

            ## CGROUP_NS -> only on kernels that provide cgroup namespaces ##
if self.NUM_NS == 7:
cgroup_ns = int(nsproxy_obj.cgroup_ns)
cgroup_inum = self.get_inode("cgroup_namespace", cgroup_ns)
if cgroup_ns in self.namespaces:
self.namespaces[cgroup_ns].add_pid(task.pid)
else:
self.namespaces[cgroup_ns] = ns_info(cgroup_ns, "cgroup", task.pid, cgroup_inum)
else:
cgroup_ns = obj.NoneObject()

## CHECK FOR CRED STRUCT EXISTENCE ##
if task.cred == None:
user_ns = obj.NoneObject()
else:
            # GET POINTER TO CRED STRUCT
nsproxy_obj = obj.Object("cred", vm=self.addr_space, offset=int(task.cred))

## USER_NS ##
user_ns = int(nsproxy_obj.user_ns)
user_inum = self.get_inode("user_namespace", user_ns)
if user_ns in self.namespaces:
self.namespaces[user_ns].add_pid(task.pid)
else:
self.namespaces[user_ns] = ns_info(user_ns, "user", task.pid, user_inum)

self.ns_structs[task.pid] = nsproxy_struct(task.pid, ppid, [uts_ns, ipc_ns, mnt_ns, pid_ns, net_ns, cgroup_ns, user_ns], task.comm, task.get_commandline())
return True


################################################
## READ INODE VALUE ##
def get_inode(self, ns_name, offs):
if self.addr_space.profile.has_type(ns_name):
ns_struct = obj.Object(ns_name, vm=self.addr_space, offset=offs)
if hasattr(ns_struct, 'ns'):
ns_n = self.get_address(ns_struct.ns)
ns_obj = obj.Object("ns_common", vm=self.addr_space, offset=ns_n)
return int(ns_obj.inum)
return obj.NoneObject()


################################################
## PRINT CHUNK OF DATA ##
    # NOTE: this redefinition shadows print_data() above, so only this variant is reachable
    def print_data(self, addr, dim):
data = self.addr_space.read(addr, dim)
for offset, hexchars, chars in utils.Hexdump(data, self.POINTER_DIM):
print hexchars
print ""


################################################
## READ POINTER VALUE ##
def read_pointer(self, addr, dim):
        # reassemble a little-endian pointer of `dim` bytes from the hexdump row
        # (for dim == 8 this is equivalent to struct.unpack("<Q", data)[0])
data = self.addr_space.read(addr, dim)
for offset, hexchars, chars in utils.Hexdump(data, dim):
pointer = "0x"+"".join(hexchars.split()[::-1])
return int(pointer, 16)


################################################
## Print for each process all namespaces ##
## table format ##
def table_format(self, outfd, data):

if self._config.INODE:
text = "10"
else:
text = "[addrpad]"

self.table_header(outfd, [("PROCESS", "15"),
("PID", "5"),
("uts_ns", text),
("ipc_ns", text),
("mnt_ns", text),
("pid_ns", text),
("net_ns", text),
("cgroup_ns", text),
("user_ns", text)])

for task in data:
self._get_task_values(task)

for key in sorted(self.ns_structs.keys()):
c_p = self.ns_structs[key]
if self._config.INODE:
self.table_row(outfd, c_p.command, c_p.pid, self.namespaces[c_p.uts_ns].inum, self.namespaces[c_p.ipc_ns].inum, self.namespaces[c_p.mnt_ns].inum, self.namespaces[c_p.pid_ns].inum, self.namespaces[c_p.net_ns].inum, self.namespaces[c_p.cgroup_ns].inum, self.namespaces[c_p.user_ns].inum)
else:
self.table_row(outfd, c_p.command, c_p.pid, c_p.uts_ns, c_p.ipc_ns, c_p.mnt_ns, c_p.pid_ns, c_p.net_ns, c_p.cgroup_ns, c_p.user_ns)


################################################
## lsns <namespace> PRINT FORMAT ##
def namespace_format(self, outfd, data):
for task in data:
self._get_task_values(task)

nslist = self._config.NS
if nslist:
nslist = [int(s, 16) for s in self._config.NS.split(',')]

## For each namespace
for ns in nslist:
## List processes in tree format
if ns in self.namespaces:
outfd.write("\nNAMESPACE: {0:6s} (TYPE: {1})".format(hex(ns), self.namespaces[ns].t))
if self.namespaces[ns].inum:
outfd.write(" (INODE: {0})".format(int(self.namespaces[ns].inum)))
## Write header
outfd.write("\n{0:6s} {1:6s} {2:64s}\n".format("PID", "PPID", "COMMAND"))

procs = [self.ns_structs[a] for a in self.namespaces[ns].get_procs()]
pids = [x.pid for x in procs]

hierarchy = {}
for proc in sorted(procs, key=lambda x: x.pid):
if proc.ppid not in hierarchy and proc.ppid in pids:
hierarchy[proc.ppid] = []
elif proc.ppid not in hierarchy and proc.ppid not in pids:
hierarchy[proc.pid] = []
if proc.ppid in hierarchy:
hierarchy[proc.ppid].append(proc.pid)
already_printed = []

for key in sorted(hierarchy.keys()):
if (key not in already_printed):
already_printed = self.printTree(key, hierarchy, already_printed, outfd, ns)


################################################
## print like a tree ##
def printTree(self, parent, tree, already_printed, outfd, ns, to_p = '', indent=''):
outfd.write("{0:6s} {1:6s} {2:64s}\n".format(str(parent), str(self.ns_structs[parent].ppid),
to_p+self.ns_structs[parent].arguments))
if parent not in tree:
already_printed.append(parent)
return already_printed
if tree[parent]:
for child in tree[parent][:-1]:
aa = indent + u'\u251C' + u'\u2500 '#|-
already_printed = self.printTree(child, tree, already_printed, outfd, ns, aa, indent + u'\u2502 ')
child = tree[parent][-1]
aa = indent + u'\u2514' + u'\u2500 '#`-
self.printTree(child, tree, already_printed, outfd, ns, aa, indent + ' '),
already_printed.append(parent)
return already_printed


################################################
## lsns print for each PID FORMAT ##
def pid_format(self, outfd, data):
for task in data:
self._get_task_values(task)

pidlist = self._config.PID
if pidlist:
pidlist = [a for a in self.ns_structs.keys() for p in self._config.PID.split(',') if int(p) == a]

for pid in pidlist:
if pid in self.ns_structs.keys():
outfd.write("\nPID: {0:6s}\n".format(str(pid)))
#Print header
self.table_header(outfd, [("NS_offset", "[addrpad]"), ("NS", "10"),("TYPE", "6"), ("NSPROC", "6"), ("PID", "5"), ("COMMAND", "100")])

#Print rows
curr_pid = self.ns_structs[pid]
ns = [curr_pid.uts_ns, curr_pid.ipc_ns, curr_pid.mnt_ns, curr_pid.pid_ns, curr_pid.net_ns, curr_pid.cgroup_ns, curr_pid.user_ns]
for i_n in ns:
if i_n in self.namespaces:
n = self.namespaces[i_n]
self.table_row(outfd, n.addr, n.inum, n.t, str(len(n.get_procs())), str(n.get_ppid()), self.ns_structs[n.get_ppid()].arguments)


################################################
## CLASSIC lsns PRINT FORMAT ##
def classic_format(self, outfd, data):
for task in data:
self._get_task_values(task)
self.table_header(outfd, [("NSPACE_Offset", "[addrpad]"), ("NS", "10"), ("TYPE", "6"), ("NSPROC", "6"), ("PID", "5"), ("COMMAND", "15"), ("ARGUMENTS", "100")])

for key in sorted(self.namespaces.keys(), reverse=True):
curr_ns = self.namespaces[key]
self.table_row(outfd, key, curr_ns.inum, curr_ns.t, len(curr_ns.get_procs()), curr_ns.get_ppid(), self.ns_structs[curr_ns.get_ppid()].command, self.ns_structs[curr_ns.get_ppid()].arguments)


######################################################
## OUTPUT ##
######################################################
def render_text(self, outfd, data):
if(self._config.TABLE):
self.table_format(outfd, data)
elif(self._config.NS):
self.namespace_format(outfd, data)
elif(self._config.PID):
self.pid_format(outfd, data)
else:
self.classic_format(outfd, data)

714 changes: 714 additions & 0 deletions Pasquale95/module.c
@@ -0,0 +1,714 @@
/*
This module does absolutely nothing at all. We just build it with debugging
symbols and then read the DWARF symbols from it.
*/
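
/*
 * A sketch of how a stub like this is typically turned into a Volatility
 * profile (kernel release, paths and archive name are illustrative, and a
 * one-line Kbuild/Makefile containing "obj-m += module.o" is assumed):
 *
 *   make -C /lib/modules/$(uname -r)/build CONFIG_DEBUG_INFO=y M=$(pwd) modules
 *   dwarfdump -di module.ko > module.dwarf
 *   zip MyProfile.zip module.dwarf /boot/System.map-$(uname -r)
 *
 * The resulting zip is then placed under volatility/plugins/overlays/linux/.
 */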
#include <linux/module.h>
#include <linux/version.h>

#include <linux/ioport.h>
#include <linux/fs_struct.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/utsname.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/udp.h>
#include <linux/mount.h>
#include <linux/inetdevice.h>
#include <net/protocol.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#include <linux/ipc_namespace.h>
struct ipc_namespace ipc_namespace;

#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,20,0)
struct xa_node xa;
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)
#include <linux/lockref.h>
struct lockref lockref;
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
#include <linux/fdtable.h>
#else
#include <linux/file.h>
#endif

#include <net/ip_fib.h>
#include <linux/un.h>
#include <net/af_unix.h>
#include <linux/pid.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
#include <linux/pid_namespace.h>
struct pid_namespace pid_namespace;
#endif


#ifdef CONFIG_NETFILTER
#include <linux/netfilter.h>

struct nf_hook_ops nf_hook_ops;
struct nf_sockopt_ops nf_sockopt_ops;

#ifdef CONFIG_NETFILTER_XTABLES
#include <linux/netfilter/x_tables.h>
struct xt_table xt_table;
#endif

#endif

#include <linux/radix-tree.h>
#include <net/tcp.h>
#include <net/udp.h>

#include <linux/termios.h>
#include <asm/termbits.h>

#include <linux/notifier.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
struct atomic_notifier_head atomic_notifier_head;
#endif

#include <linux/tty_driver.h>
struct tty_driver tty_driver;

#include <linux/tty.h>
struct tty_struct tty_struct;

struct udp_seq_afinfo udp_seq_afinfo;
struct tcp_seq_afinfo tcp_seq_afinfo;

struct files_struct files_struct;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
struct uts_namespace uts_namespace;
#endif

struct sock sock;
struct inet_sock inet_sock;
struct vfsmount vfsmount;
struct in_device in_device;
struct fib_table fib_table;
struct unix_sock unix_sock;
struct pid pid;
struct radix_tree_root radix_tree_root;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
#ifdef CONFIG_NET_SCHED
#include <net/sch_generic.h>
struct Qdisc qdisc;
#endif
#endif

struct inet_protosw inet_protosw;

/********************************************************************
The following structs are not defined in headers, so we can't import
them. Hopefully they don't change too much.
*********************************************************************/

struct kthread_create_info
{
/* Information passed to kthread() from kthreadd. */
int (*threadfn)(void *data);
void *data;
int node;

/* Result passed back to kthread_create() from kthreadd. */
struct task_struct *result;
struct completion done;

struct list_head list;
};

struct kthread_create_info kthread_create_info;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
#include <net/net_namespace.h>
#endif

#include <net/ip.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <linux/compiler.h>

#define EMBEDDED_HASH_SIZE (L1_CACHE_BYTES / sizeof(struct hlist_head))

#define __rcu

struct fn_zone {
struct fn_zone *fz_next; /* Next not empty zone */
struct hlist_head *fz_hash; /* Hash table pointer */
seqlock_t fz_lock;
u32 fz_hashmask; /* (fz_divisor - 1) */
u8 fz_order; /* Zone order (0..32) */
u8 fz_revorder; /* 32 - fz_order */
__be32 fz_mask; /* inet_make_mask(order) */

struct hlist_head fz_embedded_hash[EMBEDDED_HASH_SIZE];

int fz_nent; /* Number of entries */
int fz_divisor; /* Hash size (mask+1) */
} fn_zone;

struct fn_hash {
struct fn_zone *fn_zones[33];
struct fn_zone *fn_zone_list;
} fn_hash;

struct fib_alias
{
struct list_head fa_list;
struct fib_info *fa_info;
u8 fa_tos;
u8 fa_type;
u8 fa_scope;
u8 fa_state;
#ifdef CONFIG_IP_FIB_TRIE
struct rcu_head rcu;
#endif
};

struct fib_node
{
struct hlist_node fn_hash;
struct list_head fn_alias;
__be32 fn_key;
struct fib_alias fn_embedded_alias;
};


struct fib_node fib_node;
struct fib_alias fib_alias;

struct rt_hash_bucket {
struct rtable __rcu *chain;
} rt_hash_bucket;

#ifndef RADIX_TREE_MAP_SHIFT

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
#define RADIX_TREE_MAP_SHIFT 6
#else
#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
#endif
#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1)
#define RADIX_TREE_TAG_LONGS ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define RADIX_TREE_MAX_TAGS 2

struct radix_tree_node {
unsigned int height; /* Height from the bottom */
unsigned int count;
struct rcu_head rcu_head;
void *slots[RADIX_TREE_MAP_SIZE];
unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#define OUR_OWN_MOD_STRUCTS
#endif

#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,18)
#define OUR_OWN_MOD_STRUCTS
#endif

#ifdef OUR_OWN_MOD_STRUCTS
struct module_sect_attr
{
struct module_attribute mattr;
char *name;
unsigned long address;
};

struct module_sect_attrs
{
struct attribute_group grp;
unsigned int nsections;
struct module_sect_attr attrs[0];
};

struct module_sect_attrs module_sect_attrs;

#else

struct module_sections module_sect_attrs;

#endif

struct module_kobject module_kobject;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)
// we can't get the definition of mod_tree_root directly
// because it is declared in module.c as a static struct
// the latch_tree_root struct has the variables we want
// immediately after it though

#include <linux/rbtree_latch.h>

struct latch_tree_root ltr;

#endif

#ifdef CONFIG_SLAB

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
/*
* struct kmem_cache
*
* manages a cache.
*/

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
unsigned int batchcount;
unsigned int limit;
unsigned int shared;

unsigned int buffer_size;
u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */

unsigned int flags; /* constant flags */
unsigned int num; /* # of objs per slab */

/* 4) cache_grow/shrink */
/* order of pgs per slab (2^n) */
unsigned int gfporder;

/* force GFP flags, e.g. GFP_DMA */
gfp_t gfpflags;

size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
struct kmem_cache *slabp_cache;
unsigned int slab_size;
unsigned int dflags; /* dynamic flags */

/* constructor func */
void (*ctor)(void *obj);

/* 5) cache creation/removal */
const char *name;
struct list_head next;

/* 6) statistics */
#if STATS
unsigned long num_active;
unsigned long num_allocations;
unsigned long high_mark;
unsigned long grown;
unsigned long reaped;
unsigned long errors;
unsigned long max_freeable;
unsigned long node_allocs;
unsigned long node_frees;
unsigned long node_overflow;
atomic_t allochit;
atomic_t allocmiss;
atomic_t freehit;
atomic_t freemiss;
#endif
#if DEBUG
/*
* If debugging is enabled, then the allocator can add additional
* fields and/or padding to every object. buffer_size contains the total
* object size including these internal fields, the following two
* variables contain the offset to the user object and its size.
*/
int obj_offset;
int obj_size;
#endif
/*
* We put nodelists[] at the end of kmem_cache, because we want to size
* this array to nr_node_ids slots instead of MAX_NUMNODES
* (see kmem_cache_init())
* We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
* is statically defined, so we reserve the max number of nodes.
*/
struct kmem_list3 *nodelists[MAX_NUMNODES];
/*
* Do not add fields after nodelists[]
*/
};
#else

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
unsigned int batchcount;
unsigned int limit;
unsigned int shared;

unsigned int buffer_size;
/* 3) touched by every alloc & free from the backend */
struct kmem_list3 *nodelists[MAX_NUMNODES];

unsigned int flags; /* constant flags */
unsigned int num; /* # of objs per slab */

/* 4) cache_grow/shrink */
/* order of pgs per slab (2^n) */
unsigned int gfporder;

/* force GFP flags, e.g. GFP_DMA */
gfp_t gfpflags;

size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
struct kmem_cache *slabp_cache;
unsigned int slab_size;
unsigned int dflags; /* dynamic flags */

/* constructor func */
void (*ctor) (void *, struct kmem_cache *, unsigned long);

/* de-constructor func */
void (*dtor) (void *, struct kmem_cache *, unsigned long);

/* 5) cache creation/removal */
const char *name;
struct list_head next;

/* 6) statistics */
#if STATS
unsigned long num_active;
unsigned long num_allocations;
unsigned long high_mark;
unsigned long grown;
unsigned long reaped;
unsigned long errors;
unsigned long max_freeable;
unsigned long node_allocs;
unsigned long node_frees;
unsigned long node_overflow;
atomic_t allochit;
atomic_t allocmiss;
atomic_t freehit;
atomic_t freemiss;
#endif
#if DEBUG
/*
* If debugging is enabled, then the allocator can add additional
* fields and/or padding to every object. buffer_size contains the total
* object size including these internal fields, the following two
* variables contain the offset to the user object and its size.
*/
int obj_offset;
int obj_size;
#endif
};

#endif /*kmem_cache decl*/

struct kmem_cache kmem_cache;
#endif

struct kmem_list3 {
struct list_head slabs_partial; /* partial list first, better asm code */
struct list_head slabs_full;
struct list_head slabs_free;
unsigned long free_objects;
unsigned int free_limit;
unsigned int colour_next; /* Per-node cache coloring */
spinlock_t list_lock;
struct array_cache *shared; /* shared per node */
struct array_cache **alien; /* on other nodes */
unsigned long next_reap; /* updated without locking */
int free_touched; /* updated without locking */
};

struct kmem_list3 kmem_list3;

struct slab {
struct list_head list;
unsigned long colouroff;
void *s_mem; /* including colour offset */
unsigned int inuse; /* num of objs active in slab */
unsigned int free;
unsigned short nodeid;
};

struct slab slab;
#endif

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
/* Starting with Linux kernel 3.7 the struct timekeeper is defined in include/linux/timekeeper_internal.h */
#include <linux/timekeeper_internal.h>
#else
/* Before Linux kernel 3.7 the struct timekeeper has to be taken from kernel/time/timekeeping.c */

typedef u64 cycle_t;

struct timekeeper {
/* Current clocksource used for timekeeping. */
struct clocksource *clock;
/* NTP adjusted clock multiplier */
u32 mult;
/* The shift value of the current clocksource. */
int shift;

/* Number of clock cycles in one NTP interval. */
cycle_t cycle_interval;
/* Number of clock shifted nano seconds in one NTP interval. */
u64 xtime_interval;
/* shifted nano seconds left over when rounding cycle_interval */
s64 xtime_remainder;
/* Raw nano seconds accumulated per NTP interval. */
u32 raw_interval;

/* Clock shifted nano seconds remainder not stored in xtime.tv_nsec. */
u64 xtime_nsec;
/* Difference between accumulated time and NTP time in ntp
* shifted nano seconds. */
s64 ntp_error;
/* Shift conversion between clock shifted nano seconds and
* ntp shifted nano seconds. */
int ntp_error_shift;

/* The current time */
struct timespec xtime;
/*
* wall_to_monotonic is what we need to add to xtime (or xtime corrected
* for sub jiffie times) to get to monotonic time. Monotonic is pegged
* at zero at system boot time, so wall_to_monotonic will be negative,
* however, we will ALWAYS keep the tv_nsec part positive so we can use
* the usual normalization.
*
* wall_to_monotonic is moved after resume from suspend for the
* monotonic time not to jump. We need to add total_sleep_time to
* wall_to_monotonic to get the real boot based time offset.
*
* - wall_to_monotonic is no longer the boot time, getboottime must be
* used instead.
*/
struct timespec wall_to_monotonic;
/* time spent in suspend */
struct timespec total_sleep_time;
/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
struct timespec raw_time;

/* Offset clock monotonic -> clock realtime */
ktime_t offs_real;

/* Offset clock monotonic -> clock boottime */
ktime_t offs_boot;

/* Seqlock for all timekeeper values */
seqlock_t lock;
};

#endif

struct timekeeper my_timekeeper;

struct log {
u64 ts_nsec; /* timestamp in nanoseconds */
u16 len; /* length of entire record */
u16 text_len; /* length of text buffer */
u16 dict_len; /* length of dictionary buffer */
u8 facility; /* syslog facility */
u8 flags:5; /* internal record flags */
u8 level:3; /* syslog level */
};

struct log my_log;

#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0)
// TODO: handle kernels built with structure layout randomization (__randomize_layout)
struct mnt_namespace {
atomic_t count;
struct ns_common ns;
struct mount * root;
struct list_head list;
struct user_namespace *user_ns;
struct ucounts *ucounts;
u64 seq; /* Sequence number to prevent loops */
wait_queue_head_t poll;
u64 event;
unsigned int mounts; /* # of mounts in the namespace */
unsigned int pending_mounts;
} __randomize_layout;
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0)
struct mnt_namespace {
atomic_t count;
struct ns_common ns;
struct mount * root;
struct list_head list;
struct user_namespace *user_ns;
struct ucounts *ucounts;
u64 seq; /* Sequence number to prevent loops */
wait_queue_head_t poll;
u64 event;
unsigned int mounts; /* # of mounts in the namespace */
unsigned int pending_mounts;
};
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4,5,0)
struct mnt_namespace {
atomic_t count;
struct ns_common ns;
struct mount * root;
struct list_head list;
struct user_namespace *user_ns;
u64 seq; /* Sequence number to prevent loops */
wait_queue_head_t poll;
u64 event;
};
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,65)
struct mnt_namespace {
atomic_t count;
struct ns_common ns;
struct mount * root;
struct list_head list;
struct user_namespace *user_ns;
u64 seq; /* Sequence number to prevent loops */
wait_queue_head_t poll;
u64 event;
unsigned int mounts; /* # of mounts in the namespace */
unsigned int pending_mounts;
};
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
struct mnt_namespace {
atomic_t count;
struct ns_common ns;
struct mount * root;
struct list_head list;
struct user_namespace *user_ns;
u64 seq; /* Sequence number to prevent loops */
wait_queue_head_t poll;
u64 event;
};
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
struct mnt_namespace {
atomic_t count;
unsigned int proc_inum;
struct mount * root;
struct list_head list;
struct user_namespace *user_ns;
u64 seq; /* Sequence number to prevent loops */
wait_queue_head_t poll;
u64 event;
};
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)
struct mnt_namespace {
atomic_t count;
struct mount * root;
struct list_head list;
wait_queue_head_t poll;
int event;
};
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)
struct mnt_pcp {
int mnt_count;
int mnt_writers;
};
struct mount {
struct list_head mnt_hash;
struct mount *mnt_parent;
struct dentry *mnt_mountpoint;
struct vfsmount mnt;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0)
union {
struct rcu_head mnt_rcu;
struct llist_node mnt_llist;
};
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)
struct callback_head rcu;
#endif
#ifdef CONFIG_SMP
struct mnt_pcp __percpu *mnt_pcp;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0)
atomic_t mnt_longterm; /* how many of the refs are longterm */
#endif
#else
int mnt_count;
int mnt_writers;
#endif
struct list_head mnt_mounts; /* list of children, anchored here */
struct list_head mnt_child; /* and going through their mnt_child */
struct list_head mnt_instance; /* mount instance on sb->s_mounts */
const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
struct list_head mnt_list;
struct list_head mnt_expire; /* link in fs-specific expiry list */
struct list_head mnt_share; /* circular list of shared mounts */
struct list_head mnt_slave_list;/* list of slave mounts */
struct list_head mnt_slave; /* slave list entry */
struct mount *mnt_master; /* slave is on master->mnt_slave_list */
struct mnt_namespace *mnt_ns; /* containing namespace */
#ifdef CONFIG_FSNOTIFY
struct hlist_head mnt_fsnotify_marks;
__u32 mnt_fsnotify_mask;
#endif
int mnt_id; /* mount identifier */
int mnt_group_id; /* peer group identifier */
int mnt_expiry_mark; /* true if marked for expiry */
int mnt_pinned;
int mnt_ghosts;
};

#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)
struct proc_dir_entry {
unsigned int low_ino;
umode_t mode;
nlink_t nlink;
kuid_t uid;
kgid_t gid;
loff_t size;
const struct inode_operations *proc_iops;
const struct file_operations *proc_fops;
struct proc_dir_entry *next, *parent, *subdir;
void *data;
atomic_t count; /* use count */
atomic_t in_use; /* number of callers into module in progress; */
/* negative -> it's going away RSN */
struct completion *pde_unload_completion;
struct list_head pde_openers; /* who did ->open, but not ->release */
spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
u8 namelen;
char name[];
};
#else
struct proc_dir_entry {
unsigned int low_ino;
umode_t mode;
nlink_t nlink;
kuid_t uid;
kgid_t gid;
loff_t size;
const struct inode_operations *proc_iops;
const struct file_operations *proc_fops;
struct proc_dir_entry *parent;
struct rb_root subdir;
struct rb_node subdir_node;
void *data;
atomic_t count; /* use count */
atomic_t in_use; /* number of callers into module in progress; */
/* negative -> it's going away RSN */
struct completion *pde_unload_completion;
struct list_head pde_openers; /* who did ->open, but not ->release */
spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */
u8 namelen;
char name[];
};
#endif
#endif

struct resource resource;