#ifndef MODULE
/**
 * module_init() - driver initialization entry point
 * @x: function to be run at kernel boot time or module insertion
 *
 * module_init() will either be called during do_initcalls() (if
 * builtin) or at module insertion time (if a module).  There can only
 * be one per module.
 */
#define module_init(x)	__initcall(x);

/**
 * module_exit() - driver exit entry point
 * @x: function to be run when driver is removed
 *
 * module_exit() will wrap the driver clean-up code
 * with cleanup_module() when used with rmmod when
 * the driver is a module.  If the driver is statically
 * compiled into the kernel, module_exit() has no effect.
 * There can only be one per module.
 */
#define module_exit(x)	__exitcall(x);
#else /* MODULE */
/*
 * In most cases loadable modules do not need custom
 * initcall levels. There are still some valid cases where
 * a driver may be needed early if built in, and does not
 * matter when built as a loadable module. Like bus
 * snooping debug drivers.
 */
#define early_initcall(fn)		module_init(fn)
#define core_initcall(fn)		module_init(fn)
#define core_initcall_sync(fn)		module_init(fn)
#define postcore_initcall(fn)		module_init(fn)
#define postcore_initcall_sync(fn)	module_init(fn)
#define arch_initcall(fn)		module_init(fn)
#define subsys_initcall(fn)		module_init(fn)
#define subsys_initcall_sync(fn)	module_init(fn)
#define fs_initcall(fn)			module_init(fn)
#define fs_initcall_sync(fn)		module_init(fn)
#define rootfs_initcall(fn)		module_init(fn)
#define device_initcall(fn)		module_init(fn)
#define device_initcall_sync(fn)	module_init(fn)
#define late_initcall(fn)		module_init(fn)
#define late_initcall_sync(fn)		module_init(fn)
#define console_initcall(fn) module_init(fn)
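So for a loadable module every initcall level collapses into plain module_init(); the level only matters when the driver is built in. A minimal sketch of a driver relying on this (the bus_snoop name is mine, purely illustrative):

/* Hypothetical built-in-or-module debug driver (illustrative names). */
#include <linux/init.h>
#include <linux/module.h>

static int __init bus_snoop_init(void)
{
	pr_info("bus_snoop: up\n");
	return 0;	/* 0 on success, negative errno on failure */
}
/* Built in:  runs early, at the subsys initcall level, via do_initcalls().
 * As module: the #define above rewrites this to module_init(bus_snoop_init). */
subsys_initcall(bus_snoop_init);

MODULE_LICENSE("GPL");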
/* Each module must use one module_init(). */
#define module_init(initfn)					\
	static inline initcall_t __maybe_unused __inittest(void)	\
	{ return initfn; }					\
	int init_module(void) __copy(initfn) __attribute__((alias(#initfn)));

/* This is only required if you want to be unloadable. */
#define module_exit(exitfn)					\
	static inline exitcall_t __maybe_unused __exittest(void)	\
	{ return exitfn; }					\
	void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn)));
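In the module case, then, module_init()/module_exit() do two things: the __inittest()/__exittest() dummies type-check the functions you hand in, and the alias attribute exports them under the canonical names init_module()/cleanup_module() that the loader resolves. A minimal module feeding these macros might look like this (a sketch; the hello names are not from the source above):

#include <linux/init.h>
#include <linux/module.h>

static int __init hello_init(void)
{
	pr_info("hello: loaded\n");
	return 0;
}

static void __exit hello_exit(void)
{
	pr_info("hello: unloaded\n");
}

/* With MODULE defined, these expand to aliases named init_module()
 * and cleanup_module(), which insmod/rmmod ultimately invoke. */
module_init(hello_init);
module_exit(hello_exit);

MODULE_LICENSE("GPL");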
noinline void __ref rest_init(void)
{
	struct task_struct *tsk;
	int pid;

	rcu_scheduler_starting();
	/*
	 * We need to spawn init first so that it obtains pid 1, however
	 * the init task will end up wanting to create kthreads, which, if
	 * we schedule it before we create kthreadd, will OOPS.
	 */
	pid = kernel_thread(kernel_init, NULL, CLONE_FS);
	/* remainder omitted */
}

static int __ref kernel_init(void *unused)
{
	int ret;
/*
 * The asmlinkage stub is aliased to a function named __se_sys_*() which
 * sign-extends 32-bit ints to longs whenever needed. The actual work is
 * done within __do_sys_*().
 */
#ifndef __SYSCALL_DEFINEx
#define __SYSCALL_DEFINEx(x, name, ...)					\
	__diag_push();							\
	__diag_ignore(GCC, 8, "-Wattribute-alias",			\
		      "Type aliasing is used to sanitize syscall arguments");\
	asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))	\
		__attribute__((alias(__stringify(__se_sys##name))));	\
	ALLOW_ERROR_INJECTION(sys##name, ERRNO);			\
	static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
	asmlinkage long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__));	\
	asmlinkage long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__))	\
	{								\
		long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__));\
		__MAP(x,__SC_TEST,__VA_ARGS__);				\
		__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__));	\
		return ret;						\
	}								\
	__diag_pop();							\
	static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#endif /* __SYSCALL_DEFINEx */
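Concretely, for a 3-argument syscall such as SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags), the macro expands to roughly the following (a simplified sketch; the __diag/error-injection plumbing is elided):

asmlinkage long sys_finit_module(int fd, const char __user *uargs, int flags)
	__attribute__((alias("__se_sys_finit_module")));

static inline long __do_sys_finit_module(int fd, const char __user *uargs,
					 int flags);

/* The __se_sys_*() stub receives every argument as a long, then
 * sign-extends/casts each one before calling the real body. */
asmlinkage long __se_sys_finit_module(long fd, long uargs, long flags)
{
	return __do_sys_finit_module((int)fd,
				     (const char __user *)uargs,
				     (int)flags);
}

static inline long __do_sys_finit_module(int fd, const char __user *uargs,
					 int flags)
{
	/* ... the actual syscall body follows the macro invocation ... */
}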
/* Allocate and load the module: note that size of section 0 is always
   zero, and we rely on this for optional sections. */
static int load_module(struct load_info *info, const char __user *uargs,
		       int flags)
{
	struct module *mod;
	long err = 0;
	char *after_dashes;

	/*
	 * Do the signature check (if any) first. All that
	 * the signature check needs is info->len, it does
	 * not need any of the section info. That can be
	 * set up later. This will minimize the chances
	 * of a corrupt module causing problems before
	 * we even get to the signature check.
	 *
	 * The check will also adjust info->len by stripping
	 * off the sig length at the end of the module, making
	 * checks against info->len more correct.
	 */
	err = module_sig_check(info, flags);
	if (err)
		goto free_copy;

	/*
	 * Do basic sanity checks against the ELF header and
	 * sections.
	 */
	err = elf_validity_check(info);
	if (err) {
		pr_err("Module has invalid ELF structures\n");
		goto free_copy;
	}

	/*
	 * Everything checks out, so set up the section info
	 * in the info structure.
	 */
	err = setup_load_info(info, flags);
	if (err)
		goto free_copy;

	/*
	 * Now that we know we have the correct module name, check
	 * if it's blacklisted.
	 */
	if (blacklisted(info->name)) {
		err = -EPERM;
		pr_err("Module %s is blacklisted\n", info->name);
		goto free_copy;
	}

	err = rewrite_section_headers(info, flags);
	if (err)
		goto free_copy;

	/* Check module struct version now, before we try to use module. */
	if (!check_modstruct_version(info, info->mod)) {
		err = -ENOEXEC;
		goto free_copy;
	}

	/* Figure out module layout, and allocate all the memory. */
	mod = layout_and_allocate(info, flags);
	if (IS_ERR(mod)) {
		err = PTR_ERR(mod);
		goto free_copy;
	}

	audit_log_kern_module(mod->name);

	/* Reserve our place in the list. */
	err = add_unformed_module(mod);
	if (err)
		goto free_module;

	/* To avoid stressing percpu allocator, do this once we're unique. */
	err = percpu_modalloc(mod, info);
	if (err)
		goto unlink_mod;

	/* Now module is in final location, initialize linked lists, etc. */
	err = module_unload_init(mod);
	if (err)
		goto unlink_mod;

	init_param_lock(mod);

	/* Now we've got everything in the final locations, we can
	 * find optional sections. */
	err = find_module_sections(mod, info);
	if (err)
		goto free_unload;

	err = check_module_license_and_versions(mod);
	if (err)
		goto free_unload;

	/* Set up MODINFO_ATTR fields */
	setup_modinfo(mod, info);

	/* Fix up syms, so that st_value is a pointer to location. */
	err = simplify_symbols(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = apply_relocations(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = post_relocation(mod, info);
	if (err < 0)
		goto free_modinfo;

	flush_module_icache(mod);

	/* Now copy in args */
	mod->args = strndup_user(uargs, ~0UL >> 1);
	if (IS_ERR(mod->args)) {
		err = PTR_ERR(mod->args);
		goto free_arch_cleanup;
	}
/*
 * This is where the real work happens.
 *
 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
 * helper command 'lx-symbols'.
 */
static noinline int do_init_module(struct module *mod)
{
	int ret = 0;
	struct mod_initfree *freeinit;

	freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
	if (!freeinit) {
		ret = -ENOMEM;
		goto fail;
	}
	freeinit->module_init = mod->init_layout.base;
	do_mod_ctors(mod);
	/* Start the module */
	/* --------> mod->init is the module's entry function; it is run
	 * through do_one_initcall() */
	if (mod->init != NULL)
		ret = do_one_initcall(mod->init);
	if (ret < 0) {
		goto fail_free_freeinit;
	}
	if (ret > 0) {
		pr_warn("%s: '%s'->init suspiciously returned %d, it should "
			"follow 0/-E convention\n"
			"%s: loading module anyway...\n",
			__func__, mod->name, ret, __func__);
		dump_stack();
	}
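The warning spells out the contract for module init functions: return 0 on success or a negative errno on failure, never a positive value. For instance (an illustrative init, with a hypothetical probe helper):

static int __init mydev_init(void)
{
	if (!mydev_hw_present())	/* hypothetical hardware probe */
		return -ENODEV;		/* negative errno on failure */
	return 0;			/* 0 on success; never positive */
}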
	/* Now it's a first class citizen! */
	mod->state = MODULE_STATE_LIVE;
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_LIVE, mod);

	/* Delay uevent until module has finished its init routine */
	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);

	/*
	 * We need to finish all async code before the module init sequence
	 * is done. This has potential to deadlock if synchronous module
	 * loading is requested from async (which is not allowed!).
	 *
	 * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous
	 * request_module() from async workers") for more details.
	 */
	if (!mod->async_probe_requested)
		async_synchronize_full();

	ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
			mod->init_layout.size);
	mutex_lock(&module_mutex);
	/* Drop initial reference. */
	module_put(mod);
	trim_init_extable(mod);
#ifdef CONFIG_KALLSYMS
	/* Switch to core kallsyms now init is done: kallsyms may be walking! */
	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
#endif
	module_enable_ro(mod, true);
	mod_tree_remove_init(mod);
	module_arch_freeing_init(mod);
	mod->init_layout.base = NULL;
	mod->init_layout.size = 0;
	mod->init_layout.ro_size = 0;
	mod->init_layout.ro_after_init_size = 0;
	mod->init_layout.text_size = 0;
	/*
	 * We want to free module_init, but be aware that kallsyms may be
	 * walking this with preempt disabled.  In all the failure paths, we
	 * call synchronize_rcu(), but we don't want to slow down the success
	 * path. module_memfree() cannot be called in an interrupt, so do the
	 * work and call synchronize_rcu() in a work queue.
	 *
	 * Note that module_alloc() on most architectures creates W+X page
	 * mappings which won't be cleaned up until do_free_init() runs.  Any
	 * code such as mark_rodata_ro() which depends on those mappings to
	 * be cleaned up needs to sync with the queued work - ie
	 * rcu_barrier()
	 */
	if (llist_add(&freeinit->node, &init_free_list))
		schedule_work(&init_free_wq);
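Everything in init_layout can be discarded here because code and data needed only during initialization are tagged into the init sections at build time. A sketch of the conventional annotations (hypothetical driver):

static int table_size __initdata = 16;	/* lands in .init.data */

static int __init mydrv_init(void)	/* lands in .init.text */
{
	pr_info("mydrv: %d entries\n", table_size);
	return 0;
}
module_init(mydrv_init);
/* After do_init_module() frees init_layout, neither mydrv_init() nor
 * table_size may be referenced again. */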
#ifdef CONFIG_MODULE_SIG
	/* Signature was verified. */
	bool sig_ok;
#endif

	bool async_probe_requested;

	/* symbols that will be GPL-only in the near future. */
	const struct kernel_symbol *gpl_future_syms;
	const s32 *gpl_future_crcs;
	unsigned int num_gpl_future_syms;

#ifdef CONFIG_LIVEPATCH
	bool klp; /* Is this a livepatch module? */
	bool klp_alive;

	/* Elf information */
	struct klp_modinfo *klp_info;
#endif

#ifdef CONFIG_MODULE_UNLOAD
	/* What modules depend on me? */
	struct list_head source_list;
	/* What modules do I depend on? */
	struct list_head target_list;

	/* Destruction function. */
	void (*exit)(void);

	atomic_t refcnt;
#endif

#ifdef CONFIG_MITIGATION_ITS
	int its_num_pages;
	void **its_page_array;
#endif
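refcnt, together with source_list/target_list, is what keeps a module pinned while something is using it. The usual pattern around it looks like this (a sketch; ops->owner stands for the providing module's THIS_MODULE, a common kernel convention):

	if (!try_module_get(ops->owner))	/* bump refcnt unless module is going away */
		return -ENODEV;
	ret = ops->do_something();		/* safe: rmmod is blocked meanwhile */
	module_put(ops->owner);			/* drop refcnt; unload possible again at zero */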
/* vi: set sw=4 ts=4: */
/*
 * Mini insmod implementation for busybox
 *
 * Copyright (C) 2008 Timo Teras <timo.teras@iki.fi>
 *
 * Licensed under GPLv2 or later, see file LICENSE in this source tree.
 */
//config:config INSMOD
//config:	bool "insmod (22 kb)"
//config:	default y
//config:	help
//config:	insmod is used to load specified modules in the running kernel.

/* Compat note:
 * 2.6 style insmod has no options and required filename
 * (not module name - .ko can't be omitted).
 * 2.4 style insmod can take module name without .o
 * and performs module search in default directories
 * or in $MODPATH.
 */
int FAST_FUNC bb_init_module(const char *filename, const char *options)
{
	size_t image_size;
	char *image;
	int rc;
	bool mmaped;

	if (!options)
		options = "";

	//TODO: audit bb_init_module_24 to match error code convention
#if ENABLE_FEATURE_2_4_MODULES
	if (get_linux_version_code() < KERNEL_VERSION(2,6,0))
		return bb_init_module_24(filename, options);
#endif
	/*
	 * First we try finit_module if available. Some kernels are configured
	 * to only allow loading of modules off of secure storage (like a read-
	 * only rootfs) which needs the finit_module call. If it fails, we fall
	 * back to normal module loading to support compressed modules.
	 */
# ifdef __NR_finit_module
	{
		/* Method 1: open the file and load via its descriptor */
		int fd = open(filename, O_RDONLY | O_CLOEXEC);
		if (fd >= 0) {
			int flags = is_suffixed_with(filename, ".ko") ? 0 : MODULE_INIT_COMPRESSED_FILE;
			for (;;) {
				/* then hand the descriptor to finit_module */
				rc = finit_module(fd, options, flags);
				if (rc == 0 || flags == 0)
					break;
				/* Loading non-.ko named uncompressed module?
				 * Not likely, but let's try it */
				flags = 0;
			}
			close(fd);
			if (rc == 0)
				return rc;
		}
	}
# endif
	image_size = INT_MAX - 4095;
	mmaped = 0;
	/* Method 2: mmap the .ko file into memory */
	image = try_to_mmap_module(filename, &image_size);
	if (image) {
		mmaped = 1;
	} else {
		errno = ENOMEM; /* may be changed by e.g. open errors below */
		/* if the mmap failed, fall back to reading the .ko into
		 * malloc'd memory */
		image = xmalloc_open_zipped_read_close(filename, &image_size);
		if (!image)
			return -errno;
	}
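Stripped of the busybox plumbing, the two load paths reduce to the raw syscalls. A minimal standalone sketch (error handling trimmed; syscall() is used directly since libc wrappers for these calls are not universally available):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s module.ko\n", argv[0]);
		return 1;
	}

	/* Method 1: hand the kernel an open descriptor (finit_module). */
	int fd = open(argv[1], O_RDONLY | O_CLOEXEC);
	if (fd >= 0 && syscall(SYS_finit_module, fd, "", 0) == 0)
		return 0;	/* kernel side: load_module() ran on the fd */

	/* Method 2: read the image ourselves and pass it (init_module). */
	struct stat st;
	if (fd < 0 || fstat(fd, &st) != 0)
		return 1;
	char *image = malloc(st.st_size);
	if (!image || read(fd, image, st.st_size) != (ssize_t)st.st_size)
		return 1;
	return syscall(SYS_init_module, image, (unsigned long)st.st_size, "") ? 1 : 0;
}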