/*
 * opt/kaspersky/kav4fs/src/kernel/module.linux/interceptor_rfs.c
 * (path preserved from the web-viewer breadcrumb; the viewer's
 *  "New / Upload / Editing / Back" UI residue has been removed)
 */
#include "module.h"
#include "kavumount.h"
#include "interceptor.h"
#include "redirfs/redirfs.h"
#include "redirfs/rfs.h"
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/mount.h>
#include <linux/utsname.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/ptrace.h>
#include <linux/delay.h>
#include <asm/cacheflush.h>
#include <linux/pagemap.h>

#ifdef CONFIG_UTRACE
#include <linux/utrace.h>
#endif

#ifdef DEBUG
#define DEBUG_IDS "<kav4fs_oas:interceptor_rfs>"
#define dump_message(format, args...) printk(DEBUG_IDS format "\n", ##args)
#else
#define dump_message(...)
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
#include <linux/namespace.h>
#define mnt_namespace namespace
#else
#include <linux/mnt_namespace.h>
#endif

/* FIX(review): the scraped source read "&curren;t->blocked" rendered as
 * "¤t->blocked" -- HTML-entity damage for "&current->blocked". Restored. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
#define kernel_dequeue_signal(info) \
    dequeue_signal_lock(current, &current->blocked, info)
#endif

/* Handle of our registered RedirFS filter instance. */
static redirfs_filter klflt;

static struct redirfs_filter_info klflt_info = {
    .owner = THIS_MODULE,
    .name = "klflt",
    .priority = 500000000,
};

/* Defined elsewhere in the project: non-zero if tsk is a trusted process. */
int Process_trusted(struct task_struct* tsk);

static enum redirfs_rv klflt_open(redirfs_context context, struct redirfs_args* args);
static enum redirfs_rv klflt_release(redirfs_context context, struct redirfs_args* args);
static int register_filter(void);
static void unregister_filter(void);
static void cifs_calls_replace(struct dentry* d_root);
static void cifs_calls_restore(void);

/* Interception lifecycle flags (see Monitor_intercept_* below). */
static atomic_t stopped = ATOMIC_INIT(0);
static atomic_t loaded = ATOMIC_INIT(0);

/* One entry per mount currently included in the filter's path set. */
struct kavoas_mnt {
    struct vfsmount* mnt;
    struct list_head list;
};

static LIST_HEAD(kavoas_mnt_list);
static RFS_DEFINE_MUTEX(kavoas_mnt_mutex);

/* Private mnt_flags bit marking a mount the interceptor must skip. */
#define MNT_SKIP_KAVOAS 0x02000000

/* Registration state machine position (enum register_status values). */
static int registered = 0;
static RFS_DEFINE_MUTEX(registered_mutex);

// ---- sync mount monitor ----
static struct mnt_namespace* nsp = NULL;
extern struct file* mounts;
extern const struct seq_operations* nsops;
static struct task_struct* mountsd = NULL;
static int event = 0;
//
---- sync mount monitor ---- enum { SC_OPEN, SC_CLOSE, SC_RENAME, SC_UNLINK, SC_ACCESS, SC_ATTR }; struct syscall_info { char const* name; atomic_t counter; }; static struct syscall_info syscall_info[] = { /* SC_OPEN */ {"open", ATOMIC_INIT(0)}, /* SC_CLOSE */ {"close", ATOMIC_INIT(0)}, /* SC_RENAME */ {"rename", ATOMIC_INIT(0)}, /* SC_UNLINK */ {"unlink", ATOMIC_INIT(0)}, /* SC_ACCESS */ {"access", ATOMIC_INIT(0)}, /* SC_ATTR */ {"attrib", ATOMIC_INIT(0)}, }; static int dump_syscall(unsigned int scid, unsigned int state, struct dentry* dentry, int error) { if (state) atomic_inc(&syscall_info[scid].counter); else atomic_dec(&syscall_info[scid].counter); dump_message("[%02i] %c%7s: task %16s status %02i file %s\n", atomic_read(&syscall_info[scid].counter), state ? '+' : '-', syscall_info[scid].name, current->comm, error, dentry ? dentry->d_name.name : NULL); return 0; } static enum redirfs_rv klflt_open(redirfs_context context, struct redirfs_args* args) { dump_syscall(SC_OPEN, 1, args->args.f_open.file->f_dentry, 0); args->rv.rv_int = kavoas_before_open(args->args.f_open.inode, args->args.f_open.file); dump_syscall(SC_OPEN, 0, args->args.f_open.file->f_dentry, args->rv.rv_int); return args->rv.rv_int ? REDIRFS_STOP : REDIRFS_CONTINUE; } static enum redirfs_rv klflt_release(redirfs_context context, struct redirfs_args* args) { dump_syscall(SC_CLOSE, 1, args->args.f_release.file->f_dentry, 0); args->rv.rv_int = kavoas_after_close(args->args.f_release.inode, args->args.f_release.file); dump_syscall(SC_CLOSE, 0, args->args.f_release.file->f_dentry, args->rv.rv_int); return args->rv.rv_int ? 
REDIRFS_STOP : REDIRFS_CONTINUE; } static enum redirfs_rv klflt_rename(redirfs_context context, struct redirfs_args* args) { dump_syscall(SC_RENAME, 1, args->args.i_rename.old_dentry, 0); kavoas_rename(args->args.i_rename.old_dir, args->args.i_rename.old_dentry, args->args.i_rename.new_dir, args->args.i_rename.new_dentry); dump_syscall(SC_RENAME, 0, args->args.i_rename.old_dentry, 0); return REDIRFS_CONTINUE; } static enum redirfs_rv klflt_unlink(redirfs_context context, struct redirfs_args* args) { dump_syscall(SC_UNLINK, 1, args->args.i_unlink.dentry, 0); kavoas_unlink(args->args.i_unlink.dir, args->args.i_unlink.dentry); dump_syscall(SC_UNLINK, 0, args->args.i_unlink.dentry, 0); return REDIRFS_CONTINUE; } static enum redirfs_rv klflt_access(redirfs_context context, struct redirfs_args* args) { struct inode* inode = args->args.i_permission.inode; if (Process_trusted(current) && args->rv.rv_int && is_acs_magic(inode->i_sb->s_magic)) args->rv.rv_int = 0; return REDIRFS_CONTINUE; } static enum redirfs_rv klflt_getattr(redirfs_context context, struct redirfs_args* args) { struct dentry* dentry = args->args.i_getattr.dentry; if (Process_trusted(current) && args->rv.rv_int && is_acs_magic(dentry->d_sb->s_magic)) { generic_fillattr(dentry->d_inode, args->args.i_getattr.stat); args->rv.rv_int = 0; } return REDIRFS_CONTINUE; } static struct redirfs_op_info klflt_op_info[] = { {REDIRFS_REG_FOP_OPEN, klflt_open, NULL}, {REDIRFS_REG_FOP_RELEASE, NULL, klflt_release}, {REDIRFS_DIR_IOP_RENAME, klflt_rename, NULL}, {REDIRFS_DIR_IOP_UNLINK, klflt_unlink, NULL}, {REDIRFS_REG_IOP_PERMISSION, NULL, klflt_access}, {REDIRFS_DIR_IOP_PERMISSION, NULL, klflt_access}, {REDIRFS_LNK_IOP_PERMISSION, NULL, klflt_access}, {REDIRFS_REG_IOP_GETATTR, NULL, klflt_getattr}, {REDIRFS_DIR_IOP_GETATTR, NULL, klflt_getattr}, {REDIRFS_LNK_IOP_GETATTR, NULL, klflt_getattr}, {REDIRFS_OP_END, NULL, NULL}}; static int Update_Include_Dirs(void); int Monitor_intercept_init(void) { DEBUG_MSG("intercepting 
syscalls begin"); rfs_mutex_lock(®istered_mutex); atomic_xchg(&stopped, Monitor_intercept_construct()); rfs_mutex_unlock(®istered_mutex); return atomic_read(&stopped); } int Monitor_intercept_exit(void) { if (atomic_read(&stopped)) return 0; DEBUG_MSG("intercepting syscalls finished"); atomic_xchg(&stopped, 1); return 0; } // Returns 0 if successful int Monitor_intercept_construct(void) { DEBUG_MSG("Constructing interceptor"); if (registered == 0) register_filter(); if (registered) atomic_xchg(&loaded, 1); return !registered; } int Monitor_intercept_destroy(void) { DEBUG_MSG("Destructing interceptor"); Monitor_intercept_exit(); rfs_mutex_lock(®istered_mutex); if (registered) unregister_filter(); if (!registered) atomic_xchg(&loaded, 0); rfs_mutex_unlock(®istered_mutex); return 0; } int Monitor_intercept_ping(void) { return 0; } // ---- mount binaries wildcard list management ---- struct umount_bin { struct list_head list; char wildcard[0]; }; static LIST_HEAD(umount_bin_list); static RFS_DEFINE_MUTEX(umount_bin_mutex); static int match_wildcard(const char* text, const char* wildcard) { const char* afterLastStar = NULL; const char* firstMatched = NULL; if (!text || !wildcard) return 0; while (*text) { if ((*wildcard == *text) || (*wildcard == '?')) { ++text; ++wildcard; } else if (*wildcard == '*') { afterLastStar = ++wildcard; firstMatched = text; if (*wildcard == '\0') return 1; } else if (afterLastStar) { wildcard = afterLastStar; text = ++firstMatched; } else { return 0; } } while (*wildcard == '*') ++wildcard; return *wildcard == '\0'; } static struct umount_bin* match_any_wildcard(const char* text) { struct umount_bin* ubin = NULL; list_for_each_entry(ubin, &umount_bin_list, list) if (match_wildcard(text, ubin->wildcard)) return ubin; return NULL; } int interceptor_fn_trace(char const* text) { int ret = 0; rfs_mutex_lock(&umount_bin_mutex); { struct umount_bin* ubin = match_any_wildcard(text); if (ubin) ret = 1; } rfs_mutex_unlock(&umount_bin_mutex); return 
ret; } static struct umount_bin* find_umount_bin(char const* wildcard) { struct umount_bin* ubin = NULL; list_for_each_entry(ubin, &umount_bin_list, list) if (!strcmp(wildcard, ubin->wildcard)) return ubin; return NULL; } int interceptor_in_trace(char const* wildcard) { int ret = 0; rfs_mutex_lock(&umount_bin_mutex); { struct umount_bin* ubin = find_umount_bin(wildcard); if (ubin) goto itr_unlock; else ubin = MEM_ALLOC(sizeof(struct umount_bin) + strlen(wildcard) + 1); if (ubin) { strcpy(ubin->wildcard, wildcard); list_add_tail(&ubin->list, &umount_bin_list); } else ret = -ENOMEM; } itr_unlock: rfs_mutex_unlock(&umount_bin_mutex); return ret; } int interceptor_de_trace(char const* wildcard) { int ret = 0; rfs_mutex_lock(&umount_bin_mutex); { struct umount_bin* ubin = find_umount_bin(wildcard); if (ubin) { list_del_init(&ubin->list); MEM_FREE(ubin); } else ret = -ENOENT; } rfs_mutex_unlock(&umount_bin_mutex); return ret; } int interceptor_rs_trace(char const* wildcard) { rfs_mutex_lock(&umount_bin_mutex); { struct umount_bin *ubin, *tmp; size_t umount_len = wildcard ? 
strlen(wildcard) : 0; list_for_each_entry_safe(ubin, tmp, &umount_bin_list, list) { list_del_init(&ubin->list); MEM_FREE(ubin); } if (umount_len && (ubin = MEM_ALLOC(sizeof(struct umount_bin) + umount_len + 1))) { strcpy(ubin->wildcard, wildcard); list_add_tail(&ubin->list, &umount_bin_list); } } rfs_mutex_unlock(&umount_bin_mutex); return 0; } // ---- umountd ---- int (*copy_process_vm)(struct task_struct* tsk, unsigned long addr, void* buf, int len, int write) = (void*)ACCESS_VM_ADDR; #ifndef __NR_umount #define __NR_umount __NR_umount2 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)) #define path_arg rdi #define sysc_num orig_rax #define sysc_ret rax #else #define path_arg di #define sysc_num orig_ax #define sysc_ret ax #endif #else #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)) #define path_arg ebx #define sysc_num orig_eax #define sysc_ret eax #else #define path_arg bx #define sysc_num orig_ax #define sysc_ret ax #endif #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)) #define SAVE_PATH(p) \ struct dentry* p = current->fs->pwd; \ struct vfsmount* p##mnt = current->fs->pwdmnt; #define LOAD_PATH(p) \ current->fs->pwd = pwd; \ current->fs->pwdmnt = p##mnt; #define SWAP_PATH(t) \ current->fs->pwd = t->fs->pwd; \ current->fs->pwdmnt = t->fs->pwdmnt; #else #define SAVE_PATH(p) struct path p = current->fs->pwd; #define LOAD_PATH(p) current->fs->pwd = p; #define SWAP_PATH(t) current->fs->pwd = t->fs->pwd; #endif int interceptor_get_path(char const* mntfile); int interceptor_put_path(char const* mntfile); static void check_umount_call(struct task_struct* tsk, struct pt_regs* regs) { unsigned long origax = regs->sysc_num; if ((origax == __NR_umount) || (origax == __NR_umount2)) { char* buf = MEM_ALLOC(PAGE_SIZE); int done = buf ? 
copy_process_vm(tsk, regs->path_arg, buf, PAGE_SIZE, 0) : 0; SAVE_PATH(pwd); if (!done) { if (buf) MEM_FREE(buf); return; } SWAP_PATH(tsk); if (regs->sysc_ret == -ENOSYS) { interceptor_put_path(buf); } else if (regs->sysc_ret) { interceptor_get_path(buf); } LOAD_PATH(pwd); MEM_FREE(buf); } return; } // ---- utrace ---- #ifdef CONFIG_UTRACE #ifdef UTRACE_API_VERSION #define UTRACE_ENGINE struct utrace_engine #else #define UTRACE_ENGINE struct utrace_attached_engine #define UTRACE_API_VERSION 0 #endif #ifdef UTRACE_RESUME_MASK #define RESUME_ACTION UTRACE_RESUME #else #define RESUME_ACTION UTRACE_ACTION_RESUME #endif static u32 umount_report_syscall( #ifdef UTRACE_RESUME_MASK enum utrace_resume_action action, #endif UTRACE_ENGINE* engine, #if UTRACE_API_VERSION < 20091216 struct task_struct* tsk, #endif struct pt_regs* regs) { check_umount_call(current, regs); return RESUME_ACTION; } #ifdef UTRACE_RESUME_MASK #define umount_report_signal NULL #else static u32 umount_report_signal(UTRACE_ENGINE* engine, struct task_struct* tsk, struct pt_regs* regs, u32 action, siginfo_t* info, const struct k_sigaction* orig_ka, struct k_sigaction* return_ka) { return RESUME_ACTION; } #endif struct utrace_engine_ops umount_engine_ops = { .report_syscall_entry = umount_report_syscall, .report_syscall_exit = umount_report_syscall, .report_signal = umount_report_signal, }; #define UMOUNT_UTRACE_FLAGS \ (UTRACE_ATTACH_CREATE | UTRACE_ATTACH_MATCH_OPS | UTRACE_ATTACH_EXCLUSIVE) #ifdef UTRACE_RESUME_MASK #define UMOUNT_UTRACE_EVENTS \ (UTRACE_EVENT(SYSCALL_ENTRY) | UTRACE_EVENT(SYSCALL_EXIT)) #define UTRACE_ATTACH(tsk) \ utrace_attach_task(tsk, UMOUNT_UTRACE_FLAGS, &umount_engine_ops, NULL) #define UTRACE_REPORT(tsk, engine) \ utrace_set_events(tsk, engine, UMOUNT_UTRACE_EVENTS) #define UTRACE_FINISH(engine) utrace_engine_put(engine) #else #define UMOUNT_UTRACE_EVENTS \ (UTRACE_EVENT(SYSCALL_ENTRY) | UTRACE_EVENT(SYSCALL_EXIT) | \ UTRACE_EVENT(SIGNAL_CORE) | UTRACE_EVENT(SIGNAL_TERM)) 
#define UTRACE_ATTACH(tsk) \ utrace_attach(tsk, UMOUNT_UTRACE_FLAGS, &umount_engine_ops, NULL) #if defined(utrace_lock) && defined(utrace_unlock) #define UTRACE_REPORT(tsk, engine) \ (utrace_set_flags(tsk, engine, UMOUNT_UTRACE_EVENTS), 0) #else #define UTRACE_REPORT(tsk, engine) \ utrace_set_flags(tsk, engine, UMOUNT_UTRACE_EVENTS) #endif #define UTRACE_FINISH(engine) #endif static int kavoas_exec_traced(struct task_struct* tsk) { int ret = 0; UTRACE_ENGINE* engine = UTRACE_ATTACH(tsk); if (!IS_ERR(engine)) { ret = UTRACE_REPORT(tsk, engine); UTRACE_FINISH(engine); } return ret; } // ---- ptrace ---- #elif PTRACE_DOLINK_ADDR && PTRACE_UNLINK_ADDR static struct task_struct* umountd = NULL; int (*ptrace_command)(struct task_struct* child, long request, long addr, long data) = (void*)PTRACE_COMMAND_ADDR; void (*ptrace_collect)(struct task_struct* child, struct task_struct* parent) = (void*)PTRACE_DOLINK_ADDR; void (*ptrace_release)(struct task_struct* child) = (void*)PTRACE_UNLINK_ADDR; #ifndef PT_PTRACE_CAP #define PT_PTRACE_CAP 0 #endif #define UMOUNT_PTRACE_FLAGS \ (PT_PTRACED | PT_TRACE_FORK | PT_TRACE_VFORK | PT_TRACE_CLONE | PT_PTRACE_CAP) #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)) #define wake_up_parent(parent, child) \ (wake_up_interruptible_sync(&parent->signal->wait_chldexit)) #else #define wake_up_parent(parent, child) \ (__wake_up_sync_key(&parent->signal->wait_chldexit, TASK_INTERRUPTIBLE, 1, \ (child))) #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)) #define for_each_ptraced(p, n) \ list_for_each_entry_safe(p, n, ¤t->children, sibling) #else #define for_each_ptraced(p, n) \ list_for_each_entry_safe(p, n, ¤t->ptraced, ptrace_entry) #endif #ifndef task_is_stopped #define task_is_stopped(tsk) ((tsk->state & TASK_STOPPED) != 0) #endif #ifndef task_is_traced #define task_is_traced(tsk) ((tsk->state & TASK_TRACED) != 0) #endif static int umount_wake_function(wait_queue_t* wait, unsigned mode, int sync, void* key) { struct task_struct* 
tracee = wait->private; if (!task_is_stopped(tracee)) { __remove_wait_queue(&tracee->real_parent->signal->wait_chldexit, wait); MEM_FREE(wait); tracee->ptrace = UMOUNT_PTRACE_FLAGS; ptrace_command(tracee, PTRACE_SYSCALL, 0, 0); } return 1; } static void kavoas_grab(struct task_struct* task) { if (!Process_trusted(task) && !task->ptrace && !task->exit_state) { task->ptrace = UMOUNT_PTRACE_FLAGS; ptrace_collect(task, umountd); } } #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16) #if defined(CONFIG_X86_64) #define PT_REGS(tsk) \ (struct pt_regs*)((unsigned long)tsk->thread.rsp0 - sizeof(struct pt_regs)) #else #define PT_REGS(tsk) \ (struct pt_regs*)((unsigned long)tsk->thread_info + THREAD_SIZE - \ sizeof(struct pt_regs) - 8) #endif #else #define PT_REGS(tsk) task_pt_regs(tsk) #endif static int umount_wait(void* arg) { allow_signal(SIGCHLD); allow_signal(SIGRTMAX - 3); allow_signal(SIGRTMAX - 2); allow_signal(SIGRTMAX - 1); allow_signal(SIGRTMAX); while (!kthread_should_stop()) { while (signal_pending(current)) { siginfo_t info; struct task_struct *tracee, *next; int signr = kernel_dequeue_signal(&info) & 0xFF; switch (signr) { case SIGRTMAX - 1: case SIGRTMAX - 2: case SIGRTMAX - 3: case SIGRTMAX: #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) next = find_task_by_pid(info.si_value.sival_int); #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25) next = find_task_by_pid_type_ns( PIDTYPE_PID, info.si_value.sival_int, &init_pid_ns); #else next = pid_task(find_vpid(info.si_value.sival_int), PIDTYPE_PID); #endif if (next && next->mm && !IS_ERR(next) && next->state == TASK_UNINTERRUPTIBLE) { if (signr == SIGRTMAX) kavoas_grab(next); wake_up_process(next); } break; case SIGCHLD: for_each_ptraced(tracee, next) { if (!tracee->ptrace) continue; if (tracee->exit_state) { ptrace_release(tracee); send_sig(SIGCHLD, tracee->real_parent, 0); wake_up_parent(tracee->real_parent, tracee); } else if (task_is_traced(tracee)) { int snum = tracee->exit_code; int signal = (snum & 0x7f); 
int call = (signal == SIGTRAP); int skip = ((snum == SIGTRAP) || (snum == SIGSTOP) || !valid_signal(snum)); if (call) check_umount_call(tracee, PT_REGS(tracee)); ptrace_command(tracee, PTRACE_SYSCALL, 0, skip ? 0 : snum); if (signal == SIGSTOP) ptrace_release(tracee); } else if (task_is_stopped(tracee)) { wait_queue_t* wait_continue = MEM_ALLOC(sizeof(wait_queue_t)); init_waitqueue_func_entry(wait_continue, umount_wake_function); wait_continue->private = tracee; tracee->ptrace = 0; send_sig(SIGCHLD, tracee->real_parent, 0); wake_up_parent(tracee->real_parent, tracee); add_wait_queue( &tracee->real_parent->signal->wait_chldexit, wait_continue); } } break; } } set_current_state(TASK_INTERRUPTIBLE); schedule(); } return 0; } static int kavoas_exec_traced(struct task_struct* tsk) { if (umountd) { siginfo_t info = {.si_signo = SIGRTMAX, .si_int = current->pid}; set_current_state(TASK_UNINTERRUPTIBLE); send_sig_info(SIGRTMAX, &info, umountd); schedule_timeout(HZ); } return 0; } #else // ---- nohook ---- static int kavoas_exec_traced(struct task_struct* tsk) { return 0; } #endif int kavoas_exec(struct file* file) { int retval = -ENOEXEC; char* exe_name = current->mm ? 
get_path_by_dentry(file->f_dentry, file->f_vfsmnt, NULL, NULL) : NULL; if (!exe_name) return retval; else if (interceptor_fn_trace(exe_name)) kavoas_exec_traced(current); MEM_FREE(exe_name); return retval; } int kavoas_ulib(struct file* file) { return -ENOEXEC; } #if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) static int check_mnt_ns(void) { int ret = 0; struct seq_file* m = mounts->private_data; loff_t pos = 0; if (kthread_should_stop()) return 1; nsops->start(m, &pos); ret = !(nsp->event == event); nsops->stop(m, NULL); return ret; } static int mounts_poll(void* arg) { while (!kthread_should_stop()) { if (Update_Include_Dirs()) msleep(1000); wait_event_interruptible(nsp->poll, check_mnt_ns()); } return 0; } #else static int mounts_poll(void *arg) { DEFINE_WAIT_FUNC(wait, woken_wake_function); add_wait_queue(&nsp->poll, &wait); while (!kthread_should_stop()) { if (nsp->event != event) { Update_Include_Dirs(); } wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } remove_wait_queue(&nsp->poll, &wait); return 0; } #endif static struct mnt_namespace* current_nsp(void) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) return current->namespace; #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) return current->nsproxy->namespace; #else return current->nsproxy->mnt_ns; #endif } enum register_status { REG_NONE, REG_KLFLT, REG_MNTS, REG_KTHR, REG_OPER, REG_DONE, }; int register_filter(void) { int rv = 0; switch (registered) { case REG_NONE: klflt = redirfs_register_filter(&klflt_info); if (IS_ERR(klflt)) { rv = PTR_ERR(klflt); printk(KERN_ERR "klflt: register filter failed, err=%d\n", rv); break; } registered++; case REG_KLFLT: rv = redirfs_set_operations(klflt, klflt_op_info); if (rv) { printk(KERN_ERR "klflt: set operations failed, err=%d\n", rv); break; } registered++; case REG_MNTS: rv = open_proc_mounts(); if (rv) { printk(KERN_ERR "klflt: open proc mounts poll failed, err=%d\n", rv); break; } registered++; case REG_KTHR: nsp = current_nsp(); mountsd = 
kthread_run(mounts_poll, NULL, "mountsd"); if (IS_ERR(mountsd)) { rv = PTR_ERR(mountsd); printk(KERN_ERR "klflt: init mounts thread failed, err=%d", rv); break; } registered++; case REG_OPER: #ifdef UMOUNT_PTRACE_FLAGS umountd = kthread_run(umount_wait, NULL, "umountd"); if (IS_ERR(umountd)) { rv = PTR_ERR(umountd); printk(KERN_ERR "klflt: init umount thread failed, err=%d", rv); break; } #endif binary_handler_init(); registered++; case REG_DONE: break; default: printk(KERN_ERR "klflt: bad state on init %i\n", registered); rv = -EINVAL; break; } if (rv) unregister_filter(); return rv; } /* * kavoas_mnt_mutex should be locked by caller! */ static struct kavoas_mnt* is_mnt_marked(struct vfsmount* vfs, struct list_head* kmntlist) { struct kavoas_mnt* kmnt; list_for_each_entry(kmnt, kmntlist, list) if (vfs == kmnt->mnt) return kmnt; return NULL; } static int make_kmnt(struct vfsmount* mnt) { void* err = redirfs_add_path(klflt, mnt, mnt->mnt_root, REDIRFS_PATH_INCLUDE); if (!IS_ERR(err)) { struct kavoas_mnt* kmnt = MEM_ALLOC(sizeof(struct kavoas_mnt)); kmnt->mnt = mntget(mnt); list_add_tail(&kmnt->list, &kavoas_mnt_list); return 0; } else { return PTR_ERR(err); } } static void free_kmnt(struct kavoas_mnt* kmnt, int noflt) { if (!noflt) { int err = redirfs_rem_path(klflt, kmnt->mnt, kmnt->mnt->mnt_root); if (err) printk("klflt: rem path failed: %i\n", err); kmnt->mnt->mnt_flags &= ~MNT_SKIP_KAVOAS; } list_del_init(&kmnt->list); mntput(kmnt->mnt); MEM_FREE(kmnt); } void unregister_filter(void) { int rv = 0; struct kavoas_mnt *kmnt, *tmp; switch (registered) { case REG_DONE: binary_handler_exit(); #ifdef UMOUNT_PTRACE_FLAGS kthread_stop(umountd); umountd = NULL; #endif registered--; case REG_OPER: kthread_stop(mountsd); mountsd = NULL; nsp = NULL; registered--; case REG_KTHR: cifs_calls_restore(); rfs_mutex_lock(&kavoas_mnt_mutex); list_for_each_entry_safe(kmnt, tmp, &kavoas_mnt_list, list) free_kmnt(kmnt, 0); rfs_mutex_unlock(&kavoas_mnt_mutex); drop_proc_mounts(); event 
= 0; registered--; case REG_MNTS: registered--; case REG_KLFLT: rv = redirfs_unregister_filter(klflt); if (rv) { printk(KERN_ERR "klflt: unregister filter failed, err=%d\n", rv); break; } registered--; case REG_NONE: break; default: printk(KERN_ERR "klflt: bad state on exit %i\n", registered); rv = -EINVAL; break; } } /* * kavoas_mnt_mutex should be locked by caller! */ static int interceptor_get_path_helper(struct vfsmount* vfs, int force) { if (is_smb_magic(vfs->mnt_root->d_inode->i_sb->s_magic)) { cifs_calls_replace(vfs->mnt_root); return 0; } else if (is_exclude_magic(vfs->mnt_root->d_inode->i_sb->s_magic)) { return 0; } else if (is_mnt_marked(vfs, &kavoas_mnt_list)) { return 0; } else if (vfs->mnt_flags & MNT_SKIP_KAVOAS) { if (force) { vfs->mnt_flags &= ~MNT_SKIP_KAVOAS; } else return -ENOENT; } return make_kmnt(vfs); } /* * kavoas_mnt_mutex should be locked by caller! */ static int interceptor_put_path_helper(struct vfsmount* vfs, int force) { struct kavoas_mnt* kmnt; int found = 0; while ((kmnt = is_mnt_marked(vfs, &kavoas_mnt_list))) free_kmnt(kmnt, found++); if (force) vfs->mnt_flags |= MNT_SKIP_KAVOAS; return 0; } static struct vfsmount* find_mnt_by_dev(char const* name, int action) { struct vfsmount* res = ERR_PTR(-ENOENT); struct seq_file* m = mounts->private_data; loff_t pos = 0; void* p = NULL; if (!name) return ERR_PTR(-ENOMEM); p = nsops->start(m, &pos); while (p && !IS_ERR(p)) { struct vfsmount* vfs = GET_VFSMNT(p); struct kavoas_mnt* in_kavoas = is_mnt_marked(vfs, &kavoas_mnt_list); if (real_mount(vfs)->mnt_devname && !strcmp(real_mount(vfs)->mnt_devname, name)) { if (action) { if (!in_kavoas) { res = vfs; break; } else if (IS_ERR(res)) { res = ERR_PTR(-EBUSY); } } else { if (in_kavoas) { res = vfs; } else if (IS_ERR(res)) { res = ERR_PTR(-EBUSY); } } } p = nsops->next(m, p, &pos); } nsops->stop(m, p); return res; } static struct vfsmount* find_mnt_by_mnt(struct vfsmount* vfs, int action) { struct vfsmount* res = ERR_PTR(-EBUSY); do { struct 
kavoas_mnt* in_kavoas = is_mnt_marked(vfs, &kavoas_mnt_list); if (action) { if (!in_kavoas) res = vfs; } else { if (in_kavoas) { res = vfs; break; } } if (IS_ROOT(real_mount(vfs)->mnt_mountpoint)) vfs = base_mount(real_mount(vfs)->mnt_parent); else break; } while (!same_as_current_fs_root(vfs, vfs->mnt_root) && !(real_mount(vfs) == real_mount(vfs)->mnt_parent)); return res; } static struct vfsmount* interceptor_find_mnt(char const* mntfile, int action) { #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) struct nameidata nd; int badpath = rfs_path_lookup(mntfile, LOOKUP_FOLLOW, nd); #else struct path nd; int badpath = kern_path(mntfile, LOOKUP_FOLLOW, &nd); #endif char const* name = badpath ? kstrdup(mntfile, GFP_KERNEL) : get_path_by_dentry(GET_NAMEIDATA_PATH(nd).dentry, GET_NAMEIDATA_PATH(nd).mnt, NULL, NULL); int isdevname = (badpath || (GET_NAMEIDATA_PATH(nd).dentry != GET_NAMEIDATA_PATH(nd).mnt->mnt_root)); struct vfsmount* vfs = isdevname ? find_mnt_by_dev(mntfile, action) : find_mnt_by_mnt(GET_NAMEIDATA_PATH(nd).mnt, action); if (IS_ERR(vfs) && isdevname && strcmp(mntfile, name)) vfs = find_mnt_by_dev(name, action); if (name) MEM_FREE(name); if (!badpath) path_release(&GET_NAMEIDATA_PATH(nd)); return vfs; } typedef int(mnt_walk_helper_t)(struct vfsmount* vfs, int force); static int interceptor_mnt_walk(struct vfsmount* parent, mnt_walk_helper_t helperfunc) { int submounts = 0; #ifdef MNT_SHRINKABLE vfsmount_t* this_parent = real_mount(parent); struct list_head* next; repeat: next = this_parent->mnt_mounts.next; resume: while (next != &this_parent->mnt_mounts) { struct list_head* tmp = next; vfsmount_t* mnt = list_entry(tmp, vfsmount_t, mnt_child); next = tmp->next; if (!(base_mount(mnt)->mnt_flags & MNT_SHRINKABLE)) continue; if (!list_empty(&mnt->mnt_mounts)) { this_parent = mnt; goto repeat; } helperfunc(base_mount(mnt), 1); submounts++; } if (this_parent != real_mount(parent)) { next = this_parent->mnt_child.next; this_parent = this_parent->mnt_parent; 
goto resume; } #endif helperfunc(parent, 1); return submounts; } int interceptor_get_path(char const* mntfile) { struct vfsmount* vfs; int ret = 0; rfs_mutex_lock(&kavoas_mnt_mutex); vfs = interceptor_find_mnt(mntfile, 1); if (!IS_ERR(vfs)) { interceptor_mnt_walk(vfs, interceptor_get_path_helper); } else { ret = PTR_ERR(vfs); } rfs_mutex_unlock(&kavoas_mnt_mutex); return ret; } int interceptor_put_path(char const* mntfile) { struct vfsmount* vfs; int ret = 0; rfs_mutex_lock(&kavoas_mnt_mutex); vfs = interceptor_find_mnt(mntfile, 0); if (!IS_ERR(vfs)) { if (vfs->mnt_flags & MNT_SKIP_KAVOAS) BUG(); interceptor_mnt_walk(vfs, interceptor_put_path_helper); } else { ret = PTR_ERR(vfs); } rfs_mutex_unlock(&kavoas_mnt_mutex); return ret; } static int Send_mtab_event(void) { check_req_data_t* reqd; int err = 0; u_short queue_id = Monitor_queue_get_id(); enum FileAccessType answer = FILE_ACCESS_ACCEPT; if (!atomic_read(&monitor_started)) return false; reqd = (check_req_data_t*)MEM_ALLOC(sizeof(*reqd)); if (!reqd) return -ENOMEM; fill_check_req_common_fields(reqd, CHECK_FILE, 0, 0, NULL, NULL, "/etc/mtab"); reqd->file_op_type = FILE_CLOSE_OPER; err = Monitor_queue_add(queue_id, 0, reqd, &answer); check_req_queue_put(reqd); return err; } int Update_Include_Dirs(void) { struct kavoas_mnt *kmnt, *tmp; struct seq_file* m = mounts->private_data; loff_t pos = 0; void* p = NULL; if (IS_ERR(klflt)) return 1; p = nsops->start(m, &pos); if (nsp->event == event) { nsops->stop(m, p); return 0; } else { event = nsp->event; } rfs_mutex_lock(&kavoas_mnt_mutex); list_for_each_entry_safe(kmnt, tmp, &kavoas_mnt_list, list) if (list_empty(&real_mount(kmnt->mnt)->mnt_list)) free_kmnt(kmnt, 0); while (p && !IS_ERR(p)) { struct vfsmount* vfs = GET_VFSMNT(p); interceptor_get_path_helper(vfs, 0); p = nsops->next(m, p, &pos); } rfs_mutex_unlock(&kavoas_mnt_mutex); nsops->stop(m, p); Send_mtab_event(); return 0; } /* * CIFS interceptor. It works correct only if cifs compiled as a module. 
*/ #ifdef __CONFIG_CIFS_MODULE static int change_writeable(unsigned long addr, int write, int* error) { pgd_t* pgdp; pud_t pud, *pudp; pmd_t pmd, *pmdp; pte_t pte, *ptep; spinlock_t* ptl; int protset = 0; struct mm_struct* mm = current->active_mm; down_write(&mm->mmap_sem); flush_cache_mm(mm); pgdp = pgd_offset(mm, addr); if (pgd_none(*pgdp) || pgd_bad(*pgdp)) { *error = -ENOMEM; return protset; } pudp = pud_offset(pgdp, addr); pud = *pudp; if (pud_none(*pudp) || pud_bad(*pudp)) { *error = -ENOMEM; return protset; } pmdp = pmd_offset(pudp, addr); pmd = *pmdp; if (pmd_none(*pmdp) || pmd_bad(*pmdp)) { *error = -ENOMEM; return protset; } ptl = pte_lockptr(mm, pmdp); ptep = pte_offset_map(pmdp, addr); /* god bless spin locking. actually this spinlock (without try) permanently blocks * the execution on kernels before 2.6.25. but on the other hand we use mmap_sem * and moreover cifs on this kernels isn't write protected. So this code will be * robustly working. However any approach to apply clr/set write protection to * other regions i.e. syscalls table will require more deep analysis */ #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 24) spin_lock(ptl); #endif pte = *ptep; protset = !pte_write(pte); if (protset ^ !write) { pte_t ptent = ptep_modify_prot_start(mm, addr, ptep); ptent = pte_mkdirty(ptent); ptent = protset ? pte_mkwrite(ptent) : pte_wrprotect(ptent); ptep_modify_prot_commit(mm, addr, ptep, ptent); } #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 24) spin_unlock(ptl); #endif pte_unmap(ptep); *error = 0; up_write(&mm->mmap_sem); return protset; } #define WRITE_ENABLE(addr, error) change_writeable((unsigned long)(addr), 1, (error)) #define WRITE_DISABLE(addr, error) \ change_writeable((unsigned long)(addr), 0, (error)) #if 0 /* cifs interceptor dbg messages */ #define CIFS_DBGPRN(format, args...) printk(format, ##args) #else /* dbg off */ #define CIFS_DBGPRN(format, args...) 
#endif

/* Generic function-pointer type used by the patch/restore records. */
typedef void (*pfunc_t)(void);

/* One patched function pointer: where it lives and what it used to be. */
struct call_restore_item {
	pfunc_t* loc; /* location of the call that is replaced */
	pfunc_t old;  /* old value of the replaced function pointer */
};

#define CR_ARR_SIZE 16
static struct call_restore_item calls_restore_array[CR_ARR_SIZE];
static int calls_restore_count = 0;
static RFS_DEFINE_MUTEX(calls_restore_mutex);
static struct module* cifs_module = NULL;

/* Record the original value of *loc in calls_restore_array and overwrite it
 * with new_call.  Caller must hold calls_restore_mutex. */
static void call_replace_nolock(pfunc_t* loc, pfunc_t new_call)
{
	if (calls_restore_count >= CR_ARR_SIZE) {
		printk(
		    KERN_ERR
		    "klflt: increase calls_restore_array size to properly intercept CIFS\n");
		return;
	}
	calls_restore_array[calls_restore_count].loc = loc;
	calls_restore_array[calls_restore_count].old = *loc;
	*loc = new_call;
	++calls_restore_count;
}

typedef int (*fop_open_t)(struct inode* inode, struct file* file);
static volatile fop_open_t cifs_open = NULL;

/* Replacement for the CIFS file open: run the on-access pre-open check
 * first, then chain to the saved original open. */
static int smbcheck_open(struct inode* inode, struct file* file)
{
	int ret, res;
	fop_open_t lf_open = cifs_open;

	if (!lf_open)
		return -ERESTARTSYS;
	dump_syscall(SC_OPEN, 1, file->f_dentry, 0);
	res = kavoas_before_open(inode, file);
	/* With O_CREAT a not-yet-existing file is not an error. */
	if ((res == -ENOENT) && (file->f_flags & O_CREAT))
		res = 0;
	dump_syscall(SC_OPEN, 0, file->f_dentry, res);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
	/* Consume the exclusive-create marker (d_fsdata == -1UL) planted by
	 * the lookup hook and strip O_EXCL before chaining on. */
	if ((file->f_flags & O_EXCL) && (file->f_dentry->d_fsdata == (void *)-1UL)) {
		file->f_dentry->d_fsdata = NULL;
		file->f_flags &= ~O_EXCL;
	}
#endif
	if (0 == res)
		ret = lf_open(inode, file);
	else
		ret = res;
	return ret;
}

typedef int (*fop_release_t)(struct inode* inode, struct file* file);
static volatile fop_release_t cifs_close = NULL;

/* Replacement for the CIFS file release: chain to the original close, then
 * run the after-close check. */
static int smbcheck_release(struct inode* inode, struct file* file)
{
	int ret, res;
	fop_release_t lf_close = cifs_close;

	if (!lf_close)
		return -ERESTARTSYS;
	ret = lf_close(inode, file);
	dump_syscall(SC_CLOSE, 1, file->f_dentry, 0);
	res = kavoas_after_close(inode, file);
	dump_syscall(SC_CLOSE, 0, file->f_dentry, res);
	return ret;
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
typedef int
(*iop_rename_t)(struct inode* i_src, struct dentry* d_src,
                struct inode* i_tgt, struct dentry* d_tgt);
static volatile iop_rename_t cifs_rename = NULL;
/* Replacement for the CIFS rename: notify the scanner, then chain to the
 * saved original rename.  Signature follows the 4.10 iop change. */
static int smbcheck_rename(struct inode* i_src, struct dentry* d_src,
                           struct inode* i_tgt, struct dentry* d_tgt)
#else
typedef int (*iop_rename_t)(struct inode* i_src, struct dentry* d_src,
                            struct inode* i_tgt, struct dentry* d_tgt,
                            unsigned int flags);
static volatile iop_rename_t cifs_rename = NULL;
static int smbcheck_rename(struct inode* i_src, struct dentry* d_src,
                           struct inode* i_tgt, struct dentry* d_tgt,
                           unsigned int flags)
#endif
{
	int res;
	iop_rename_t lf_rename = cifs_rename;

	if (!lf_rename)
		return -ERESTARTSYS;
	CIFS_DBGPRN("smbcheck_rename: src:%s tgt:%s\n", d_src->d_name.name,
	            d_tgt->d_name.name);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
	kavoas_rename(i_src, d_src, i_tgt, d_tgt);
	res = lf_rename(i_src, d_src, i_tgt, d_tgt);
#else
	kavoas_rename(i_src, d_src, i_tgt, d_tgt);
	res = lf_rename(i_src, d_src, i_tgt, d_tgt, flags);
#endif
	return res;
}

typedef int (*iop_unlink_t)(struct inode*, struct dentry*);
static volatile iop_unlink_t cifs_unlink = NULL;

/* Replacement for the CIFS unlink: notify the scanner, then chain to the
 * saved original unlink. */
static int smbcheck_unlink(struct inode* inode, struct dentry* direntry)
{
	int res;
	iop_unlink_t lf_unlink = cifs_unlink;

	if (!lf_unlink)
		return -ERESTARTSYS;
	CIFS_DBGPRN("smbcheck_unlink: direntry:%s\n", direntry->d_name.name);
	kavoas_unlink(inode, direntry);
	res = lf_unlink(inode, direntry);
	return res;
}

/* Patch the inode/file operations of a regular CIFS file inode, recording
 * each replaced pointer for later restore.
 * Caller must hold calls_restore_mutex. */
static void replace_file_ops_nolock(struct inode* fi)
{
	int protected_iop, protected_fop, error = 0;

	/* The ops tables may live in write-protected module memory; lift the
	 * protection before patching, restore it afterwards. */
	protected_iop = WRITE_ENABLE(fi->i_op, &error);
	if (error) {
		printk(KERN_ERR "klflt: failed to replace cifs file inode operations\n");
		return;
	}
	protected_fop = WRITE_ENABLE(fi->i_fop, &error);
	if (error) {
		printk(KERN_ERR "klflt: failed to replace cifs file file operations\n");
		if (protected_iop)
			WRITE_DISABLE(fi->i_op, &error);
		return;
	}
	if (fi->i_fop->open && smbcheck_open != fi->i_fop->open) {
		CIFS_DBGPRN("replace cifs_open\n");
		if (cifs_open
&& cifs_open != fi->i_fop->open)
			printk(KERN_ERR "klflt: different cifs_open functions\n");
		cifs_open = fi->i_fop->open;
		call_replace_nolock((pfunc_t*)&fi->i_fop->open, (pfunc_t)smbcheck_open);
	}
	if (fi->i_fop->release && smbcheck_release != fi->i_fop->release) {
		CIFS_DBGPRN("replace cifs_close\n");
		if (cifs_close && cifs_close != fi->i_fop->release)
			printk(KERN_ERR "klflt: different cifs_close functions\n");
		cifs_close = fi->i_fop->release;
		call_replace_nolock((pfunc_t*)&fi->i_fop->release,
		                    (pfunc_t)smbcheck_release);
	}
	if (fi->i_op->rename && smbcheck_rename != fi->i_op->rename) {
		CIFS_DBGPRN("replace cifs_rename\n");
		if (cifs_rename && cifs_rename != fi->i_op->rename)
			printk(KERN_ERR "klflt: different cifs_rename functions\n");
		cifs_rename = fi->i_op->rename;
		call_replace_nolock((pfunc_t*)&fi->i_op->rename, (pfunc_t)smbcheck_rename);
	}
	if (fi->i_op->unlink && smbcheck_unlink != fi->i_op->unlink) {
		CIFS_DBGPRN("replace cifs_unlink\n");
		if (cifs_unlink && cifs_unlink != fi->i_op->unlink)
			printk(KERN_ERR "klflt: different cifs_unlink functions\n");
		cifs_unlink = fi->i_op->unlink;
		call_replace_nolock((pfunc_t*)&fi->i_op->unlink, (pfunc_t)smbcheck_unlink);
	}
	/* Re-arm the write protection we lifted on entry. */
	if (protected_fop)
		WRITE_DISABLE(fi->i_fop, &error);
	if (protected_iop)
		WRITE_DISABLE(fi->i_op, &error);
}

/* Walk the children of dir, patching the ops of the first regular file
 * found, optionally recursing into subdirectories.  Returns non-zero once
 * a file has been patched.  NOTE(review): the early return after one file
 * presumably relies on all regular files of the mount sharing one ops
 * table - confirm against the CIFS sources.
 * Caller must hold calls_restore_mutex and the dcache lock. */
static int __dir_walk_replace_file_ops_nolock(struct dentry* dir, int recursive)
{
	struct dentry* de;

	rfs_for_each_d_child(de, &dir->d_subdirs) {
		if (de->d_inode) {
			if (S_IFREG == (de->d_inode->i_mode & S_IFMT)) {
				DENTRY_NESTED(de);
				replace_file_ops_nolock(de->d_inode);
				DENTRY_UNLOCK(de);
				return 1;
			} else if (recursive && S_IFDIR == (de->d_inode->i_mode & S_IFMT)) {
				int ret = 0;
				DENTRY_NESTED(de);
				ret = __dir_walk_replace_file_ops_nolock(de, 1);
				DENTRY_UNLOCK(de);
				if (ret)
					return ret;
			}
		}
	}
	return 0;
}

/* Recursive sweep used when attaching to an already-populated dcache.
 * Caller must hold calls_restore_mutex. */
static void dir_walk_replace_file_ops_recursive_nolock(struct dentry* dir)
{
	DCACHE_LOCK(dir);
	__dir_walk_replace_file_ops_nolock(dir, 1);
	DCACHE_UNLOCK(dir);
}

/* Non-recursive, self-locking sweep used from the readdir hook. */
static void dir_walk_replace_file_ops(struct
dentry* dir)
{
	rfs_mutex_lock(&calls_restore_mutex);
	DCACHE_LOCK(dir);
	__dir_walk_replace_file_ops_nolock(dir, 0);
	DCACHE_UNLOCK(dir);
	rfs_mutex_unlock(&calls_restore_mutex);
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
typedef struct dentry* (*iop_lookup_t)(struct inode* parent_dir_inode,
                                       struct dentry* direntry,
                                       struct nameidata* nd);
static volatile iop_lookup_t cifs_lookup = NULL;
/* Replacement for the CIFS directory lookup: chain to the original, then
 * patch the ops of the freshly instantiated regular-file inode. */
static struct dentry* smbcheck_lookup(struct inode* parent_dir_inode,
                                      struct dentry* direntry,
                                      struct nameidata* nd)
#else
typedef int (*iop_atomic_open_t)(struct inode* dir, struct dentry* direntry,
                                 struct file* file, unsigned int flags,
                                 umode_t mode, int* opened);
static volatile iop_atomic_open_t cifs_atomic_open = NULL;
#if 0
static const iop_atomic_open_t smbcheck_atomic_open = NULL;
#else
// smb_atomic_open returns 0 if file is created, 1 if file was found (still needs cifs_open to open it)
// or an error otherwise
static int smbcheck_atomic_open(struct inode *inode, struct dentry *direntry,
                                struct file *file, unsigned int flags,
                                umode_t mode, int *opened)
{
	int res;
	iop_atomic_open_t lf_atomic_open = cifs_atomic_open;

	if(!lf_atomic_open)
		return -ERESTARTSYS;
	res = lf_atomic_open(inode, direntry, file, flags, mode, opened);
	return res;
}
#endif
typedef struct dentry* (*iop_lookup_t)(struct inode* parent_dir_inode,
                                       struct dentry* direntry,
                                       unsigned int flags);
static volatile iop_lookup_t cifs_lookup = NULL;
static struct dentry* smbcheck_lookup(struct inode* parent_dir_inode,
                                      struct dentry* direntry,
                                      unsigned int flags)
#endif
{
	struct dentry* res;
	iop_lookup_t lf_lookup = cifs_lookup;

	if (!lf_lookup)
		return ERR_PTR(-ERESTARTSYS);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
	res = lf_lookup(parent_dir_inode, direntry, nd);
#else
	res = lf_lookup(parent_dir_inode, direntry, flags);
	/* Tag exclusive-create negative dentries so smbcheck_open can later
	 * strip O_EXCL.  NOTE(review): presumably this defends against the
	 * pre-open scan creating the file first - confirm. */
	if (0 == res && !direntry->d_inode && (flags & LOOKUP_EXCL))
		direntry->d_fsdata = (void*)-1UL;
#endif
	if (0 == res && direntry->d_inode) {
		CIFS_DBGPRN("smbcheck_lookup: i_mode:%o iop:%p fop:%p\n",
		            direntry->d_inode->i_mode, direntry->d_inode->i_op,
		            direntry->d_inode->i_fop);
		if (S_IFREG == (direntry->d_inode->i_mode & S_IFMT)) {
			rfs_mutex_lock(&calls_restore_mutex);
			replace_file_ops_nolock(direntry->d_inode);
			rfs_mutex_unlock(&calls_restore_mutex);
		}
	}
	return res;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)
typedef int (*fop_readdir_t)(struct file* file, void* direntry,
                             filldir_t filldir);
#else
typedef int (*fop_readdir_t)(struct file* file, struct dir_context* ctx);
#endif
static volatile fop_readdir_t cifs_readdir = NULL;

/* Replacement for the CIFS readdir/iterate: chain to the original, then
 * sweep the just-populated child dentries to patch regular-file ops. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)
static int smbcheck_readdir(struct file* file, void* direntry, filldir_t filldir)
#else
static int smbcheck_readdir(struct file* file, struct dir_context* ctx)
#endif
{
	int res;
	fop_readdir_t lf_readdir = cifs_readdir;

	if (!lf_readdir)
		return -ERESTARTSYS;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)
	res = lf_readdir(file, direntry, filldir);
#else
	res = lf_readdir(file, ctx);
#endif
	dir_walk_replace_file_ops(file->f_dentry);
	return res;
}

/* Patch the directory inode/file operations of a CIFS directory inode
 * (atomic_open, lookup, readdir/iterate, rename, unlink), recording each
 * replaced pointer for later restore.
 * Caller must hold calls_restore_mutex. */
static void replace_dir_ops_nolock(struct inode* di)
{
	int protected_iop, protected_fop, error = 0;

	protected_iop = WRITE_ENABLE(di->i_op, &error);
	if (error) {
		printk(KERN_ERR "klflt: failed to replace cifs dir inode operations\n");
		return;
	}
	protected_fop = WRITE_ENABLE(di->i_fop, &error);
	if (error) {
		printk(KERN_ERR "klflt: failed to replace cifs dir file operations\n");
		if (protected_iop)
			WRITE_DISABLE(di->i_op, &error);
		return;
	}
#if !(LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
	if (di->i_op->atomic_open && smbcheck_atomic_open != di->i_op->atomic_open) {
		CIFS_DBGPRN("replace dir iop cifs_atomic_open\n");
		if (cifs_atomic_open && cifs_atomic_open != di->i_op->atomic_open)
			printk(KERN_ERR "klflt: different cifs_atomic_open functions\n");
		cifs_atomic_open = di->i_op->atomic_open;
		call_replace_nolock((pfunc_t*)&di->i_op->atomic_open,
		                    (pfunc_t)smbcheck_atomic_open);
	}
#endif
	if (di->i_op->lookup && smbcheck_lookup !=
di->i_op->lookup) { CIFS_DBGPRN("replace dir iop cifs_lookup\n"); if (cifs_lookup && cifs_lookup != di->i_op->lookup) printk(KERN_ERR "klflt: different cifs_lookup functions\n"); cifs_lookup = di->i_op->lookup; call_replace_nolock((pfunc_t*)&di->i_op->lookup, (pfunc_t)smbcheck_lookup); } #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) if (di->i_fop->readdir && smbcheck_readdir != di->i_fop->readdir) { CIFS_DBGPRN("replace dir fop cifs_readdir\n"); if (cifs_readdir && cifs_readdir != di->i_fop->readdir) printk(KERN_ERR "klflt: different cifs_readdir functions\n"); cifs_readdir = di->i_fop->readdir; call_replace_nolock((pfunc_t*)&di->i_fop->readdir, (pfunc_t)smbcheck_readdir); } #else if (di->i_fop->iterate && smbcheck_readdir != di->i_fop->iterate) { CIFS_DBGPRN("replace dir fop cifs_iterate\n"); if (cifs_readdir && cifs_readdir != di->i_fop->iterate) printk(KERN_ERR "klflt: different cifs_iterate functions\n"); cifs_readdir = di->i_fop->iterate; call_replace_nolock((pfunc_t*)&di->i_fop->iterate, (pfunc_t)smbcheck_readdir); } #endif if (di->i_op->rename && smbcheck_rename != di->i_op->rename) { CIFS_DBGPRN("replace dir iop cifs_rename\n"); if (cifs_rename && cifs_rename != di->i_op->rename) printk(KERN_ERR "klflt: different cifs_rename functions\n"); cifs_rename = di->i_op->rename; call_replace_nolock((pfunc_t*)&di->i_op->rename, (pfunc_t)smbcheck_rename); } if (di->i_op->unlink && smbcheck_unlink != di->i_op->unlink) { CIFS_DBGPRN("replace dir iop cifs_unlink\n"); if (cifs_unlink && cifs_unlink != di->i_op->unlink) printk(KERN_ERR "klflt: different cifs_unlink functions\n"); cifs_unlink = di->i_op->unlink; call_replace_nolock((pfunc_t*)&di->i_op->unlink, (pfunc_t)smbcheck_unlink); } if (protected_fop) WRITE_DISABLE(di->i_fop, &error); if (protected_iop) WRITE_DISABLE(di->i_op, &error); } static void cifs_calls_replace(struct dentry* d_root) { struct dentry* de = dget(d_root); struct inode* di; if (!de || IS_ERR(de)) return; di = igrab(d_root->d_inode); if (!di || 
IS_ERR(di)) { dput(de); return; } rfs_mutex_lock(&calls_restore_mutex); if (!cifs_module) { cifs_module = di->i_sb->s_type->owner; if (!(cifs_module && try_module_get(cifs_module))) { printk(KERN_ERR "klflt: can't get cifs module\n"); cifs_module = NULL; iput(di); dput(de); return; } } replace_dir_ops_nolock(di); dir_walk_replace_file_ops_recursive_nolock(d_root); rfs_mutex_unlock(&calls_restore_mutex); iput(di); dput(de); } static void cifs_calls_restore(void) { int i; rfs_mutex_lock(&calls_restore_mutex); for (i = 0; i < calls_restore_count; ++i) { int protected, error = 0; protected = WRITE_ENABLE(calls_restore_array[i].loc, &error); if (error) continue; *calls_restore_array[i].loc = calls_restore_array[i].old; if (protected) WRITE_DISABLE(calls_restore_array[i].loc, &error); } calls_restore_count = 0; if (cifs_module) { module_put(cifs_module); cifs_module = NULL; } cifs_open = NULL; cifs_close = NULL; cifs_lookup = NULL; cifs_rename = NULL; cifs_unlink = NULL; cifs_readdir = NULL; #if !(LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) cifs_atomic_open = NULL; #endif rfs_mutex_unlock(&calls_restore_mutex); } #else /* CIFS is not compiled as a module so below are empty stubs */ #ifdef __CONFIG_CIFS_KERNEL #warning \ "Mounted samba shares can not be checked since CIFS is built into the kernel." #endif static void cifs_calls_replace(struct dentry* d_root) { } static void cifs_calls_restore(void) { } #endif
💾 Save Changes
Cancel
📤 Upload File
×
Select File
Upload
Cancel
➕ Create New
×
Type
📄 File
📁 Folder
Name
Create
Cancel
✎ Rename Item
×
Current Name
New Name
Rename
Cancel
🔐 Change Permissions
×
Target File
Permission (e.g., 0755, 0644)
0755
0644
0777
Apply
Cancel