/*
 * Copyright 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Don't lint the next line, as cpplint will suggest adding
// /tools/security as an include_dir
// NOLINTNEXTLINE
#include "fuzz_ffi.h"

#include <cstdlib>
#include <cstring>
#include <vector>
#include "include/ffi_common.h"

// Empty functions we can use for our function targets
void fn(int num_args, ...) {}
void closure_fn(ffi_cif* cif __UNUSED__,
                void* resp, void** args, void* userdata) {}
void raw_closure_fn(ffi_cif* cif __UNUSED__,
                    void* resp, ffi_raw* args, void* userdata) {}
void java_raw_closure_fn(ffi_cif* cif __UNUSED__,
                         void* resp, ffi_java_raw* args, void* userdata) {}

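// Builds a random struct type (FFI_TYPE_STRUCT) with up to MAX_NUM_ELEMENTS
// randomly-chosen member types, records it in ffi_alloc_vector so freeAll()
// can release it, and lets libffi fill in its size/alignment.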
ffi_type* generateCustomType(FuzzedDataProvider* dataProvider) {
  // Set our flag so we don't call a java-related function (triggers an abort)
  args_contain_struct = true;

  ffi_type* new_type = reinterpret_cast<ffi_type*>(malloc(sizeof(ffi_type)));
  ffi_alloc_vector.push_back(new_type);

  new_type->size = 0;
  new_type->alignment = 0;
  new_type->type = FFI_TYPE_STRUCT;

  // Generate our subobjects
  size_t num_elements = dataProvider->ConsumeIntegralInRange<size_t>(0,
      MAX_NUM_ELEMENTS);
  new_type->elements = reinterpret_cast<ffi_type**>(
      malloc(sizeof(ffi_type*) * (num_elements + 1)));

  // Nested custom structs will cause an assert, so disable them
  // TODO(michael.ensing@leviathansecurity.com):
  // change the 'false' here to true once libffi supports nested structs.
  // It'll just throw an assert currently.
  for (size_t i = 0; i < num_elements; i++) {
    new_type->elements[i] = getRandomType(dataProvider, false);
  }

  // The final element must be a nullptr
  new_type->elements[num_elements] = NULL;

  // Get our size/alignment
  ffi_get_struct_offsets(abi, new_type, NULL);

  return new_type;
}

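// Returns a conservative upper bound on the storage needed for a value of
// `type`: at least pointer-sized per node, plus the sizes of any struct
// elements, counted recursively.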
size_t getTotalSize(ffi_type* type) {
  if (type == NULL) {
    return 0;
  }

  // Start the total as the size of the object itself
  size_t total_size = type->size > sizeof(void*) ?
      type->size : sizeof(void*);

  // Recursively add the size of the subelements
  if (type->elements != NULL) {
    for (size_t i = 0; type->elements[i] != NULL; i++) {
      total_size += getTotalSize(type->elements[i]);
    }
  }

  return total_size;
}

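// Picks one of the predefined ffi_types at random, or (when allowed) a
// freshly generated custom struct type. Returns NULL if a custom type was
// drawn but allowCustomTypes is false.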
ffi_type* getRandomType(FuzzedDataProvider* dataProvider,
                        bool allowCustomTypes) {
  // Which type? Let type==NUM_TYPES be our custom struct case
  size_t type_index = dataProvider->ConsumeIntegralInRange<size_t>(0,
      NUM_TYPES);
  ffi_type* type;
  if (type_index == NUM_TYPES) {
    if (allowCustomTypes) {
      type = generateCustomType(dataProvider);
    } else {
      return NULL;
    }
  } else {
    type = ffi_types[type_index];
  }

  return type;
}

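// Allocates a buffer large enough for a value of `type` and fills it with
// fuzzer-provided bytes; returns NULL for a zero-sized (i.e. NULL) type.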
void* genArg(ffi_type* type, FuzzedDataProvider* dataProvider) {
  // Allocate the space for our arg
  // TODO(michael.ensing@leviathansecurity.com):
  // Properly allocate the correct amount of aligned-space,
  // don't just double (which should contain any alignment)
  size_t type_size = getTotalSize(type) * 2;

  if (type_size == 0) {
    return NULL;
  }

  void* ret = malloc(type_size);

  std::vector<uint8_t> bytes = dataProvider->ConsumeBytes<uint8_t>(type_size);
  memcpy(ret, bytes.data(), bytes.size());

  return ret;
}

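// Fills arg_types/arg_array: slot 0 holds the variadic argument count, slots
// 1..num_args hold randomly-typed, fuzzer-generated values, and both arrays
// are NULL-terminated. Returns false if a type could not be generated.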
bool buildArgArrays(ffi_type* arg_types[], void* arg_array[], size_t num_args,
                    FuzzedDataProvider* dataProvider) {
  // The first value in our array should be the number of arguments
  arg_types[0] = &ffi_type_sint;
  size_t* size_ptr = reinterpret_cast<size_t*>(malloc(sizeof(size_t)));
  *size_ptr = num_args;
  arg_array[0] = size_ptr;

  // Grab our arguments
  for (size_t i = 1; i <= num_args; i++) {
    // Determine what type we're using
    ffi_type* type = getRandomType(dataProvider, true);
    if (type == NULL) {
      return false;
    }
    arg_types[i] = type;

    // Generate a value for it and add to our arguments array
    arg_array[i] = genArg(type, dataProvider);
  }

  // Our arrays of pointers need to be nullptr-terminated
  arg_types[num_args+1] = NULL;
  arg_array[num_args+1] = NULL;

  return true;
}

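// Exercises the standard API: ffi_call() on the prepared cif, then
// ffi_closure_alloc()/ffi_prep_closure_loc() with fuzzer-provided userdata.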
void runMainFunctions(ffi_cif* cif, void* resp_buf, void** arg_array,
                      FuzzedDataProvider* dataProvider) {
  // Call function
  ffi_call(cif, FFI_FN(fn), resp_buf, arg_array);

  // Prep Closure
  ffi_closure* pcl = NULL;
  void* code;
  ffi_status ret;

  pcl = reinterpret_cast<ffi_closure*>(
      ffi_closure_alloc(sizeof(ffi_closure), &code));
  if (pcl == NULL) {
    return;
  }

  size_t buf_size = dataProvider->ConsumeIntegralInRange<size_t>(0,
      MAX_RESP_SIZE);
  std::vector<uint8_t> data_vector =
      dataProvider->ConsumeBytes<uint8_t>(buf_size);
  ret = ffi_prep_closure_loc(
      pcl,
      cif,
      closure_fn,
      data_vector.data(),
      code);
  if (ret != FFI_OK) {
    ffi_closure_free(pcl);
  }
}

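// Exercises the raw API (when built in): converts the argument pointers to
// an ffi_raw buffer, calls ffi_raw_call(), then preps a raw closure.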
void runRawFunctions(ffi_cif* cif, void* resp_buf, void** arg_array,
                     FuzzedDataProvider* dataProvider) {
#if !FFI_NO_RAW_API && !FFI_NATIVE_RAW_API
  // Allocate our ffi_raw and put our args there
  size_t rsize = ffi_raw_size(cif);
  ffi_raw* raw_args = reinterpret_cast<ffi_raw*>(malloc(rsize));
  raw_alloc_vector.push_back(raw_args);
  ffi_ptrarray_to_raw(cif, arg_array, raw_args);

  // Call
  ffi_raw_call(cif, FFI_FN(fn), resp_buf, raw_args);

  // Prep Closure
#if FFI_CLOSURES
  ffi_raw_closure* pcl = NULL;
  void* code;
  ffi_status ret;

  pcl = static_cast<ffi_raw_closure*>(
      ffi_closure_alloc(sizeof(ffi_raw_closure), &code));
  if (pcl == NULL) {
    return;
  }
  size_t buf_size = dataProvider->ConsumeIntegralInRange<size_t>(0,
      MAX_RESP_SIZE);
  std::vector<uint8_t> data_vector =
      dataProvider->ConsumeBytes<uint8_t>(buf_size);
  ret = ffi_prep_raw_closure_loc(
      pcl,
      cif,
      raw_closure_fn,
      data_vector.data(),
      code);
  if (ret != FFI_OK) {
    ffi_closure_free(pcl);
  }

#endif  // FFI_CLOSURES
#endif  // !FFI_NO_RAW_API && !FFI_NATIVE_RAW_API
}

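// Exercises the Java raw API (when built in): mirrors runRawFunctions() but
// uses the ffi_java_raw_* entry points. The caller skips this path when the
// arguments contain a custom struct, since that triggers an abort.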
void runJavaFunctions(ffi_cif* cif, void* resp_buf, void** arg_array,
                      FuzzedDataProvider* dataProvider) {
#if !defined(NO_JAVA_RAW_API)
#if !FFI_NO_RAW_API && !FFI_NATIVE_RAW_API

  // Allocate our ffi_java_raw and put our args there
  size_t rsize = ffi_java_raw_size(cif);
  // NOTE: a buffer overread will occasionally happen if we don't
  // increase rsize.
  ffi_java_raw* raw_args = reinterpret_cast<ffi_java_raw*>(malloc(rsize*2));
  raw_alloc_vector.push_back(raw_args);
  ffi_ptrarray_to_raw(cif, arg_array, raw_args);

  // Call
  ffi_java_raw_call(cif, FFI_FN(fn), resp_buf, raw_args);

  // Prep Closure
#if FFI_CLOSURES
  ffi_java_raw_closure* pcl = NULL;
  void* code;
  ffi_status ret;

  pcl = static_cast<ffi_java_raw_closure*>(
      ffi_closure_alloc(sizeof(ffi_java_raw_closure), &code));
  if (pcl == NULL) {
    return;
  }
  size_t buf_size = dataProvider->ConsumeIntegralInRange<size_t>(0,
      MAX_RESP_SIZE);
  std::vector<uint8_t> data_vector =
      dataProvider->ConsumeBytes<uint8_t>(buf_size);
  ret = ffi_prep_java_raw_closure_loc(
      pcl,
      cif,
      java_raw_closure_fn,
      data_vector.data(),
      code);
  if (ret != FFI_OK) {
    ffi_closure_free(pcl);
  }

#endif  // FFI_CLOSURES
#endif  // !FFI_NO_RAW_API && !FFI_NATIVE_RAW_API
#endif  // !defined(NO_JAVA_RAW_API)
}

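// Frees a custom struct type created by generateCustomType(); predefined
// (non-struct) types are left alone.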
void freeFFI(ffi_type* type) {
  // Make sure it's one of our structs
  if (type == NULL || type->type != FFI_TYPE_STRUCT) {
    return;
  }

  if (type->elements != NULL) {
    free(type->elements);
  }

  // Finally, free our object
  free(type);
}

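// Releases everything allocated during one fuzz iteration: custom types,
// raw-argument buffers, the generated argument values, and the response
// buffer.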
void freeAll(void* arg_array[], size_t num_args, void* resp_buf) {
  // Free our custom struct objects
  for (const auto& ffi : ffi_alloc_vector) {
    freeFFI(ffi);
  }
  ffi_alloc_vector.clear();
  for (const auto& raw : raw_alloc_vector) {
    free(raw);
  }
  raw_alloc_vector.clear();

  for (size_t i = 0; i <= num_args; i++) {
    free(arg_array[i]);
  }

  if (resp_buf) {
    free(resp_buf);
  }
}

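// libFuzzer entry point: builds a random cif (argument types, return type,
// and optionally the ABI on ARM), then drives the standard, raw, and Java
// raw call paths with fuzzer-derived argument data.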
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
  // Init our wrapper
  FuzzedDataProvider dataProvider(Data, Size);
  ffi_cif cif;
  ffi_status ret;
  void* resp_buf = NULL;
  args_contain_struct = false;
  ffi_type* rtype;

  // How many args are we sending?
  size_t num_args = dataProvider.ConsumeIntegralInRange<size_t>(0,
      MAX_NUM_ARGS);

  // Build our array of args (+2 for leading arg_count and trailing nullptr)
  ffi_type* arg_types[num_args+2];
  void* arg_array[num_args+2];
  bool success = buildArgArrays(arg_types, arg_array, num_args,
                                &dataProvider);
  if (!success) {
    goto free;
  }

  // Get return type
  rtype = dataProvider.PickValueInArray<ffi_type*, NUM_TYPES>(ffi_types);

  // Create a buffer for our return value
  resp_buf = malloc(MAX_RESP_SIZE);
  if (resp_buf == NULL) {
    goto free;
  }

  // Set up our ABI
  // NOTE: fuzzing abi triggers an abort on linux-x86_64,
  // so only fuzz it on ARM
#if MAX_ABI > 0 && defined(ARM)
  abi = static_cast<ffi_abi>(
      dataProvider.ConsumeIntegralInRange<uint32_t>(0, MAX_ABI));
#endif
#if HAVE_LONG_DOUBLE_VARIANT
  ffi_prep_types(abi);
#endif

  // ============= Call Functions =============
  ret = ffi_prep_cif_var(&cif, abi, 1, num_args, rtype,
                         arg_types);
  if (ret != FFI_OK) {
    goto free;
  }

  runMainFunctions(&cif, resp_buf, arg_array, &dataProvider);
  runRawFunctions(&cif, resp_buf, arg_array, &dataProvider);
  if (!args_contain_struct) {
    runJavaFunctions(&cif, resp_buf, arg_array, &dataProvider);
  }

free:
  freeAll(arg_array, num_args, resp_buf);
  return 0;
}