diff --git a/include/afl-fuzz.h b/include/afl-fuzz.h index 4b19e698e8..24234caac0 100644 --- a/include/afl-fuzz.h +++ b/include/afl-fuzz.h @@ -165,7 +165,8 @@ struct queue_entry { favored, /* Currently favored? */ fs_redundant, /* Marked as redundant in the fs? */ is_ascii, /* Is the input just ascii text? */ - disabled; /* Is disabled from fuzz selection */ + disabled, /* Is disabled from fuzz selection */ + is_selected; u32 bitmap_size, /* Number of bits set in bitmap */ fuzz_level, /* Number of fuzzing iterations */ @@ -174,7 +175,9 @@ struct queue_entry { u64 exec_us, /* Execution time (us) */ handicap, /* Number of queue cycles behind */ depth, /* Path depth */ - exec_cksum; /* Checksum of the execution trace */ + exec_cksum, /* Checksum of the execution trace */ + rand, + num_fuzzed; u8 *trace_mini; /* Trace bytes, if kept */ u32 tc_ref; /* Trace bytes ref count */ @@ -519,7 +522,12 @@ typedef struct afl_state { expand_havoc, /* perform expensive havoc after no find */ cycle_schedules, /* cycle power schedules? 
*/ old_seed_selection, /* use vanilla afl seed selection */ - reinit_table; /* reinit the queue weight table */ + reinit_table, /* reinit the queue weight table */ + disable_weighted_random_selection, + disable_random_favorites, + enable_uniformly_random_favorites, + disable_afl_default_favorites, + disable_randomized_fuzzing_params; u8 *virgin_bits, /* Regions yet untouched by fuzzing */ *virgin_tmout, /* Bits we haven't seen in tmouts */ @@ -751,6 +759,17 @@ typedef struct afl_state { * is too large) */ struct queue_entry **q_testcase_cache; + int randomize_parameters_prob; + + /* list of fuzzing parameter constants found in config.h */ + u32 custom_havoc_cycles; + u32 custom_havoc_stack_pow2; + u32 custom_havoc_blk_small; + u32 custom_havok_blk_medium; + u32 custom_havoc_blk_large; + u32 custom_splice_cycles; + u32 custom_splice_havoc; + #ifdef INTROSPECTION char mutation[8072]; char m_tmp[4096]; @@ -1038,6 +1057,10 @@ void update_bitmap_score(afl_state_t *, struct queue_entry *); void cull_queue(afl_state_t *); u32 calculate_score(afl_state_t *, struct queue_entry *); +/* random_params */ +u32 rand_int_in_range(afl_state_t * afl, int low, int high); +double rand_double(afl_state_t * afl) ; + /* Bitmap */ void write_bitmap(afl_state_t *); @@ -1104,6 +1127,8 @@ u8 pilot_fuzzing(afl_state_t *); u8 core_fuzzing(afl_state_t *); void pso_updating(afl_state_t *); u8 fuzz_one(afl_state_t *); +void reset_fuzzing_params(afl_state_t * afl); +void randomize_fuzzing_params(afl_state_t * afl); /* Init */ diff --git a/src/afl-fuzz-one.c b/src/afl-fuzz-one.c index 177496016e..0a65f46b9c 100644 --- a/src/afl-fuzz-one.c +++ b/src/afl-fuzz-one.c @@ -84,24 +84,24 @@ static inline u32 choose_block_len(afl_state_t *afl, u32 limit) { case 0: min_value = 1; - max_value = HAVOC_BLK_SMALL; + max_value = afl->custom_havoc_blk_small; break; case 1: - min_value = HAVOC_BLK_SMALL; - max_value = HAVOC_BLK_MEDIUM; + min_value = afl->custom_havoc_blk_small; + max_value = 
afl->custom_havok_blk_medium; break; default: if (likely(rand_below(afl, 10))) { - min_value = HAVOC_BLK_MEDIUM; - max_value = HAVOC_BLK_LARGE; + min_value = afl->custom_havok_blk_medium; + max_value = afl->custom_havoc_blk_large; } else { - min_value = HAVOC_BLK_LARGE; + min_value = afl->custom_havoc_blk_large; max_value = HAVOC_BLK_XL; } @@ -1798,7 +1798,7 @@ u8 fuzz_one_original(afl_state_t *afl) { afl->stage_name = "custom mutator"; afl->stage_short = "custom"; - afl->stage_max = HAVOC_CYCLES * perf_score / afl->havoc_div / 100; + afl->stage_max = afl->custom_havoc_cycles * perf_score / afl->havoc_div / 100; afl->stage_val_type = STAGE_VAL_NONE; bool has_custom_fuzz = false; @@ -1954,7 +1954,7 @@ u8 fuzz_one_original(afl_state_t *afl) { afl->stage_name = "havoc"; afl->stage_short = "havoc"; - afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * + afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : afl->custom_havoc_cycles) * perf_score / afl->havoc_div / 100; } else { @@ -1964,7 +1964,7 @@ u8 fuzz_one_original(afl_state_t *afl) { snprintf(afl->stage_name_buf, STAGE_BUF_SIZE, "splice %u", splice_cycle); afl->stage_name = afl->stage_name_buf; afl->stage_short = "splice"; - afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100; + afl->stage_max = afl->custom_splice_havoc * perf_score / afl->havoc_div / 100; } @@ -2029,7 +2029,7 @@ u8 fuzz_one_original(afl_state_t *afl) { for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) { - u32 use_stacking = 1 << (1 + rand_below(afl, afl->havoc_stack_pow2)); + u32 use_stacking = 1 << (1 + rand_below(afl, afl->custom_havoc_stack_pow2)); afl->stage_cur_val = use_stacking; @@ -2786,7 +2786,7 @@ u8 fuzz_one_original(afl_state_t *afl) { retry_splicing: - if (afl->use_splicing && splice_cycle++ < SPLICE_CYCLES && + if (afl->use_splicing && splice_cycle++ < afl->custom_splice_cycles && afl->ready_for_splicing_count > 1 && afl->queue_cur->len >= 4) { struct queue_entry *target; @@ -2881,6 
+2881,26 @@ u8 fuzz_one_original(afl_state_t *afl) { } +void reset_fuzzing_params(afl_state_t * afl) { + afl->custom_havoc_cycles = HAVOC_CYCLES; + afl->custom_havoc_stack_pow2 = HAVOC_STACK_POW2; + afl->custom_havoc_blk_small = HAVOC_BLK_SMALL; + afl->custom_havok_blk_medium = HAVOC_BLK_MEDIUM; + afl->custom_havoc_blk_large = HAVOC_BLK_LARGE; + afl->custom_splice_cycles = SPLICE_CYCLES; + afl->custom_splice_havoc = SPLICE_HAVOC; +} + +void randomize_fuzzing_params(afl_state_t * afl) { + afl->custom_havoc_cycles = rand_int_in_range(afl, 192, 320); + afl->custom_havoc_stack_pow2 = rand_int_in_range(afl, 4, 10); + afl->custom_havoc_blk_small = rand_int_in_range(afl, 24, 40); + afl->custom_havok_blk_medium = rand_int_in_range(afl, 96, 160); + afl->custom_havoc_blk_large = rand_int_in_range(afl, 1000, 2000); + afl->custom_splice_cycles = rand_int_in_range(afl, 10, 20); + afl->custom_splice_havoc = rand_int_in_range(afl, 24, 40); +} + /* MOpt mode */ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { @@ -2907,6 +2927,10 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { u8 a_collect[MAX_AUTO_EXTRA]; u32 a_len = 0; + // only fuzz selected inputs from our custom selection algorithm + if (!afl->disable_weighted_random_selection && !afl->queue_cur->is_selected) + return 1; + #ifdef IGNORE_FINDS /* In IGNORE_FINDS mode, skip any entries that weren't in the @@ -2961,6 +2985,18 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { } + + // assign probability based on frequency that the seed was chosen + if (!afl->disable_randomized_fuzzing_params) { + // randomize fuzzing params with probabilities + int multiplier = afl->queue_cur->num_fuzzed ?
((int)(afl->queue_cur->num_fuzzed/5000.0)) + 1: 0; + afl->randomize_parameters_prob = MIN(MAX(multiplier * 5, 5), 75); + if (rand_below(afl, 100) < afl->randomize_parameters_prob) + randomize_fuzzing_params(afl); + else + reset_fuzzing_params(afl); + } + /* Map the test case into memory. */ orig_in = in_buf = queue_testcase_get(afl, afl->queue_cur); len = afl->queue_cur->len; @@ -4298,7 +4334,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { afl->stage_name = MOpt_globals.havoc_stagename; afl->stage_short = MOpt_globals.havoc_stagenameshort; - afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * + afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : afl->custom_havoc_cycles) * perf_score / afl->havoc_div / 100; } else { @@ -4309,7 +4345,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { MOpt_globals.splice_stageformat, splice_cycle); afl->stage_name = afl->stage_name_buf; afl->stage_short = MOpt_globals.splice_stagenameshort; - afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100; + afl->stage_max = afl->custom_splice_havoc * perf_score / afl->havoc_div / 100; } @@ -4349,7 +4385,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { afl->stage_name = MOpt_globals.havoc_stagename; afl->stage_short = MOpt_globals.havoc_stagenameshort; - afl->stage_max = (doing_det ? HAVOC_CYCLES_INIT : HAVOC_CYCLES) * + afl->stage_max = (doing_det ? 
HAVOC_CYCLES_INIT : afl->custom_havoc_cycles) * perf_score / afl->havoc_div / 100; } else { @@ -4359,7 +4395,7 @@ static u8 mopt_common_fuzzing(afl_state_t *afl, MOpt_globals_t MOpt_globals) { MOpt_globals.splice_stageformat, splice_cycle); afl->stage_name = afl->stage_name_buf; afl->stage_short = MOpt_globals.splice_stagenameshort; - afl->stage_max = SPLICE_HAVOC * perf_score / afl->havoc_div / 100; + afl->stage_max = afl->custom_splice_havoc * perf_score / afl->havoc_div / 100; } diff --git a/src/afl-fuzz-queue.c b/src/afl-fuzz-queue.c index 16af2c6b99..e9f1b31f9b 100644 --- a/src/afl-fuzz-queue.c +++ b/src/afl-fuzz-queue.c @@ -439,6 +439,7 @@ void add_to_queue(afl_state_t *afl, u8 *fname, u32 len, u8 passed_det) { q->trace_mini = NULL; q->testcase_buf = NULL; q->mother = afl->queue_cur; + q->num_fuzzed = 0; #ifdef INTROSPECTION q->bitsmap_size = afl->bitsmap_size; @@ -598,9 +599,10 @@ void update_bitmap_score(afl_state_t *afl, struct queue_entry *q) { previous winner, discard its afl->fsrv.trace_bits[] if necessary. 
*/ if (!--afl->top_rated[i]->tc_ref) { - - ck_free(afl->top_rated[i]->trace_mini); - afl->top_rated[i]->trace_mini = 0; + + // don't free it for now, need to use this mini map to determine favored inputs later + // ck_free(afl->top_rated[i]->trace_mini); + // afl->top_rated[i]->trace_mini = 0; } @@ -638,7 +640,7 @@ void cull_queue(afl_state_t *afl) { if (likely(!afl->score_changed || afl->non_instrumented_mode)) { return; } u32 len = (afl->fsrv.map_size >> 3); - u32 i; + u32 i, j; u8 *temp_v = afl->map_tmp_buf; afl->score_changed = 0; @@ -648,53 +650,123 @@ void cull_queue(afl_state_t *afl) { afl->queued_favored = 0; afl->pending_favored = 0; - for (i = 0; i < afl->queued_paths; i++) { + // use AFL's original mechanism to assign favorites + if (afl->disable_random_favorites) { - afl->queue_buf[i]->favored = 0; + for (i = 0; i < afl->queued_paths; i++) { - } + if (afl->disable_afl_default_favorites) { + afl->queue_buf[i]->favored = 1; + } else{ + afl->queue_buf[i]->favored = 0; + } + + } - /* Let's see if anything in the bitmap isn't captured in temp_v. - If yes, and if it has a afl->top_rated[] contender, let's use it. */ + /* Let's see if anything in the bitmap isn't captured in temp_v. + If yes, and if it has a afl->top_rated[] contender, let's use it. */ - for (i = 0; i < afl->fsrv.map_size; ++i) { + for (i = 0; i < afl->fsrv.map_size; ++i) { + + if (afl->top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) { - if (afl->top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) { + u32 j = len; - u32 j = len; + /* Remove all bits belonging to the current entry from temp_v. */ - /* Remove all bits belonging to the current entry from temp_v. 
*/ + while (j--) { - while (j--) { + if (afl->top_rated[i]->trace_mini[j]) { - if (afl->top_rated[i]->trace_mini[j]) { + temp_v[j] &= ~afl->top_rated[i]->trace_mini[j]; - temp_v[j] &= ~afl->top_rated[i]->trace_mini[j]; + } } - } + if (!afl->top_rated[i]->favored) { - if (!afl->top_rated[i]->favored) { + afl->top_rated[i]->favored = 1; + ++afl->queued_favored; - afl->top_rated[i]->favored = 1; - ++afl->queued_favored; + if (afl->top_rated[i]->fuzz_level == 0 || + !afl->top_rated[i]->was_fuzzed) { - if (afl->top_rated[i]->fuzz_level == 0 || - !afl->top_rated[i]->was_fuzzed) { + ++afl->pending_favored; - ++afl->pending_favored; + } } } } + } else { + // otherwise, randomly assign favorites + int r; + int rid; + double weight; + struct queue_entry **edge_to_minimum_entry = ck_alloc(sizeof(struct queue_entry) * afl->fsrv.map_size); + + for (i = 0; i < afl->queued_paths; i++) { + weight = 1.0; + if (!afl->enable_uniformly_random_favorites) { + // enable_boost_inputs + double base_weight_fac_boost_inputs = 1.0; + double max_weight_fac_incr = 7.0; + double scale_fac_boost_inputs = 0.001; + double num_selections = (double)afl->queue_buf[i]->num_fuzzed; + weight *= base_weight_fac_boost_inputs + max_weight_fac_incr / (scale_fac_boost_inputs * num_selections + 1.0); + + // enable_boost_fast_seqs + double base_weight_fac_boost_fast = 8.0; + double max_weight_fac_decr = 7.0; + double scale_fac_boost_fast = 0.001; // based on the past experiment, 0.001 seems to be the best candidate among 0.005, 0.0025, 0.0005 + double execs_per_sec = 1000000.0 / (double)afl->queue_buf[i]->exec_us; + weight *= base_weight_fac_boost_fast - max_weight_fac_decr / (scale_fac_boost_fast*execs_per_sec + 1.0); + } + r = 0; + rid = INT_MAX; + while (weight >= 1.0) { + r = rand_below(afl, INT_MAX); + if (r < rid) + rid = r; + weight -= 1.0; + } + if (weight > 0.0 && weight > rand_double(afl)) { + r = rand_below(afl, INT_MAX); + if (r < rid) + rid = r; + } + + afl->queue_buf[i]->favored = 0; + 
afl->queue_buf[i]->rand = rid; + + // going through all entries, iteratively check for covered edges and compare against the corresponding minimum entry + if (afl->queue_buf[i]->trace_mini) { + for (j = 0; j < afl->fsrv.map_size; j++) { + if (afl->queue_buf[i]->trace_mini[j >> 3] & (1 <<(j & 7))) { + struct queue_entry* cur_entry = edge_to_minimum_entry[j]; + if (!cur_entry || afl->queue_buf[i]->rand < cur_entry->rand) { + edge_to_minimum_entry[j] = afl->queue_buf[i]; + } + } + } + } + } + for (i = 0; i < afl->fsrv.map_size; i++) { + struct queue_entry* cur_entry = edge_to_minimum_entry[i]; + if (cur_entry && !cur_entry->favored) { + cur_entry->favored = 1; + afl->queued_favored++; + if (!cur_entry->was_fuzzed) + afl->pending_favored++; + } + } } for (i = 0; i < afl->queued_paths; i++) { - if (likely(!afl->queue_buf[i]->disabled)) { mark_as_redundant(afl, afl->queue_buf[i], !afl->queue_buf[i]->favored); diff --git a/src/afl-fuzz-run.c b/src/afl-fuzz-run.c index 4173f4e179..9f5aebf985 100644 --- a/src/afl-fuzz-run.c +++ b/src/afl-fuzz-run.c @@ -921,6 +921,8 @@ common_fuzz_stuff(afl_state_t *afl, u8 *out_buf, u32 len) { fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout); + afl->queue_cur->num_fuzzed++; + if (afl->stop_soon) { return 1; } if (fault == FSRV_RUN_TMOUT) { diff --git a/src/afl-fuzz.c b/src/afl-fuzz.c index 8ffc0e77a2..e67152b916 100644 --- a/src/afl-fuzz.c +++ b/src/afl-fuzz.c @@ -334,6 +334,72 @@ static int stricmp(char const *a, char const *b) { } +double rand_double(afl_state_t * afl) +{ + // return random value in interval [0.0,1.0) + double rnd = (double)rand_below(afl, RAND_MAX); + double max = (double)RAND_MAX; + return rnd / max; +} + +u32 rand_int_in_range(afl_state_t * afl, int low, int high) { + int range = high - low + 1; + return (u32)(low + rand_below(afl, range)); +} + +// find the first element in array that is greater or equal target +int first_greater_element(double arr[], double target, int end) +{ + int lo = 0; + int hi = 
end; + while (lo < hi) { + int mid = lo + ((hi - lo) / 2); + if (target < arr[mid]) { + hi = mid; + } else { + lo = mid + 1; + } + } + return lo; +} + +static void mark_selected_inputs(afl_state_t * afl) { + double cumulative_sum[afl->queued_paths]; + double total_weight = 0.0; + struct queue_entry* queue_list[afl->queued_paths]; + + int idx = 0; + u32 i; + // generate weight for each input and sum into an array + for (i = 0; i < afl->queued_paths; i++) { + double w = 1.0; + if (afl->queue_buf[i]->favored) { + w *= 20.0; + } else if (!afl->queue_buf[i]->was_fuzzed) { + w *= 1.0; // based on the experiments, 1.0 outperforms 5.0 (which is the original probabilities to fuzz brand new inputs in AFL) + } + + afl->queue_buf[i]->is_selected = 0; // reset flag from previous cycle + total_weight += w; + queue_list[idx] = afl->queue_buf[i]; + cumulative_sum[idx] = total_weight; + idx++; + } + + int total_selected = 0; + while (total_selected < 64) { + // find random number and search this number in the array + double r = rand_double(afl) * total_weight; + int seed_idx = first_greater_element(cumulative_sum, r, afl->queued_paths); + if (queue_list[seed_idx]->is_selected) + break; + + queue_list[seed_idx]->is_selected = 1; + total_selected++; + } + +} + static void fasan_check_afl_preload(char *afl_preload) { char first_preload[PATH_MAX + 1] = {0}; @@ -1278,6 +1344,22 @@ int main(int argc, char **argv_orig, char **envp) { if (get_afl_env("AFL_NO_ARITH")) { afl->no_arith = 1; } if (get_afl_env("AFL_SHUFFLE_QUEUE")) { afl->shuffle_queue = 1; } if (get_afl_env("AFL_EXPAND_HAVOC_NOW")) { afl->expand_havoc = 1; } + + // AFL random params + if (getenv("AFL_DISABLE_WRS")) afl->disable_weighted_random_selection = 1; + if (getenv("AFL_DISABLE_RF")) afl->disable_random_favorites = 1; + if (getenv("AFL_ENABLE_UF")) afl->enable_uniformly_random_favorites = 1; + if (getenv("AFL_DISABLE_FAVS")) afl->disable_afl_default_favorites = 1; + if (getenv("AFL_DISABLE_RP")) 
afl->disable_randomized_fuzzing_params = 1; + + // initialize with default values if we don't want to randomize fuzzing params + if (afl->disable_randomized_fuzzing_params) { + reset_fuzzing_params(afl); + } + + if (getenv("AFL_RP_PROB")) { + afl->randomize_parameters_prob = strtoul(getenv("AFL_RP_PROB"), 0L, 10); + } if (afl->afl_env.afl_autoresume) { @@ -2005,6 +2087,9 @@ int main(int argc, char **argv_orig, char **envp) { } + if (!afl->disable_weighted_random_selection) + mark_selected_inputs(afl); + if (unlikely(afl->not_on_tty)) { ACTF("Entering queue cycle %llu.", afl->queue_cycle);