diff --git a/backend.c b/backend.c
index fe03eab38..894f97f87 100644
--- a/backend.c
+++ b/backend.c
@@ -1000,6 +1000,7 @@ static void do_io(struct thread_data *td, uint64_t *bytes_done)
 
 	while ((td->o.read_iolog_file && !flist_empty(&td->io_log_list)) ||
 		(!flist_empty(&td->trim_list)) || !io_issue_bytes_exceeded(td) ||
+		time_to_ddir_sync(td) ||
 		td->o.time_based) {
 		struct timespec comp_time;
 		struct io_u *io_u;
@@ -1032,8 +1033,8 @@ static void do_io(struct thread_data *td, uint64_t *bytes_done)
 		 */
 		if (bytes_issued >= total_bytes &&
 		    !td->o.read_iolog_file &&
-		    (!td->o.time_based ||
-		     (td->o.time_based && td->o.verify != VERIFY_NONE)))
+		    ((!td->o.time_based && !time_to_ddir_sync(td)) ||
+		     (td->o.time_based && td->o.verify != VERIFY_NONE)))
 			break;
 
 		io_u = get_io_u(td);
diff --git a/fio.h b/fio.h
index 4bb6cfa7f..14163e2d2 100644
--- a/fio.h
+++ b/fio.h
@@ -628,7 +628,7 @@ static inline bool multi_range_trim(struct thread_data *td, struct io_u *io_u)
 	return false;
 }
 
-static inline bool should_fsync(struct thread_data *td)
+static inline bool should_fsync(const struct thread_data *td)
 {
 	if (ddir_sync(td->last_ddir_issued))
 		return false;
diff --git a/io_u.c b/io_u.c
index f81086b65..4c8d78d3a 100644
--- a/io_u.c
+++ b/io_u.c
@@ -742,19 +742,8 @@ static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
 	return ddir;
 }
 
-/*
- * Return the data direction for the next io_u. If the job is a
- * mixed read/write workload, check the rwmix cycle and switch if
- * necessary.
- */
-static enum fio_ddir get_rw_ddir(struct thread_data *td)
+static inline enum fio_ddir get_sync_ddir(const struct thread_data *td)
 {
-	enum fio_ddir ddir;
-
-	/*
-	 * See if it's time to fsync/fdatasync/sync_file_range first,
-	 * and if not then move on to check regular I/Os.
-	 */
 	if (should_fsync(td) && td->last_ddir_issued == DDIR_WRITE) {
 		if (td->o.fsync_blocks && td->io_issues[DDIR_WRITE] &&
 		    !(td->io_issues[DDIR_WRITE] % td->o.fsync_blocks))
@@ -768,6 +757,30 @@ static enum fio_ddir get_rw_ddir(struct thread_data *td)
 		    !(td->io_issues[DDIR_WRITE] % td->sync_file_range_nr))
 			return DDIR_SYNC_FILE_RANGE;
 	}
+	return DDIR_INVAL;
+}
+
+bool time_to_ddir_sync(const struct thread_data *td)
+{
+	return get_sync_ddir(td) != DDIR_INVAL;
+}
+
+/*
+ * Return the data direction for the next io_u. If the job is a
+ * mixed read/write workload, check the rwmix cycle and switch if
+ * necessary.
+ */
+static enum fio_ddir get_rw_ddir(struct thread_data *td)
+{
+	enum fio_ddir ddir;
+
+	/*
+	 * See if it's time to fsync/fdatasync/sync_file_range first,
+	 * and if not then move on to check regular I/Os.
+	 */
+	ddir = get_sync_ddir(td);
+	if (ddir != DDIR_INVAL)
+		return ddir;
 
 	if (td_rw(td)) {
 		/*
diff --git a/io_u.h b/io_u.h
index ab93d50f9..f77eb30cb 100644
--- a/io_u.h
+++ b/io_u.h
@@ -169,6 +169,8 @@ bool queue_full(const struct thread_data *);
 int do_io_u_sync(const struct thread_data *, struct io_u *);
 int do_io_u_trim(struct thread_data *, struct io_u *);
 
+bool time_to_ddir_sync(const struct thread_data *);
+
 #ifdef FIO_INC_DEBUG
 static inline void dprint_io_u(struct io_u *io_u, const char *p)
 {