Way back in 2007 I posted a blog entry about testing ZFS/FUSE with Bonnie++ using random data rather than 0's (a filesystem that compresses data, as ZFS can, stores all-zero blocks almost for free, which inflates apparent write throughput), and I said:
"it's not ready for production use as it isn't controlled by a command line switch and relies on /dev/urandom existing. yes, I'm going to send the patch to Russell to look at"
I didn’t get any feedback on the patch, so I’ve decided to post it here in case people are interested.
diff -ur bonnie++-1.03a/bonnie++.cpp bonnie++-1.03a-urand/bonnie++.cpp
--- bonnie++-1.03a/bonnie++.cpp	2002-12-04 00:40:35.000000000 +1100
+++ bonnie++-1.03a-urand/bonnie++.cpp	2007-01-01 13:03:41.644378000 +1100
@@ -41,6 +41,9 @@
 #include <string.h>
 #include <sys/utsname.h>
 #include <signal.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
 
 #ifdef AIX_MEM_SIZE
 #include <cf.h>
@@ -148,6 +151,28 @@
   }
 }
 
+void load_random_data(char *temp_buffer,int length)
+{
+  int filedes, numbytes;
+
+  filedes=open("/dev/urandom",O_RDONLY);
+  if(filedes<0)
+  {
+    perror("Open of /dev/urandom failed, falling back to 0's");
+    memset(temp_buffer, 0, length);
+  }
+  else
+  {
+    numbytes=read(filedes,temp_buffer,length);
+    if(numbytes!=length)
+    {
+      perror("Read from /dev/urandom failed, falling back to 0's");
+      memset(temp_buffer, 0, length);
+    }
+    close(filedes);
+  }
+}
+
 int main(int argc, char *argv[])
 {
   int file_size = DefaultFileSize;
@@ -477,7 +502,8 @@
     return 1;
   globals.decrement_and_wait(FastWrite);
   if(!globals.quiet) fprintf(stderr, "Writing intelligently...");
-  memset(buf, 0, globals.chunk_size());
+  // memset(buf, 0, globals.chunk_size());
+  load_random_data(buf, globals.chunk_size());
   globals.timer.timestamp();
   bufindex = 0;
   // for the number of chunks of file data
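A couple of notes on the patch. It fills the buffer once, before globals.timer.timestamp() is called, so the read from /dev/urandom isn't included in the timed write loop (and every chunk written contains the same random data). Also, a single read() isn't guaranteed to return the full requested length, in which case the patch discards the partial data and falls back to 0's. Below is a minimal sketch (not part of the patch above) of how the routine could instead loop until the buffer is full; it includes unistd.h explicitly, where the patch presumably relies on bonnie++.cpp already pulling in the declarations for read() and close().

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

/* Fill temp_buffer with random bytes from /dev/urandom, looping on
 * short reads.  Falls back to 0's only if the device can't be opened
 * or a read actually fails (a failed or empty read is treated as an
 * error). */
void load_random_data(char *temp_buffer, int length)
{
  int filedes = open("/dev/urandom", O_RDONLY);
  if(filedes < 0)
  {
    perror("Open of /dev/urandom failed, falling back to 0's");
    memset(temp_buffer, 0, length);
    return;
  }
  int total = 0;
  while(total < length)
  {
    ssize_t numbytes = read(filedes, temp_buffer + total, length - total);
    if(numbytes <= 0)
    {
      perror("Read from /dev/urandom failed, falling back to 0's");
      memset(temp_buffer, 0, length);
      break;
    }
    total += numbytes;
  }
  close(filedes);
}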