// Copyright 2006 Google Inc. All Rights Reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

//      http://www.apache.org/licenses/LICENSE-2.0

// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// worker.cc : individual tasks that can be run in combination to
// stress the system.
#include <sys/select.h>
#include <sys/types.h>
#include <sys/times.h>

// These are necessary, but on by default
// #define __USE_LARGEFILE64
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/unistd.h>  // for gettid

// For size of block device
#include <sys/ioctl.h>

// For asynchronous I/O
#ifdef HAVE_LIBAIO_H
#include <libaio.h>
#endif

#include <sys/syscall.h>
// This file must work with autoconf on its public version,
// so these includes are correct.
#include "error_diag.h"  // NOLINT
#include "os.h"          // NOLINT
#include "pattern.h"     // NOLINT
#include "queue.h"       // NOLINT
#include "sat.h"         // NOLINT
#include "sattypes.h"    // NOLINT
#include "worker.h"      // NOLINT
// Why ubuntu, do you hate gettid so bad?
#if !defined(__NR_gettid)
#define __NR_gettid 224
#endif

#define gettid() syscall(__NR_gettid)
#if !defined(CPU_SETSIZE)
_syscall3(int, sched_getaffinity, pid_t, pid,
          unsigned int, len, cpu_set_t*, mask)
_syscall3(int, sched_setaffinity, pid_t, pid,
          unsigned int, len, cpu_set_t*, mask)
#endif
// Work around the sad fact that there are two (gnu, xsi) incompatible
// versions of strerror_r floating around google. Awesome.
bool sat_strerror(int err, char *buf, int len) {
  buf[0] = 0;
  char *errmsg = reinterpret_cast<char*>(strerror_r(err, buf, len));
  int retval = reinterpret_cast<int64>(errmsg);
  if (retval == 0)
    return true;
  if (retval == -1)
    return false;
  if (errmsg != buf) {
    strncpy(buf, errmsg, len);
    buf[len - 1] = 0;
  }
  return true;
}
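// Usage sketch: with either strerror_r flavor the message ends up in the
// caller's buffer, so callers can uniformly do
//   char buf[256];
//   sat_strerror(errno, buf, sizeof(buf));
// regardless of whether the GNU (returns char*) or XSI (returns int)
// variant was linked in.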
inline uint64 addr_to_tag(void *address) {
  return reinterpret_cast<uint64>(address);
}
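// Tagging scheme note: a word's tag is simply its own virtual address, so
// a miscompared tag directly identifies the location the stray data was
// originally written to (see ProcessTagError below).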
#if !defined(O_DIRECT)
// Sometimes this isn't available.
// Disregard if it's not defined.
#define O_DIRECT 0
#endif
// A struct to hold captured errors, for later reporting.
struct ErrorRecord {
  uint64 actual;      // This is the actual value read.
  uint64 reread;      // This is the actual value, reread.
  uint64 expected;    // This is what it should have been.
  uint64 *vaddr;      // This is where it was (or wasn't).
  char *vbyteaddr;    // This is byte specific where the data was (or wasn't).
  uint64 paddr;       // This is the bus address, if available.
  uint64 *tagvaddr;   // This holds the tag value if this data was tagged.
  uint64 tagpaddr;    // This holds the physical address corresponding to the tag.
};
// This is a helper function to create new threads with pthreads.
static void *ThreadSpawnerGeneric(void *ptr) {
  WorkerThread *worker = static_cast<WorkerThread*>(ptr);
  worker->StartRoutine();
  return NULL;
}
void WorkerStatus::Initialize() {
  sat_assert(0 == pthread_mutex_init(&num_workers_mutex_, NULL));
  sat_assert(0 == pthread_rwlock_init(&status_rwlock_, NULL));
#ifdef HAVE_PTHREAD_BARRIERS
  sat_assert(0 == pthread_barrier_init(&pause_barrier_, NULL,
                                       num_workers_ + 1));
#endif
}

void WorkerStatus::Destroy() {
  sat_assert(0 == pthread_mutex_destroy(&num_workers_mutex_));
  sat_assert(0 == pthread_rwlock_destroy(&status_rwlock_));
#ifdef HAVE_PTHREAD_BARRIERS
  sat_assert(0 == pthread_barrier_destroy(&pause_barrier_));
#endif
}
void WorkerStatus::PauseWorkers() {
  if (SetStatus(PAUSE) != PAUSE)
    WaitOnPauseBarrier();
}

void WorkerStatus::ResumeWorkers() {
  if (SetStatus(RUN) == PAUSE)
    WaitOnPauseBarrier();
}

void WorkerStatus::StopWorkers() {
  if (SetStatus(STOP) == PAUSE)
    WaitOnPauseBarrier();
}
bool WorkerStatus::ContinueRunning(bool *paused) {
  // This loop is an optimization. We use it to immediately re-check the status
  // after resuming from a pause, instead of returning and waiting for the next
  // call to this function.
  for (;;) {
    switch (GetStatus()) {
      case RUN:
        return true;
      case PAUSE:
        // Wait for the other workers to call this function so that
        // PauseWorkers() can return.
        WaitOnPauseBarrier();
        // Wait for ResumeWorkers() to be called.
        WaitOnPauseBarrier();
        // Indicate that a pause occurred.
        if (paused != NULL)
          *paused = true;
        break;
      case STOP:
        return false;
    }
  }
}
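// Note on the double barrier above: the first wait is a rendezvous telling
// PauseWorkers() that every worker has parked; the second holds the workers
// until ResumeWorkers() joins it. Reusing one barrier twice this way forms a
// classic two-phase checkpoint.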
bool WorkerStatus::ContinueRunningNoPause() {
  return (GetStatus() != STOP);
}
void WorkerStatus::RemoveSelf() {
  // Acquire a read lock on status_rwlock_ while (status_ != PAUSE).
  for (;;) {
    AcquireStatusReadLock();
    if (status_ != PAUSE)
      break;
    // We need to obey PauseWorkers() just like ContinueRunning() would, so that
    // the other threads won't wait on pause_barrier_ forever.
    ReleaseStatusLock();
    // Wait for the other workers to call this function so that PauseWorkers()
    // can return.
    WaitOnPauseBarrier();
    // Wait for ResumeWorkers() to be called.
    WaitOnPauseBarrier();
  }

  // This lock would be unnecessary if we held a write lock instead of a read
  // lock on status_rwlock_, but that would also force all threads calling
  // ContinueRunning() to wait on this one. Using a separate lock avoids that.
  AcquireNumWorkersLock();
  // Decrement num_workers_ and reinitialize pause_barrier_, which we know isn't
  // in use because (status != PAUSE).
#ifdef HAVE_PTHREAD_BARRIERS
  sat_assert(0 == pthread_barrier_destroy(&pause_barrier_));
  sat_assert(0 == pthread_barrier_init(&pause_barrier_, NULL, num_workers_));
#endif
  --num_workers_;
  ReleaseNumWorkersLock();

  // Release status_rwlock_.
  ReleaseStatusLock();
}
// Parent thread class.
WorkerThread::WorkerThread() {
  status_ = false;
  pages_copied_ = 0;
  errorcount_ = 0;
  runduration_usec_ = 1;
  priority_ = Normal;
  worker_status_ = NULL;
  thread_spawner_ = &ThreadSpawnerGeneric;
  tag_mode_ = false;
}

WorkerThread::~WorkerThread() {}
// Constructors. Just init some default values.
FillThread::FillThread() {
  num_pages_to_fill_ = 0;
}

// Initialize file name to empty.
FileThread::FileThread() {
  filename_ = "";
  devicename_ = "";
  pass_ = 0;
  page_io_ = true;
  crc_page_ = -1;
  local_page_ = NULL;
}

// If file thread used bounce buffer in memory, account for the extra
// copy for memory bandwidth calculation.
float FileThread::GetMemoryCopiedData() {
  if (!os_->normal_mem())
    return GetCopiedData();
  else
    return 0;
}

// Initialize target hostname to be invalid.
NetworkThread::NetworkThread() {
  snprintf(ipaddr_, sizeof(ipaddr_), "Unknown");
  sock_ = 0;
}

NetworkSlaveThread::NetworkSlaveThread() {
}

NetworkListenThread::NetworkListenThread() {
}
// Init member variables.
void WorkerThread::InitThread(int thread_num_init,
                              class Sat *sat_init,
                              class OsLayer *os_init,
                              class PatternList *patternlist_init,
                              WorkerStatus *worker_status) {
  sat_assert(worker_status);
  worker_status->AddWorkers(1);

  thread_num_ = thread_num_init;
  sat_ = sat_init;
  os_ = os_init;
  patternlist_ = patternlist_init;
  worker_status_ = worker_status;

  AvailableCpus(&cpu_mask_);

  tag_mode_ = sat_->tag_mode();
}
// Use pthreads to prioritize a system thread.
bool WorkerThread::InitPriority() {
  // This doesn't affect performance that much, and may not be too safe.

  bool ret = BindToCpus(&cpu_mask_);
  if (!ret)
    logprintf(11, "Log: Bind to %s failed.\n",
              cpuset_format(&cpu_mask_).c_str());

  logprintf(11, "Log: Thread %d running on core ID %d mask %s (%s).\n",
            thread_num_, sched_getcpu(),
            CurrentCpusFormat().c_str(),
            cpuset_format(&cpu_mask_).c_str());
  if (priority_ == High) {
    sched_param param;
    param.sched_priority = 1;
    // Set the priority; others are unchanged.
    logprintf(0, "Log: Changing priority to SCHED_FIFO %d\n",
              param.sched_priority);
    if (sched_setscheduler(0, SCHED_FIFO, &param)) {
      char buf[256];
      sat_strerror(errno, buf, sizeof(buf));
      logprintf(0, "Process Error: sched_setscheduler "
                   "failed - error %d %s\n",
                errno, buf);
    }
  }
  return true;
}
// Use pthreads to create a system thread.
int WorkerThread::SpawnThread() {
  // Create the new thread.
  int result = pthread_create(&thread_, NULL, thread_spawner_, this);
  if (result) {
    char buf[256];
    sat_strerror(result, buf, sizeof(buf));
    logprintf(0, "Process Error: pthread_create "
                 "failed - error %d %s\n", result,
              buf);
    status_ = false;
    return false;
  }

  // 0 is pthreads success.
  return true;
}
// Kill the worker thread with SIGINT.
bool WorkerThread::KillThread() {
  return (pthread_kill(thread_, SIGINT) == 0);
}

// Block until thread has exited.
bool WorkerThread::JoinThread() {
  int result = pthread_join(thread_, NULL);

  if (result) {
    logprintf(0, "Process Error: pthread_join failed - error %d\n", result);
    status_ = false;
  }

  // 0 is pthreads success.
  return (!result);
}
void WorkerThread::StartRoutine() {
  InitPriority();
  StartThreadTimer();
  Work();
  StopThreadTimer();
  worker_status_->RemoveSelf();
}

// Thread work loop. Execute until marked finished.
bool WorkerThread::Work() {
  do {
    logprintf(9, "Log: ...\n");
    // Sleep for 1 second.
    sat_sleep(1);
  } while (IsReadyToRun());

  return false;
}
// Returns CPU mask of CPUs available to this process,
// Conceptually, each bit represents a logical CPU, ie:
//   mask = 3  (11b):   cpu0, 1
//   mask = 13 (1101b): cpu0, 2, 3
bool WorkerThread::AvailableCpus(cpu_set_t *cpuset) {
  CPU_ZERO(cpuset);
#ifdef HAVE_SCHED_GETAFFINITY
  return sched_getaffinity(getppid(), sizeof(*cpuset), cpuset) == 0;
#else
  return false;
#endif
}

// Returns CPU mask of CPUs this thread is bound to,
// Conceptually, each bit represents a logical CPU, ie:
//   mask = 3  (11b):   cpu0, 1
//   mask = 13 (1101b): cpu0, 2, 3
bool WorkerThread::CurrentCpus(cpu_set_t *cpuset) {
  CPU_ZERO(cpuset);
#ifdef HAVE_SCHED_GETAFFINITY
  return sched_getaffinity(0, sizeof(*cpuset), cpuset) == 0;
#else
  return false;
#endif
}
// Bind worker thread to specified CPU(s)
//   Args:
//     thread_mask: cpu_set_t representing CPUs, ie
//       mask = 1  (01b):   cpu0
//       mask = 3  (11b):   cpu0, 1
//       mask = 13 (1101b): cpu0, 2, 3
//
//   Returns true on success, false otherwise.
bool WorkerThread::BindToCpus(const cpu_set_t *thread_mask) {
  cpu_set_t process_mask;
  AvailableCpus(&process_mask);
  if (cpuset_isequal(thread_mask, &process_mask))
    return true;

  logprintf(11, "Log: available CPU mask - %s\n",
            cpuset_format(&process_mask).c_str());
  if (!cpuset_issubset(thread_mask, &process_mask)) {
    // Invalid cpu_mask, ie cpu not allocated to this process or doesn't exist.
    logprintf(0, "Log: requested CPUs %s not a subset of available %s\n",
              cpuset_format(thread_mask).c_str(),
              cpuset_format(&process_mask).c_str());
    return false;
  }
#ifdef HAVE_SCHED_GETAFFINITY
  return (sched_setaffinity(gettid(), sizeof(*thread_mask), thread_mask) == 0);
#else
  return false;
#endif
}
// A worker thread can yield itself to give up CPU until it's scheduled again.
// Returns true on success, false on error.
bool WorkerThread::YieldSelf() {
  return (sched_yield() == 0);
}
// Fill this page with its pattern.
bool WorkerThread::FillPage(struct page_entry *pe) {
  // Error check arguments.
  if (pe == 0) {
    logprintf(0, "Process Error: Fill Page entry null\n");
    return 0;
  }

  // Mask is the bitmask of indexes used by the pattern.
  // It is the pattern size -1. Size is always a power of 2.
  uint64 *memwords = static_cast<uint64*>(pe->addr);
  int length = sat_->page_length();

  if (tag_mode_) {
    // Select tag or data as appropriate.
    for (int i = 0; i < length / wordsize_; i++) {
      datacast_t data;

      if ((i & 0x7) == 0) {
        data.l64 = addr_to_tag(&memwords[i]);
      } else {
        data.l32.l = pe->pattern->pattern(i << 1);
        data.l32.h = pe->pattern->pattern((i << 1) + 1);
      }
      memwords[i] = data.l64;
    }
  } else {
    // Just fill in untagged data directly.
    for (int i = 0; i < length / wordsize_; i++) {
      datacast_t data;

      data.l32.l = pe->pattern->pattern(i << 1);
      data.l32.h = pe->pattern->pattern((i << 1) + 1);
      memwords[i] = data.l64;
    }
  }

  return 1;
}
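// Pattern indexing note: each 64-bit word consumes two consecutive 32-bit
// pattern entries, the low half at index (i << 1) and the high half at
// (i << 1) + 1, so a given word always maps to the same pattern pair.
// E.g. word 3 is built from pattern(6) and pattern(7).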
// Tell the thread how many pages to fill.
void FillThread::SetFillPages(int64 num_pages_to_fill_init) {
  num_pages_to_fill_ = num_pages_to_fill_init;
}

// Fill this page with a random pattern.
bool FillThread::FillPageRandom(struct page_entry *pe) {
  // Error check arguments.
  if (pe == 0) {
    logprintf(0, "Process Error: Fill Page entry null\n");
    return 0;
  }
  if ((patternlist_ == 0) || (patternlist_->Size() == 0)) {
    logprintf(0, "Process Error: No data patterns available\n");
    return 0;
  }

  // Choose a random pattern for this block.
  pe->pattern = patternlist_->GetRandomPattern();
  if (pe->pattern == 0) {
    logprintf(0, "Process Error: Null data pattern\n");
    return 0;
  }

  // Actually fill the page.
  return FillPage(pe);
}
// Memory fill work loop. Execute until allotted pages are filled.
bool FillThread::Work() {
  bool result = true;

  logprintf(9, "Log: Starting fill thread %d\n", thread_num_);

  // We want to fill num_pages_to_fill pages, and
  // stop when we've filled that many.
  // We also want to capture early break
  struct page_entry pe;
  int64 loops = 0;
  while (IsReadyToRun() && (loops < num_pages_to_fill_)) {
    result = result && sat_->GetEmpty(&pe);
    if (!result) {
      logprintf(0, "Process Error: fill_thread failed to pop pages, "
                "bailing\n");
      break;
    }

    // Fill the page with pattern
    result = result && FillPageRandom(&pe);
    if (!result) break;

    // Put the page back on the queue.
    result = result && sat_->PutValid(&pe);
    if (!result) {
      logprintf(0, "Process Error: fill_thread failed to push pages, "
                "bailing\n");
      break;
    }
    loops++;
  }

  // Fill in thread status.
  pages_copied_ = loops;
  status_ = result;
  logprintf(9, "Log: Completed %d: Fill thread. Status %d, %d pages filled\n",
            thread_num_, status_, pages_copied_);
  return result;
}
// Print error information about a data miscompare.
void WorkerThread::ProcessError(struct ErrorRecord *error,
                                int priority,
                                const char *message) {
  char dimm_string[256] = "";

  int core_id = sched_getcpu();

  // Determine if this is a write or read error.
  os_->Flush(error->vaddr);
  error->reread = *(error->vaddr);

  char *good = reinterpret_cast<char*>(&(error->expected));
  char *bad = reinterpret_cast<char*>(&(error->actual));

  sat_assert(error->expected != error->actual);
  unsigned int offset = 0;
  for (offset = 0; offset < (sizeof(error->expected) - 1); offset++) {
    if (good[offset] != bad[offset])
      break;
  }

  error->vbyteaddr = reinterpret_cast<char*>(error->vaddr) + offset;

  // Find physical address if possible.
  error->paddr = os_->VirtualToPhysical(error->vbyteaddr);

  // Pretty print DIMM mapping if available.
  os_->FindDimm(error->paddr, dimm_string, sizeof(dimm_string));

  // Report parseable error.
  if (priority < 10) {
    // Run miscompare error through diagnoser for logging and reporting.
    os_->error_diagnoser_->AddMiscompareError(dimm_string,
                                              reinterpret_cast<uint64>
                                              (error->vaddr), 1);

    logprintf(priority,
              "%s: miscompare on CPU %d(0x%s) at %p(0x%llx:%s): "
              "read:0x%016llx, reread:0x%016llx expected:0x%016llx\n",
              message,
              core_id,
              CurrentCpusFormat().c_str(),
              error->vaddr,
              error->paddr,
              dimm_string,
              error->actual,
              error->reread,
              error->expected);
  }

  // Overwrite incorrect data with correct data to prevent
  // future miscompares when this data is reused.
  *(error->vaddr) = error->expected;
  os_->Flush(error->vaddr);
}
// Print error information about a data miscompare.
void FileThread::ProcessError(struct ErrorRecord *error,
                              int priority,
                              const char *message) {
  char dimm_string[256] = "";

  // Determine if this is a write or read error.
  os_->Flush(error->vaddr);
  error->reread = *(error->vaddr);

  char *good = reinterpret_cast<char*>(&(error->expected));
  char *bad = reinterpret_cast<char*>(&(error->actual));

  sat_assert(error->expected != error->actual);
  unsigned int offset = 0;
  for (offset = 0; offset < (sizeof(error->expected) - 1); offset++) {
    if (good[offset] != bad[offset])
      break;
  }

  error->vbyteaddr = reinterpret_cast<char*>(error->vaddr) + offset;

  // Find physical address if possible.
  error->paddr = os_->VirtualToPhysical(error->vbyteaddr);

  // Pretty print DIMM mapping if available.
  os_->FindDimm(error->paddr, dimm_string, sizeof(dimm_string));

  // If crc_page_ is valid, ie checking content read back from file,
  // track src/dst memory addresses. Otherwise categorize as general
  // memory miscompare for CRC checking everywhere else.
  if (crc_page_ != -1) {
    int miscompare_byteoffset = static_cast<char*>(error->vbyteaddr) -
                                static_cast<char*>(page_recs_[crc_page_].dst);
    os_->error_diagnoser_->AddHDDMiscompareError(devicename_,
                                                 crc_page_,
                                                 miscompare_byteoffset,
                                                 page_recs_[crc_page_].src,
                                                 page_recs_[crc_page_].dst);
  } else {
    os_->error_diagnoser_->AddMiscompareError(dimm_string,
                                              reinterpret_cast<uint64>
                                              (error->vaddr), 1);
  }

  logprintf(priority,
            "%s: miscompare on %s at %p(0x%llx:%s): read:0x%016llx, "
            "reread:0x%016llx expected:0x%016llx\n",
            message,
            devicename_.c_str(),
            error->vaddr,
            error->paddr,
            dimm_string,
            error->actual,
            error->reread,
            error->expected);

  // Overwrite incorrect data with correct data to prevent
  // future miscompares when this data is reused.
  *(error->vaddr) = error->expected;
  os_->Flush(error->vaddr);
}
// Do a word by word result check of a region.
// Print errors on mismatches.
int WorkerThread::CheckRegion(void *addr,
                              class Pattern *pattern,
                              int64 length,
                              int offset,
                              int64 pattern_offset) {
  uint64 *memblock = static_cast<uint64*>(addr);
  const int kErrorLimit = 128;
  int errors = 0;
  int overflowerrors = 0;  // Count of overflowed errors.
  bool page_error = false;
  string errormessage("Hardware Error");
  struct ErrorRecord
    recorded[kErrorLimit];  // Queued errors for later printing.

  // For each word in the data region.
  for (int i = 0; i < length / wordsize_; i++) {
    uint64 actual = memblock[i];
    uint64 expected;

    // Determine the value that should be there.
    datacast_t data;
    int index = 2 * i + pattern_offset;
    data.l32.l = pattern->pattern(index);
    data.l32.h = pattern->pattern(index + 1);
    expected = data.l64;
    // Check tags if necessary.
    if (tag_mode_ && ((reinterpret_cast<uint64>(&memblock[i]) & 0x3f) == 0)) {
      expected = addr_to_tag(&memblock[i]);
    }

    // If the value is incorrect, save an error record for later printing.
    if (actual != expected) {
      if (errors < kErrorLimit) {
        recorded[errors].actual = actual;
        recorded[errors].expected = expected;
        recorded[errors].vaddr = &memblock[i];
        errors++;
      } else {
        page_error = true;
        // If we have overflowed the error queue, just print the errors now.
        logprintf(10, "Log: Error record overflow, too many miscompares!\n");
        errormessage = "Page Error";
        break;
      }
    }
  }
  // Find if this is a whole block corruption.
  if (page_error && !tag_mode_) {
    int patsize = patternlist_->Size();
    for (int pat = 0; pat < patsize; pat++) {
      class Pattern *altpattern = patternlist_->GetPattern(pat);
      const int kGood = 0;
      const int kBad = 1;
      const int kGoodAgain = 2;
      const int kNoMatch = 3;
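      // State machine sketch: kGood means the block so far matches the
      // expected pattern, kBad means we are inside a run that matches
      // altpattern instead, and kGoodAgain means that run ended and the
      // tail matches the expected pattern again. A word matching neither
      // pattern drops to kNoMatch and rules out this altpattern.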
      int state = kGood;
      unsigned int badstart = 0;
      unsigned int badend = 0;

      // Don't match against ourself!
      if (pattern == altpattern)
        continue;

      for (int i = 0; i < length / wordsize_; i++) {
        uint64 actual = memblock[i];
        datacast_t expected;
        datacast_t possible;

        // Determine the value that should be there.
        int index = 2 * i + pattern_offset;

        expected.l32.l = pattern->pattern(index);
        expected.l32.h = pattern->pattern(index + 1);

        possible.l32.l = altpattern->pattern(index);
        possible.l32.h = altpattern->pattern(index + 1);

        if (state == kGood) {
          if (actual == expected.l64) {
            continue;
          } else if (actual == possible.l64) {
            badstart = i;
            badend = i;
            state = kBad;
            continue;
          } else {
            state = kNoMatch;
            break;
          }
        } else if (state == kBad) {
          if (actual == possible.l64) {
            badend = i;
            continue;
          } else if (actual == expected.l64) {
            state = kGoodAgain;
            continue;
          } else {
            state = kNoMatch;
            break;
          }
        } else if (state == kGoodAgain) {
          if (actual == expected.l64) {
            continue;
          } else {
            state = kNoMatch;
            break;
          }
        }
      }

      if ((state == kGoodAgain) || (state == kBad)) {
        unsigned int blockerrors = badend - badstart + 1;
        errormessage = "Block Error";
        // It's okay for the 1st entry to be corrected multiple times,
        // it will simply be reported twice. Once here and once below
        // when processing the error queue.
        ProcessError(&recorded[0], 0, errormessage.c_str());
        logprintf(0, "Block Error: (%p) pattern %s instead of %s, "
                  "%d bytes from offset 0x%x to 0x%x\n",
                  &memblock[badstart],
                  altpattern->name(), pattern->name(),
                  blockerrors * wordsize_,
                  offset + badstart * wordsize_,
                  offset + badend * wordsize_);
      }
    }
  }
  // Process error queue after all errors have been recorded.
  for (int err = 0; err < errors; err++) {
    int priority = 5;
    if (errorcount_ + err < 30)
      priority = 0;  // Bump up the priority for the first few errors.
    ProcessError(&recorded[err], priority, errormessage.c_str());
  }
  if (page_error) {
    // For each word in the data region.
    for (int i = 0; i < length / wordsize_; i++) {
      uint64 actual = memblock[i];
      uint64 expected;
      datacast_t data;
      // Determine the value that should be there.
      int index = 2 * i + pattern_offset;
      data.l32.l = pattern->pattern(index);
      data.l32.h = pattern->pattern(index + 1);
      expected = data.l64;

      // Check tags if necessary.
      if (tag_mode_ && ((reinterpret_cast<uint64>(&memblock[i]) & 0x3f) == 0)) {
        expected = addr_to_tag(&memblock[i]);
      }

      // If the value is incorrect, save an error record for later printing.
      if (actual != expected) {
        // If we have overflowed the error queue, print the errors now.
        struct ErrorRecord er;
        er.actual = actual;
        er.expected = expected;
        er.vaddr = &memblock[i];

        // Do the error printout. This will take a long time and
        // likely change the machine state.
        ProcessError(&er, 12, errormessage.c_str());
        overflowerrors++;
      }
    }
  }

  // Keep track of observed errors.
  errorcount_ += errors + overflowerrors;
  return errors + overflowerrors;
}
float WorkerThread::GetCopiedData() {
  return pages_copied_ * sat_->page_length() / kMegabyte;
}
// Calculate the CRC of a region.
// Result check if the CRC mismatches.
int WorkerThread::CrcCheckPage(struct page_entry *srcpe) {
  const int blocksize = 4096;
  const int blockwords = blocksize / wordsize_;
  int errors = 0;

  const AdlerChecksum *expectedcrc = srcpe->pattern->crc();
  uint64 *memblock = static_cast<uint64*>(srcpe->addr);
  int blocks = sat_->page_length() / blocksize;
  for (int currentblock = 0; currentblock < blocks; currentblock++) {
    uint64 *memslice = memblock + currentblock * blockwords;

    AdlerChecksum crc;
    if (tag_mode_) {
      AdlerAddrCrcC(memslice, blocksize, &crc, srcpe);
    } else {
      CalculateAdlerChecksum(memslice, blocksize, &crc);
    }

    // If the CRC does not match, we'd better look closer.
    if (!crc.Equals(*expectedcrc)) {
      logprintf(11, "Log: CrcCheckPage Falling through to slow compare, "
                "CRC mismatch %s != %s\n",
                crc.ToHexString().c_str(),
                expectedcrc->ToHexString().c_str());
      int errorcount = CheckRegion(memslice,
                                   srcpe->pattern,
                                   blocksize,
                                   currentblock * blocksize, 0);
      if (errorcount == 0) {
        logprintf(0, "Log: CrcCheckPage CRC mismatch %s != %s, "
                  "but no miscompares found.\n",
                  crc.ToHexString().c_str(),
                  expectedcrc->ToHexString().c_str());
      }
      errors += errorcount;
    }
  }

  // For odd length transfers, we should never hit this.
  int leftovers = sat_->page_length() % blocksize;
  if (leftovers) {
    uint64 *memslice = memblock + blocks * blockwords;
    errors += CheckRegion(memslice,
                          srcpe->pattern,
                          leftovers,
                          blocks * blocksize, 0);
  }
  return errors;
}
// Print error information about a data miscompare.
void WorkerThread::ProcessTagError(struct ErrorRecord *error,
                                   int priority,
                                   const char *message) {
  char dimm_string[256] = "";
  char tag_dimm_string[256] = "";
  bool read_error = false;

  int core_id = sched_getcpu();

  // Determine if this is a write or read error.
  os_->Flush(error->vaddr);
  error->reread = *(error->vaddr);

  // Distinguish read and write errors.
  if (error->actual != error->reread) {
    read_error = true;
  }

  sat_assert(error->expected != error->actual);

  error->vbyteaddr = reinterpret_cast<char*>(error->vaddr);

  // Find physical address if possible.
  error->paddr = os_->VirtualToPhysical(error->vbyteaddr);
  error->tagpaddr = os_->VirtualToPhysical(error->tagvaddr);

  // Pretty print DIMM mapping if available.
  os_->FindDimm(error->paddr, dimm_string, sizeof(dimm_string));
  // Pretty print DIMM mapping if available.
  os_->FindDimm(error->tagpaddr, tag_dimm_string, sizeof(tag_dimm_string));

  // Report parseable error.
  if (priority < 10) {
    logprintf(priority,
              "%s: Tag from %p(0x%llx:%s) (%s) "
              "miscompare on CPU %d(0x%s) at %p(0x%llx:%s): "
              "read:0x%016llx, reread:0x%016llx expected:0x%016llx\n",
              message,
              error->tagvaddr, error->tagpaddr,
              tag_dimm_string,
              read_error ? "read error" : "write error",
              core_id,
              CurrentCpusFormat().c_str(),
              error->vaddr,
              error->paddr,
              dimm_string,
              error->actual,
              error->reread,
              error->expected);
  }

  errorcount_ += 1;

  // Overwrite incorrect data with correct data to prevent
  // future miscompares when this data is reused.
  *(error->vaddr) = error->expected;
  os_->Flush(error->vaddr);
}
// Print out and log a tag error.
bool WorkerThread::ReportTagError(uint64 *tagvaddr,
                                  uint64 actual,
                                  uint64 tag) {
  struct ErrorRecord er;
  er.actual = actual;
  er.expected = tag;
  er.vaddr = tagvaddr;

  // Generate vaddr from tag.
  er.tagvaddr = reinterpret_cast<uint64*>(actual);

  ProcessTagError(&er, 0, "Hardware Error");
  return true;
}
// C implementation of Adler memory copy, with memory tagging.
bool WorkerThread::AdlerAddrMemcpyC(uint64 *dstmem64,
                                    uint64 *srcmem64,
                                    unsigned int size_in_bytes,
                                    AdlerChecksum *checksum,
                                    struct page_entry *pe) {
  // Use this data wrapper to access memory with 64bit read/write.
  datacast_t data;
  datacast_t dstdata;
  unsigned int count = size_in_bytes / sizeof(data);

  if (count > ((1U) << 19)) {
    // Size is too large, must be strictly less than 512 KB.
    return false;
  }

  uint64 a1 = 1;
  uint64 a2 = 1;
  uint64 b1 = 0;
  uint64 b2 = 0;

  class Pattern *pattern = pe->pattern;

  unsigned int i = 0;
  while (i < count) {
    // Process 64 bits at a time.
    if ((i & 0x7) == 0) {
      data.l64 = srcmem64[i];
      dstdata.l64 = dstmem64[i];
      uint64 src_tag = addr_to_tag(&srcmem64[i]);
      uint64 dst_tag = addr_to_tag(&dstmem64[i]);
      // Detect if tags have been corrupted.
      if (data.l64 != src_tag)
        ReportTagError(&srcmem64[i], data.l64, src_tag);
      if (dstdata.l64 != dst_tag)
        ReportTagError(&dstmem64[i], dstdata.l64, dst_tag);

      data.l32.l = pattern->pattern(i << 1);
      data.l32.h = pattern->pattern((i << 1) + 1);
      a1 = a1 + data.l32.l;
      b1 = b1 + a1;
      a1 = a1 + data.l32.h;
      b1 = b1 + a1;

      data.l64 = dst_tag;
      dstmem64[i] = data.l64;
    } else {
      data.l64 = srcmem64[i];
      a1 = a1 + data.l32.l;
      b1 = b1 + a1;
      a1 = a1 + data.l32.h;
      b1 = b1 + a1;
      dstmem64[i] = data.l64;
    }
    i++;

    data.l64 = srcmem64[i];
    a2 = a2 + data.l32.l;
    b2 = b2 + a2;
    a2 = a2 + data.l32.h;
    b2 = b2 + a2;
    dstmem64[i] = data.l64;
    i++;
  }
  checksum->Set(a1, a2, b1, b2);
  return true;
}
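// Checksum bookkeeping note: a1/b1 accumulate over even-indexed words and
// a2/b2 over odd-indexed ones, giving two interleaved Adler-style sums
// (a is the running value sum, b the running sum of a). Two streams let
// implementations process two words per iteration, and AdlerChecksum
// compares all four partial sums.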
// x86_64 SSE2 assembly implementation of Adler memory copy, with address
// tagging added as a second step. This is useful for debugging failures
// that only occur when SSE / nontemporal writes are used.
bool WorkerThread::AdlerAddrMemcpyWarm(uint64 *dstmem64,
                                       uint64 *srcmem64,
                                       unsigned int size_in_bytes,
                                       AdlerChecksum *checksum,
                                       struct page_entry *pe) {
  // Do ASM copy, ignore checksum.
  AdlerChecksum ignored_checksum;
  os_->AdlerMemcpyWarm(dstmem64, srcmem64, size_in_bytes, &ignored_checksum);

  // Force cache flush of both the source and destination addresses.
  //  length - length of block to flush in cachelines.
  //  mem_increment - number of dstmem/srcmem values per cacheline.
  int length = size_in_bytes / kCacheLineSize;
  int mem_increment = kCacheLineSize / sizeof(*dstmem64);
  OsLayer::FastFlushSync();
  for (int i = 0; i < length; ++i) {
    OsLayer::FastFlushHint(dstmem64 + (i * mem_increment));
    OsLayer::FastFlushHint(srcmem64 + (i * mem_increment));
  }
  OsLayer::FastFlushSync();
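  // Flush rationale (assuming the semantics the names suggest): FastFlushHint
  // issues an unfenced cache-line flush and FastFlushSync provides the
  // ordering fence, so one fence before and one after the loop is cheaper
  // than a fenced flush per line while still ensuring the recheck below
  // reads from memory rather than cache.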
  // Check results.
  AdlerAddrCrcC(srcmem64, size_in_bytes, checksum, pe);
  // Patch up address tags.
  TagAddrC(dstmem64, size_in_bytes);
  return true;
}
bool WorkerThread::TagAddrC(uint64 *memwords,
                            unsigned int size_in_bytes) {
  // Mask is the bitmask of indexes used by the pattern.
  // It is the pattern size -1. Size is always a power of 2.

  // Select tag or data as appropriate.
  int length = size_in_bytes / wordsize_;
  for (int i = 0; i < length; i += 8) {
    datacast_t data;
    data.l64 = addr_to_tag(&memwords[i]);
    memwords[i] = data.l64;
  }
  return true;
}
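// Tag cadence note: stepping i by 8 words of 8 bytes touches one word per
// 64-byte cacheline, so exactly the first word of each cacheline carries
// its address tag; FillPage and the Adler copy/CRC routines rely on the
// same (i & 0x7) == 0 convention.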
// C implementation of Adler memory crc.
bool WorkerThread::AdlerAddrCrcC(uint64 *srcmem64,
                                 unsigned int size_in_bytes,
                                 AdlerChecksum *checksum,
                                 struct page_entry *pe) {
  // Use this data wrapper to access memory with 64bit read/write.
  datacast_t data;
  unsigned int count = size_in_bytes / sizeof(data);

  if (count > ((1U) << 19)) {
    // Size is too large, must be strictly less than 512 KB.
    return false;
  }

  uint64 a1 = 1;
  uint64 a2 = 1;
  uint64 b1 = 0;
  uint64 b2 = 0;

  class Pattern *pattern = pe->pattern;

  unsigned int i = 0;
  while (i < count) {
    // Process 64 bits at a time.
    if ((i & 0x7) == 0) {
      data.l64 = srcmem64[i];
      uint64 src_tag = addr_to_tag(&srcmem64[i]);
      // Check that tags match expected.
      if (data.l64 != src_tag)
        ReportTagError(&srcmem64[i], data.l64, src_tag);

      data.l32.l = pattern->pattern(i << 1);
      data.l32.h = pattern->pattern((i << 1) + 1);
      a1 = a1 + data.l32.l;
      b1 = b1 + a1;
      a1 = a1 + data.l32.h;
      b1 = b1 + a1;
    } else {
      data.l64 = srcmem64[i];
      a1 = a1 + data.l32.l;
      b1 = b1 + a1;
      a1 = a1 + data.l32.h;
      b1 = b1 + a1;
    }
    i++;

    data.l64 = srcmem64[i];
    a2 = a2 + data.l32.l;
    b2 = b2 + a2;
    a2 = a2 + data.l32.h;
    b2 = b2 + a2;
    i++;
  }
  checksum->Set(a1, a2, b1, b2);
  return true;
}
// Copy a block of memory quickly, while keeping a CRC of the data.
// Result check if the CRC mismatches.
int WorkerThread::CrcCopyPage(struct page_entry *dstpe,
                              struct page_entry *srcpe) {
  int errors = 0;
  const int blocksize = 4096;
  const int blockwords = blocksize / wordsize_;
  int blocks = sat_->page_length() / blocksize;

  // Base addresses for memory copy
  uint64 *targetmembase = static_cast<uint64*>(dstpe->addr);
  uint64 *sourcemembase = static_cast<uint64*>(srcpe->addr);
  // Remember the expected CRC
  const AdlerChecksum *expectedcrc = srcpe->pattern->crc();

  for (int currentblock = 0; currentblock < blocks; currentblock++) {
    uint64 *targetmem = targetmembase + currentblock * blockwords;
    uint64 *sourcemem = sourcemembase + currentblock * blockwords;

    AdlerChecksum crc;
    if (tag_mode_) {
      AdlerAddrMemcpyC(targetmem, sourcemem, blocksize, &crc, srcpe);
    } else {
      AdlerMemcpyC(targetmem, sourcemem, blocksize, &crc);
    }

    // Investigate miscompares.
    if (!crc.Equals(*expectedcrc)) {
      logprintf(11, "Log: CrcCopyPage Falling through to slow compare, "
                "CRC mismatch %s != %s\n", crc.ToHexString().c_str(),
                expectedcrc->ToHexString().c_str());
      int errorcount = CheckRegion(sourcemem,
                                   srcpe->pattern,
                                   blocksize,
                                   currentblock * blocksize, 0);
      if (errorcount == 0) {
        logprintf(0, "Log: CrcCopyPage CRC mismatch %s != %s, "
                  "but no miscompares found. Retrying with fresh data.\n",
                  crc.ToHexString().c_str(),
                  expectedcrc->ToHexString().c_str());
        if (!tag_mode_) {
          // Copy the data originally read from this region back again.
          // This data should contain any corruption seen when the CRC
          // was first calculated.
          memcpy(sourcemem, targetmem, blocksize);
          errorcount = CheckRegion(sourcemem,
                                   srcpe->pattern,
                                   blocksize,
                                   currentblock * blocksize, 0);
          if (errorcount == 0) {
            int core_id = sched_getcpu();
            logprintf(0, "Process Error: CPU %d(0x%s) CrcCopyPage "
                      "CRC mismatch %s != %s, "
                      "but no miscompares found on second pass.\n",
                      core_id, CurrentCpusFormat().c_str(),
                      crc.ToHexString().c_str(),
                      expectedcrc->ToHexString().c_str());
            struct ErrorRecord er;
            er.actual = sourcemem[0];
            er.expected = 0xbad00000ull << 32;
            er.vaddr = sourcemem;
            ProcessError(&er, 0, "Hardware Error");
            errors++;
          }
        }
      }
      errors += errorcount;
    }
  }

  // For odd length transfers, we should never hit this.
  int leftovers = sat_->page_length() % blocksize;
  if (leftovers) {
    uint64 *targetmem = targetmembase + blocks * blockwords;
    uint64 *sourcemem = sourcemembase + blocks * blockwords;

    errors += CheckRegion(sourcemem,
                          srcpe->pattern,
                          leftovers,
                          blocks * blocksize, 0);
    int leftoverwords = leftovers / wordsize_;
    for (int i = 0; i < leftoverwords; i++) {
      targetmem[i] = sourcemem[i];
    }
  }

  // Update pattern reference to reflect new contents.
  dstpe->pattern = srcpe->pattern;

  // Clean clean clean the errors away.
  if (errors) {
    // TODO(nsanders): Maybe we should patch rather than fill? Filling may
    // cause bad data to be propagated across the page.
    FillPage(dstpe);
  }
  return errors;
}
// Invert a block of memory quickly, traversing downwards.
int InvertThread::InvertPageDown(struct page_entry *srcpe) {
  const int blocksize = 4096;
  const int blockwords = blocksize / wordsize_;
  int blocks = sat_->page_length() / blocksize;

  // Base addresses for memory copy
  unsigned int *sourcemembase = static_cast<unsigned int *>(srcpe->addr);

  for (int currentblock = blocks - 1; currentblock >= 0; currentblock--) {
    unsigned int *sourcemem = sourcemembase + currentblock * blockwords;
    for (int i = blockwords - 32; i >= 0; i -= 32) {
      for (int index = i + 31; index >= i; --index) {
        unsigned int actual = sourcemem[index];
        sourcemem[index] = ~actual;
      }
      OsLayer::FastFlush(&sourcemem[i]);
    }
  }
  return 0;
}

// Invert a block of memory, traversing upwards.
int InvertThread::InvertPageUp(struct page_entry *srcpe) {
  const int blocksize = 4096;
  const int blockwords = blocksize / wordsize_;
  int blocks = sat_->page_length() / blocksize;

  // Base addresses for memory copy
  unsigned int *sourcemembase = static_cast<unsigned int *>(srcpe->addr);

  for (int currentblock = 0; currentblock < blocks; currentblock++) {
    unsigned int *sourcemem = sourcemembase + currentblock * blockwords;
    for (int i = 0; i < blockwords; i += 32) {
      for (int index = i; index <= i + 31; ++index) {
        unsigned int actual = sourcemem[index];
        sourcemem[index] = ~actual;
      }
      OsLayer::FastFlush(&sourcemem[i]);
    }
  }
  return 0;
}
// Copy a block of memory quickly, while keeping a CRC of the data.
// Result check if the CRC mismatches. Warm the CPU while running.
int WorkerThread::CrcWarmCopyPage(struct page_entry *dstpe,
                                  struct page_entry *srcpe) {
  int errors = 0;
  const int blocksize = 4096;
  const int blockwords = blocksize / wordsize_;
  int blocks = sat_->page_length() / blocksize;

  // Base addresses for memory copy
  uint64 *targetmembase = static_cast<uint64*>(dstpe->addr);
  uint64 *sourcemembase = static_cast<uint64*>(srcpe->addr);
  // Remember the expected CRC
  const AdlerChecksum *expectedcrc = srcpe->pattern->crc();

  for (int currentblock = 0; currentblock < blocks; currentblock++) {
    uint64 *targetmem = targetmembase + currentblock * blockwords;
    uint64 *sourcemem = sourcemembase + currentblock * blockwords;

    AdlerChecksum crc;
    if (tag_mode_) {
      AdlerAddrMemcpyWarm(targetmem, sourcemem, blocksize, &crc, srcpe);
    } else {
      os_->AdlerMemcpyWarm(targetmem, sourcemem, blocksize, &crc);
    }

    // Investigate miscompares.
    if (!crc.Equals(*expectedcrc)) {
      logprintf(11, "Log: CrcWarmCopyPage Falling through to slow compare, "
                "CRC mismatch %s != %s\n", crc.ToHexString().c_str(),
                expectedcrc->ToHexString().c_str());
      int errorcount = CheckRegion(sourcemem,
                                   srcpe->pattern,
                                   blocksize,
                                   currentblock * blocksize, 0);
      if (errorcount == 0) {
        logprintf(0, "Log: CrcWarmCopyPage CRC mismatch expected: %s != "
                  "actual: %s, but no miscompares found. "
                  "Retrying with fresh data.\n",
                  expectedcrc->ToHexString().c_str(),
                  crc.ToHexString().c_str());
        if (!tag_mode_) {
          // Copy the data originally read from this region back again.
          // This data should contain any corruption seen when the CRC
          // was first calculated.
          memcpy(sourcemem, targetmem, blocksize);
          errorcount = CheckRegion(sourcemem,
                                   srcpe->pattern,
                                   blocksize,
                                   currentblock * blocksize, 0);
          if (errorcount == 0) {
            int core_id = sched_getcpu();
            logprintf(0, "Process Error: CPU %d(0x%s) CrcWarmCopyPage "
                      "CRC mismatch %s != %s, "
                      "but no miscompares found on second pass.\n",
                      core_id, CurrentCpusFormat().c_str(),
                      crc.ToHexString().c_str(),
                      expectedcrc->ToHexString().c_str());
            struct ErrorRecord er;
            er.actual = sourcemem[0];
            er.expected = 0xbad;
            er.vaddr = sourcemem;
            ProcessError(&er, 0, "Hardware Error");
            errors++;
          }
        }
      }
      errors += errorcount;
    }
  }

  // For odd length transfers, we should never hit this.
  int leftovers = sat_->page_length() % blocksize;
  if (leftovers) {
    uint64 *targetmem = targetmembase + blocks * blockwords;
    uint64 *sourcemem = sourcemembase + blocks * blockwords;

    errors += CheckRegion(sourcemem,
                          srcpe->pattern,
                          leftovers,
                          blocks * blocksize, 0);
    int leftoverwords = leftovers / wordsize_;
    for (int i = 0; i < leftoverwords; i++) {
      targetmem[i] = sourcemem[i];
    }
  }

  // Update pattern reference to reflect new contents.
  dstpe->pattern = srcpe->pattern;

  // Clean clean clean the errors away.
  if (errors) {
    // TODO(nsanders): Maybe we should patch rather than fill? Filling may
    // cause bad data to be propagated across the page.
    FillPage(dstpe);
  }
  return errors;
}
// Memory check work loop. Execute until done, then exhaust pages.
bool CheckThread::Work() {
  struct page_entry pe;
  bool result = true;
  int64 loops = 0;

  logprintf(9, "Log: Starting Check thread %d\n", thread_num_);

  // We want to check all the pages, and
  // stop when there aren't any left.
  while (true) {
    result = result && sat_->GetValid(&pe);
    if (!result) {
      if (IsReadyToRunNoPause())
        logprintf(0, "Process Error: check_thread failed to pop pages, "
                  "bailing\n");
      else
        result = true;
      break;
    }

    // Do the result check.
    CrcCheckPage(&pe);

    // Push pages back on the valid queue if we are still going,
    // throw them out otherwise.
    if (IsReadyToRunNoPause())
      result = result && sat_->PutValid(&pe);
    else
      result = result && sat_->PutEmpty(&pe);
    if (!result) {
      logprintf(0, "Process Error: check_thread failed to push pages, "
                "bailing\n");
      break;
    }
    loops++;
  }

  pages_copied_ = loops;
  status_ = result;
  logprintf(9, "Log: Completed %d: Check thread. Status %d, %d pages checked\n",
            thread_num_, status_, pages_copied_);
  return result;
}
// Memory copy work loop. Execute until marked done.
bool CopyThread::Work() {
  struct page_entry src;
  struct page_entry dst;
  bool result = true;
  int64 loops = 0;

  logprintf(9, "Log: Starting copy thread %d: cpu %s, mem %x\n",
            thread_num_, cpuset_format(&cpu_mask_).c_str(), tag_);

  while (IsReadyToRun()) {
    // Pop the needed pages.
    result = result && sat_->GetValid(&src, tag_);
    result = result && sat_->GetEmpty(&dst, tag_);
    if (!result) {
      logprintf(0, "Process Error: copy_thread failed to pop pages, "
                "bailing\n");
      break;
    }
    // Force errors for unittests.
    if (sat_->error_injection()) {
      if (loops == 8) {
        char *addr = reinterpret_cast<char*>(src.addr);
        int offset = random() % sat_->page_length();
        addr[offset] = 0xba;
      }
    }
    // We can use memcpy, or CRC check while we copy.
    if (sat_->warm()) {
      CrcWarmCopyPage(&dst, &src);
    } else if (sat_->strict()) {
      CrcCopyPage(&dst, &src);
    } else {
      memcpy(dst.addr, src.addr, sat_->page_length());
      dst.pattern = src.pattern;
    }

    result = result && sat_->PutValid(&dst);
    result = result && sat_->PutEmpty(&src);

    // Copy worker-threads yield themselves at the end of each copy loop,
    // to avoid threads from preempting each other in the middle of the inner
    // copy-loop. Cooperation between copy worker-threads results in less
    // unnecessary cache thrashing (which happens when context-switching in the
    // middle of the inner copy-loop).
    YieldSelf();

    if (!result) {
      logprintf(0, "Process Error: copy_thread failed to push pages, "
                "bailing\n");
      break;
    }
    loops++;
  }

  pages_copied_ = loops;
  status_ = result;
  logprintf(9, "Log: Completed %d: Copy thread. Status %d, %d pages copied\n",
            thread_num_, status_, pages_copied_);
  return result;
}
// Memory invert work loop. Execute until marked done.
bool InvertThread::Work() {
  struct page_entry src;
  bool result = true;
  int64 loops = 0;

  logprintf(9, "Log: Starting invert thread %d\n", thread_num_);

  while (IsReadyToRun()) {
    // Pop the needed pages.
    result = result && sat_->GetValid(&src);
    if (!result) {
      logprintf(0, "Process Error: invert_thread failed to pop pages, "
                "bailing\n");
      break;
    }

    if (sat_->strict())
      CrcCheckPage(&src);

    // For the same reason CopyThread yields itself (see YieldSelf comment
    // in CopyThread::Work()), InvertThread yields itself after each invert
    // operation to improve cooperation between different worker threads
    // stressing the memory/cache.
    InvertPageUp(&src);
    YieldSelf();
    InvertPageDown(&src);
    YieldSelf();
    InvertPageDown(&src);
    YieldSelf();
    InvertPageUp(&src);
    YieldSelf();

    if (sat_->strict())
      CrcCheckPage(&src);

    result = result && sat_->PutValid(&src);
    if (!result) {
      logprintf(0, "Process Error: invert_thread failed to push pages, "
                "bailing\n");
      break;
    }
    loops++;
  }

  pages_copied_ = loops * 2;
  status_ = result;
  logprintf(9, "Log: Completed %d: Invert thread. Status %d, %d pages "
            "copied\n", thread_num_, status_, pages_copied_);
  return result;
}
// Set file name to use for File IO.
void FileThread::SetFile(const char *filename_init) {
  filename_ = filename_init;
  devicename_ = os_->FindFileDevice(filename_);
}

// Open the file for access.
bool FileThread::OpenFile(int *pfile) {
  int flags = O_RDWR | O_CREAT | O_SYNC;
  int fd = open(filename_.c_str(), flags | O_DIRECT, 0644);
  if (O_DIRECT != 0 && fd < 0 && errno == EINVAL) {
    fd = open(filename_.c_str(), flags, 0644);  // Try without O_DIRECT
    os_->ActivateFlushPageCache();  // Not using O_DIRECT fixed EINVAL
  }
  if (fd < 0) {
    logprintf(0, "Process Error: Failed to create file %s!!\n",
              filename_.c_str());
    errorcount_++;
    return false;
  }
  *pfile = fd;
  return true;
}

// Close the file.
bool FileThread::CloseFile(int fd) {
  close(fd);
  return true;
}
// Check sector tagging.
bool FileThread::SectorTagPage(struct page_entry *src, int block) {
  int page_length = sat_->page_length();
  struct FileThread::SectorTag *tag =
    (struct FileThread::SectorTag *)(src->addr);

  // Tag each sector.
  unsigned char magic = ((0xba + thread_num_) & 0xff);
  for (int sec = 0; sec < page_length / 512; sec++) {
    tag[sec].magic = magic;
    tag[sec].block = block & 0xff;
    tag[sec].sector = sec & 0xff;
    tag[sec].pass = pass_ & 0xff;
  }
  return true;
}
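// Sector tag layout note: SectorTag is a 512-byte struct (asserted in
// SectorValidatePage), so casting the page to a SectorTag array overlays
// one tag on the leading bytes of each disk sector. magic identifies the
// writing thread, and block/sector/pass pin down exactly which write a
// sector came from when it is validated after the read pass.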
// Write a page of data to the file.
bool FileThread::WritePageToFile(int fd, struct page_entry *src) {
  int page_length = sat_->page_length();
  // Fill the file with our data.
  int64 size = write(fd, src->addr, page_length);

  if (size != page_length) {
    os_->ErrorReport(devicename_.c_str(), "write-error", 1);
    errorcount_++;
    logprintf(0, "Block Error: file_thread failed to write, "
              "bailing\n");
    return false;
  }
  return true;
}
1655 bool FileThread::WritePages(int fd) {
1656 int strict = sat_->strict();
1658 // Start fresh at beginning of file for each batch of pages.
1659 lseek64(fd, 0, SEEK_SET);
1660 for (int i = 0; i < sat_->disk_pages(); i++) {
1661 struct page_entry src;
1662 if (!GetValidPage(&src))
1664 // Save expected pattern.
1665 page_recs_[i].pattern = src.pattern;
1666 page_recs_[i].src = src.addr;
1668 // Check data correctness.
1672 SectorTagPage(&src, i);
1674 bool result = WritePageToFile(fd, &src);
1676 if (!PutEmptyPage(&src))
1682 return os_->FlushPageCache(); // If O_DIRECT worked, this will be a NOP.
// Copy data from file into memory block.
bool FileThread::ReadPageFromFile(int fd, struct page_entry *dst) {
  int page_length = sat_->page_length();

  // Do the actual read.
  int64 size = read(fd, dst->addr, page_length);
  if (size != page_length) {
    os_->ErrorReport(devicename_.c_str(), "read-error", 1);
    logprintf(0, "Block Error: file_thread failed to read, "
              "bailing\n");
    errorcount_++;
    return false;
  }
  return true;
}
// Check sector tagging.
bool FileThread::SectorValidatePage(const struct PageRec &page,
                                    struct page_entry *dst, int block) {
  // Error injection.
  static int calls = 0;
  calls++;

  // Do sector tag compare.
  int firstsector = -1;
  int lastsector = -1;
  bool badsector = false;
  int page_length = sat_->page_length();

  // Cast data block into an array of tagged sectors.
  struct FileThread::SectorTag *tag =
    (struct FileThread::SectorTag *)(dst->addr);

  sat_assert(sizeof(*tag) == 512);

  // Error injection: corrupt tags and data on specific calls.
  if (sat_->error_injection()) {
    if (calls == 2) {
      for (int badsec = 8; badsec < 17; badsec++)
        tag[badsec].pass = 27;
    }
    if (calls == 18)
      (static_cast<int32*>(dst->addr))[27] = 0xbadda7a;
  }

  // Check each sector for the correct tag we added earlier,
  // then revert the tag to the normal data pattern.
  unsigned char magic = ((0xba + thread_num_) & 0xff);
  for (int sec = 0; sec < page_length / 512; sec++) {
    // Check magic tag.
    if ((tag[sec].magic != magic) ||
        (tag[sec].block != (block & 0xff)) ||
        (tag[sec].sector != (sec & 0xff)) ||
        (tag[sec].pass != (pass_ & 0xff))) {
      // Offset calculation for tag location.
      int offset = sec * sizeof(SectorTag);
      if (tag[sec].block != (block & 0xff))
        offset += 1 * sizeof(uint8);
      else if (tag[sec].sector != (sec & 0xff))
        offset += 2 * sizeof(uint8);
      else if (tag[sec].pass != (pass_ & 0xff))
        offset += 3 * sizeof(uint8);

      // Run sector tag error through diagnoser for logging and reporting.
      errorcount_ += 1;
      os_->error_diagnoser_->AddHDDSectorTagError(devicename_, tag[sec].block,
                                                  offset,
                                                  tag[sec].sector,
                                                  page.src, page.dst);

      logprintf(5, "Sector Error: Sector tag @ 0x%x, pass %d/%d. "
                "sec %x/%x, block %d/%d, magic %x/%x, File: %s \n",
                block * page_length + 512 * sec,
                (pass_ & 0xff), (unsigned int)tag[sec].pass,
                sec, (unsigned int)tag[sec].sector,
                block, (unsigned int)tag[sec].block,
                magic, (unsigned int)tag[sec].magic,
                filename_.c_str());

      // Keep track of first and last bad sector.
      if (firstsector == -1)
        firstsector = (block * page_length / 512) + sec;
      lastsector = (block * page_length / 512) + sec;
      badsector = true;
    }
    // Patch tag back to proper pattern.
    unsigned int *addr = (unsigned int *)(&tag[sec]);
    *addr = dst->pattern->pattern(512 * sec / sizeof(*addr));
  }

  // If we found sector errors:
  if (badsector == true) {
    logprintf(5, "Log: file sector miscompare at offset %x-%x. File: %s\n",
              (firstsector * 512),
              ((lastsector + 1) * 512) - 1,
              filename_.c_str());

    // Either exit immediately, or patch the data up and continue.
    if (sat_->stop_on_error()) {
      exit(1);
    } else {
      // Patch up bad pages.
      for (int block = (firstsector * 512) / page_length;
           block <= (lastsector * 512) / page_length;
           block++) {
        unsigned int *memblock = static_cast<unsigned int *>(dst->addr);
        int length = page_length / wordsize_;
        for (int i = 0; i < length; i++) {
          memblock[i] = dst->pattern->pattern(i);
        }
      }
    }
  }
  return true;
}
// Get memory for an incoming data transfer..
bool FileThread::PagePrepare() {
  // We can only do direct IO to SAT pages if it is normal mem.
  page_io_ = os_->normal_mem();

  // Init a local buffer if we need it.
  if (!page_io_) {
#ifdef HAVE_POSIX_MEMALIGN
    int result = posix_memalign(&local_page_, 512, sat_->page_length());
#else
    local_page_ = memalign(512, sat_->page_length());
    int result = (local_page_ == 0);
#endif
    if (result) {
      logprintf(0, "Process Error: disk thread posix_memalign "
                "returned %d (fail)\n",
                result);
      status_ = false;
      return false;
    }
  }
  return true;
}

// Remove memory allocated for data transfer.
bool FileThread::PageTeardown() {
  // Free a local buffer if we need to.
  if (!page_io_) {
    free(local_page_);
  }
  return true;
}
// Get memory for an incoming data transfer..
bool FileThread::GetEmptyPage(struct page_entry *dst) {
  if (page_io_) {
    if (!sat_->GetEmpty(dst))
      return false;
  } else {
    dst->addr = local_page_;
  }
  return true;
}

// Get memory for an outgoing data transfer..
bool FileThread::GetValidPage(struct page_entry *src) {
  struct page_entry tmp;
  if (!sat_->GetValid(&tmp))
    return false;

  if (page_io_) {
    *src = tmp;
    return true;
  } else {
    src->addr = local_page_;
    src->pattern = tmp.pattern;
    CrcCopyPage(src, &tmp);
    if (!sat_->PutValid(&tmp))
      return false;
  }
  return true;
}

// Throw out a used empty page.
bool FileThread::PutEmptyPage(struct page_entry *src) {
  if (page_io_) {
    if (!sat_->PutEmpty(src))
      return false;
  }
  return true;
}

// Throw out a used, filled page.
bool FileThread::PutValidPage(struct page_entry *src) {
  if (page_io_) {
    if (!sat_->PutValid(src))
      return false;
  }
  return true;
}
// Copy data from file into memory blocks.
bool FileThread::ReadPages(int fd) {
  int page_length = sat_->page_length();
  int strict = sat_->strict();
  bool result = true;

  // Read our data back out of the file, into its new location.
  lseek64(fd, 0, SEEK_SET);
  for (int i = 0; i < sat_->disk_pages(); i++) {
    struct page_entry dst;
    if (!GetEmptyPage(&dst))
      return false;
    // Retrieve expected pattern.
    dst.pattern = page_recs_[i].pattern;
    // Update page record.
    page_recs_[i].dst = dst.addr;

    // Read from the file into destination page.
    if (!ReadPageFromFile(fd, &dst)) {
      PutEmptyPage(&dst);
      return false;
    }

    SectorValidatePage(page_recs_[i], &dst, i);

    // Ensure that the transfer ended up with correct data.
    if (strict) {
      // Record page index currently CRC checked.
      crc_page_ = i;
      int errors = CrcCheckPage(&dst);
      if (errors) {
        logprintf(5, "Log: file miscompare at block %d, "
                  "offset %x-%x. File: %s\n",
                  i, i * page_length, ((i + 1) * page_length) - 1,
                  filename_.c_str());
        result = false;
      }
      crc_page_ = -1;
      errorcount_ += errors;
    }

    if (!PutValidPage(&dst))
      return false;
  }
  return result;
}
1935 bool FileThread::Work() {
1939 logprintf(9, "Log: Starting file thread %d, file %s, device %s\n",
1942 devicename_.c_str());
1944 if (!PagePrepare()) {
1949 // Open the data IO file.
1951 if (!OpenFile(&fd)) {
1958 // Load patterns into page records.
1959 page_recs_ = new struct PageRec[sat_->disk_pages()];
1960 for (int i = 0; i < sat_->disk_pages(); i++) {
1961 page_recs_[i].pattern = new class Pattern();
1965 while (IsReadyToRun()) {
1966 // Do the file write.
1967 if (!(result = result && WritePages(fd)))
1970 // Do the file read.
1971 if (!(result = result && ReadPages(fd)))
1978 pages_copied_ = loops * sat_->disk_pages();
1984 logprintf(9, "Log: Completed %d: file thread status %d, %d pages copied\n",
1985 thread_num_, status_, pages_copied_);
1986 // Failure to read from device indicates hardware,
1987 // rather than procedural SW error.
bool NetworkThread::IsNetworkStopSet() {
  return !IsReadyToRunNoPause();
}

bool NetworkSlaveThread::IsNetworkStopSet() {
  // This thread has no completion status.
  // It finishes whenever there is no more data to be
  // transferred.
  return true;
}

// Set ip name to use for Network IO.
void NetworkThread::SetIP(const char *ipaddr_init) {
  strncpy(ipaddr_, ipaddr_init, 256);
}
// Create a socket.
// Return false on error.
bool NetworkThread::CreateSocket(int *psocket) {
  int sock = socket(AF_INET, SOCK_STREAM, 0);
  if (sock == -1) {
    logprintf(0, "Process Error: Cannot open socket\n");
    pages_copied_ = 0;
    status_ = false;
    return false;
  }
  *psocket = sock;
  return true;
}

// Close the socket.
bool NetworkThread::CloseSocket(int sock) {
  close(sock);
  return true;
}
2029 bool NetworkThread::Connect(int sock) {
2030 struct sockaddr_in dest_addr;
2031 dest_addr.sin_family = AF_INET;
2032 dest_addr.sin_port = htons(kNetworkPort);
2033 memset(&(dest_addr.sin_zero), '\0', sizeof(dest_addr.sin_zero));
2035 // Translate dot notation to u32.
2036 if (inet_aton(ipaddr_, &dest_addr.sin_addr) == 0) {
2037 logprintf(0, "Process Error: Cannot resolve %s\n", ipaddr_);
2043 if (-1 == connect(sock, reinterpret_cast<struct sockaddr *>(&dest_addr),
2044 sizeof(struct sockaddr))) {
2045 logprintf(0, "Process Error: Cannot connect %s\n", ipaddr_);
// Set up the socket to listen for incoming tcp connections.
bool NetworkListenThread::Listen() {
  struct sockaddr_in sa;

  memset(&(sa.sin_zero), '\0', sizeof(sa.sin_zero));

  sa.sin_family = AF_INET;
  sa.sin_addr.s_addr = INADDR_ANY;
  sa.sin_port = htons(kNetworkPort);

  if (-1 == ::bind(sock_, (struct sockaddr*)&sa, sizeof(struct sockaddr))) {
    char buf[256];
    sat_strerror(errno, buf, sizeof(buf));
    logprintf(0, "Process Error: Cannot bind socket: %s\n", buf);
    pages_copied_ = 0;
    status_ = false;
    return false;
  }
  listen(sock_, 3);
  return true;
}
// Wait for a connection from a network traffic generation thread.
bool NetworkListenThread::Wait() {
  fd_set rfds;
  struct timeval tv;
  int retval;

  // Watch sock_ to see when it has input.
  FD_ZERO(&rfds);
  FD_SET(sock_, &rfds);
  // Wait up to five seconds.
  tv.tv_sec = 5;
  tv.tv_usec = 0;

  retval = select(sock_ + 1, &rfds, NULL, NULL, &tv);

  return (retval > 0);
}

// Wait for a connection from a network traffic generation thread.
bool NetworkListenThread::GetConnection(int *pnewsock) {
  struct sockaddr_in sa;
  socklen_t size = sizeof(struct sockaddr_in);

  int newsock = accept(sock_, reinterpret_cast<struct sockaddr *>(&sa), &size);
  if (newsock < 0) {
    logprintf(0, "Process Error: Did not receive connection\n");
    pages_copied_ = 0;
    status_ = false;
    return false;
  }
  *pnewsock = newsock;
  return true;
}
// Send a page, return false if a page was not sent.
bool NetworkThread::SendPage(int sock, struct page_entry *src) {
  int page_length = sat_->page_length();
  char *address = static_cast<char*>(src->addr);

  // Send our data over the network.
  int size = page_length;
  while (size) {
    int transferred = send(sock, address + (page_length - size), size, 0);
    if ((transferred == 0) || (transferred == -1)) {
      if (!IsNetworkStopSet()) {
        char buf[256] = "";
        sat_strerror(errno, buf, sizeof(buf));
        logprintf(0, "Process Error: Thread %d, "
                  "Network write failed, bailing. (%s)\n",
                  thread_num_, buf);
        status_ = false;
      }
      return false;
    }
    size = size - transferred;
  }
  return true;
}
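// Short-write note: send() may transfer fewer bytes than requested, so the
// loop above re-issues the call at the first unsent byte until the whole
// page is on the wire; ReceivePage mirrors the same logic for short reads.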
// Receive a page. Return false if a page was not received.
bool NetworkThread::ReceivePage(int sock, struct page_entry *dst) {
  int page_length = sat_->page_length();
  char *address = static_cast<char*>(dst->addr);

  // Maybe we will get our data back again, maybe not.
  int size = page_length;
  while (size) {
    int transferred = recv(sock, address + (page_length - size), size, 0);
    if ((transferred == 0) || (transferred == -1)) {
      // Typically network slave thread should exit as network master
      // thread stops sending data.
      if (IsNetworkStopSet()) {
        int err = errno;
        if (transferred == 0 && err == 0) {
          // Two system setups will not sync exactly,
          // allow early exit, but log it.
          logprintf(0, "Log: Net thread did not receive any data, exiting.\n");
        } else {
          char buf[256] = "";
          sat_strerror(err, buf, sizeof(buf));
          // Print why we failed.
          logprintf(0, "Process Error: Thread %d, "
                    "Network read failed, bailing (%s).\n",
                    thread_num_, buf);
          status_ = false;
          // Print arguments and results.
          logprintf(0, "Log: recv(%d, address %x, size %x, 0) == %x, err %d\n",
                    sock, address + (page_length - size),
                    size, transferred, err);
          if ((transferred == 0) &&
              (page_length - size < 512) &&
              (page_length - size > 0)) {
            // Print null terminated data received, to see who's been
            // sending us suspicious unwanted data.
            address[page_length - size] = 0;
            logprintf(0, "Log: received %d bytes: '%s'\n",
                      page_length - size, address);
          }
        }
      }
      return false;
    }
    size = size - transferred;
  }
  return true;
}
// Network IO work loop. Execute until marked done.
// Return true if the thread ran as expected.
bool NetworkThread::Work() {
  logprintf(9, "Log: Starting network thread %d, ip %s\n",
            thread_num_,
            ipaddr_);

  // Make a socket.
  int sock = 0;
  if (!CreateSocket(&sock))
    return false;

  // Network IO loop requires network slave thread to have already initialized.
  // We will sleep here for awhile to ensure that the slave thread will be
  // listening by the time we connect.
  // Sleep for 15 seconds.
  sat_sleep(15);
  logprintf(9, "Log: Starting execution of network thread %d, ip %s\n",
            thread_num_,
            ipaddr_);

  // Connect to a slave thread.
  if (!Connect(sock))
    return false;

  // Loop until done.
  bool result = true;
  int strict = sat_->strict();
  int64 loops = 0;
  while (IsReadyToRun()) {
    struct page_entry src;
    struct page_entry dst;
    result = result && sat_->GetValid(&src);
    result = result && sat_->GetEmpty(&dst);
    if (!result) {
      logprintf(0, "Process Error: net_thread failed to pop pages, "
                "bailing\n");
      break;
    }

    // Check data correctness.
    if (strict)
      CrcCheckPage(&src);

    // Do the network write.
    if (!(result = result && SendPage(sock, &src)))
      break;

    // Update pattern reference to reflect new contents.
    dst.pattern = src.pattern;

    // Do the network read.
    if (!(result = result && ReceivePage(sock, &dst)))
      break;

    // Ensure that the transfer ended up with correct data.
    if (strict)
      CrcCheckPage(&dst);

    // Return all of our pages to the queue.
    result = result && sat_->PutValid(&dst);
    result = result && sat_->PutEmpty(&src);
    if (!result) {
      logprintf(0, "Process Error: net_thread failed to push pages, "
                "bailing\n");
      break;
    }
    loops++;
  }

  pages_copied_ = loops;
  status_ = result;

  // Clean up.
  CloseSocket(sock);

  logprintf(9, "Log: Completed %d: network thread status %d, "
            "%d pages copied\n",
            thread_num_, status_, pages_copied_);
  return result;
}
// Spawn slave threads for incoming connections.
bool NetworkListenThread::SpawnSlave(int newsock, int threadid) {
  logprintf(12, "Log: Listen thread spawning slave\n");

  // Spawn slave thread, to reflect network traffic back to sender.
  ChildWorker *child_worker = new ChildWorker;
  child_worker->thread.SetSock(newsock);
  child_worker->thread.InitThread(threadid, sat_, os_, patternlist_,
                                  &child_worker->status);
  child_worker->status.Initialize();
  child_worker->thread.SpawnThread();
  child_workers_.push_back(child_worker);

  return true;
}

// Reap slave threads.
bool NetworkListenThread::ReapSlaves() {
  bool result = true;
  // Gather status and reap threads.
  logprintf(12, "Log: Joining all outstanding threads\n");

  for (size_t i = 0; i < child_workers_.size(); i++) {
    NetworkSlaveThread& child_thread = child_workers_[i]->thread;
    logprintf(12, "Log: Joining slave thread %d\n", i);
    child_thread.JoinThread();
    if (child_thread.GetStatus() != 1) {
      logprintf(0, "Process Error: Slave Thread %d failed with status %d\n", i,
                child_thread.GetStatus());
      result = false;
    }
    errorcount_ += child_thread.GetErrorCount();
    logprintf(9, "Log: Slave Thread %d found %lld miscompares\n", i,
              child_thread.GetErrorCount());
    pages_copied_ += child_thread.GetPageCount();
  }

  return result;
}
// Network listener IO work loop. Execute until marked done.
// Return false on fatal software error.
bool NetworkListenThread::Work() {
  logprintf(9, "Log: Starting network listen thread %d\n",
            thread_num_);

  // Make a socket.
  sock_ = 0;
  if (!CreateSocket(&sock_)) {
    status_ = false;
    return false;
  }
  logprintf(9, "Log: Listen thread created sock\n");

  // Allows incoming connections to be queued up by socket library.
  Listen();
  logprintf(12, "Log: Listen thread waiting for incoming connections\n");
  int newsock = 0;
  // Wait on incoming connections, and spawn worker threads for them.
  int threadcount = 0;
  while (IsReadyToRun()) {
    // Poll for connections that we can accept().
    if (Wait()) {
      // Accept those connections.
      logprintf(12, "Log: Listen thread found incoming connection\n");
      if (GetConnection(&newsock)) {
        SpawnSlave(newsock, threadcount);
        threadcount++;
      }
    }
  }

  // Gather status and join spawned threads.
  ReapSlaves();

  // Delete the child workers.
  for (ChildVector::iterator it = child_workers_.begin();
       it != child_workers_.end(); ++it) {
    (*it)->status.Destroy();
    delete *it;
  }
  child_workers_.clear();

  CloseSocket(sock_);

  status_ = true;
  logprintf(9,
            "Log: Completed %d: network listen thread status %d, "
            "%d pages copied\n",
            thread_num_, status_, pages_copied_);
  return true;
}
2359 // Set network reflector socket struct.
2360 void NetworkSlaveThread::SetSock(int sock) {
2364 // Network reflector IO work loop. Execute until marked done.
2365 // Return false on fatal software error.
2366 bool NetworkSlaveThread::Work() {
2367 logprintf(9, "Log: Starting network slave thread %d\n",
2370 // Verify that we have a socket.
2379 // Init a local buffer for storing data.
2380 void *local_page = NULL;
2381 #ifdef HAVE_POSIX_MEMALIGN
2382 int result = posix_memalign(&local_page, 512, sat_->page_length());
2384 local_page = memalign(512, sat_->page_length());
2385 int result = (local_page == 0);
2388 logprintf(0, "Process Error: net slave posix_memalign "
2389 "returned %d (fail)\n",
2395 struct page_entry page;
2396 page.addr = local_page;
2398 // This thread will continue to run as long as the thread on the other end of
2399 // the socket is still sending and receiving data.
2401 // Do the network read.
2402 if (!ReceivePage(sock, &page))
2405 // Do the network write.
2406 if (!SendPage(sock, &page))
2412 pages_copied_ = loops;
2413 // No results provided from this type of thread.
2420 "Log: Completed %d: network slave thread status %d, "
2421 "%d pages copied\n",
2422 thread_num_, status_, pages_copied_);
2426 // Thread work loop. Execute until marked finished.
2427 bool ErrorPollThread::Work() {
2428 logprintf(9, "Log: Starting system error poll thread %d\n", thread_num_);
2430 // This calls a generic error polling function in the Os abstraction layer.
2432 errorcount_ += os_->ErrorPoll();
2434 } while (IsReadyToRun());
2436 logprintf(9, "Log: Finished system error poll thread %d: %d errors\n",
2437 thread_num_, errorcount_);
2442 // Worker thread to heat up CPU.
2443 // This thread does not evaluate pass/fail or software error.
2444 bool CpuStressThread::Work() {
2445 logprintf(9, "Log: Starting CPU stress thread %d\n", thread_num_);
2448 // Run ludloff's platform/CPU-specific assembly workload.
2449 os_->CpuStressWorkload();
2451 } while (IsReadyToRun());
2453 logprintf(9, "Log: Finished CPU stress thread %d:\n",
2459 CpuCacheCoherencyThread::CpuCacheCoherencyThread(cc_cacheline_data *data,
2460 int cacheline_count,
2464 cc_cacheline_data_ = data;
2465 cc_cacheline_count_ = cacheline_count;
2466 cc_thread_num_ = thread_num;
2467 cc_thread_count_ = thread_count;
2468 cc_inc_count_ = inc_count;
2471 // A very simple pseudorandom generator. Since the random number is based
2472 // on only a few simple logic operations, it can be done quickly in registers
2473 // and the compiler can inline it.
2474 uint64 CpuCacheCoherencyThread::SimpleRandom(uint64 seed) {
2475 return (seed >> 1) ^ (-(seed & 1) & kRandomPolynomial);
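// Illustrative sketch (not part of the original source): this is a
// right-shifting Galois LFSR, so successive calls walk a fixed pseudorandom
// cycle. Assuming kRandomPolynomial is the feedback term defined elsewhere
// in this file, typical use feeds each output back in as the next seed:
//   uint64 r = initial_seed;
//   r = SimpleRandom(r);  // shift right; XOR in the polynomial if bit 0 was set
//   r = SimpleRandom(r);  // repeat to advance the sequence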
2478 // Worker thread to test the cache coherency of the CPUs.
2479 // Return false on fatal sw error.
2480 bool CpuCacheCoherencyThread::Work() {
2481 logprintf(9, "Log: Starting the Cache Coherency thread %d\n",
2483 uint64 time_start, time_end;
2486 // Use a slightly more robust random number for the initial
2487 // value, so the random sequences from the simple generator will
2488 // be more divergent.
2490 unsigned int seed = static_cast<unsigned int>(gettid());
2491 uint64 r = static_cast<uint64>(rand_r(&seed));
2492 r |= static_cast<uint64>(rand_r(&seed)) << 32;
2495 uint64 r = static_cast<uint64>(rand()); // NOLINT
2496 r |= static_cast<uint64>(rand()) << 32; // NOLINT
2499 gettimeofday(&tv, NULL); // Get the timestamp before increments.
2500 time_start = tv.tv_sec * 1000000ULL + tv.tv_usec;
2502 uint64 total_inc = 0; // Total increments done by the thread.
2503 while (IsReadyToRun()) {
2504 for (int i = 0; i < cc_inc_count_; i++) {
2505 // Choose a cacheline structure at random and increment the appropriate
2506 // member in it according to the offset (which is the same as the
2508 r = SimpleRandom(r);
2509 int cline_num = r % cc_cacheline_count_;
2511 // Reverse the order for odd numbered threads in odd numbered cache
2512 // lines. This is designed for massively multi-core systems where the
2513 // number of cores exceeds the bytes in a cache line, so "distant" cores
2514 // get a chance to exercise cache coherency between them.
2515 if (cline_num & cc_thread_num_ & 1)
2516 offset = (cc_thread_count_ & ~1) - cc_thread_num_;
2518 offset = cc_thread_num_;
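// For example, with 8 threads, thread 3 touching an odd-numbered cache
// line uses offset (8 & ~1) - 3 = 5, while thread 3 on an even-numbered
// line keeps its natural offset of 3.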
2519 // Increment the member of the randomly selected structure.
2520 (cc_cacheline_data_[cline_num].num[offset])++;
2523 total_inc += cc_inc_count_;
2525 // Check whether the local counter matches the global value
2526 // in all the cache line structures for this particular thread.
2527 int cc_global_num = 0;
2528 for (int cline_num = 0; cline_num < cc_cacheline_count_; cline_num++) {
2530 // Perform the same offset calculation from above.
2531 if (cline_num & cc_thread_num_ & 1)
2532 offset = (cc_thread_count_ & ~1) - cc_thread_num_;
2534 offset = cc_thread_num_;
2535 cc_global_num += cc_cacheline_data_[cline_num].num[offset];
2536 // Reset the cacheline member's value for the next run.
2537 cc_cacheline_data_[cline_num].num[offset] = 0;
2539 if (sat_->error_injection())
2542 // Since the count is only stored in a byte, to squeeze more into a
2543 // single cache line, only compare it as a byte. In the event that there
2544 // is something detected, the chance that it would be missed by a single
2545 // thread is 1 in 256. If it affects all cores, the chance of it
2546 // being missed is vanishingly small. It seems unlikely any failure
2547 // case would be off by more than a small number.
2548 if ((cc_global_num & 0xff) != (cc_inc_count_ & 0xff)) {
2550 logprintf(0, "Hardware Error: global(%d) and local(%d) do not match\n",
2551 cc_global_num, cc_inc_count_);
2554 gettimeofday(&tv, NULL); // Get the timestamp at the end.
2555 time_end = tv.tv_sec * 1000000ULL + tv.tv_usec;
2557 uint64 us_elapsed = time_end - time_start;
2558 // inc_rate is the no. of increments per second.
2559 double inc_rate = total_inc * 1e6 / us_elapsed;
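// Worked example (hypothetical numbers): 12,000,000 increments over an
// elapsed 2,000,000 us gives 12e6 * 1e6 / 2e6 = 6,000,000 increments/sec.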
2561 logprintf(4, "Stats: CC Thread(%d): Time=%llu us,"
2562 " Increments=%llu, Increments/sec = %.6lf\n",
2563 cc_thread_num_, us_elapsed, total_inc, inc_rate);
2564 logprintf(9, "Log: Finished CPU Cache Coherency thread %d:\n",
2570 DiskThread::DiskThread(DiskBlockTable *block_table) {
2571 read_block_size_ = kSectorSize; // default 1 sector (512 bytes)
2572 write_block_size_ = kSectorSize; // this assumes read and write block
2573 // sizes are the same
2574 segment_size_ = -1; // use the entire disk as one segment
2575 cache_size_ = 16 * 1024 * 1024; // assume 16MiB cache by default
2576 // Use a queue such that 3/2 times as much data as the cache can hold
2577 // is written before it is read so that there is little chance the read
2578 // data is in the cache.
2579 queue_size_ = ((cache_size_ / write_block_size_) * 3) / 2;
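// With the defaults above, that works out to
// ((16 * 1024 * 1024 / 512) * 3) / 2 = 49152 blocks in the queue.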
2580 blocks_per_segment_ = 32;
2582 read_threshold_ = 100000; // 100ms is a reasonable limit for
2583 write_threshold_ = 100000; // reading/writing a sector
2585 read_timeout_ = 5000000; // 5 seconds should be a long enough
2586 write_timeout_ = 5000000; // timeout for reading/writing a block
2588 device_sectors_ = 0;
2589 non_destructive_ = 0;
2591 #ifdef HAVE_LIBAIO_H
2594 block_table_ = block_table;
2595 update_block_table_ = 1;
2597 block_buffer_ = NULL;
2599 blocks_written_ = 0;
2603 DiskThread::~DiskThread() {
2605 free(block_buffer_);
2608 // Set filename for device file (in /dev).
2609 void DiskThread::SetDevice(const char *device_name) {
2610 device_name_ = device_name;
2613 // Set various parameters that control the behaviour of the test.
2614 // -1 is used as a sentinel value on each parameter (except non_destructive)
2615 // to indicate that the parameter should be left unchanged.
2616 bool DiskThread::SetParameters(int read_block_size,
2617 int write_block_size,
2618 int64 segment_size,
2619 int64 cache_size,
2620 int blocks_per_segment,
2621 int64 read_threshold,
2622 int64 write_threshold,
2623 int non_destructive) {
2624 if (read_block_size != -1) {
2625 // Blocks must be aligned to the disk's sector size.
2626 if (read_block_size % kSectorSize != 0) {
2627 logprintf(0, "Process Error: Block size must be a multiple of %d "
2628 "(thread %d).\n", kSectorSize, thread_num_);
2632 read_block_size_ = read_block_size;
2635 if (write_block_size != -1) {
2636 // Write blocks must be aligned to the disk's sector size and to the
2637 // read block size.
2638 if (write_block_size % kSectorSize != 0) {
2639 logprintf(0, "Process Error: Write block size must be a multiple "
2640 "of %d (thread %d).\n", kSectorSize, thread_num_);
2643 if (write_block_size % read_block_size_ != 0) {
2644 logprintf(0, "Process Error: Write block size must be a multiple "
2645 "of the read block size, which is %d (thread %d).\n",
2646 read_block_size_, thread_num_);
2650 write_block_size_ = write_block_size;
2653 // Make sure write_block_size_ is still valid.
2654 if (read_block_size_ > write_block_size_) {
2655 logprintf(5, "Log: Assuming write block size equal to read block size, "
2656 "which is %d (thread %d).\n", read_block_size_,
2658 write_block_size_ = read_block_size_;
2660 if (write_block_size_ % read_block_size_ != 0) {
2661 logprintf(0, "Process Error: Write block size (defined as %d) must "
2662 "be a multiple of the read block size, which is %d "
2663 "(thread %d).\n", write_block_size_, read_block_size_,
2670 if (cache_size != -1) {
2671 cache_size_ = cache_size;
2674 if (blocks_per_segment != -1) {
2675 if (blocks_per_segment <= 0) {
2676 logprintf(0, "Process Error: Blocks per segment must be greater than "
2677 "zero.\n (thread %d)", thread_num_);
2681 blocks_per_segment_ = blocks_per_segment;
2684 if (read_threshold != -1) {
2685 if (read_threshold <= 0) {
2686 logprintf(0, "Process Error: Read threshold must be greater than "
2687 "zero (thread %d).\n", thread_num_);
2691 read_threshold_ = read_threshold;
2694 if (write_threshold != -1) {
2695 if (write_threshold <= 0) {
2696 logprintf(0, "Process Error: Write threshold must be greater than "
2697 "zero (thread %d).\n", thread_num_);
2701 write_threshold_ = write_threshold;
2704 if (segment_size != -1) {
2705 // Segments must be aligned to the disk's sector size.
2706 if (segment_size % kSectorSize != 0) {
2707 logprintf(0, "Process Error: Segment size must be a multiple of %d"
2708 " (thread %d).\n", kSectorSize, thread_num_);
2712 segment_size_ = segment_size / kSectorSize;
2715 non_destructive_ = non_destructive;
2717 // Having a queue of 150% of the blocks that fit in the disk's cache
2718 // should be enough to force out the oldest block before it is read,
2719 // ensuring the data comes from the disk and not the cache.
2720 queue_size_ = ((cache_size_ / write_block_size_) * 3) / 2;
2721 // Update the DiskBlockTable parameters.
2722 if (update_block_table_) {
2723 block_table_->SetParameters(kSectorSize, write_block_size_,
2724 device_sectors_, segment_size_,
2730 // Open a device, return false on failure.
2731 bool DiskThread::OpenDevice(int *pfile) {
2732 int flags = O_RDWR | O_SYNC | O_LARGEFILE;
2733 int fd = open(device_name_.c_str(), flags | O_DIRECT, 0);
2734 if (O_DIRECT != 0 && fd < 0 && errno == EINVAL) {
2735 fd = open(device_name_.c_str(), flags, 0); // Try without O_DIRECT
2736 os_->ActivateFlushPageCache();
2739 logprintf(0, "Process Error: Failed to open device %s (thread %d)!!\n",
2740 device_name_.c_str(), thread_num_);
2745 return GetDiskSize(fd);
2748 // Retrieves the size (in bytes) of the disk/file.
2749 // Return false on failure.
2750 bool DiskThread::GetDiskSize(int fd) {
2751 struct stat device_stat;
2752 if (fstat(fd, &device_stat) == -1) {
2753 logprintf(0, "Process Error: Unable to fstat disk %s (thread %d).\n",
2754 device_name_.c_str(), thread_num_);
2758 // For a block device, an ioctl is needed to get the size since the size
2759 // of the device file (e.g., /dev/sdb) is 0.
2760 if (S_ISBLK(device_stat.st_mode)) {
2761 uint64 block_size = 0;
2763 if (ioctl(fd, BLKGETSIZE64, &block_size) == -1) {
2764 logprintf(0, "Process Error: Unable to ioctl disk %s (thread %d).\n",
2765 device_name_.c_str(), thread_num_);
2769 // Zero size indicates a nonworking device.
2770 if (block_size == 0) {
2771 os_->ErrorReport(device_name_.c_str(), "device-size-zero", 1);
2773 status_ = true; // Avoid a procedural error.
2777 device_sectors_ = block_size / kSectorSize;
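// For example, a nominal 500 GB drive reporting 500,107,862,016 bytes
// yields 500,107,862,016 / 512 = 976,773,168 sectors.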
2779 } else if (S_ISREG(device_stat.st_mode)) {
2780 device_sectors_ = device_stat.st_size / kSectorSize;
2783 logprintf(0, "Process Error: %s is not a regular file or block "
2784 "device (thread %d).\n", device_name_.c_str(),
2789 logprintf(12, "Log: Device sectors: %lld on disk %s (thread %d).\n",
2790 device_sectors_, device_name_.c_str(), thread_num_);
2792 if (update_block_table_) {
2793 block_table_->SetParameters(kSectorSize, write_block_size_,
2794 device_sectors_, segment_size_,
2801 bool DiskThread::CloseDevice(int fd) {
2806 // Return the time in microseconds.
2807 int64 DiskThread::GetTime() {
2809 gettimeofday(&tv, NULL);
2810 return tv.tv_sec * 1000000LL + tv.tv_usec;
2813 // Do randomized reads and (possibly) writes on a device.
2814 // Return false on fatal SW error, true on SW success,
2815 // regardless of whether HW failed.
2816 bool DiskThread::DoWork(int fd) {
2817 int64 block_num = 0;
2820 if (segment_size_ == -1) {
2821 num_segments = 1;
2822 } else {
2823 num_segments = device_sectors_ / segment_size_;
2824 if (device_sectors_ % segment_size_ != 0)
2828 // Disk size should be at least 3x cache size. See comment later for
2829 // details on size calculations.
2830 sat_assert(device_sectors_ * kSectorSize > 3 * cache_size_);
2832 // This disk test works by writing blocks with a certain pattern to
2833 // disk, then reading them back and verifying it against the pattern
2834 // at a later time. A failure happens when either the block cannot
2835 // be written/read or when the read block is different than what was
2836 // written. If a block takes too long to write/read, then a warning
2837 // is given instead of an error since taking too long is not
2838 // necessarily an error.
2840 // To prevent the read blocks from coming from the disk cache,
2841 // enough blocks are written before read such that a block would
2842 // be ejected from the disk cache by the time it is read.
2844 // TODO(amistry): Implement some sort of read/write throttling. The
2845 // flood of asynchronous I/O requests when a drive is
2846 // unplugged is causing the application and kernel to
2847 // become unresponsive.
2849 while (IsReadyToRun()) {
2850 // Write blocks to disk.
2851 logprintf(16, "Log: Write phase %sfor disk %s (thread %d).\n",
2852 non_destructive_ ? "(disabled) " : "",
2853 device_name_.c_str(), thread_num_);
2854 while (IsReadyToRunNoPause() &&
2855 in_flight_sectors_.size() <
2856 static_cast<size_t>(queue_size_ + 1)) {
2857 // Confine testing to a particular segment of the disk.
2858 int64 segment = (block_num / blocks_per_segment_) % num_segments;
2859 if (!non_destructive_ &&
2860 (block_num % blocks_per_segment_ == 0)) {
2861 logprintf(20, "Log: Starting to write segment %lld out of "
2862 "%lld on disk %s (thread %d).\n",
2863 segment, num_segments, device_name_.c_str(),
2868 BlockData *block = block_table_->GetUnusedBlock(segment);
2870 // If an unused sequence of sectors could not be found, skip to the
2871 // next block to process. Soon, a new segment will come and new
2872 // sectors will be able to be allocated. This effectively puts a
2873 // minimum on the disk size of 3x the stated cache size, or 48MiB
2874 // if a cache size is not given (since the cache is set to 16MiB
2875 // by default). Given that today's caches are in the low MiB range
2876 // and drive sizes in the mid GB range, this shouldn't pose a problem.
2877 // The 3x minimum comes from the following:
2878 // 1. In order to allocate 'y' blocks from a segment, the
2879 // segment must contain at least 2y blocks or else an
2880 // allocation may not succeed.
2881 // 2. Assume the entire disk is one segment.
2882 // 3. A full write phase consists of writing blocks corresponding to
2883 // 3/2 cache size.
2884 // 4. Therefore, the one segment must have 2 * 3/2 * cache
2885 // size worth of blocks = 3 * cache size worth of blocks
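// Worked example with the 16MiB default cache: the queue holds
// 3/2 * 16MiB = 24MiB worth of blocks per write phase, and the 2x
// allocation headroom doubles that, giving the 2 * 24MiB = 48MiB
// minimum noted above.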
2887 // In non-destructive mode, don't write anything to disk.
2888 if (!non_destructive_) {
2889 if (!WriteBlockToDisk(fd, block)) {
2890 block_table_->RemoveBlock(block);
2896 // Block is either initialized by writing, or, in the non-destructive case,
2897 // initialized by being added into the data structure for later reading.
2898 block->initialized();
2900 in_flight_sectors_.push(block);
2902 if (!os_->FlushPageCache()) // If O_DIRECT worked, this will be a NOP.
2905 // Verify blocks on disk.
2906 logprintf(20, "Log: Read phase for disk %s (thread %d).\n",
2907 device_name_.c_str(), thread_num_);
2908 while (IsReadyToRunNoPause() && !in_flight_sectors_.empty()) {
2909 BlockData *block = in_flight_sectors_.front();
2910 in_flight_sectors_.pop();
2911 if (!ValidateBlockOnDisk(fd, block))
2913 block_table_->RemoveBlock(block);
2918 pages_copied_ = blocks_written_ + blocks_read_;
2922 // Do an asynchronous disk I/O operation.
2923 // Return false if the IO is not set up.
2924 bool DiskThread::AsyncDiskIO(IoOp op, int fd, void *buf, int64 size,
2925 int64 offset, int64 timeout) {
2926 #ifdef HAVE_LIBAIO_H
2927 // Use the Linux native asynchronous I/O interface for reading/writing.
2928 // A read/write consists of three basic steps:
2929 // 1. create an io context.
2930 // 2. prepare and submit an io request to the context
2931 // 3. wait for an event on the context.
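// A minimal standalone sketch of that flow (illustrative only; error
// handling omitted, names local to this comment):
//   io_context_t ctx = 0;
//   io_setup(1, &ctx);                          // 1. create the context
//   struct iocb cb;
//   struct iocb *cbs[1] = { &cb };
//   io_prep_pread(&cb, fd, buf, size, offset);  // 2. prepare and...
//   io_submit(ctx, 1, cbs);                     //    ...submit the request
//   struct io_event ev;
//   io_getevents(ctx, 1, 1, &ev, NULL);         // 3. wait for completion
//   io_destroy(ctx);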
2936 const char *error_str;
2937 } operations[2] = {
2938 { IO_CMD_PREAD, "read", "disk-read-error" },
2939 { IO_CMD_PWRITE, "write", "disk-write-error" }
2943 memset(&cb, 0, sizeof(cb));
2946 cb.aio_lio_opcode = operations[op].opcode;
2948 cb.u.c.nbytes = size;
2949 cb.u.c.offset = offset;
2951 struct iocb *cbs[] = { &cb };
2952 if (io_submit(aio_ctx_, 1, cbs) != 1) {
2955 sat_strerror(error, buf, sizeof(buf));
2956 logprintf(0, "Process Error: Unable to submit async %s "
2957 "on disk %s (thread %d). Error %d, %s\n",
2958 operations[op].op_str, device_name_.c_str(),
2959 thread_num_, error, buf);
2963 struct io_event event;
2964 memset(&event, 0, sizeof(event));
2966 tv.tv_sec = timeout / 1000000;
2967 tv.tv_nsec = (timeout % 1000000) * 1000;
2968 if (io_getevents(aio_ctx_, 1, 1, &event, &tv) != 1) {
2969 // A ctrl-c from the keyboard will cause io_getevents to fail with an
2970 // EINTR error code. This is not an error and so don't treat it as such,
2971 // but still log it.
2973 if (error == EINTR) {
2974 logprintf(5, "Log: %s interrupted on disk %s (thread %d).\n",
2975 operations[op].op_str, device_name_.c_str(),
2978 os_->ErrorReport(device_name_.c_str(), operations[op].error_str, 1);
2980 logprintf(0, "Hardware Error: Timeout doing async %s to sectors "
2981 "starting at %lld on disk %s (thread %d).\n",
2982 operations[op].op_str, offset / kSectorSize,
2983 device_name_.c_str(), thread_num_);
2986 // Don't bother checking return codes since io_cancel seems to always fail.
2987 // Since io_cancel is always failing, destroying and recreating an I/O
2988 // context is a workaround for canceling an in-progress I/O operation.
2989 // TODO(amistry): Find out why io_cancel isn't working and make it work.
2990 io_cancel(aio_ctx_, &cb, &event);
2991 io_destroy(aio_ctx_);
2993 if (io_setup(5, &aio_ctx_)) {
2996 sat_strerror(error, buf, sizeof(buf));
2997 logprintf(0, "Process Error: Unable to create aio context on disk %s"
2998 " (thread %d) Error %d, %s\n",
2999 device_name_.c_str(), thread_num_, error, buf);
3005 // event.res contains the number of bytes written/read, or the
3006 // negative errno if the operation failed.
3007 if (event.res != static_cast<uint64>(size)) {
3009 os_->ErrorReport(device_name_.c_str(), operations[op].error_str, 1);
3011 int64 result = static_cast<int64>(event.res);
3015 logprintf(0, "Hardware Error: Low-level I/O error while doing %s to "
3016 "sectors starting at %lld on disk %s (thread %d).\n",
3017 operations[op].op_str, offset / kSectorSize,
3018 device_name_.c_str(), thread_num_);
3021 logprintf(0, "Hardware Error: Unknown error while doing %s to "
3022 "sectors starting at %lld on disk %s (thread %d).\n",
3023 operations[op].op_str, offset / kSectorSize,
3024 device_name_.c_str(), thread_num_);
3027 logprintf(0, "Hardware Error: Unable to %s to sectors starting at "
3028 "%lld on disk %s (thread %d).\n",
3029 operations[op].op_str, offset / kSectorSize,
3030 device_name_.c_str(), thread_num_);
3036 #else // !HAVE_LIBAIO_H
3041 // Write a block to disk.
3042 // Return false if the block is not written.
3043 bool DiskThread::WriteBlockToDisk(int fd, BlockData *block) {
3044 memset(block_buffer_, 0, block->size());
3046 // Fill block buffer with a pattern
3047 struct page_entry pe;
3048 if (!sat_->GetValid(&pe)) {
3049 // Even though a valid page could not be obtained, it is not an error
3050 // since we can always fill in a pattern directly, albeit more slowly.
3051 unsigned int *memblock = static_cast<unsigned int *>(block_buffer_);
3052 block->set_pattern(patternlist_->GetRandomPattern());
3054 logprintf(11, "Log: Warning, using pattern fill fallback in "
3055 "DiskThread::WriteBlockToDisk on disk %s (thread %d).\n",
3056 device_name_.c_str(), thread_num_);
3058 for (unsigned int i = 0; i < block->size()/wordsize_; i++) {
3059 memblock[i] = block->pattern()->pattern(i);
3062 memcpy(block_buffer_, pe.addr, block->size());
3063 block->set_pattern(pe.pattern);
3064 sat_->PutValid(&pe);
3067 logprintf(12, "Log: Writing %lld sectors starting at %lld on disk %s"
3068 " (thread %d).\n",
3069 block->size()/kSectorSize, block->address(),
3070 device_name_.c_str(), thread_num_);
3072 int64 start_time = GetTime();
3074 if (!AsyncDiskIO(ASYNC_IO_WRITE, fd, block_buffer_, block->size(),
3075 block->address() * kSectorSize, write_timeout_)) {
3079 int64 end_time = GetTime();
3080 logprintf(12, "Log: Writing time: %lld us (thread %d).\n",
3081 end_time - start_time, thread_num_);
3082 if (end_time - start_time > write_threshold_) {
3083 logprintf(5, "Log: Write took %lld us which is longer than threshold "
3084 "%lld us on disk %s (thread %d).\n",
3085 end_time - start_time, write_threshold_, device_name_.c_str(),
3092 // Verify a block on disk.
3093 // Return true if the block was read, also increment errorcount
3094 // if the block had data errors or performance problems.
3095 bool DiskThread::ValidateBlockOnDisk(int fd, BlockData *block) {
3096 int64 blocks = block->size() / read_block_size_;
3097 int64 bytes_read = 0;
3098 int64 current_blocks;
3099 int64 current_bytes;
3100 uint64 address = block->address();
3102 logprintf(20, "Log: Reading sectors starting at %lld on disk %s "
3103 "(thread %d).\n",
3104 address, device_name_.c_str(), thread_num_);
3106 // Read block from disk and time the read. If it takes longer than the
3107 // threshold, complain.
3108 if (lseek64(fd, address * kSectorSize, SEEK_SET) == -1) {
3109 logprintf(0, "Process Error: Unable to seek to sector %lld in "
3110 "DiskThread::ValidateSectorsOnDisk on disk %s "
3111 "(thread %d).\n", address, device_name_.c_str(), thread_num_);
3114 int64 start_time = GetTime();
3116 // Split a large write-sized block into small read-sized blocks and
3117 // read them in groups of randomly-sized multiples of read block size.
3118 // This ensures all data written to disk by this particular block
3119 // will be tested using a random reading pattern.
3120 while (blocks != 0) {
3121 // Test all read blocks in a written block.
3122 current_blocks = (random() % blocks) + 1;
3123 current_bytes = current_blocks * read_block_size_;
3125 memset(block_buffer_, 0, current_bytes);
3127 logprintf(20, "Log: Reading %lld sectors starting at sector %lld on "
3128 "disk %s (thread %d)\n",
3129 current_bytes / kSectorSize,
3130 (address * kSectorSize + bytes_read) / kSectorSize,
3131 device_name_.c_str(), thread_num_);
3133 if (!AsyncDiskIO(ASYNC_IO_READ, fd, block_buffer_, current_bytes,
3134 address * kSectorSize + bytes_read,
3139 int64 end_time = GetTime();
3140 logprintf(20, "Log: Reading time: %lld us (thread %d).\n",
3141 end_time - start_time, thread_num_);
3142 if (end_time - start_time > read_threshold_) {
3143 logprintf(5, "Log: Read took %lld us which is longer than threshold "
3144 "%lld us on disk %s (thread %d).\n",
3145 end_time - start_time, read_threshold_,
3146 device_name_.c_str(), thread_num_);
3149 // In non-destructive mode, don't compare the block to the pattern since
3150 // the block was never written to disk in the first place.
3151 if (!non_destructive_) {
3152 if (CheckRegion(block_buffer_, block->pattern(), current_bytes,
3154 os_->ErrorReport(device_name_.c_str(), "disk-pattern-error", 1);
3156 logprintf(0, "Hardware Error: Pattern mismatch in block starting at "
3157 "sector %lld in DiskThread::ValidateSectorsOnDisk on "
3158 "disk %s (thread %d).\n",
3159 address, device_name_.c_str(), thread_num_);
3163 bytes_read += current_blocks * read_block_size_;
3164 blocks -= current_blocks;
3170 // Direct device access thread.
3171 // Return false on software error.
3172 bool DiskThread::Work() {
3175 logprintf(9, "Log: Starting disk thread %d, disk %s\n",
3176 thread_num_, device_name_.c_str());
3178 srandom(time(NULL));
3180 if (!OpenDevice(&fd)) {
3185 // Allocate a block buffer aligned to 512 bytes since the kernel requires it
3186 // when using direct IO.
3187 #ifdef HAVE_POSIX_MEMALIGN
3188 int memalign_result = posix_memalign(&block_buffer_, kBufferAlignment,
3189 sat_->page_length());
3191 block_buffer_ = memalign(kBufferAlignment, sat_->page_length());
3192 int memalign_result = (block_buffer_ == 0);
3194 if (memalign_result) {
3196 logprintf(0, "Process Error: Unable to allocate memory for buffers "
3197 "for disk %s (thread %d) posix memalign returned %d.\n",
3198 device_name_.c_str(), thread_num_, memalign_result);
3203 #ifdef HAVE_LIBAIO_H
3204 if (io_setup(5, &aio_ctx_)) {
3206 logprintf(0, "Process Error: Unable to create aio context for disk %s"
3208 device_name_.c_str(), thread_num_);
3214 bool result = DoWork(fd);
3218 #ifdef HAVE_LIBAIO_H
3219 io_destroy(aio_ctx_);
3223 logprintf(9, "Log: Completed %d (disk %s): disk thread status %d, "
3224 "%d pages copied\n",
3225 thread_num_, device_name_.c_str(), status_, pages_copied_);
3229 RandomDiskThread::RandomDiskThread(DiskBlockTable *block_table)
3230 : DiskThread(block_table) {
3231 update_block_table_ = 0;
3234 RandomDiskThread::~RandomDiskThread() {
3237 // Workload for random disk thread.
3238 bool RandomDiskThread::DoWork(int fd) {
3239 logprintf(11, "Log: Random phase for disk %s (thread %d).\n",
3240 device_name_.c_str(), thread_num_);
3241 while (IsReadyToRun()) {
3242 BlockData *block = block_table_->GetRandomBlock();
3243 if (block == NULL) {
3244 logprintf(12, "Log: No block available for device %s (thread %d).\n",
3245 device_name_.c_str(), thread_num_);
3247 ValidateBlockOnDisk(fd, block);
3248 block_table_->ReleaseBlock(block);
3252 pages_copied_ = blocks_read_;
3256 MemoryRegionThread::MemoryRegionThread() {
3257 error_injection_ = false;
3261 MemoryRegionThread::~MemoryRegionThread() {
3266 // Set a region of memory or MMIO to be tested.
3267 // Return false if region could not be mapped.
3268 bool MemoryRegionThread::SetRegion(void *region, int64 size) {
3269 int plength = sat_->page_length();
3270 int npages = size / plength;
3271 if (size % plength) {
3272 logprintf(0, "Process Error: region size is not a multiple of SAT "
3278 pages_ = new PageEntryQueue(npages);
3279 char *base_addr = reinterpret_cast<char*>(region);
3280 region_ = base_addr;
3281 for (int i = 0; i < npages; i++) {
3282 struct page_entry pe;
3284 pe.addr = reinterpret_cast<void*>(base_addr + i * plength);
3285 pe.offset = i * plength;
3293 // More detailed error printout for hardware errors in memory or MMIO regions.
3295 void MemoryRegionThread::ProcessError(struct ErrorRecord *error,
3296 int priority,
3297 const char *message) {
3298 uint32 buffer_offset;
3299 if (phase_ == kPhaseCopy) {
3300 // If the error occurred on the Copy Phase, it means that
3301 // the source data (i.e., the main memory) is wrong, so
3302 // just pass it to the original ProcessError to call a
3304 WorkerThread::ProcessError(error, priority, message);
3305 } else if (phase_ == kPhaseCheck) {
3306 // An error in the Check Phase means that the memory region tested
3307 // has an error. Gather more information and then report
3309 // Determine if this is a write or read error.
3310 os_->Flush(error->vaddr);
3311 error->reread = *(error->vaddr);
3312 char *good = reinterpret_cast<char*>(&(error->expected));
3313 char *bad = reinterpret_cast<char*>(&(error->actual));
3314 sat_assert(error->expected != error->actual);
3315 unsigned int offset = 0;
3316 for (offset = 0; offset < (sizeof(error->expected) - 1); offset++) {
3317 if (good[offset] != bad[offset])
3321 error->vbyteaddr = reinterpret_cast<char*>(error->vaddr) + offset;
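// For example, if expected is 0x1122334455667788 and actual is
// 0x1122334455667700, the loop above stops at the first differing byte
// (offset 0 on a little-endian machine), so vbyteaddr pinpoints the
// miscompared byte rather than the whole word.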
3323 buffer_offset = error->vbyteaddr - region_;
3325 // Find physical address if possible.
3326 error->paddr = os_->VirtualToPhysical(error->vbyteaddr);
3328 "%s: miscompare on %s, CRC check at %p(0x%llx), "
3329 "offset %llx: read:0x%016llx, reread:0x%016llx "
3330 "expected:0x%016llx\n",
3332 identifier_.c_str(),
3340 logprintf(0, "Process Error: memory region thread raised an "
3341 "unexpected error.");
3345 // Workload for testing memory or MMIO regions.
3346 // Return false on software error.
3347 bool MemoryRegionThread::Work() {
3348 struct page_entry source_pe;
3349 struct page_entry memregion_pe;
3352 const uint64 error_constant = 0x00ba00000000ba00LL;
3354 // For error injection.
3359 logprintf(9, "Log: Starting Memory Region thread %d\n", thread_num_);
3361 while (IsReadyToRun()) {
3362 // Getting pages from SAT and queue.
3363 phase_ = kPhaseNoPhase;
3364 result = result && sat_->GetValid(&source_pe);
3366 logprintf(0, "Process Error: memory region thread failed to pop "
3367 "pages from SAT, bailing\n");
3371 result = result && pages_->PopRandom(&memregion_pe);
3373 logprintf(0, "Process Error: memory region thread failed to pop "
3374 "pages from queue, bailing\n");
3378 // Error injection for CRC copy.
3379 if ((sat_->error_injection() || error_injection_) && loops == 1) {
3380 addr = reinterpret_cast<int64*>(source_pe.addr);
3381 offset = random() % (sat_->page_length() / wordsize_);
3382 data = addr[offset];
3383 addr[offset] = error_constant;
3386 // Copying SAT page into memory region.
3387 phase_ = kPhaseCopy;
3388 CrcCopyPage(&memregion_pe, &source_pe);
3389 memregion_pe.pattern = source_pe.pattern;
3391 // Error injection for CRC Check.
3392 if ((sat_->error_injection() || error_injection_) && loops == 2) {
3393 addr = reinterpret_cast<int64*>(memregion_pe.addr);
3394 offset = random() % (sat_->page_length() / wordsize_);
3395 data = addr[offset];
3396 addr[offset] = error_constant;
3399 // Checking page content in memory region.
3400 phase_ = kPhaseCheck;
3401 CrcCheckPage(&memregion_pe);
3403 phase_ = kPhaseNoPhase;
3404 // Storing pages on their proper queues.
3405 result = result && sat_->PutValid(&source_pe);
3407 logprintf(0, "Process Error: memory region thread failed to push "
3408 "pages into SAT, bailing\n");
3411 result = result && pages_->Push(&memregion_pe);
3413 logprintf(0, "Process Error: memory region thread failed to push "
3414 "pages into queue, bailing\n");
3418 if ((sat_->error_injection() || error_injection_) &&
3419 loops >= 1 && loops <= 2) {
3420 addr[offset] = data;
3427 pages_copied_ = loops;
3429 logprintf(9, "Log: Completed %d: Memory Region thread. Status %d, %d "
3430 "pages checked\n", thread_num_, status_, pages_copied_);
3434 // The list of MSRs to read from each cpu.
3435 const CpuFreqThread::CpuRegisterType CpuFreqThread::kCpuRegisters[] = {
3436 { kMsrTscAddr, "TSC" },
3437 { kMsrAperfAddr, "APERF" },
3438 { kMsrMperfAddr, "MPERF" },
3441 CpuFreqThread::CpuFreqThread(int num_cpus, int freq_threshold, int round)
3442 : num_cpus_(num_cpus),
3443 freq_threshold_(freq_threshold),
3445 sat_assert(round >= 0);
3447 // If rounding is off, force rounding to the nearest MHz.
3451 round_value_ = round/2.0;
3455 CpuFreqThread::~CpuFreqThread() {
3458 // Compute the difference between the currently read MSR values and the
3459 // previously read values and store the results in delta. If any of the
3460 // values did not increase, or the TSC value is too small, returns false.
3461 // Otherwise, returns true.
3462 bool CpuFreqThread::ComputeDelta(CpuDataType *current, CpuDataType *previous,
3463 CpuDataType *delta) {
3464 // Loop through the msrs.
3465 for (int msr = 0; msr < kMsrLast; msr++) {
3466 if (previous->msrs[msr] > current->msrs[msr]) {
3467 logprintf(0, "Log: Register %s went backwards 0x%llx to 0x%llx "
3468 "skipping interval\n", kCpuRegisters[msr], previous->msrs[msr],
3469 current->msrs[msr]);
3472 delta->msrs[msr] = current->msrs[msr] - previous->msrs[msr];
3476 // Check for TSC < 1 Mcycles over interval.
3477 if (delta->msrs[kMsrTsc] < (1000 * 1000)) {
3478 logprintf(0, "Log: Insanely slow TSC rate, TSC stops in idle?\n");
3481 timersub(&current->tv, &previous->tv, &delta->tv);
3486 // Compute the change in values of the MSRs between current and previous,
3487 // and set the frequency of the cpu in MHz. If there is an error computing
3488 // the delta, return false. Otherwise, return true.
3489 bool CpuFreqThread::ComputeFrequency(CpuDataType *current,
3490 CpuDataType *previous, int *freq) {
3492 if (!ComputeDelta(current, previous, &delta)) {
3496 double interval = delta.tv.tv_sec + delta.tv.tv_usec / 1000000.0;
3497 double frequency = 1.0 * delta.msrs[kMsrTsc] / 1000000
3498 * delta.msrs[kMsrAperf] / delta.msrs[kMsrMperf] / interval;
3500 // Use the rounding value to round up properly.
3501 int computed = static_cast<int>(frequency + round_value_);
3502 *freq = computed - (computed % round_);
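// Worked example (hypothetical numbers): delta TSC = 2.4e9 cycles over a
// 1.0 s interval with APERF/MPERF = 0.5 gives 2400 * 0.5 = 1200 MHz;
// with round_ = 10 (round_value_ = 5), computed = 1205 and
// *freq = 1205 - (1205 % 10) = 1200.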
3506 // This is the task function that the thread executes.
3507 bool CpuFreqThread::Work() {
3509 if (!AvailableCpus(&cpuset)) {
3510 logprintf(0, "Process Error: Cannot get information about the cpus.\n");
3514 // Start off indicating the test is passing.
3519 uint32 num_intervals = 0;
3520 bool paused = false;
3524 vector<CpuDataType> data[2];
3525 data[0].resize(num_cpus_);
3526 data[1].resize(num_cpus_);
3527 while (IsReadyToRun(&paused)) {
3529 // Reset the intervals and restart logic after the pause.
3532 if (num_intervals == 0) {
3533 // If this is the first interval, then always wait a bit before
3534 // starting to collect data.
3535 sat_sleep(kStartupDelay);
3538 // Get the per cpu counters.
3540 for (int cpu = 0; cpu < num_cpus_; cpu++) {
3541 if (CPU_ISSET(cpu, &cpuset)) {
3542 if (!GetMsrs(cpu, &data[curr][cpu])) {
3543 logprintf(0, "Failed to get msrs on cpu %d.\n", cpu);
3550 // Reset the number of collected intervals since something bad happened.
3557 // Only compute a delta once we have more than two intervals worth of data.
3558 if (num_intervals > 2) {
3559 for (int cpu = 0; cpu < num_cpus_; cpu++) {
3560 if (CPU_ISSET(cpu, &cpuset)) {
3562 if (!ComputeFrequency(&data[curr][cpu], &data[prev][cpu],
3564 // Reset the number of collected intervals since an unknown
3566 logprintf(0, "Log: Cannot get frequency of cpu %d.\n", cpu);
3570 logprintf(15, "Cpu %d Freq %d\n", cpu, freq);
3571 if (freq < freq_threshold_) {
3574 logprintf(0, "Log: Cpu %d frequency is too low, frequency %d MHz "
3575 "threshold %d MHz.\n", cpu, freq, freq_threshold_);
3581 sat_sleep(kIntervalPause);
3583 // Swap the values in curr and prev (these values flip between 0 and 1).
3592 // Get the MSR values for this particular cpu and save them in data. If
3593 // any error is encountered, returns false. Otherwise, returns true.
3594 bool CpuFreqThread::GetMsrs(int cpu, CpuDataType *data) {
3595 for (int msr = 0; msr < kMsrLast; msr++) {
3596 if (!os_->ReadMSR(cpu, kCpuRegisters[msr].msr, &data->msrs[msr])) {
3600 // Save the time at which we acquired these values.
3601 gettimeofday(&data->tv, NULL);
3606 // Returns true if this test can run on the current machine. Otherwise,
3607 // returns false.
3608 bool CpuFreqThread::CanRun() {
3609 #if defined(STRESSAPPTEST_CPU_X86_64) || defined(STRESSAPPTEST_CPU_I686)
3610 unsigned int eax, ebx, ecx, edx;
3612 // Check that the TSC feature is supported.
3613 // This check is valid for both Intel and AMD.
3614 eax = 1;
3615 cpuid(&eax, &ebx, &ecx, &edx);
3616 if (!(edx & (1 << 5))) {
3617 logprintf(0, "Process Error: No TSC support.\n");
3621 // Check the highest extended function level supported.
3622 // This check is valid for both Intel and AMD.
3623 eax = 0x80000000;
3624 cpuid(&eax, &ebx, &ecx, &edx);
3625 if (eax < 0x80000007) {
3626 logprintf(0, "Process Error: No invariant TSC support.\n");
3630 // Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
3631 // This check is valid for both Intel and AMD.
3632 eax = 0x80000007;
3633 cpuid(&eax, &ebx, &ecx, &edx);
3634 if ((edx & (1 << 8)) == 0) {
3635 logprintf(0, "Process Error: No non-stop TSC support.\n");
3639 // APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
3640 // This check is valid for both Intel and AMD.
3641 eax = 6;
3642 cpuid(&eax, &ebx, &ecx, &edx);
3643 if ((ecx & 1) == 0) {
3644 logprintf(0, "Process Error: No APERF MSR support.\n");
3649 logprintf(0, "Process Error: "
3650 "cpu_freq_test is only supported on X86 processors.\n");