1 // Copyright 2006 Google Inc. All Rights Reserved.
2 // Author: nsanders, menderico
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
16 // os.cc : os and machine specific implementation
17 // This file includes an abstracted interface
18 // for linux-distro specific and HW specific
25 #include <linux/types.h>
31 #include <sys/ioctl.h>
33 #include <sys/types.h>
41 #define SHM_HUGETLB 04000 // remove when glibc defines it
47 // This file must work with autoconf on its public version,
48 // so these includes are correct.
50 #include "error_diag.h"
53 // OsLayer initialization.
// NOTE(review): this listing elides lines (the embedded numbering jumps), so
// the constructor and destructor signatures are not visible here. The
// assignments below are member defaults set in the OsLayer constructor; the
// final `delete` is presumably the destructor body -- confirm in full source.
58 min_hugepages_bytes_ = 0;
// Memory-allocation strategy flags all start disabled; AllocateTestMem()
// selects a strategy at runtime and sets the matching flag.
61 use_hugepages_ = false;
62 use_posix_shm_ = false;
63 dynamic_mapped_shmem_ = false;
64 mmapped_allocation_ = false;
68 time_initialized_ = 0;
74 num_cpus_per_node_ = 0;
76 err_log_callback_ = 0;
77 error_injection_ = false;
// Native pointer width in bits (32 or 64). `pvoid` is declared in an elided
// line -- presumably a void* local/member; TODO confirm.
80 address_mode_ = sizeof(pvoid) * 8;
85 use_flush_page_cache_ = false;
// Destructor side: release the diagnoser allocated in Initialize().
93 delete error_diagnoser_;
98 // OsLayer initialization.
// Sets up the wall clock, CPU/node topology bookkeeping, and the error
// diagnoser. Returns bool; the tail of the function is elided in this
// listing, so the failure paths are not visible here.
99 bool OsLayer::Initialize() {
101 clock_ = new Clock();
104 time_initialized_ = clock_->Now();
105 // Detect asm support.
108 if (num_cpus_ == 0) {
// CPU count not supplied externally: ask the OS for online processors.
110 num_cpus_ = sysconf(_SC_NPROCESSORS_ONLN);
111 num_cpus_per_node_ = num_cpus_ / num_nodes_;
113 logprintf(5, "Log: %d nodes, %d cpus.\n", num_nodes_, num_cpus_);
// cpu_set_t can only describe CPU_SETSIZE CPUs; abort if the machine has more.
114 sat_assert(CPU_SETSIZE >= num_cpus_);
// One (lazily filled) CPU mask per node; see FindCoreMask().
115 cpu_sets_.resize(num_nodes_);
116 cpu_sets_valid_.resize(num_nodes_);
117 // Create error diagnoser.
118 error_diagnoser_ = new ErrorDiag();
119 if (!error_diagnoser_->set_os(this))
124 // Machine type detected. Can we implement all these functions correctly?
// Open-source stub; body (return statement) is elided in this listing.
125 bool OsLayer::IsSupported() {
127 // There are no explicitly supported systems in open source version.
131 // This is the default empty implementation.
132 // SAT won't report full error information.
// Returns the pointer width of this binary in bits (32 or 64).
136 int OsLayer::AddressMode() {
137 // Detect 32/64 bit binary.
// `pvoid` is declared in an elided line -- presumably a void* whose size
// gives the native pointer width in bytes; TODO confirm.
139 return sizeof(pvoid) * 8;
142 // Translates user virtual to physical address.
// Reads /proc/self/pagemap: one 64-bit record per virtual page, hence the
// file offset is (vaddr / pagesize) * 8. Error-handling lines (errno
// capture, fd close, failure return) are elided in this listing.
143 uint64 OsLayer::VirtualToPhysical(void *vaddr) {
145 off64_t off = ((uintptr_t)vaddr) / sysconf(_SC_PAGESIZE) * 8;
146 int fd = open(kPagemapPath, O_RDONLY);
147 // /proc/self/pagemap is available in kernel >= 2.6.25
151 if (lseek64(fd, off, SEEK_SET) != off || read(fd, &frame, 8) != 8) {
153 string errtxt = ErrorString(err);
154 logprintf(0, "Process Error: failed to access %s with errno %d (%s)\n",
155 kPagemapPath, err, errtxt.c_str());
// Pagemap bit 63 = page present, bit 62 = swapped: only a present,
// unswapped page carries a usable frame number.
161 if (!(frame & (1LL << 63)) || (frame & (1LL << 62)))
// The code treats bits 55..60 as a page-size shift and the low 55 bits as
// the frame number; the in-page offset is re-attached on return.
163 shift = (frame >> 55) & 0x3f;
164 frame = (frame & 0x007fffffffffffffLL) << shift;
165 return frame | ((uintptr_t)vaddr & ((1LL << shift) - 1));
168 // Returns the HD device that contains this file.
// Open-source stub; body is elided in this listing.
169 string OsLayer::FindFileDevice(string filename) {
173 // Returns a list of locations corresponding to HD devices.
// Returns an (empty, per the comment below) list; tail elided.
174 list<string> OsLayer::FindFileDevices() {
175 // No autodetection on unknown systems.
176 list<string> locations;
181 // Get HW core features from cpuid instruction.
// Sets has_clflush_ / has_vector_ per compile-time architecture; unknown
// CPU types emit a compile-time #warning.
182 void OsLayer::GetFeatures() {
183 #if defined(STRESSAPPTEST_CPU_X86_64) || defined(STRESSAPPTEST_CPU_I686)
// CPUID leaf 1 feature flags: EDX bit 19 = CLFLUSH, bit 26 = SSE2.
184 unsigned int eax = 1, ebx, ecx, edx;
185 cpuid(&eax, &ebx, &ecx, &edx);
186 has_clflush_ = (edx >> 19) & 1;
187 has_vector_ = (edx >> 26) & 1; // SSE2 caps bit.
189 logprintf(9, "Log: has clflush: %s, has sse2: %s\n",
190 has_clflush_ ? "true" : "false",
191 has_vector_ ? "true" : "false");
192 #elif defined(STRESSAPPTEST_CPU_PPC)
193 // All PPC implementations have cache flush instructions.
195 #elif defined(STRESSAPPTEST_CPU_ARMV7A)
196 // TODO(nsanders): add detect from /proc/cpuinfo or /proc/self/auxv.
197 // For now assume neon and don't run -W if you don't have it.
198 has_vector_ = true; // NEON.
199 #warning "Unsupported CPU type ARMV7A: unable to determine feature set."
// Fallback branch (#else line is elided in this listing).
201 #warning "Unsupported CPU type: unable to determine feature set."
206 // Enable FlushPageCache to be functional instead of a NOP.
// One-way switch: once set, FlushPageCache() will actually drop caches.
207 void OsLayer::ActivateFlushPageCache(void) {
208 logprintf(9, "Log: page cache will be flushed as needed\n");
209 use_flush_page_cache_ = true;
212 // Flush the page cache to ensure reads come from the disk.
// No-op (early return, elided) unless ActivateFlushPageCache() was called.
// Syncs dirty pages, then writes "1" to /proc/sys/vm/drop_caches; failures
// are logged at low priority rather than fatal. Return statements elided.
213 bool OsLayer::FlushPageCache(void) {
214 if (!use_flush_page_cache_)
217 // First, ask the kernel to write the cache to the disk.
220 // Second, ask the kernel to empty the cache by writing "1" to
221 // "/proc/sys/vm/drop_caches".
222 static const char *drop_caches_file = "/proc/sys/vm/drop_caches";
223 int dcfile = open(drop_caches_file, O_WRONLY);
// errno capture line is elided here.
226 string errtxt = ErrorString(err);
227 logprintf(3, "Log: failed to open %s - err %d (%s)\n",
228 drop_caches_file, err, errtxt.c_str());
232 ssize_t bytes_written = write(dcfile, "1", 1);
235 if (bytes_written != 1) {
237 string errtxt = ErrorString(err);
238 logprintf(3, "Log: failed to write %s - err %d (%s)\n",
239 drop_caches_file, err, errtxt.c_str());
246 // We need to flush the cacheline here.
// Virtual-style hook: delegates to the generic FastFlush so platform
// subclasses/ports can override the behavior.
247 void OsLayer::Flush(void *vaddr) {
248 // Use the generic flush. This function is just so we can override
249 // this if we are so inclined.
251 OsLayer::FastFlush(vaddr);
256 // Run C or ASM copy as appropriate..
// Copies size_in_bytes from srcmem to dstmem while computing an Adler-style
// checksum. The condition choosing the asm path is elided in this listing
// (presumably a has_vector_ check -- TODO confirm).
257 bool OsLayer::AdlerMemcpyWarm(uint64 *dstmem, uint64 *srcmem,
258 unsigned int size_in_bytes,
259 AdlerChecksum *checksum) {
261 return AdlerMemcpyAsm(dstmem, srcmem, size_in_bytes, checksum);
263 return AdlerMemcpyWarmC(dstmem, srcmem, size_in_bytes, checksum);
268 // Translate physical address to memory module/chip name.
269 // Assumes interleaving between two memory channels based on the XOR of
270 // all address bits in the 'channel_hash' mask, with repeated 'channel_width_'
271 // blocks with bits distributed from each chip in that channel.
// Writes the DIMM label into buf (len bytes); the early "unknown" exit
// condition and return statements are elided in this listing.
272 int OsLayer::FindDimm(uint64 addr, char *buf, int len) {
274 snprintf(buf, len, "DIMM Unknown");
278 // Find channel by XORing address bits in channel_hash mask.
// Parity is computed separately on the low and high 32 bits, then XORed,
// selecting one of the two channel vectors.
279 uint32 low = static_cast<uint32>(addr & channel_hash_);
280 uint32 high = static_cast<uint32>((addr & channel_hash_) >> 32);
281 vector<string>& channel = (*channels_)[
282 __builtin_parity(high) ^ __builtin_parity(low)];
284 // Find dram chip by finding which byte within the channel
285 // by address mod channel width, then divide the channel
286 // evenly among the listed dram chips. Note, this will not work
// (continuation of the caveat is elided in this listing.)
288 int chip = (addr % (channel_width_ / 8)) /
289 ((channel_width_ / 8) / channel.size());
290 string name = channel[chip];
291 snprintf(buf, len, "%s", name.c_str());
296 // Classifies addresses according to "regions"
297 // This isn't really implemented meaningfully here..
// Lazily sizes regions on first call: 1/8 of total memory, minimum 512MB,
// at least one region. Out-of-range addresses are wrapped with a one-time
// warning (the `warned` update line is elided in this listing).
298 int32 OsLayer::FindRegion(uint64 addr) {
299 static bool warned = false;
301 if (regionsize_ == 0) {
302 regionsize_ = totalmemsize_ / 8;
303 if (regionsize_ < 512 * kMegabyte)
304 regionsize_ = 512 * kMegabyte;
305 regioncount_ = totalmemsize_ / regionsize_;
306 if (regioncount_ < 1) regioncount_ = 1;
309 int32 region_num = addr / regionsize_;
310 if (region_num >= regioncount_) {
312 logprintf(0, "Log: region number %d exceeds region count %d\n",
313 region_num, regioncount_);
// Wrap rather than fail so callers always get a valid region index.
316 region_num = region_num % regioncount_;
321 // Report which cores are associated with a given region.
// Lazily builds and caches one cpu_set_t per node: node r owns CPUs
// [r * num_cpus_per_node_, (r+1) * num_cpus_per_node_). Regions beyond the
// node count are folded back with modulo.
322 cpu_set_t *OsLayer::FindCoreMask(int32 region) {
323 sat_assert(region >= 0);
324 region %= num_nodes_;
325 if (!cpu_sets_valid_[region]) {
326 CPU_ZERO(&cpu_sets_[region]);
327 for (int i = 0; i < num_cpus_per_node_; ++i) {
328 CPU_SET(i + region * num_cpus_per_node_, &cpu_sets_[region]);
330 cpu_sets_valid_[region] = true;
331 logprintf(5, "Log: Region %d mask 0x%s\n",
332 region, FindCoreMaskFormat(region).c_str());
334 return &cpu_sets_[region];
337 // Return cores associated with a given region in hex string.
// Left-pads the formatted mask with '0' to at least 8 characters; the
// return statement is elided in this listing.
338 string OsLayer::FindCoreMaskFormat(int32 region) {
339 cpu_set_t* mask = FindCoreMask(region);
340 string format = cpuset_format(mask);
341 if (format.size() < 8)
342 format = string(8 - format.size(), '0') + format;
346 // Report an error in an easily parseable way.
// Logs "Report Error: symptom : part : count : time-to-failure". Empty
// part/symptom strings downgrade to a warning so log parsers don't break.
347 bool OsLayer::ErrorReport(const char *part, const char *symptom, int count) {
348 time_t now = clock_->Now();
// Seconds since Initialize() -- time to failure.
349 int ttf = now - time_initialized_;
350 if (strlen(symptom) && strlen(part)) {
351 logprintf(0, "Report Error: %s : %s : %d : %ds\n",
352 symptom, part, count, ttf);
354 // Log something so the error still shows up, but this won't break the
// (continuation elided in this listing; presumably "...log parser".)
356 logprintf(0, "Warning: Invalid Report Error: "
357 "%s : %s : %d : %ds\n", symptom, part, count, ttf);
362 // Read the number of hugepages out of the kernel interface in proc.
// Returns the hugepage count parsed from /proc/sys/vm/nr_hugepages; error
// returns (open failure, short/overlong read) are elided in this listing.
363 int64 OsLayer::FindHugePages() {
366 // This is a kernel interface to query the number of hugepages
367 // available in the system.
368 static const char *hugepages_info_file = "/proc/sys/vm/nr_hugepages";
369 int hpfile = open(hugepages_info_file, O_RDONLY);
// buf is declared in an elided line -- presumably char[65] or larger,
// since index bytes_read (up to 64) is written below; TODO confirm.
371 ssize_t bytes_read = read(hpfile, buf, 64);
374 if (bytes_read <= 0) {
375 logprintf(12, "Log: /proc/sys/vm/nr_hugepages "
376 "read did not provide data\n");
// A full 64-byte read means the number is implausibly long.
380 if (bytes_read == 64) {
381 logprintf(0, "Process Error: /proc/sys/vm/nr_hugepages "
382 "is surprisingly large\n");
386 // Add a null termination to be string safe.
387 buf[bytes_read] = '\0';
388 // Read the page count.
389 int64 pages = strtoull(buf, NULL, 10); // NOLINT
// Determine how much memory the test should target, caching the result in
// totalmemsize_. Policy: honor min_hugepages_bytes_ / hugepage pool when
// present, else 85% of RAM below 2GB or 95% minus 192MB above, and always
// leave reserve_mb_ for the system.
394 int64 OsLayer::FindFreeMemSize() {
397 if (totalmemsize_ > 0)
398 return totalmemsize_;
400 int64 pages = sysconf(_SC_PHYS_PAGES);
401 int64 avpages = sysconf(_SC_AVPHYS_PAGES);
402 int64 pagesize = sysconf(_SC_PAGESIZE);
403 int64 physsize = pages * pagesize;
404 int64 avphyssize = avpages * pagesize;
406 // Assume 2MB hugepages.
407 int64 hugepagesize = FindHugePages() * 2 * kMegabyte;
409 if ((pages == -1) || (pagesize == -1)) {
410 logprintf(0, "Process Error: sysconf could not determine memory size.\n");
414 // We want to leave enough stuff for things to run.
415 // If the user specified a minimum amount of memory to expect, require that.
416 // Otherwise, if more than 2GB is present, leave 192M + 5% for other stuff.
417 // If less than 2GB is present use 85% of what's available.
418 // These are fairly arbitrary numbers that seem to work OK.
420 // TODO(nsanders): is there a more correct way to determine target
422 if (hugepagesize > 0) {
423 if (min_hugepages_bytes_ > 0) {
424 minsize = min_hugepages_bytes_;
426 minsize = hugepagesize;
429 if (physsize < 2048LL * kMegabyte) {
430 minsize = ((pages * 85) / 100) * pagesize;
432 minsize = ((pages * 95) / 100) * pagesize - (192 * kMegabyte);
434 // Make sure that at least reserve_mb_ is left for the system.
435 if (reserve_mb_ > 0) {
436 int64 totalsize = pages * pagesize;
// NOTE(review): despite the name, reserve_kb holds BYTES
// (reserve_mb_ * kMegabyte); the logic is consistent, only the name is off.
437 int64 reserve_kb = reserve_mb_ * kMegabyte;
438 if (reserve_kb > totalsize) {
439 logprintf(0, "Procedural Error: %lld is bigger than the total memory "
440 "available %lld\n", reserve_kb, totalsize);
441 } else if (reserve_kb > totalsize - minsize) {
442 logprintf(5, "Warning: Overriding memory to use: original %lld, "
443 "current %lld\n", minsize, totalsize - reserve_kb);
444 minsize = totalsize - reserve_kb;
449 // Use hugepage sizing if available.
450 if (hugepagesize > 0) {
451 if (hugepagesize < minsize) {
452 logprintf(0, "Procedural Error: Not enough hugepages. "
453 "%lldMB available < %lldMB required.\n",
454 hugepagesize / kMegabyte,
455 minsize / kMegabyte);
456 // Require the calculated minimum amount of memory.
459 // Require that we get all hugepages.
463 // Require the calculated minimum amount of memory.
// `size` is assigned in elided lines above; logged and cached here.
467 logprintf(5, "Log: Total %lld MB. Free %lld MB. Hugepages %lld MB. "
468 "Targeting %lld MB (%lld%%)\n",
469 physsize / kMegabyte,
470 avphyssize / kMegabyte,
471 hugepagesize / kMegabyte,
473 size * 100 / physsize);
475 totalmemsize_ = size;
479 // Allocates all memory available.
// Convenience wrapper: sizes via FindFreeMemSize() then allocates it with
// AllocateTestMem(); the return-value handling is elided in this listing.
480 int64 OsLayer::AllocateAllMem() {
481 int64 length = FindFreeMemSize();
482 bool retval = AllocateTestMem(length, 0);
491 bool OsLayer::AllocateTestMem(int64 length, uint64 paddr_base) {
492 // Try hugepages first.
495 sat_assert(length >= 0);
498 logprintf(0, "Process Error: non zero paddr_base %#llx is not supported,"
499 " ignore.\n", paddr_base);
501 // Determine optimal memory allocation path.
502 bool prefer_hugepages = false;
503 bool prefer_posix_shm = false;
504 bool prefer_dynamic_mapping = false;
506 // Are there enough hugepages?
507 int64 hugepagesize = FindHugePages() * 2 * kMegabyte;
508 // TODO(nsanders): Is there enough /dev/shm? Is there enough free memeory?
509 if ((length >= 1400LL * kMegabyte) && (address_mode_ == 32)) {
510 prefer_dynamic_mapping = true;
511 prefer_posix_shm = true;
512 logprintf(3, "Log: Prefer POSIX shared memory allocation.\n");
513 logprintf(3, "Log: You may need to run "
514 "'sudo mount -o remount,size=100\% /dev/shm.'\n");
515 } else if (hugepagesize >= length) {
516 prefer_hugepages = true;
517 logprintf(3, "Log: Prefer using hugepage allocation.\n");
519 logprintf(3, "Log: Prefer plain malloc memory allocation.\n");
522 #ifdef HAVE_SYS_SHM_H
523 // Allocate hugepage mapped memory.
524 if (prefer_hugepages) {
525 do { // Allow break statement.
529 if ((shmid = shmget(2, length,
530 SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W)) < 0) {
532 string errtxt = ErrorString(err);
533 logprintf(3, "Log: failed to allocate shared hugepage "
534 "object - err %d (%s)\n",
535 err, errtxt.c_str());
536 logprintf(3, "Log: sysctl -w vm.nr_hugepages=XXX allows hugepages.\n");
540 shmaddr = shmat(shmid, NULL, 0);
541 if (shmaddr == reinterpret_cast<void*>(-1)) {
543 string errtxt = ErrorString(err);
544 logprintf(0, "Log: failed to attach shared "
545 "hugepage object - err %d (%s).\n",
546 err, errtxt.c_str());
547 if (shmctl(shmid, IPC_RMID, NULL) < 0) {
549 string errtxt = ErrorString(err);
550 logprintf(0, "Log: failed to remove shared "
551 "hugepage object - err %d (%s).\n",
552 err, errtxt.c_str());
556 use_hugepages_ = true;
559 logprintf(0, "Log: Using shared hugepage object 0x%x at %p.\n",
564 if ((!use_hugepages_) && prefer_posix_shm) {
567 void *shmaddr = NULL;
569 shm_object = shm_open("/stressapptest", O_CREAT | O_RDWR, S_IRWXU);
570 if (shm_object < 0) {
572 string errtxt = ErrorString(err);
573 logprintf(3, "Log: failed to allocate shared "
574 "smallpage object - err %d (%s)\n",
575 err, errtxt.c_str());
579 if (0 > ftruncate(shm_object, length)) {
581 string errtxt = ErrorString(err);
582 logprintf(3, "Log: failed to ftruncate shared "
583 "smallpage object - err %d (%s)\n",
584 err, errtxt.c_str());
588 // 32 bit linux apps can only use ~1.4G of address space.
589 // Use dynamic mapping for allocations larger than that.
590 // Currently perf hit is ~10% for this.
591 if (prefer_dynamic_mapping) {
592 dynamic_mapped_shmem_ = true;
594 // Do a full mapping here otherwise.
595 shmaddr = mmap64(NULL, length, PROT_READ | PROT_WRITE,
596 MAP_SHARED | MAP_NORESERVE | MAP_LOCKED | MAP_POPULATE,
598 if (shmaddr == reinterpret_cast<void*>(-1)) {
600 string errtxt = ErrorString(err);
601 logprintf(0, "Log: failed to map shared "
602 "smallpage object - err %d (%s).\n",
603 err, errtxt.c_str());
608 use_posix_shm_ = true;
611 char location_message[256] = "";
612 if (dynamic_mapped_shmem_) {
613 sprintf(location_message, "mapped as needed");
615 sprintf(location_message, "at %p", shmaddr);
617 logprintf(0, "Log: Using posix shared memory object 0x%x %s.\n",
618 shm_object, location_message);
620 shm_unlink("/stressapptest");
622 #endif // HAVE_SYS_SHM_H
624 if (!use_hugepages_ && !use_posix_shm_) {
625 // If the page size is what SAT is expecting explicitly perform mmap()
627 if (sysconf(_SC_PAGESIZE) >= 4096) {
628 void *map_buf = mmap(NULL, length, PROT_READ | PROT_WRITE,
629 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
630 if (map_buf != MAP_FAILED) {
632 mmapped_allocation_ = true;
633 logprintf(0, "Log: Using mmap() allocation at %p.\n", buf);
636 if (!mmapped_allocation_) {
637 // Use memalign to ensure that blocks are aligned enough for disk direct
639 buf = static_cast<char*>(memalign(4096, length));
641 logprintf(0, "Log: Using memaligned allocation at %p.\n", buf);
643 logprintf(0, "Process Error: memalign returned 0\n");
644 if ((length >= 1499LL * kMegabyte) && (address_mode_ == 32)) {
645 logprintf(0, "Log: You are trying to allocate > 1.4G on a 32 "
646 "bit process. Please setup shared memory.\n");
653 if (buf || dynamic_mapped_shmem_) {
654 testmemsize_ = length;
659 return (buf != 0) || dynamic_mapped_shmem_;
662 // Free the test memory.
// Tears down whichever allocation path AllocateTestMem() used: SysV
// hugepage segment, POSIX shm mapping, mmap, or (elided) free()/memalign.
663 void OsLayer::FreeTestMem() {
665 if (use_hugepages_) {
666 #ifdef HAVE_SYS_SHM_H
// shmdt (detach) is presumably in an elided line before the remove.
668 shmctl(shmid_, IPC_RMID, NULL);
670 } else if (use_posix_shm_) {
671 if (!dynamic_mapped_shmem_) {
672 munmap(testmem_, testmemsize_);
675 } else if (mmapped_allocation_) {
676 munmap(testmem_, testmemsize_);
686 // Prepare the target memory. It may require mapping in, or this may be a noop.
// For dynamic shm, maps [offset, offset+length) of the shm object on demand
// (mmap64 vs mmap chosen by an elided preprocessor conditional); otherwise
// just returns a pointer into the already-mapped test buffer.
687 void *OsLayer::PrepareTestMem(uint64 offset, uint64 length) {
688 sat_assert((offset + length) <= testmemsize_);
689 if (dynamic_mapped_shmem_) {
690 // TODO(nsanders): Check if we can support MAP_NONBLOCK,
691 // and evaluate performance hit from not using it.
693 void * mapping = mmap64(NULL, length, PROT_READ | PROT_WRITE,
694 MAP_SHARED | MAP_NORESERVE | MAP_LOCKED | MAP_POPULATE,
697 void * mapping = mmap(NULL, length, PROT_READ | PROT_WRITE,
698 MAP_SHARED | MAP_NORESERVE | MAP_LOCKED | MAP_POPULATE,
701 if (mapping == MAP_FAILED) {
702 string errtxt = ErrorString(errno);
703 logprintf(0, "Process Error: PrepareTestMem mmap64(%llx, %llx) failed. "
705 offset, length, errtxt.c_str());
// Static-mapping case: plain pointer arithmetic into the test buffer.
711 return reinterpret_cast<void*>(reinterpret_cast<char*>(testmem_) + offset);
714 // Release the test memory resources, if any.
// Counterpart of PrepareTestMem: unmaps the on-demand mapping when dynamic
// shm is in use; a no-op otherwise (offset is unused on this path).
715 void OsLayer::ReleaseTestMem(void *addr, uint64 offset, uint64 length) {
716 if (dynamic_mapped_shmem_) {
717 int retval = munmap(addr, length);
// The retval check line is elided in this listing.
719 string errtxt = ErrorString(errno);
720 logprintf(0, "Process Error: ReleaseTestMem munmap(%p, %llx) failed. "
722 addr, length, errtxt.c_str());
728 // No error polling on unknown systems.
// Stub: returns an error count (body elided; 0 on generic platforms).
729 int OsLayer::ErrorPoll() {
733 // Generally, poll for errors once per second.
// Sleep between ErrorPoll() calls (body elided in this listing).
734 void OsLayer::ErrorWait() {
739 // Open a PCI bus-dev-func as a file and return its file descriptor.
740 // Error is indicated by return value less than zero.
// Uses the /proc/bus/pci interface; dev_file is declared in an elided line.
741 int OsLayer::PciOpen(int bus, int device, int function) {
744 snprintf(dev_file, sizeof(dev_file), "/proc/bus/pci/%02x/%02x.%x",
745 bus, device, function);
747 int fd = open(dev_file, O_RDWR);
// The fd < 0 check is elided in this listing.
749 logprintf(0, "Process Error: Unable to open PCI bus %d, device %d, "
750 "function %d (errno %d).\n",
751 bus, device, function, errno);
759 // Read and write functions to access PCI config.
// Reads `width` bits (8/16/32) of PCI config space at `offset` via the fd
// from PciOpen(). `datacast` is a union declared in elided lines, used to
// avoid strict-aliasing violations when reinterpreting the read bytes.
760 uint32 OsLayer::PciRead(int fd, uint32 offset, int width) {
761 // Strict aliasing rules lawyers will cause data corruption
762 // on cast pointers in some gccs.
769 uint32 size = width / 8;
771 sat_assert((width == 32) || (width == 16) || (width == 8));
// Config space is 256 bytes; the access must fit inside it.
772 sat_assert(offset <= (256 - size));
774 if (lseek(fd, offset, SEEK_SET) < 0) {
775 logprintf(0, "Process Error: Can't seek %x\n", offset);
778 if (read(fd, &datacast, size) != static_cast<ssize_t>(size)) {
779 logprintf(0, "Process Error: Can't read %x\n", offset);
// Sanity-check that the union members alias the same storage.
786 sat_assert(&(datacast.l8) == reinterpret_cast<uint8*>(&datacast));
789 sat_assert(&(datacast.l16) == reinterpret_cast<uint16*>(&datacast));
// Writes `width` bits (8/16/32) of `value` to PCI config space at `offset`.
// Mirror of PciRead; `datacast` is a union declared in elided lines.
797 void OsLayer::PciWrite(int fd, uint32 offset, uint32 value, int width) {
798 // Strict aliasing rules lawyers will cause data corruption
799 // on cast pointers in some gccs.
806 uint32 size = width / 8;
808 sat_assert((width == 32) || (width == 16) || (width == 8));
// Config space is 256 bytes; the access must fit inside it.
809 sat_assert(offset <= (256 - size));
811 // Cram the data into the right alignment.
// Width-dispatch (switch/if) lines are elided around these assignments.
814 sat_assert(&(datacast.l8) == reinterpret_cast<uint8*>(&datacast));
817 sat_assert(&(datacast.l16) == reinterpret_cast<uint16*>(&datacast));
818 datacast.l16 = value;
820 datacast.l32 = value;
823 if (lseek(fd, offset, SEEK_SET) < 0) {
824 logprintf(0, "Process Error: Can't seek %x\n", offset);
827 if (write(fd, &datacast, size) != static_cast<ssize_t>(size)) {
828 logprintf(0, "Process Error: Can't write %x to %x\n", datacast.l32, offset);
// Opens /dev/cpu/<core>/msr and seeks to the given MSR address.
// Returns the fd on success; error returns are elided in this listing.
838 int OsLayer::OpenMSR(uint32 core, uint32 address) {
840 snprintf(buf, sizeof(buf), "/dev/cpu/%d/msr", core);
841 int fd = open(buf, O_RDWR);
// The msr device addresses registers by file offset.
845 uint32 pos = lseek(fd, address, SEEK_SET);
846 if (pos != address) {
848 logprintf(5, "Log: can't seek to msr %x, cpu %d\n", address, core);
// Reads the 64-bit MSR `address` on `core` into *data.
// Returns true on a full 8-byte read; fd close / return lines are elided.
855 bool OsLayer::ReadMSR(uint32 core, uint32 address, uint64 *data) {
856 int fd = OpenMSR(core, address);
860 // Read from the msr.
861 bool res = (sizeof(*data) == read(fd, data, sizeof(*data)));
864 logprintf(5, "Log: Failed to read msr %x core %d\n", address, core);
// Writes the 64-bit value *data to MSR `address` on `core`.
// Returns true on a full 8-byte write; fd close / return lines are elided.
871 bool OsLayer::WriteMSR(uint32 core, uint32 address, uint64 *data) {
872 int fd = OpenMSR(core, address);
877 bool res = (sizeof(*data) == write(fd, data, sizeof(*data)));
880 logprintf(5, "Log: Failed to write msr %x core %d\n", address, core);
887 // Extract bits [n+len-1, n] from a 32 bit word.
888 // so GetBitField(0x0f00, 8, 4) == 0xf.
889 uint32 OsLayer::GetBitField(uint32 val, uint32 n, uint32 len) {
890 return (val >> n) & ((1<<len) - 1);
893 // Generic CPU stress workload that would work on any CPU/Platform.
894 // Float-point array moving average calculation.
// Fills a 100-element array with pseudorandom signed values, then runs a
// long moving-average loop; the branch between rand_r and rand variants is
// an elided preprocessor conditional (rand_r where available, presumably).
895 bool OsLayer::CpuStressWorkload() {
896 double float_arr[100];
// Fixed seed keeps the workload deterministic across runs.
899 unsigned int seed = 12345;
902 // Initialize array with random numbers.
903 for (int i = 0; i < 100; i++) {
905 float_arr[i] = rand_r(&seed);
906 if (rand_r(&seed) % 2)
907 float_arr[i] *= -1.0;
910 float_arr[i] = rand(); // NOLINT
911 if (rand() % 2) // NOLINT
912 float_arr[i] *= -1.0;
916 // Calculate moving average.
// 1e8 iterations of a 3-point moving average over the circular array.
917 for (int i = 0; i < 100000000; i++) {
919 (float_arr[i % 100] + float_arr[(i + 1) % 100] +
920 float_arr[(i + 99) % 100]) / 3;
921 sum += float_arr[i % 100];
924 // Artificial printf so the loops do not get optimized away.
926 logprintf(12, "Log: I'm Feeling Lucky!\n");