Mixes for Privacy and Anonymity in the Internet
atomicops.h
// ©2013-2016 Cameron Desrochers.
// Distributed under the simplified BSD license (see the license file that
// should have come with this header).
// Uses Jeff Preshing's semaphore implementation (under the terms of its
// separate zlib license, embedded below).

#pragma once

// Provides portable (VC++2010+, Intel ICC 13, GCC 4.7+, and anything C++11 compliant) implementation
// of low-level memory barriers, plus a few semi-portable utility macros (for inlining and alignment).
// Also has a basic atomic type (limited to hardware-supported atomics with no memory ordering guarantees).
// Uses the AE_* prefix for macros (historical reasons), and the "moodycamel" namespace for symbols.

#include <cassert>
#include <type_traits>
#include <cerrno>
#include <cstdint>
#include <ctime>

// Platform detection
#if defined(__INTEL_COMPILER)
#define AE_ICC
#elif defined(_MSC_VER)
#define AE_VCPP
#elif defined(__GNUC__)
#define AE_GCC
#endif

#if defined(_M_IA64) || defined(__ia64__)
#define AE_ARCH_IA64
#elif defined(_WIN64) || defined(__amd64__) || defined(_M_X64) || defined(__x86_64__)
#define AE_ARCH_X64
#elif defined(_M_IX86) || defined(__i386__)
#define AE_ARCH_X86
#elif defined(_M_PPC) || defined(__powerpc__)
#define AE_ARCH_PPC
#else
#define AE_ARCH_UNKNOWN
#endif

// AE_UNUSED
#define AE_UNUSED(x) ((void)x)

// AE_NO_TSAN
#if defined(__has_feature)
#if __has_feature(thread_sanitizer)
#define AE_NO_TSAN __attribute__((no_sanitize("thread")))
#else
#define AE_NO_TSAN
#endif
#else
#define AE_NO_TSAN
#endif


// AE_FORCEINLINE
#if defined(AE_VCPP) || defined(AE_ICC)
#define AE_FORCEINLINE __forceinline
#elif defined(AE_GCC)
//#define AE_FORCEINLINE __attribute__((always_inline))
#define AE_FORCEINLINE inline
#else
#define AE_FORCEINLINE inline
#endif


// AE_ALIGN
#if defined(AE_VCPP) || defined(AE_ICC)
#define AE_ALIGN(x) __declspec(align(x))
#elif defined(AE_GCC)
#define AE_ALIGN(x) __attribute__((aligned(x)))
#else
// Assume GCC compliant syntax...
#define AE_ALIGN(x) __attribute__((aligned(x)))
#endif
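
// For illustration only (not part of the original header), the macros above
// might be used as follows; the 64-byte figure is an assumed cache-line size:
//
//     struct AE_ALIGN(64) PaddedFlag {    // keep the flag on its own cache line
//         int flag;
//     };
//
//     AE_FORCEINLINE void ignoreArg(int arg) AE_NO_TSAN { AE_UNUSED(arg); }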

// Portable atomic fences implemented below:

namespace moodycamel {

enum memory_order {
	memory_order_relaxed,
	memory_order_acquire,
	memory_order_release,
	memory_order_acq_rel,
	memory_order_seq_cst,

	// memory_order_sync: Forces a full sync:
	// #LoadLoad, #LoadStore, #StoreStore, and most significantly, #StoreLoad
	memory_order_sync = memory_order_seq_cst
};

}	// end namespace moodycamel

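// Illustrative sketch (not part of the original header): the fences declared
// below pair with the weak_atomic type defined further down to publish data
// from one thread to another. `payload` and `ready` are hypothetical names:
//
//     int payload;
//     moodycamel::weak_atomic<int> ready(0);
//
//     // Producer thread:
//     payload = 42;
//     moodycamel::fence(moodycamel::memory_order_release);    // make payload visible first
//     ready = 1;
//
//     // Consumer thread:
//     if (ready.load() != 0) {
//         moodycamel::fence(moodycamel::memory_order_acquire);    // then read payload safely
//         assert(payload == 42);
//     }
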
#if (defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))) || (defined(AE_ICC) && __INTEL_COMPILER < 1600)
// VS2010 and ICC13 don't support std::atomic_*_fence, implement our own fences

#include <intrin.h>

#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
#define AeFullSync _mm_mfence
#define AeLiteSync _mm_mfence
#elif defined(AE_ARCH_IA64)
#define AeFullSync __mf
#define AeLiteSync __mf
#elif defined(AE_ARCH_PPC)
#include <ppcintrinsics.h>
#define AeFullSync __sync
#define AeLiteSync __lwsync
#endif


#ifdef AE_VCPP
#pragma warning(push)
#pragma warning(disable: 4365)	// Disable erroneous 'conversion from long to unsigned int, signed/unsigned mismatch' warning when using `assert`
#ifdef __cplusplus_cli
#pragma managed(push, off)
#endif
#endif

namespace moodycamel {

AE_FORCEINLINE void compiler_fence(memory_order order) AE_NO_TSAN
{
	switch (order) {
		case memory_order_relaxed: break;
		case memory_order_acquire: _ReadBarrier(); break;
		case memory_order_release: _WriteBarrier(); break;
		case memory_order_acq_rel: _ReadWriteBarrier(); break;
		case memory_order_seq_cst: _ReadWriteBarrier(); break;
		default: assert(false);
	}
}

// x86/x64 have a strong memory model -- all loads and stores have
// acquire and release semantics automatically (so only need compiler
// barriers for those).
#if defined(AE_ARCH_X86) || defined(AE_ARCH_X64)
AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN
{
	switch (order) {
		case memory_order_relaxed: break;
		case memory_order_acquire: _ReadBarrier(); break;
		case memory_order_release: _WriteBarrier(); break;
		case memory_order_acq_rel: _ReadWriteBarrier(); break;
		case memory_order_seq_cst:
			_ReadWriteBarrier();
			AeFullSync();
			_ReadWriteBarrier();
			break;
		default: assert(false);
	}
}
#else
AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN
{
	// Non-specialized arch, use heavier memory barriers everywhere just in case :-(
	switch (order) {
		case memory_order_relaxed:
			break;
		case memory_order_acquire:
			_ReadBarrier();
			AeLiteSync();
			_ReadBarrier();
			break;
		case memory_order_release:
			_WriteBarrier();
			AeLiteSync();
			_WriteBarrier();
			break;
		case memory_order_acq_rel:
			_ReadWriteBarrier();
			AeLiteSync();
			_ReadWriteBarrier();
			break;
		case memory_order_seq_cst:
			_ReadWriteBarrier();
			AeFullSync();
			_ReadWriteBarrier();
			break;
		default: assert(false);
	}
}
#endif
}	// end namespace moodycamel
#else
// Use standard library atomics
#include <atomic>

namespace moodycamel {

AE_FORCEINLINE void compiler_fence(memory_order order) AE_NO_TSAN
{
	switch (order) {
		case memory_order_relaxed: break;
		case memory_order_acquire: std::atomic_signal_fence(std::memory_order_acquire); break;
		case memory_order_release: std::atomic_signal_fence(std::memory_order_release); break;
		case memory_order_acq_rel: std::atomic_signal_fence(std::memory_order_acq_rel); break;
		case memory_order_seq_cst: std::atomic_signal_fence(std::memory_order_seq_cst); break;
		default: assert(false);
	}
}

AE_FORCEINLINE void fence(memory_order order) AE_NO_TSAN
{
	switch (order) {
		case memory_order_relaxed: break;
		case memory_order_acquire: std::atomic_thread_fence(std::memory_order_acquire); break;
		case memory_order_release: std::atomic_thread_fence(std::memory_order_release); break;
		case memory_order_acq_rel: std::atomic_thread_fence(std::memory_order_acq_rel); break;
		case memory_order_seq_cst: std::atomic_thread_fence(std::memory_order_seq_cst); break;
		default: assert(false);
	}
}

}	// end namespace moodycamel

#endif

#if !defined(AE_VCPP) || (_MSC_VER >= 1700 && !defined(__cplusplus_cli))
#define AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
#endif

#ifdef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
#include <atomic>
#endif
#include <utility>

namespace moodycamel {

// WARNING: *NOT* A REPLACEMENT FOR std::atomic. READ CAREFULLY:
// Provides basic support for atomic variables -- no memory ordering guarantees are provided.
// The guarantee of atomicity is only made for types that already have atomic load and store guarantees
// at the hardware level -- on most platforms this generally means aligned pointers and integers (only).
template<typename T>
class weak_atomic
{
public:
	AE_NO_TSAN weak_atomic() { }
#ifdef AE_VCPP
#pragma warning(push)
#pragma warning(disable: 4100)	// Get rid of (erroneous) 'unreferenced formal parameter' warning
#endif
	template<typename U> AE_NO_TSAN weak_atomic(U&& x) : value(std::forward<U>(x)) { }
#ifdef __cplusplus_cli
	// Work around bug with universal reference/nullptr combination that only appears when /clr is on
	AE_NO_TSAN weak_atomic(nullptr_t) : value(nullptr) { }
#endif
	AE_NO_TSAN weak_atomic(weak_atomic const& other) : value(other.load()) { }
	AE_NO_TSAN weak_atomic(weak_atomic&& other) : value(std::move(other.load())) { }
#ifdef AE_VCPP
#pragma warning(pop)
#endif

	AE_FORCEINLINE operator T() const AE_NO_TSAN { return load(); }


#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
	template<typename U> AE_FORCEINLINE weak_atomic const& operator=(U&& x) AE_NO_TSAN { value = std::forward<U>(x); return *this; }
	AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) AE_NO_TSAN { value = other.value; return *this; }

	AE_FORCEINLINE T load() const AE_NO_TSAN { return value; }

	AE_FORCEINLINE T fetch_add_acquire(T increment) AE_NO_TSAN
	{
#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
		if (sizeof(T) == 4) return _InterlockedExchangeAdd((long volatile*)&value, (long)increment);
#if defined(_M_AMD64)
		else if (sizeof(T) == 8) return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment);
#endif
#else
#error Unsupported platform
#endif
		assert(false && "T must be either a 32 or 64 bit type");
		return value;
	}

	AE_FORCEINLINE T fetch_add_release(T increment) AE_NO_TSAN
	{
#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
		if (sizeof(T) == 4) return _InterlockedExchangeAdd((long volatile*)&value, (long)increment);
#if defined(_M_AMD64)
		else if (sizeof(T) == 8) return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment);
#endif
#else
#error Unsupported platform
#endif
		assert(false && "T must be either a 32 or 64 bit type");
		return value;
	}
#else
	template<typename U>
	AE_FORCEINLINE weak_atomic const& operator=(U&& x) AE_NO_TSAN
	{
		value.store(std::forward<U>(x), std::memory_order_relaxed);
		return *this;
	}

	AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) AE_NO_TSAN
	{
		value.store(other.value.load(std::memory_order_relaxed), std::memory_order_relaxed);
		return *this;
	}

	AE_FORCEINLINE T load() const AE_NO_TSAN { return value.load(std::memory_order_relaxed); }

	AE_FORCEINLINE T fetch_add_acquire(T increment) AE_NO_TSAN
	{
		return value.fetch_add(increment, std::memory_order_acquire);
	}

	AE_FORCEINLINE T fetch_add_release(T increment) AE_NO_TSAN
	{
		return value.fetch_add(increment, std::memory_order_release);
	}
#endif


private:
#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
	// No std::atomic support, but still need to circumvent compiler optimizations.
	// `volatile` will make memory access slow, but is guaranteed to be reliable.
	volatile T value;
#else
	std::atomic<T> value;
#endif
};

}	// end namespace moodycamel

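// Usage sketch (illustrative, not from the original header): weak_atomic only
// guarantees atomicity, so any required ordering must come from the explicit
// fences above or from the acquire/release fetch_add variants:
//
//     moodycamel::weak_atomic<std::uint32_t> counter(0);
//     std::uint32_t prev = counter.fetch_add_release(1);    // atomic RMW with release ordering
//     std::uint32_t current = counter.load();               // relaxed (unordered) load
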
// Portable single-producer, single-consumer semaphore below:

#if defined(_WIN32)
// Avoid including windows.h in a header; we only need a handful of
// items, so we'll redeclare them here (this is relatively safe since
// the API generally has to remain stable between Windows versions).
// I know this is an ugly hack but it still beats polluting the global
// namespace with thousands of generic names or adding a .cpp for nothing.
extern "C" {
	struct _SECURITY_ATTRIBUTES;
	__declspec(dllimport) void* __stdcall CreateSemaphoreW(_SECURITY_ATTRIBUTES* lpSemaphoreAttributes, long lInitialCount, long lMaximumCount, const wchar_t* lpName);
	__declspec(dllimport) int __stdcall CloseHandle(void* hObject);
	__declspec(dllimport) unsigned long __stdcall WaitForSingleObject(void* hHandle, unsigned long dwMilliseconds);
	__declspec(dllimport) int __stdcall ReleaseSemaphore(void* hSemaphore, long lReleaseCount, long* lpPreviousCount);
}
#elif defined(__MACH__)
#include <mach/mach.h>
#elif defined(__unix__)
#include <semaphore.h>
#endif

namespace moodycamel
{
	// Code in the spsc_sema namespace below is an adaptation of Jeff Preshing's
	// portable + lightweight semaphore implementations, originally from
	// https://github.com/preshing/cpp11-on-multicore/blob/master/common/sema.h
	// LICENSE:
	// Copyright (c) 2015 Jeff Preshing
	//
	// This software is provided 'as-is', without any express or implied
	// warranty. In no event will the authors be held liable for any damages
	// arising from the use of this software.
	//
	// Permission is granted to anyone to use this software for any purpose,
	// including commercial applications, and to alter it and redistribute it
	// freely, subject to the following restrictions:
	//
	// 1. The origin of this software must not be misrepresented; you must not
	//    claim that you wrote the original software. If you use this software
	//    in a product, an acknowledgement in the product documentation would be
	//    appreciated but is not required.
	// 2. Altered source versions must be plainly marked as such, and must not be
	//    misrepresented as being the original software.
	// 3. This notice may not be removed or altered from any source distribution.
	namespace spsc_sema
	{
#if defined(_WIN32)
		class Semaphore
		{
		private:
			void* m_hSema;

			Semaphore(const Semaphore& other);
			Semaphore& operator=(const Semaphore& other);

		public:
			AE_NO_TSAN Semaphore(int initialCount = 0)
			{
				assert(initialCount >= 0);
				const long maxLong = 0x7fffffff;
				m_hSema = CreateSemaphoreW(nullptr, initialCount, maxLong, nullptr);
			}

			AE_NO_TSAN ~Semaphore()
			{
				CloseHandle(m_hSema);
			}

			void wait() AE_NO_TSAN
			{
				const unsigned long infinite = 0xffffffff;
				WaitForSingleObject(m_hSema, infinite);
			}

			bool try_wait() AE_NO_TSAN
			{
				const unsigned long RC_WAIT_TIMEOUT = 0x00000102;
				return WaitForSingleObject(m_hSema, 0) != RC_WAIT_TIMEOUT;
			}

			bool timed_wait(std::uint64_t usecs) AE_NO_TSAN
			{
				const unsigned long RC_WAIT_TIMEOUT = 0x00000102;
				return WaitForSingleObject(m_hSema, (unsigned long)(usecs / 1000)) != RC_WAIT_TIMEOUT;
			}

			void signal(int count = 1) AE_NO_TSAN
			{
				ReleaseSemaphore(m_hSema, count, nullptr);
			}
		};
#elif defined(__MACH__)
		//---------------------------------------------------------
		// Semaphore (Apple iOS and OSX)
		// Can't use POSIX semaphores due to http://lists.apple.com/archives/darwin-kernel/2009/Apr/msg00010.html
		//---------------------------------------------------------
		class Semaphore
		{
		private:
			semaphore_t m_sema;

			Semaphore(const Semaphore& other);
			Semaphore& operator=(const Semaphore& other);

		public:
			AE_NO_TSAN Semaphore(int initialCount = 0)
			{
				assert(initialCount >= 0);
				semaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, initialCount);
			}

			AE_NO_TSAN ~Semaphore()
			{
				semaphore_destroy(mach_task_self(), m_sema);
			}

			void wait() AE_NO_TSAN
			{
				semaphore_wait(m_sema);
			}

			bool try_wait() AE_NO_TSAN
			{
				return timed_wait(0);
			}

			bool timed_wait(std::int64_t timeout_usecs) AE_NO_TSAN
			{
				mach_timespec_t ts;
				ts.tv_sec = static_cast<unsigned int>(timeout_usecs / 1000000);
				ts.tv_nsec = (timeout_usecs % 1000000) * 1000;

				// added in OSX 10.10: https://developer.apple.com/library/prerelease/mac/documentation/General/Reference/APIDiffsMacOSX10_10SeedDiff/modules/Darwin.html
				kern_return_t rc = semaphore_timedwait(m_sema, ts);

				return rc != KERN_OPERATION_TIMED_OUT && rc != KERN_ABORTED;
			}

			void signal() AE_NO_TSAN
			{
				semaphore_signal(m_sema);
			}

			void signal(int count) AE_NO_TSAN
			{
				while (count-- > 0)
				{
					semaphore_signal(m_sema);
				}
			}
		};
#elif defined(__unix__)
		//---------------------------------------------------------
		// Semaphore (POSIX, Linux)
		//---------------------------------------------------------
		class Semaphore
		{
		private:
			sem_t m_sema;

			Semaphore(const Semaphore& other);
			Semaphore& operator=(const Semaphore& other);

		public:
			AE_NO_TSAN Semaphore(int initialCount = 0)
			{
				assert(initialCount >= 0);
				sem_init(&m_sema, 0, initialCount);
			}

			AE_NO_TSAN ~Semaphore()
			{
				sem_destroy(&m_sema);
			}

			void wait() AE_NO_TSAN
			{
				// http://stackoverflow.com/questions/2013181/gdb-causes-sem-wait-to-fail-with-eintr-error
				int rc;
				do
				{
					rc = sem_wait(&m_sema);
				}
				while (rc == -1 && errno == EINTR);
			}

			bool try_wait() AE_NO_TSAN
			{
				int rc;
				do {
					rc = sem_trywait(&m_sema);
				} while (rc == -1 && errno == EINTR);
				return !(rc == -1 && errno == EAGAIN);
			}

			bool timed_wait(std::uint64_t usecs) AE_NO_TSAN
			{
				struct timespec ts;
				const int usecs_in_1_sec = 1000000;
				const int nsecs_in_1_sec = 1000000000;
				clock_gettime(CLOCK_REALTIME, &ts);
				ts.tv_sec += usecs / usecs_in_1_sec;
				ts.tv_nsec += (usecs % usecs_in_1_sec) * 1000;
				// sem_timedwait bombs if you have more than 1e9 in tv_nsec
				// so we have to clean things up before passing it in
				if (ts.tv_nsec >= nsecs_in_1_sec) {
					ts.tv_nsec -= nsecs_in_1_sec;
					++ts.tv_sec;
				}

				int rc;
				do {
					rc = sem_timedwait(&m_sema, &ts);
				} while (rc == -1 && errno == EINTR);
				return !(rc == -1 && errno == ETIMEDOUT);
			}

			void signal() AE_NO_TSAN
			{
				sem_post(&m_sema);
			}

			void signal(int count) AE_NO_TSAN
			{
				while (count-- > 0)
				{
					sem_post(&m_sema);
				}
			}
		};
#else
#error Unsupported platform! (No semaphore wrapper available)
#endif

		//---------------------------------------------------------
		// LightweightSemaphore
		//---------------------------------------------------------
		class LightweightSemaphore
		{
		public:
			typedef std::make_signed<std::size_t>::type ssize_t;

		private:
			weak_atomic<ssize_t> m_count;
			Semaphore m_sema;

			bool waitWithPartialSpinning(std::int64_t timeout_usecs = -1) AE_NO_TSAN
			{
				ssize_t oldCount;
				// Is there a better way to set the initial spin count?
				// If we lower it to 1000, testBenaphore becomes 15x slower on my Core i7-5930K Windows PC,
				// as threads start hitting the kernel semaphore.
				int spin = 10000;
				while (--spin >= 0)
				{
					if (m_count.load() > 0)
					{
						m_count.fetch_add_acquire(-1);
						return true;
					}
					compiler_fence(memory_order_acquire);	// Prevent the compiler from collapsing the loop.
				}
				oldCount = m_count.fetch_add_acquire(-1);
				if (oldCount > 0)
					return true;
				if (timeout_usecs < 0)
				{
					m_sema.wait();
					return true;
				}
				if (m_sema.timed_wait(timeout_usecs))
					return true;
				// At this point, we've timed out waiting for the semaphore, but the
				// count is still decremented indicating we may still be waiting on
				// it. So we have to re-adjust the count, but only if the semaphore
				// wasn't signaled enough times for us too since then. If it was, we
				// need to release the semaphore too.
				while (true)
				{
					oldCount = m_count.fetch_add_release(1);
					if (oldCount < 0)
						return false;	// successfully restored things to the way they were
					// Oh, the producer thread just signaled the semaphore after all. Try again:
					oldCount = m_count.fetch_add_acquire(-1);
					if (oldCount > 0 && m_sema.try_wait())
						return true;
				}
			}

		public:
			AE_NO_TSAN LightweightSemaphore(ssize_t initialCount = 0) : m_count(initialCount)
			{
				assert(initialCount >= 0);
			}

			bool tryWait() AE_NO_TSAN
			{
				if (m_count.load() > 0)
				{
					m_count.fetch_add_acquire(-1);
					return true;
				}
				return false;
			}

			void wait() AE_NO_TSAN
			{
				if (!tryWait())
					waitWithPartialSpinning();
			}

			bool wait(std::int64_t timeout_usecs) AE_NO_TSAN
			{
				return tryWait() || waitWithPartialSpinning(timeout_usecs);
			}

			void signal(ssize_t count = 1) AE_NO_TSAN
			{
				assert(count >= 0);
				ssize_t oldCount = m_count.fetch_add_release(count);
				assert(oldCount >= -1);
				if (oldCount < 0)
				{
					m_sema.signal(1);
				}
			}

			ssize_t availableApprox() const AE_NO_TSAN
			{
				ssize_t count = m_count.load();
				return count > 0 ? count : 0;
			}
		};
	}	// end namespace spsc_sema
}	// end namespace moodycamel

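// Usage sketch (illustrative, not from the original header): a typical
// single-producer, single-consumer pairing; `queue` and `item` stand in
// for a hypothetical SPSC queue shared by exactly two threads:
//
//     moodycamel::spsc_sema::LightweightSemaphore items;
//
//     // Producer thread:
//     queue.push(item);
//     items.signal();    // wake the consumer (cheap if it's still spinning)
//
//     // Consumer thread:
//     items.wait();      // spins up to ~10000 iterations, then blocks in the kernel
//     queue.pop(item);
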
#if defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))
#pragma warning(pop)
#ifdef __cplusplus_cli
#pragma managed(pop)
#endif
#endif