diff --git a/.gitignore b/.gitignore
index c024df33..8d148cdc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
tags
+cscope.out
**/*.swp
**/*.swo
.swp
diff --git a/benchmark/source/BenchmarkVector.cpp b/benchmark/source/BenchmarkVector.cpp
index 80cfbce8..93315309 100644
--- a/benchmark/source/BenchmarkVector.cpp
+++ b/benchmark/source/BenchmarkVector.cpp
@@ -56,16 +56,14 @@ namespace
return *this;
}
- #if EASTL_MOVE_SEMANTICS_ENABLED
- MovableType(MovableType&& x) EA_NOEXCEPT : mpData(x.mpData)
- { x.mpData = NULL; }
+ MovableType(MovableType&& x) EA_NOEXCEPT : mpData(x.mpData)
+ { x.mpData = NULL; }
- MovableType& operator=(MovableType&& x)
- {
- eastl::swap(mpData, x.mpData); // In practice it may not be right to do a swap, depending on the case.
- return *this;
- }
- #endif
+ MovableType& operator=(MovableType&& x)
+ {
+ eastl::swap(mpData, x.mpData); // In practice it may not be right to do a swap, depending on the case.
+ return *this;
+ }
~MovableType()
{ delete[] mpData; }
@@ -123,21 +121,19 @@ namespace
return *this;
}
- #if EASTL_MOVE_SEMANTICS_ENABLED
- AutoRefCount(AutoRefCount&& x) EA_NOEXCEPT : mpObject(x.mpObject)
- {
- x.mpObject = NULL;
- }
+ AutoRefCount(AutoRefCount&& x) EA_NOEXCEPT : mpObject(x.mpObject)
+ {
+ x.mpObject = NULL;
+ }
- AutoRefCount& operator=(AutoRefCount&& x)
- {
- if(mpObject)
- mpObject->Release();
- mpObject = x.mpObject;
- x.mpObject = NULL;
- return *this;
- }
- #endif
+ AutoRefCount& operator=(AutoRefCount&& x)
+ {
+ if(mpObject)
+ mpObject->Release();
+ mpObject = x.mpObject;
+ x.mpObject = NULL;
+ return *this;
+ }
~AutoRefCount()
{
diff --git a/build.sh b/build.sh
new file mode 100755
index 00000000..20dd7c17
--- /dev/null
+++ b/build.sh
@@ -0,0 +1,15 @@
+build_folder=build
+mkdir $build_folder
+pushd $build_folder
+cmake .. -DEASTL_BUILD_TESTS:BOOL=ON -DEASTL_BUILD_BENCHMARK:BOOL=OFF
+cmake --build . --config Release
+# cmake --build . --config Debug
+# cmake --build . --config RelWithDebInfo
+# cmake --build . --config MinSizeRel
+pushd test
+ctest -C Release
+# ctest -C Debug
+# ctest -C RelWithDebInfo
+# ctest -C MinSizeRel
+popd
+popd
diff --git a/doc/EASTL.natvis b/doc/EASTL.natvis
index 836878b1..5b78aa80 100644
--- a/doc/EASTL.natvis
+++ b/doc/EASTL.natvis
@@ -454,5 +454,13 @@
({mFirst}, {mSecond})
+
+
+ nullopt
+ {value()}
+
+ - value()
+
+
diff --git a/include/EASTL/algorithm.h b/include/EASTL/algorithm.h
index 7710ddbd..dc5202a0 100644
--- a/include/EASTL/algorithm.h
+++ b/include/EASTL/algorithm.h
@@ -1158,27 +1158,25 @@ namespace eastl
/// Rand randInstance;
/// shuffle(pArrayBegin, pArrayEnd, randInstance);
///
- #if EASTL_MOVE_SEMANTICS_ENABLED
- // See the C++11 Standard, 26.5.1.3, Uniform random number generator requirements.
- // Also http://en.cppreference.com/w/cpp/numeric/random/uniform_int_distribution
+ // See the C++11 Standard, 26.5.1.3, Uniform random number generator requirements.
+ // Also http://en.cppreference.com/w/cpp/numeric/random/uniform_int_distribution
- template
- void shuffle(RandomAccessIterator first, RandomAccessIterator last, UniformRandomNumberGenerator&& urng)
+ template
+ void shuffle(RandomAccessIterator first, RandomAccessIterator last, UniformRandomNumberGenerator&& urng)
+ {
+ if(first != last)
{
- if(first != last)
- {
- typedef typename eastl::iterator_traits::difference_type difference_type;
- typedef typename eastl::make_unsigned::type unsigned_difference_type;
- typedef typename eastl::uniform_int_distribution uniform_int_distribution;
- typedef typename uniform_int_distribution::param_type uniform_int_distribution_param_type;
+ typedef typename eastl::iterator_traits::difference_type difference_type;
+ typedef typename eastl::make_unsigned::type unsigned_difference_type;
+ typedef typename eastl::uniform_int_distribution uniform_int_distribution;
+ typedef typename uniform_int_distribution::param_type uniform_int_distribution_param_type;
- uniform_int_distribution uid;
+ uniform_int_distribution uid;
- for(RandomAccessIterator i = first + 1; i != last; ++i)
- iter_swap(i, first + uid(urng, uniform_int_distribution_param_type(0, i - first)));
- }
+ for(RandomAccessIterator i = first + 1; i != last; ++i)
+ iter_swap(i, first + uid(urng, uniform_int_distribution_param_type(0, i - first)));
}
- #endif
+ }
/// random_shuffle
@@ -1199,23 +1197,18 @@ namespace eastl
/// Rand randInstance;
/// random_shuffle(pArrayBegin, pArrayEnd, randInstance);
///
- #if EASTL_MOVE_SEMANTICS_ENABLED
- template
- inline void random_shuffle(RandomAccessIterator first, RandomAccessIterator last, RandomNumberGenerator&& rng)
- #else
- template
- inline void random_shuffle(RandomAccessIterator first, RandomAccessIterator last, RandomNumberGenerator& rng)
- #endif
- {
- typedef typename eastl::iterator_traits::difference_type difference_type;
+ template
+ inline void random_shuffle(RandomAccessIterator first, RandomAccessIterator last, RandomNumberGenerator&& rng)
+ {
+ typedef typename eastl::iterator_traits::difference_type difference_type;
- // We must do 'rand((i - first) + 1)' here and cannot do 'rand(last - first)',
- // as it turns out that the latter results in unequal distribution probabilities.
- // http://www.cigital.com/papers/download/developer_gambling.php
+ // We must do 'rand((i - first) + 1)' here and cannot do 'rand(last - first)',
+ // as it turns out that the latter results in unequal distribution probabilities.
+ // http://www.cigital.com/papers/download/developer_gambling.php
- for(RandomAccessIterator i = first + 1; i < last; ++i)
- iter_swap(i, first + (difference_type)rng((eastl_size_t)((i - first) + 1)));
- }
+ for(RandomAccessIterator i = first + 1; i < last; ++i)
+ iter_swap(i, first + (difference_type)rng((eastl_size_t)((i - first) + 1)));
+ }
/// random_shuffle
@@ -1997,8 +1990,7 @@ namespace eastl
/// We should verify that such a thing results in an improvement.
///
template
- inline bool
- equal(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2)
+ EA_CPP14_CONSTEXPR inline bool equal(InputIterator1 first1, InputIterator1 last1, InputIterator2 first2)
{
for(; first1 != last1; ++first1, ++first2)
{
diff --git a/include/EASTL/allocator.h b/include/EASTL/allocator.h
index a52948d5..ad20e4d8 100644
--- a/include/EASTL/allocator.h
+++ b/include/EASTL/allocator.h
@@ -12,11 +12,6 @@
#include
-#ifdef _MSC_VER
- #pragma warning(push)
- #pragma warning(disable: 4189) // local variable is initialized but not referenced
-#endif
-
#if defined(EA_PRAGMA_ONCE_SUPPORTED)
#pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
#endif
@@ -167,14 +162,9 @@ namespace eastl
#ifndef EASTL_USER_DEFINED_ALLOCATOR // If the user hasn't declared that he has defined a different allocator implementation elsewhere...
- #ifdef _MSC_VER
- #pragma warning(push, 0)
- #pragma warning(disable: 4265 4365 4836 4548)
- #include
- #pragma warning(pop)
- #else
- #include
- #endif
+ EA_DISABLE_ALL_VC_WARNINGS()
+ #include
+ EA_RESTORE_ALL_VC_WARNINGS()
#if !EASTL_DLL // If building a regular library and not building EASTL as a DLL...
// It is expected that the application define the following
@@ -376,17 +366,15 @@ namespace eastl
{
result = EASTLAllocAligned(a, n, alignment, alignmentOffset);
// Ensure the result is correctly aligned. An assertion here may indicate a bug in the allocator.
- EASTL_ASSERT((reinterpret_cast(result)& ~(alignment - 1)) == reinterpret_cast(result));
+ auto resultMinusOffset = (char*)result - alignmentOffset;
+ EA_UNUSED(resultMinusOffset);
+ EASTL_ASSERT((reinterpret_cast(resultMinusOffset)& ~(alignment - 1)) == reinterpret_cast(resultMinusOffset));
}
return result;
}
}
-#ifdef _MSC_VER
- #pragma warning(pop)
-#endif
-
#endif // Header include guard
diff --git a/include/EASTL/array.h b/include/EASTL/array.h
index bc07948d..4158d90e 100644
--- a/include/EASTL/array.h
+++ b/include/EASTL/array.h
@@ -23,13 +23,9 @@
#include
#if EASTL_EXCEPTIONS_ENABLED
- #ifdef _MSC_VER
- #pragma warning(push, 0)
- #endif
+ EA_DISABLE_ALL_VC_WARNINGS()
#include // std::out_of_range, std::length_error.
- #ifdef _MSC_VER
- #pragma warning(pop)
- #endif
+ EA_RESTORE_ALL_VC_WARNINGS()
#endif
#if defined(EA_PRAGMA_ONCE_SUPPORTED)
@@ -93,39 +89,39 @@ namespace eastl
// may exit via an exception, and does not cause iterators to become associated with the other container.
void swap(this_type& x) EA_NOEXCEPT_IF(eastl::is_nothrow_swappable::value);
- iterator begin() EA_NOEXCEPT;
- const_iterator begin() const EA_NOEXCEPT;
- const_iterator cbegin() const EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR iterator begin() EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR const_iterator begin() const EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR const_iterator cbegin() const EA_NOEXCEPT;
- iterator end() EA_NOEXCEPT;
- const_iterator end() const EA_NOEXCEPT;
- const_iterator cend() const EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR iterator end() EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR const_iterator end() const EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR const_iterator cend() const EA_NOEXCEPT;
- reverse_iterator rbegin() EA_NOEXCEPT;
- const_reverse_iterator rbegin() const EA_NOEXCEPT;
- const_reverse_iterator crbegin() const EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR reverse_iterator rbegin() EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR const_reverse_iterator rbegin() const EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR const_reverse_iterator crbegin() const EA_NOEXCEPT;
- reverse_iterator rend() EA_NOEXCEPT;
- const_reverse_iterator rend() const EA_NOEXCEPT;
- const_reverse_iterator crend() const EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR reverse_iterator rend() EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR const_reverse_iterator rend() const EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR const_reverse_iterator crend() const EA_NOEXCEPT;
- bool empty() const EA_NOEXCEPT;
- size_type size() const EA_NOEXCEPT;
- size_type max_size() const EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR bool empty() const EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR size_type size() const EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR size_type max_size() const EA_NOEXCEPT;
- T* data() EA_NOEXCEPT;
- const T* data() const EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR T* data() EA_NOEXCEPT;
+ EA_CPP14_CONSTEXPR const T* data() const EA_NOEXCEPT;
- reference operator[](size_type i);
- const_reference operator[](size_type i) const;
- const_reference at(size_type i) const;
- reference at(size_type i);
+ EA_CPP14_CONSTEXPR reference operator[](size_type i);
+ EA_CPP14_CONSTEXPR const_reference operator[](size_type i) const;
+ EA_CPP14_CONSTEXPR const_reference at(size_type i) const;
+ EA_CPP14_CONSTEXPR reference at(size_type i);
- reference front();
- const_reference front() const;
+ EA_CPP14_CONSTEXPR reference front();
+ EA_CPP14_CONSTEXPR const_reference front() const;
- reference back();
- const_reference back() const;
+ EA_CPP14_CONSTEXPR reference back();
+ EA_CPP14_CONSTEXPR const_reference back() const;
bool validate() const;
int validate_iterator(const_iterator i) const;
@@ -155,7 +151,7 @@ namespace eastl
template
- inline typename array::iterator
+ EA_CPP14_CONSTEXPR inline typename array::iterator
array::begin() EA_NOEXCEPT
{
return &mValue[0];
@@ -163,7 +159,7 @@ namespace eastl
template
- inline typename array::const_iterator
+ EA_CPP14_CONSTEXPR inline typename array::const_iterator
array::begin() const EA_NOEXCEPT
{
return &mValue[0];
@@ -171,7 +167,7 @@ namespace eastl
template
- inline typename array::const_iterator
+ EA_CPP14_CONSTEXPR inline typename array::const_iterator
array::cbegin() const EA_NOEXCEPT
{
return &mValue[0];
@@ -179,7 +175,7 @@ namespace eastl
template
- inline typename array::iterator
+ EA_CPP14_CONSTEXPR inline typename array::iterator
array::end() EA_NOEXCEPT
{
return &mValue[N];
@@ -187,7 +183,7 @@ namespace eastl
template
- inline typename array::const_iterator
+ EA_CPP14_CONSTEXPR inline typename array::const_iterator
array::end() const EA_NOEXCEPT
{
return &mValue[N];
@@ -195,7 +191,7 @@ namespace eastl
template
- inline typename array::const_iterator
+ EA_CPP14_CONSTEXPR inline typename array::const_iterator
array::cend() const EA_NOEXCEPT
{
return &mValue[N];
@@ -203,7 +199,7 @@ namespace eastl
template
- inline typename array::reverse_iterator
+ EA_CPP14_CONSTEXPR inline typename array::reverse_iterator
array::rbegin() EA_NOEXCEPT
{
return reverse_iterator(&mValue[N]);
@@ -211,7 +207,7 @@ namespace eastl
template
- inline typename array::const_reverse_iterator
+ EA_CPP14_CONSTEXPR inline typename array::const_reverse_iterator
array::rbegin() const EA_NOEXCEPT
{
return const_reverse_iterator(&mValue[N]);
@@ -219,7 +215,7 @@ namespace eastl
template
- inline typename array::const_reverse_iterator
+ EA_CPP14_CONSTEXPR inline typename array::const_reverse_iterator
array::crbegin() const EA_NOEXCEPT
{
return const_reverse_iterator(&mValue[N]);
@@ -227,7 +223,7 @@ namespace eastl
template
- inline typename array::reverse_iterator
+ EA_CPP14_CONSTEXPR inline typename array::reverse_iterator
array::rend() EA_NOEXCEPT
{
return reverse_iterator(&mValue[0]);
@@ -235,23 +231,23 @@ namespace eastl
template
- inline typename array::const_reverse_iterator
+ EA_CPP14_CONSTEXPR inline typename array::const_reverse_iterator
array::rend() const EA_NOEXCEPT
{
- return const_reverse_iterator(reinterpret_cast(&mValue[0]));
+ return const_reverse_iterator(static_cast(&mValue[0]));
}
template
- inline typename array::const_reverse_iterator
+ EA_CPP14_CONSTEXPR inline typename array::const_reverse_iterator
array::crend() const EA_NOEXCEPT
{
- return const_reverse_iterator(reinterpret_cast(&mValue[0]));
+ return const_reverse_iterator(static_cast(&mValue[0]));
}
template
- inline typename array::size_type
+ EA_CPP14_CONSTEXPR inline typename array::size_type
array::size() const EA_NOEXCEPT
{
return (size_type)N;
@@ -259,7 +255,7 @@ namespace eastl
template
- inline typename array::size_type
+ EA_CPP14_CONSTEXPR inline typename array::size_type
array::max_size() const EA_NOEXCEPT
{
return (size_type)N;
@@ -267,14 +263,14 @@ namespace eastl
template
- inline bool array::empty() const EA_NOEXCEPT
+ EA_CPP14_CONSTEXPR inline bool array::empty() const EA_NOEXCEPT
{
return (N == 0);
}
template
- inline typename array::reference
+ EA_CPP14_CONSTEXPR inline typename array::reference
array::operator[](size_type i)
{
#if EASTL_ASSERT_ENABLED
@@ -288,7 +284,7 @@ namespace eastl
template
- inline typename array::const_reference
+ EA_CPP14_CONSTEXPR inline typename array::const_reference
array::operator[](size_type i) const
{
#if EASTL_ASSERT_ENABLED
@@ -303,7 +299,7 @@ namespace eastl
template
- inline typename array::reference
+ EA_CPP14_CONSTEXPR inline typename array::reference
array::front()
{
#if EASTL_ASSERT_ENABLED
@@ -316,7 +312,7 @@ namespace eastl
template
- inline typename array::const_reference
+ EA_CPP14_CONSTEXPR inline typename array::const_reference
array::front() const
{
#if EASTL_ASSERT_ENABLED
@@ -329,7 +325,7 @@ namespace eastl
template
- inline typename array::reference
+ EA_CPP14_CONSTEXPR inline typename array::reference
array::back()
{
#if EASTL_ASSERT_ENABLED
@@ -342,7 +338,7 @@ namespace eastl
template
- inline typename array::const_reference
+ EA_CPP14_CONSTEXPR inline typename array::const_reference
array::back() const
{
#if EASTL_ASSERT_ENABLED
@@ -355,23 +351,21 @@ namespace eastl
template
- inline T* array::data() EA_NOEXCEPT
+ EA_CPP14_CONSTEXPR inline T* array::data() EA_NOEXCEPT
{
return mValue;
}
template
- inline const T*
- array::data() const EA_NOEXCEPT
+ EA_CPP14_CONSTEXPR inline const T* array::data() const EA_NOEXCEPT
{
return mValue;
}
template
- inline typename array::const_reference
- array::at(size_type i) const
+ EA_CPP14_CONSTEXPR inline typename array::const_reference array::at(size_type i) const
{
#if EASTL_EXCEPTIONS_ENABLED
if(EASTL_UNLIKELY(i >= N))
@@ -382,13 +376,12 @@ namespace eastl
#endif
EA_ANALYSIS_ASSUME(i < N);
- return reinterpret_cast(mValue[i]);
+ return static_cast(mValue[i]);
}
template
- inline typename array::reference
- array::at(size_type i)
+ EA_CPP14_CONSTEXPR inline typename array::reference array::at(size_type i)
{
#if EASTL_EXCEPTIONS_ENABLED
if(EASTL_UNLIKELY(i >= N))
@@ -399,7 +392,7 @@ namespace eastl
#endif
EA_ANALYSIS_ASSUME(i < N);
- return reinterpret_cast(mValue[i]);
+ return static_cast(mValue[i]);
}
@@ -432,42 +425,42 @@ namespace eastl
///////////////////////////////////////////////////////////////////////
template
- inline bool operator==(const array& a, const array& b)
+ EA_CPP14_CONSTEXPR inline bool operator==(const array& a, const array& b)
{
return eastl::equal(&a.mValue[0], &a.mValue[N], &b.mValue[0]);
}
template
- inline bool operator<(const array& a, const array& b)
+ EA_CPP14_CONSTEXPR inline bool operator<(const array& a, const array& b)
{
return eastl::lexicographical_compare(&a.mValue[0], &a.mValue[N], &b.mValue[0], &b.mValue[N]);
}
template
- inline bool operator!=(const array& a, const array& b)
+ EA_CPP14_CONSTEXPR inline bool operator!=(const array& a, const array& b)
{
return !eastl::equal(&a.mValue[0], &a.mValue[N], &b.mValue[0]);
}
template
- inline bool operator>(const array& a, const array& b)
+ EA_CPP14_CONSTEXPR inline bool operator>(const array& a, const array& b)
{
return eastl::lexicographical_compare(&b.mValue[0], &b.mValue[N], &a.mValue[0], &a.mValue[N]);
}
template
- inline bool operator<=(const array& a, const array& b)
+ EA_CPP14_CONSTEXPR inline bool operator<=(const array& a, const array& b)
{
return !eastl::lexicographical_compare(&b.mValue[0], &b.mValue[N], &a.mValue[0], &a.mValue[N]);
}
template
- inline bool operator>=(const array& a, const array& b)
+ EA_CPP14_CONSTEXPR inline bool operator>=(const array& a, const array& b)
{
return !eastl::lexicographical_compare(&a.mValue[0], &a.mValue[N], &b.mValue[0], &b.mValue[N]);
}
diff --git a/include/EASTL/bonus/list_map.h b/include/EASTL/bonus/list_map.h
index 1a07a603..0c05678f 100644
--- a/include/EASTL/bonus/list_map.h
+++ b/include/EASTL/bonus/list_map.h
@@ -212,10 +212,8 @@ namespace eastl
// To do: Implement the following:
//list_map(const this_type& x);
- //#if EASTL_MOVE_SEMANTICS_ENABLED
- // list_map(this_type&& x);
- // list_map(this_type&& x, const allocator_type& allocator);
- //#endif
+ //list_map(this_type&& x);
+ //list_map(this_type&& x, const allocator_type& allocator);
//list_map(std::initializer_list ilist, const Compare& compare = Compare(), const allocator_type& allocator = EASTL_LIST_MAP_DEFAULT_ALLOCATOR);
//template
@@ -223,10 +221,7 @@ namespace eastl
//this_type& operator=(const this_type& x);
//this_type& operator=(std::initializer_list ilist);
-
- //#if EASTL_MOVE_SEMANTICS_ENABLED
- // this_type& operator=(this_type&& x);
- //#endif
+ //this_type& operator=(this_type&& x);
//void swap(this_type& x);
diff --git a/include/EASTL/bonus/ring_buffer.h b/include/EASTL/bonus/ring_buffer.h
index 4f022510..fcd8fd2c 100644
--- a/include/EASTL/bonus/ring_buffer.h
+++ b/include/EASTL/bonus/ring_buffer.h
@@ -940,37 +940,73 @@ namespace eastl
}
*/
- template= EASTL_MAX_STACK_USAGE)>
- struct ContainerTemporary
+
+ namespace Internal
{
- ContainerTemporary(Container& parentContainer);
- Container& get();
- };
+ ///////////////////////////////////////////////////////////////
+ // has_overflow_allocator
+ //
+ // returns true_type when the specified container type is an
+ // eastl::fixed_* container and therefore has an overflow
+ // allocator type.
+ //
+ template
+ struct has_overflow_allocator : false_type {};
+
+ template
+ struct has_overflow_allocator().get_overflow_allocator())>> : true_type {};
- template
- struct ContainerTemporary
+ ///////////////////////////////////////////////////////////////
+ // GetFixedContainerCtorAllocator
+ //
+ // eastl::fixed_* containers are only constructible via their
+ // overflow allocator type. This helper selects the appropriate
+ // allocator from the specified container.
+ //
+ template ()()>
+ struct GetFixedContainerCtorAllocator
+ {
+ auto& operator()(Container& c) { return c.get_overflow_allocator(); }
+ };
+
+ template
+ struct GetFixedContainerCtorAllocator
+ {
+ auto& operator()(Container& c) { return c.get_allocator(); }
+ };
+ } // namespace Internal
+
+
+ ///////////////////////////////////////////////////////////////
+ // ContainerTemporary
+ //
+ // Helper type which prevents utilizing excessive stack space
+ // when creating temporaries while swapping/copying the underlying
+ // ring_buffer container type.
+ //
+ template = EASTL_MAX_STACK_USAGE)>
+ struct ContainerTemporary
{
Container mContainer;
ContainerTemporary(Container& parentContainer)
- : mContainer(parentContainer.get_allocator())
+ : mContainer(Internal::GetFixedContainerCtorAllocator{}(parentContainer))
{
}
Container& get() { return mContainer; }
};
-
- template
+ template
struct ContainerTemporary
{
typename Container::allocator_type* mAllocator;
Container* mContainer;
ContainerTemporary(Container& parentContainer)
- : mAllocator(&parentContainer.get_allocator())
- , mContainer(new(mAllocator->allocate(sizeof(Container))) Container)
+ : mAllocator(&parentContainer.get_allocator())
+ , mContainer(new (mAllocator->allocate(sizeof(Container))) Container)
{
}
diff --git a/include/EASTL/bonus/tuple_vector.h b/include/EASTL/bonus/tuple_vector.h
index d9463037..1573f287 100644
--- a/include/EASTL/bonus/tuple_vector.h
+++ b/include/EASTL/bonus/tuple_vector.h
@@ -34,6 +34,10 @@
#pragma once // Some compilers (e.g. VC++) benefit significantly from using this. We've measured 3-4% build speed improvements in apps as a result.
#endif
+EA_DISABLE_VC_WARNING(4623) // warning C4623: default constructor was implicitly defined as deleted
+EA_DISABLE_VC_WARNING(4625) // warning C4625: copy constructor was implicitly defined as deleted
+EA_DISABLE_VC_WARNING(4510) // warning C4510: default constructor could not be generated
+
namespace eastl
{
/// EASTL_TUPLE_VECTOR_DEFAULT_NAME
@@ -162,6 +166,14 @@ struct TupleRecurser<>
// This is fine, as our default ctor initializes with NULL pointers.
size_type alignment = TupleRecurser::GetTotalAlignment();
void* ptr = capacity ? allocate_memory(vec.internalAllocator(), offset, alignment, 0) : nullptr;
+
+ #if EASTL_ASSERT_ENABLED
+ if (EASTL_UNLIKELY(((size_type)ptr & (alignment - 1)) != 0))
+ {
+ EASTL_FAIL_MSG("tuple_vector::DoAllocate -- memory not aligned at requested alignment");
+ }
+ #endif
+
return make_pair(ptr, offset);
}
@@ -1375,9 +1387,9 @@ class TupleVecImpl, Ts...> : public TupleV
} // namespace TupleVecInternal
- // Move_iterator specialization for TupleVecIter.
- // An rvalue reference of a move_iterator would normaly be "tuple &&" whereas
- // what we actually want is "tuple". This specialization gives us that.
+// Move_iterator specialization for TupleVecIter.
+// An rvalue reference of a move_iterator would normally be "tuple &&" whereas
+// what we actually want is "tuple". This specialization gives us that.
template
class move_iterator, Ts...>>
{
@@ -1557,4 +1569,8 @@ class tuple_vector_alloc
} // namespace eastl
+EA_RESTORE_VC_WARNING()
+EA_RESTORE_VC_WARNING()
+EA_RESTORE_VC_WARNING()
+
#endif // EASTL_TUPLEVECTOR_H
diff --git a/include/EASTL/core_allocator_adapter.h b/include/EASTL/core_allocator_adapter.h
index 30c431df..e0e8adc8 100644
--- a/include/EASTL/core_allocator_adapter.h
+++ b/include/EASTL/core_allocator_adapter.h
@@ -165,7 +165,6 @@ namespace EA
CoreDeleterAdapter(const CoreDeleterAdapter& in) { mpCoreAllocator = in.mpCoreAllocator; }
- #if EASTL_MOVE_SEMANTICS_ENABLED
CoreDeleterAdapter(CoreDeleterAdapter&& in)
{
mpCoreAllocator = in.mpCoreAllocator;
@@ -184,8 +183,6 @@ namespace EA
in.mpCoreAllocator = nullptr;
return *this;
}
- #endif
-
};
diff --git a/include/EASTL/deque.h b/include/EASTL/deque.h
index 69a3af4a..88fe5750 100644
--- a/include/EASTL/deque.h
+++ b/include/EASTL/deque.h
@@ -227,19 +227,19 @@ namespace eastl
T* mpEnd; // The end of the current subarray. To consider: remove this member, as it is always equal to 'mpBegin + kDequeSubarraySize'. Given that deque subarrays usually consist of hundreds of bytes, this isn't a massive win. Also, now that we are implementing a zero-allocation new deque policy, mpEnd may in fact not be equal to 'mpBegin + kDequeSubarraySize'.
T** mpCurrentArrayPtr; // Pointer to current subarray. We could alternatively implement this as a list node iterator if the deque used a linked list.
- struct Increment{ };
- struct Decrement{ };
- struct FromConst{};
+ struct Increment {};
+ struct Decrement {};
+ struct FromConst {};
DequeIterator(T** pCurrentArrayPtr, T* pCurrent);
DequeIterator(const const_iterator& x, FromConst) : mpCurrent(x.mpCurrent), mpBegin(x.mpBegin), mpEnd(x.mpEnd), mpCurrentArrayPtr(x.mpCurrentArrayPtr){}
DequeIterator(const iterator& x, Increment);
DequeIterator(const iterator& x, Decrement);
- this_type copy(const iterator& first, const iterator& last, true_type); // true means that value_type has the type_trait has_trivial_relocate,
+ this_type copy(const iterator& first, const iterator& last, true_type); // true means that value_type has the type_trait is_move_assignable,
this_type copy(const iterator& first, const iterator& last, false_type); // false means it does not.
- void copy_backward(const iterator& first, const iterator& last, true_type); // true means that value_type has the type_trait has_trivial_relocate,
+ void copy_backward(const iterator& first, const iterator& last, true_type); // true means that value_type has the type_trait is_move_assignable,
void copy_backward(const iterator& first, const iterator& last, false_type); // false means it does not.
void SetSubarray(T** pCurrentArrayPtr);
@@ -298,17 +298,17 @@ namespace eastl
void set_allocator(const allocator_type& allocator);
protected:
- T* DoAllocateSubarray();
- void DoFreeSubarray(T* p);
- void DoFreeSubarrays(T** pBegin, T** pEnd);
+ T* DoAllocateSubarray();
+ void DoFreeSubarray(T* p);
+ void DoFreeSubarrays(T** pBegin, T** pEnd);
- T** DoAllocatePtrArray(size_type n);
- void DoFreePtrArray(T** p, size_t n);
+ T** DoAllocatePtrArray(size_type n);
+ void DoFreePtrArray(T** p, size_t n);
iterator DoReallocSubarray(size_type nAdditionalCapacity, Side allocationSide);
void DoReallocPtrArray(size_type nAdditionalCapacity, Side allocationSide);
- void DoInit(size_type n);
+ void DoInit(size_type n);
}; // DequeBase
@@ -1053,7 +1053,7 @@ namespace eastl
memmove(mpCurrent, first.mpCurrent, (size_t)((uintptr_t)last.mpCurrent - (uintptr_t)first.mpCurrent));
return *this + (last.mpCurrent - first.mpCurrent);
}
- return eastl::copy(first, last, *this);
+ return eastl::copy(eastl::make_move_iterator(first), eastl::make_move_iterator(last), eastl::make_move_iterator(*this)).base();
}
@@ -1073,7 +1073,7 @@ namespace eastl
if((first.mpBegin == last.mpBegin) && (first.mpBegin == mpBegin)) // If all operations are within the same subarray, implement the operation as a memcpy.
memmove(mpCurrent - (last.mpCurrent - first.mpCurrent), first.mpCurrent, (size_t)((uintptr_t)last.mpCurrent - (uintptr_t)first.mpCurrent));
else
- eastl::copy_backward(first, last, *this);
+ eastl::copy_backward(eastl::make_move_iterator(first), eastl::make_move_iterator(last), eastl::make_move_iterator(*this));
}
@@ -1780,7 +1780,7 @@ namespace eastl
if(i < (difference_type)(size() / 2)) // Should we insert at the front or at the back? We divide the range in half.
{
- emplace_front(*mItBegin); // This operation potentially invalidates all existing iterators and so we need to assign them anew relative to mItBegin below.
+ emplace_front(eastl::move(*mItBegin)); // This operation potentially invalidates all existing iterators and so we need to assign them anew relative to mItBegin below.
itPosition = mItBegin + i;
@@ -1788,18 +1788,18 @@ namespace eastl
iterator oldBegin (mItBegin, typename iterator::Increment());
const iterator oldBeginPlus1(oldBegin, typename iterator::Increment());
- oldBegin.copy(oldBeginPlus1, newPosition, eastl::has_trivial_relocate());
+ oldBegin.copy(oldBeginPlus1, newPosition, eastl::is_move_assignable());
}
else
{
- emplace_back(*iterator(mItEnd, typename iterator::Decrement()));
+ emplace_back(eastl::move(*iterator(mItEnd, typename iterator::Decrement())));
itPosition = mItBegin + i;
iterator oldBack (mItEnd, typename iterator::Decrement());
const iterator oldBackMinus1(oldBack, typename iterator::Decrement());
- oldBack.copy_backward(itPosition, oldBackMinus1, eastl::has_trivial_relocate());
+ oldBack.copy_backward(itPosition, oldBackMinus1, eastl::is_move_assignable());
}
*itPosition = eastl::move(valueSaved);
@@ -1925,6 +1925,9 @@ namespace eastl
#if EASTL_ASSERT_ENABLED
if(EASTL_UNLIKELY(!(validate_iterator(position) & isf_valid)))
EASTL_FAIL_MSG("deque::erase -- invalid iterator");
+
+ if(EASTL_UNLIKELY(position == end()))
+ EASTL_FAIL_MSG("deque::erase -- end() iterator is an invalid iterator for erase");
#endif
iterator itPosition(position, typename iterator::FromConst());
@@ -1933,12 +1936,12 @@ namespace eastl
if(i < (difference_type)(size() / 2)) // Should we move the front entries forward or the back entries backward? We divide the range in half.
{
- itNext.copy_backward(mItBegin, itPosition, eastl::has_trivial_relocate());
+ itNext.copy_backward(mItBegin, itPosition, eastl::is_move_assignable());
pop_front();
}
else
{
- itPosition.copy(itNext, mItEnd, eastl::has_trivial_relocate());
+ itPosition.copy(itNext, mItEnd, eastl::is_move_assignable());
pop_back();
}
@@ -1970,7 +1973,7 @@ namespace eastl
const iterator itNewBegin(mItBegin + n);
value_type** const pPtrArrayBegin = mItBegin.mpCurrentArrayPtr;
- itLast.copy_backward(mItBegin, itFirst, eastl::has_trivial_relocate());
+ itLast.copy_backward(mItBegin, itFirst, eastl::is_move_assignable());
for(; mItBegin != itNewBegin; ++mItBegin) // Question: If value_type is a POD type, will the compiler generate this loop at all?
mItBegin.mpCurrent->~value_type(); // If so, then we need to make a specialization for destructing PODs.
@@ -1984,7 +1987,7 @@ namespace eastl
iterator itNewEnd(mItEnd - n);
value_type** const pPtrArrayEnd = itNewEnd.mpCurrentArrayPtr + 1;
- itFirst.copy(itLast, mItEnd, eastl::has_trivial_relocate());
+ itFirst.copy(itLast, mItEnd, eastl::is_move_assignable());
for(iterator itTemp(itNewEnd); itTemp != mItEnd; ++itTemp)
itTemp.mpCurrent->~value_type();
@@ -2077,6 +2080,7 @@ namespace eastl
template
void deque::swap(deque& x)
{
+ #if defined(EASTL_DEQUE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR) && EASTL_DEQUE_LEGACY_SWAP_BEHAVIOUR_REQUIRES_COPY_CTOR
if(mAllocator == x.mAllocator) // If allocators are equivalent...
DoSwap(x);
else // else swap the contents.
@@ -2085,6 +2089,19 @@ namespace eastl
*this = x; // itself call this member swap function.
x = temp;
}
+ #else
+ // NOTE(rparolin): The previous implementation required T to be copy-constructible in the fall-back case where
+ // allocators with unique instances copied elements. This was an unnecessary restriction and prevented the common
+ // usage of deque with non-copyable types (eg. eastl::deque or eastl::deque).
+ //
+ // The previous implementation violated the following requirements of deque::swap so the fall-back code has
+ // been removed. EASTL implicitly defines 'propagate_on_container_swap = false' therefore the fall-back case is
+ // undefined behaviour. We simply swap the contents and the allocator as that is the common expectation of
+ // users and does not put the container into an invalid state since it can not free its memory via its current
+ // allocator instance.
+ //
+ DoSwap(x);
+ #endif
}
diff --git a/include/EASTL/fixed_hash_set.h b/include/EASTL/fixed_hash_set.h
index f5f56d71..0db9f49f 100644
--- a/include/EASTL/fixed_hash_set.h
+++ b/include/EASTL/fixed_hash_set.h
@@ -116,18 +116,14 @@ namespace eastl
const Predicate& predicate = Predicate());
fixed_hash_set(const this_type& x);
- #if EASTL_MOVE_SEMANTICS_ENABLED
- fixed_hash_set(this_type&& x);
- fixed_hash_set(this_type&& x, const overflow_allocator_type& overflowAllocator);
- #endif
+ fixed_hash_set(this_type&& x);
+ fixed_hash_set(this_type&& x, const overflow_allocator_type& overflowAllocator);
fixed_hash_set(std::initializer_list ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_HASH_SET_DEFAULT_ALLOCATOR);
this_type& operator=(const this_type& x);
this_type& operator=(std::initializer_list ilist);
- #if EASTL_MOVE_SEMANTICS_ENABLED
- this_type& operator=(this_type&& x);
- #endif
+ this_type& operator=(this_type&& x);
void swap(this_type& x);
@@ -207,17 +203,13 @@ namespace eastl
const Predicate& predicate = Predicate());
fixed_hash_multiset(const this_type& x);
- #if EASTL_MOVE_SEMANTICS_ENABLED
- fixed_hash_multiset(this_type&& x);
- fixed_hash_multiset(this_type&& x, const overflow_allocator_type& overflowAllocator);
- #endif
+ fixed_hash_multiset(this_type&& x);
+ fixed_hash_multiset(this_type&& x, const overflow_allocator_type& overflowAllocator);
fixed_hash_multiset(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_HASH_MULTISET_DEFAULT_ALLOCATOR);
this_type& operator=(const this_type& x);
this_type& operator=(std::initializer_list<value_type> ilist);
- #if EASTL_MOVE_SEMANTICS_ENABLED
- this_type& operator=(this_type&& x);
- #endif
+ this_type& operator=(this_type&& x);
void swap(this_type& x);
@@ -343,50 +335,48 @@ namespace eastl
}
- #if EASTL_MOVE_SEMANTICS_ENABLED
- template
- inline fixed_hash_set::fixed_hash_set(this_type&& x)
- : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(),
- x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer))
- {
- // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here.
- mAllocator.copy_overflow_allocator(x.mAllocator);
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::fixed_hash_set(this_type&& x)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(),
+ x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer))
+ {
+ // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here.
+ mAllocator.copy_overflow_allocator(x.mAllocator);
- #if EASTL_NAME_ENABLED
- mAllocator.set_name(x.mAllocator.get_name());
- #endif
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
- EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
- if(!bEnableOverflow)
- base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
- mAllocator.reset(mNodeBuffer);
- base_type::insert(x.begin(), x.end());
- }
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(x.begin(), x.end());
+ }
- template
- inline fixed_hash_set::fixed_hash_set(this_type&& x, const overflow_allocator_type& overflowAllocator)
- : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount),
- x.hash_function(), x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
- {
- // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here.
- mAllocator.copy_overflow_allocator(x.mAllocator);
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::fixed_hash_set(this_type&& x, const overflow_allocator_type& overflowAllocator)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount),
+ x.hash_function(), x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
+ {
+ // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here.
+ mAllocator.copy_overflow_allocator(x.mAllocator);
- #if EASTL_NAME_ENABLED
- mAllocator.set_name(x.mAllocator.get_name());
- #endif
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
- EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
- if(!bEnableOverflow)
- base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
- mAllocator.reset(mNodeBuffer);
- base_type::insert(x.begin(), x.end());
- }
- #endif
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(x.begin(), x.end());
+ }
template
@@ -418,15 +408,13 @@ namespace eastl
}
- #if EASTL_MOVE_SEMANTICS_ENABLED
- template
- inline typename fixed_hash_set::this_type&
- fixed_hash_set::operator=(this_type&& x)
- {
- operator=(x);
- return *this;
- }
- #endif
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::this_type&
+ fixed_hash_set<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::operator=(this_type&& x)
+ {
+ operator=(x);
+ return *this;
+ }
template
@@ -620,50 +608,48 @@ namespace eastl
}
- #if EASTL_MOVE_SEMANTICS_ENABLED
- template
- inline fixed_hash_multiset::fixed_hash_multiset(this_type&& x)
- : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(),
- x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer))
- {
- // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here.
- mAllocator.copy_overflow_allocator(x.mAllocator);
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::fixed_hash_multiset(this_type&& x)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount), x.hash_function(),
+ x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer))
+ {
+ // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here.
+ mAllocator.copy_overflow_allocator(x.mAllocator);
- #if EASTL_NAME_ENABLED
- mAllocator.set_name(x.mAllocator.get_name());
- #endif
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
- EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
- if(!bEnableOverflow)
- base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
- mAllocator.reset(mNodeBuffer);
- base_type::insert(x.begin(), x.end());
- }
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(x.begin(), x.end());
+ }
- template
- inline fixed_hash_multiset::fixed_hash_multiset(this_type&& x, const overflow_allocator_type& overflowAllocator)
- : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount),
- x.hash_function(), x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
- {
- // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here.
- mAllocator.copy_overflow_allocator(x.mAllocator);
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::fixed_hash_multiset(this_type&& x, const overflow_allocator_type& overflowAllocator)
+ : base_type(prime_rehash_policy::GetPrevBucketCountOnly(bucketCount),
+ x.hash_function(), x.equal_function(), fixed_allocator_type(NULL, mBucketBuffer, overflowAllocator))
+ {
+ // This implementation is the same as above. If we could rely on using C++11 delegating constructor support then we could just call that here.
+ mAllocator.copy_overflow_allocator(x.mAllocator);
- #if EASTL_NAME_ENABLED
- mAllocator.set_name(x.mAllocator.get_name());
- #endif
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
- EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
+ EASTL_CT_ASSERT((nodeCount >= 1) && (bucketCount >= 2));
- if(!bEnableOverflow)
- base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
+ if(!bEnableOverflow)
+ base_type::set_max_load_factor(10000.f); // Set it so that we will never resize.
- mAllocator.reset(mNodeBuffer);
- base_type::insert(x.begin(), x.end());
- }
- #endif
+ mAllocator.reset(mNodeBuffer);
+ base_type::insert(x.begin(), x.end());
+ }
template
@@ -695,15 +681,13 @@ namespace eastl
}
- #if EASTL_MOVE_SEMANTICS_ENABLED
- template
- inline typename fixed_hash_multiset::this_type&
- fixed_hash_multiset::operator=(this_type&& x)
- {
- base_type::operator=(x);
- return *this;
- }
- #endif
+ template <typename Value, size_t nodeCount, size_t bucketCount, bool bEnableOverflow, typename Hash, typename Predicate, bool bCacheHashCode, typename OverflowAllocator>
+ inline typename fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::this_type&
+ fixed_hash_multiset<Value, nodeCount, bucketCount, bEnableOverflow, Hash, Predicate, bCacheHashCode, OverflowAllocator>::operator=(this_type&& x)
+ {
+ base_type::operator=(x);
+ return *this;
+ }
template
diff --git a/include/EASTL/fixed_list.h b/include/EASTL/fixed_list.h
index dbac1ae0..88c70317 100644
--- a/include/EASTL/fixed_list.h
+++ b/include/EASTL/fixed_list.h
@@ -96,10 +96,8 @@ namespace eastl
explicit fixed_list(size_type n); // Currently we don't support overflowAllocator specification for other constructors, for simplicity.
fixed_list(size_type n, const value_type& value);
fixed_list(const this_type& x);
- #if EASTL_MOVE_SEMANTICS_ENABLED
- fixed_list(this_type&& x);
- fixed_list(this_type&&, const overflow_allocator_type& overflowAllocator);
- #endif
+ fixed_list(this_type&& x);
+ fixed_list(this_type&&, const overflow_allocator_type& overflowAllocator);
fixed_list(std::initializer_list<value_type> ilist, const overflow_allocator_type& overflowAllocator = EASTL_FIXED_LIST_DEFAULT_ALLOCATOR);
template
@@ -107,9 +105,7 @@ namespace eastl
this_type& operator=(const this_type& x);
this_type& operator=(std::initializer_list<value_type> ilist);
- #if EASTL_MOVE_SEMANTICS_ENABLED
- this_type& operator=(this_type&& x);
- #endif
+ this_type& operator=(this_type&& x);
void swap(this_type& x);
void reset_lose_memory(); // This is a unilateral reset to an initially empty state. No destructors are called, no deallocation occurs.
@@ -188,44 +184,42 @@ namespace eastl
}
- #if EASTL_MOVE_SEMANTICS_ENABLED
- template
- inline fixed_list::fixed_list(this_type&& x)
- : base_type(fixed_allocator_type(mBuffer))
- {
- // Since we are a fixed_list, we can't normally swap pointers unless both this and
- // x are using using overflow and the overflow allocators are equal. To do:
- //if(has_overflowed() && x.has_overflowed() && (get_overflow_allocator() == x.get_overflow_allocator()))
- //{
- // We can swap contents and may need to swap the allocators as well.
- //}
-
- // The following is currently identical to the fixed_vector(const this_type& x) code above. If it stays that
- // way then we may want to make a shared implementation.
- mAllocator.copy_overflow_allocator(x.mAllocator);
-
- #if EASTL_NAME_ENABLED
- mAllocator.set_name(x.mAllocator.get_name());
- #endif
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_list(this_type&& x)
+ : base_type(fixed_allocator_type(mBuffer))
+ {
+ // Since we are a fixed_list, we can't normally swap pointers unless both this and
+ // x are using overflow and the overflow allocators are equal. To do:
+ //if(has_overflowed() && x.has_overflowed() && (get_overflow_allocator() == x.get_overflow_allocator()))
+ //{
+ // We can swap contents and may need to swap the allocators as well.
+ //}
+
+ // The following is currently identical to the fixed_vector(const this_type& x) code above. If it stays that
+ // way then we may want to make a shared implementation.
+ mAllocator.copy_overflow_allocator(x.mAllocator);
- assign(x.begin(), x.end());
- }
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+ assign(x.begin(), x.end());
+ }
- template
- inline fixed_list::fixed_list(this_type&& x, const overflow_allocator_type& overflowAllocator)
- : base_type(fixed_allocator_type(mBuffer, overflowAllocator))
- {
- // See comments above.
- mAllocator.copy_overflow_allocator(x.mAllocator);
- #if EASTL_NAME_ENABLED
- mAllocator.set_name(x.mAllocator.get_name());
- #endif
+ template <typename T, size_t nodeCount, bool bEnableOverflow, typename OverflowAllocator>
+ inline fixed_list<T, nodeCount, bEnableOverflow, OverflowAllocator>::fixed_list(this_type&& x, const overflow_allocator_type& overflowAllocator)
+ : base_type(fixed_allocator_type(mBuffer, overflowAllocator))
+ {
+ // See comments above.
+ mAllocator.copy_overflow_allocator(x.mAllocator);
- assign(x.begin(), x.end());
- }
- #endif
+ #if EASTL_NAME_ENABLED
+ mAllocator.set_name(x.mAllocator.get_name());
+ #endif
+
+ assign(x.begin(), x.end());
+ }
template
@@ -267,14 +261,12 @@ namespace eastl
}
- #if EASTL_MOVE_SEMANTICS_ENABLED
- template
- inline typename fixed_list::this_type&
- fixed_list::operator=(this_type&& x)
- {
- return operator=(x);
- }
- #endif
+ template