Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
tbb::internal::concurrent_vector_base_v3::helper Class Reference
Inheritance diagram for tbb::internal::concurrent_vector_base_v3::helper:
Collaboration diagram for tbb::internal::concurrent_vector_base_v3::helper:

Classes

struct  destroy_body
 
struct  init_body
 TODO: turn into lambda functions when available. More...
 
struct  safe_init_body
 
struct  segment_not_used_predicate
 

Public Member Functions

 helper (segment_t *segments, size_type fb, size_type esize, size_type index, size_type s, size_type f) throw ()
 
void first_segment () throw ()
 
void next_segment () throw ()
 
template<typename F >
size_type apply (const F &func)
 
segment_value_t get_segment_value (size_type index, bool wait)
 
 ~helper ()
 
void cleanup ()
 Out of line code to assist the destructor in infrequent cases. More...
 

Static Public Member Functions

static bool incompact_predicate (size_type size)
 
static size_type find_segment_end (const concurrent_vector_base_v3 &v)
 
static void assign_first_segment_if_necessary (concurrent_vector_base_v3 &v, segment_index_t k)
 assign first segment size. k - is index of last segment to be allocated, not a count of segments More...
 
static void * allocate_segment (concurrent_vector_base_v3 &v, size_type n)
 
template<typename argument_type >
static void publish_segment (segment_t &s, argument_type rhs)
 Publish segment so other threads can see it. More...
 
static size_type enable_segment (concurrent_vector_base_v3 &v, size_type k, size_type element_size, bool mark_as_not_used_on_failure=false)
 
static void extend_table_if_necessary (concurrent_vector_base_v3 &v, size_type k, size_type start)
 
static void extend_segment_table (concurrent_vector_base_v3 &v, size_type start)
 
static segment_t & acquire_segment (concurrent_vector_base_v3 &v, size_type index, size_type element_size, bool owner)
 

Public Attributes

segment_t * table
 
size_type first_block
 
size_type k
 
size_type sz
 
size_type start
 
size_type finish
 
size_type element_size
 

Static Public Attributes

static const size_type page_size = 4096
 memory page size More...
 

Detailed Description

Definition at line 44 of file concurrent_vector.cpp.

Constructor & Destructor Documentation

◆ helper()

tbb::internal::concurrent_vector_base_v3::helper::helper ( segment_t *  segments,
size_type  fb,
size_type  esize,
size_type  index,
size_type  s,
size_type  f 
)
throw (
)
inline

◆ ~helper()

tbb::internal::concurrent_vector_base_v3::helper::~helper ( )
inline

Definition at line 164 of file concurrent_vector.cpp.

164  {
165  if( sz >= finish ) return; // the work is done correctly
166  cleanup();
167  }
void cleanup()
Out of line code to assist the destructor in infrequent cases.

References cleanup(), finish, and sz.

Here is the call graph for this function:

Member Function Documentation

◆ acquire_segment()

static segment_t& tbb::internal::concurrent_vector_base_v3::helper::acquire_segment ( concurrent_vector_base_v3 &  v,
size_type  index,
size_type  element_size,
bool  owner 
)
inlinestatic

Definition at line 108 of file concurrent_vector.cpp.

108  {
109  segment_t &s = v.my_segment[index]; // TODO: pass v.my_segment as argument
110  if( s.load<acquire>() == segment_not_used() ) { // do not check for segment_allocation_failed state
111  if( owner ) {
112  enable_segment( v, index, element_size );
113  } else {
114  ITT_NOTIFY(sync_prepare, &s);
115  spin_wait_while(segment_not_used_predicate(s));
116  ITT_NOTIFY(sync_acquired, &s);
117  }
118  } else {
119  ITT_NOTIFY(sync_acquired, &s);
120  }
121  enforce_segment_allocated(s.load<relaxed>()); //it's hard to recover correctly after segment_allocation_failed state
122  return s;
123  }
static size_type enable_segment(concurrent_vector_base_v3 &v, size_type k, size_type element_size, bool mark_as_not_used_on_failure=false)
No ordering.
Definition: atomic.h:51
friend void enforce_segment_allocated(segment_value_t const &s, internal::exception_id exception=eid_bad_last_alloc)
Acquire.
Definition: atomic.h:47
#define ITT_NOTIFY(name, obj)
Definition: itt_notify.h:120
void const char const char int ITT_FORMAT __itt_group_sync s
void spin_wait_while(predicate_type condition)
Definition: tbb_machine.h:409

References tbb::acquire, element_size, enable_segment(), tbb::internal::concurrent_vector_base_v3::enforce_segment_allocated, ITT_NOTIFY, tbb::internal::concurrent_vector_base_v3::my_segment, tbb::relaxed, s, and tbb::internal::spin_wait_while().

Referenced by tbb::internal::concurrent_vector_base_v3::internal_grow(), and tbb::internal::concurrent_vector_base_v3::internal_push_back().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ allocate_segment()

static void* tbb::internal::concurrent_vector_base_v3::helper::allocate_segment ( concurrent_vector_base_v3 &  v,
size_type  n 
)
inlinestatic

Definition at line 79 of file concurrent_vector.cpp.

79  {
80  void *ptr = v.vector_allocator_ptr(v, n);
81  if(!ptr) throw_exception(eid_bad_alloc); // check for bad allocation, throw exception
82  return ptr;
83  }
void throw_exception(exception_id eid)
Versionless convenience wrapper for throw_exception_v4()

References tbb::internal::eid_bad_alloc, tbb::internal::throw_exception(), and tbb::internal::concurrent_vector_base_v3::vector_allocator_ptr.

Referenced by tbb::internal::concurrent_vector_base_v3::internal_compact().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ apply()

template<typename F >
size_type tbb::internal::concurrent_vector_base_v3::helper::apply ( const F &  func)
inline

Definition at line 145 of file concurrent_vector.cpp.

145  {
146  first_segment();
147  while( sz < finish ) { // work for more than one segment
148  //TODO: remove extra load() of table[k] inside func
149  func( table[k], table[k].load<relaxed>().pointer<char>() + element_size*start, sz - start );
150  next_segment();
151  }
152  func( table[k], table[k].load<relaxed>().pointer<char>() + element_size*start, finish - start );
153  return k;
154  }

References element_size, finish, first_segment(), k, next_segment(), start, sz, and table.

Referenced by tbb::internal::concurrent_vector_base_v3::internal_clear(), tbb::internal::concurrent_vector_base_v3::internal_compact(), tbb::internal::concurrent_vector_base_v3::internal_grow(), and tbb::internal::concurrent_vector_base_v3::internal_resize().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ assign_first_segment_if_necessary()

static void tbb::internal::concurrent_vector_base_v3::helper::assign_first_segment_if_necessary ( concurrent_vector_base_v3 &  v,
segment_index_t  k 
)
inlinestatic

assign first segment size. k - is index of last segment to be allocated, not a count of segments

Definition at line 64 of file concurrent_vector.cpp.

64  {
65  if( !v.my_first_block ) {
66  /* There was a suggestion to set first segment according to incompact_predicate:
67  while( k && !helper::incompact_predicate(segment_size( k ) * element_size) )
68  --k; // while previous vector size is compact, decrement
69  // reasons to not do it:
70  // * constructor(n) is not ready to accept fragmented segments
71  // * backward compatibility due to that constructor
72  // * current version gives additional guarantee and faster init.
73  // * two calls to reserve() will give the same effect.
74  */
75  v.my_first_block.compare_and_swap(k+1, 0); // store number of segments
76  }
77  }

References tbb::internal::atomic_impl< T >::compare_and_swap(), k, and tbb::internal::concurrent_vector_base_v3::my_first_block.

Referenced by tbb::internal::concurrent_vector_base_v3::internal_assign(), tbb::internal::concurrent_vector_base_v3::internal_copy(), tbb::internal::concurrent_vector_base_v3::internal_grow(), and tbb::internal::concurrent_vector_base_v3::internal_reserve().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ cleanup()

void tbb::internal::concurrent_vector_base_v3::helper::cleanup ( )

Out of line code to assist the destructor in infrequent cases.

Definition at line 285 of file concurrent_vector.cpp.

285  {
286  if( !sz ) { // allocation failed, restore the table
287  segment_index_t k_start = k, k_end = segment_index_of(finish-1);
288  if( segment_base( k_start ) < start )
289  get_segment_value(k_start++, true); // wait
290  if( k_start < first_block ) {
291  segment_value_t segment0 = get_segment_value(0, start>0); // wait if necessary
292  if((segment0 != segment_not_used()) && !k_start ) ++k_start;
293  if(segment0 != segment_allocated())
294  for(; k_start < first_block && k_start <= k_end; ++k_start )
295  publish_segment(table[k_start], segment_allocation_failed());
296  else for(; k_start < first_block && k_start <= k_end; ++k_start )
297  publish_segment(table[k_start], static_cast<void*>(
298  (segment0.pointer<char>()) + segment_base(k_start)*element_size) );
299  }
300  for(; k_start <= k_end; ++k_start ) // not in first block
301  if(table[k_start].load<acquire>() == segment_not_used())
302  publish_segment(table[k_start], segment_allocation_failed());
303  // fill allocated items
304  first_segment();
305  goto recover;
306  }
307  while( sz <= finish ) { // there is still work for at least one segment
308  next_segment();
309 recover:
310  segment_value_t array = table[k].load<relaxed>();
311  if(array == segment_allocated())
312  std::memset( (array.pointer<char>()) + element_size*start, 0, ((sz<finish?sz:finish) - start)*element_size );
313  else __TBB_ASSERT( array == segment_allocation_failed(), NULL );
314  }
315 }
No ordering.
Definition: atomic.h:51
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:169
segment_value_t get_segment_value(size_type index, bool wait)
static void publish_segment(segment_t &s, argument_type rhs)
Publish segment so other threads can see it.
static segment_index_t segment_index_of(size_type index)
static segment_index_t segment_base(segment_index_t k)

References __TBB_ASSERT, tbb::internal::concurrent_vector_base_v3::segment_t::load(), tbb::internal::concurrent_vector_base_v3::segment_value_t::pointer(), tbb::relaxed, tbb::internal::concurrent_vector_base_v3::segment_base(), and tbb::internal::concurrent_vector_base_v3::segment_index_of().

Referenced by ~helper().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ enable_segment()

concurrent_vector_base_v3::size_type tbb::internal::concurrent_vector_base_v3::helper::enable_segment ( concurrent_vector_base_v3 &  v,
concurrent_vector_base_v3::size_type  k,
concurrent_vector_base_v3::size_type  element_size,
bool  mark_as_not_used_on_failure = false 
)
static

Definition at line 226 of file concurrent_vector.cpp.

227  {
228 
229  struct segment_scope_guard : no_copy{
230  segment_t* my_segment_ptr;
231  bool my_mark_as_not_used;
232  segment_scope_guard(segment_t& segment, bool mark_as_not_used) : my_segment_ptr(&segment), my_mark_as_not_used(mark_as_not_used){}
233  void dismiss(){ my_segment_ptr = 0;}
234  ~segment_scope_guard(){
235  if (my_segment_ptr){
236  if (!my_mark_as_not_used){
237  publish_segment(*my_segment_ptr, segment_allocation_failed());
238  }else{
239  publish_segment(*my_segment_ptr, segment_not_used());
240  }
241  }
242  }
243  };
244 
245  segment_t* s = v.my_segment; // TODO: optimize out as argument? Optimize accesses to my_first_block
246  __TBB_ASSERT(s[k].load<relaxed>() != segment_allocated(), "concurrent operation during growth?");
247 
248  size_type size_of_enabled_segment = segment_size(k);
249  size_type size_to_allocate = size_of_enabled_segment;
 250  if( !k ) {
 251  assign_first_segment_if_necessary(v, default_initial_segments-1);
 252  size_of_enabled_segment = 2 ;
 253  size_to_allocate = segment_size(v.my_first_block);
 254 
255  } else {
256  spin_wait_while_eq( v.my_first_block, segment_index_t(0) );
257  }
258 
259  if( k && (k < v.my_first_block)){ //no need to allocate anything
260  // s[0].array is changed only once ( 0 -> !0 ) and points to uninitialized memory
261  segment_value_t array0 = s[0].load<acquire>();
262  if(array0 == segment_not_used()){
263  // sync_prepare called only if there is a wait
264  ITT_NOTIFY(sync_prepare, &s[0]);
265  spin_wait_while( segment_not_used_predicate(s[0]));
266  array0 = s[0].load<acquire>();
267  }
268  ITT_NOTIFY(sync_acquired, &s[0]);
269 
270  segment_scope_guard k_segment_guard(s[k], false);
271  enforce_segment_allocated(array0); // initial segment should be allocated
272  k_segment_guard.dismiss();
273 
274  publish_segment( s[k],
275  static_cast<void*>(array0.pointer<char>() + segment_base(k)*element_size )
276  );
277  } else {
278  segment_scope_guard k_segment_guard(s[k], mark_as_not_used_on_failure);
279  publish_segment(s[k], allocate_segment(v, size_to_allocate));
280  k_segment_guard.dismiss();
281  }
282  return size_of_enabled_segment;
283 }
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:169
static size_type segment_size(segment_index_t k)
friend void enforce_segment_allocated(segment_value_t const &s, internal::exception_id exception=eid_bad_last_alloc)
Acquire.
Definition: atomic.h:47
static void publish_segment(segment_t &s, argument_type rhs)
Publish segment so other threads can see it.
void spin_wait_while_eq(const volatile T &location, U value)
Spin WHILE the value of the variable is equal to a given value.
Definition: tbb_machine.h:395
#define ITT_NOTIFY(name, obj)
Definition: itt_notify.h:120
static segment_index_t segment_base(segment_index_t k)
void const char const char int ITT_FORMAT __itt_group_sync s
void spin_wait_while(predicate_type condition)
Definition: tbb_machine.h:409
static void assign_first_segment_if_necessary(concurrent_vector_base_v3 &v, segment_index_t k)
assign first segment size. k - is index of last segment to be allocated, not a count of segments
static void * allocate_segment(concurrent_vector_base_v3 &v, size_type n)

References __TBB_ASSERT, tbb::acquire, tbb::internal::concurrent_vector_base_v3::default_initial_segments, tbb::internal::concurrent_vector_base_v3::enforce_segment_allocated, ITT_NOTIFY, tbb::internal::concurrent_vector_base_v3::my_first_block, tbb::internal::concurrent_vector_base_v3::my_segment, tbb::internal::concurrent_vector_base_v3::segment_value_t::pointer(), s, tbb::internal::concurrent_vector_base_v3::segment_base(), tbb::internal::concurrent_vector_base_v3::segment_size(), tbb::internal::spin_wait_while(), and tbb::internal::spin_wait_while_eq().

Referenced by acquire_segment(), tbb::internal::concurrent_vector_base_v3::internal_assign(), tbb::internal::concurrent_vector_base_v3::internal_copy(), and tbb::internal::concurrent_vector_base_v3::internal_reserve().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ extend_segment_table()

void tbb::internal::concurrent_vector_base_v3::helper::extend_segment_table ( concurrent_vector_base_v3 &  v,
concurrent_vector_base_v3::size_type  start 
)
static

Definition at line 200 of file concurrent_vector.cpp.

200  {
202  // If other threads are trying to set pointers in the short segment, wait for them to finish their
203  // assignments before we copy the short segment to the long segment. Note: grow_to_at_least depends on it
204  for( segment_index_t i = 0; segment_base(i) < start && v.my_segment == v.my_storage; i++ ){
205  if(v.my_storage[i].load<relaxed>() == segment_not_used()) {
206  ITT_NOTIFY(sync_prepare, &v.my_storage[i]);
207  atomic_backoff backoff(true);
208  while( v.my_segment == v.my_storage && (v.my_storage[i].load<relaxed>() == segment_not_used()) )
209  backoff.pause();
210  ITT_NOTIFY(sync_acquired, &v.my_storage[i]);
211  }
212  }
213  if( v.my_segment != v.my_storage ) return;
214 
215  segment_t* new_segment_table = (segment_t*)NFS_Allocate( pointers_per_long_table, sizeof(segment_t), NULL );
216  __TBB_ASSERT(new_segment_table, "NFS_Allocate should throws exception if it cannot allocate the requested storage, and not returns zero pointer" );
217  std::uninitialized_fill_n(new_segment_table,size_t(pointers_per_long_table),segment_t()); //init newly allocated table
218  //TODO: replace with static assert
219  __TBB_STATIC_ASSERT(pointers_per_long_table >= pointers_per_short_table, "size of the big table should be not lesser than of the small one, as we copy values to it" );
220  std::copy(v.my_storage, v.my_storage+pointers_per_short_table, new_segment_table);//copy values from old table, here operator= of segment_t is used
221  if( v.my_segment.compare_and_swap( new_segment_table, v.my_storage ) != v.my_storage )
222  NFS_Free( new_segment_table );
223  // else TODO: add ITT_NOTIFY signals for v.my_segment?
224 }
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
No ordering.
Definition: atomic.h:51
#define __TBB_STATIC_ASSERT(condition, msg)
Definition: tbb_stddef.h:536
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:169
static size_type segment_size(segment_index_t k)
#define ITT_NOTIFY(name, obj)
Definition: itt_notify.h:120
static segment_index_t segment_base(segment_index_t k)
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
Number of slots for segment pointers inside the class.

References __TBB_ASSERT, __TBB_STATIC_ASSERT, ITT_NOTIFY, tbb::internal::concurrent_vector_base_v3::segment_t::load(), tbb::internal::concurrent_vector_base_v3::my_segment, tbb::internal::concurrent_vector_base_v3::my_storage, tbb::internal::NFS_Allocate(), tbb::internal::NFS_Free(), tbb::internal::atomic_backoff::pause(), tbb::internal::concurrent_vector_base_v3::pointers_per_long_table, tbb::internal::concurrent_vector_base_v3::pointers_per_short_table, tbb::relaxed, tbb::internal::concurrent_vector_base_v3::segment_base(), tbb::internal::concurrent_vector_base_v3::segment_size(), and start.

Referenced by extend_table_if_necessary().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ extend_table_if_necessary()

static void tbb::internal::concurrent_vector_base_v3::helper::extend_table_if_necessary ( concurrent_vector_base_v3 &  v,
size_type  k,
size_type  start 
)
inlinestatic

◆ find_segment_end()

static size_type tbb::internal::concurrent_vector_base_v3::helper::find_segment_end ( const concurrent_vector_base_v3 &  v)
inlinestatic

◆ first_segment()

void tbb::internal::concurrent_vector_base_v3::helper::first_segment ( )
throw (
)
inline

Definition at line 130 of file concurrent_vector.cpp.

130  {
131  __TBB_ASSERT( start <= finish, NULL );
132  __TBB_ASSERT( first_block || !finish, NULL );
133  if( k < first_block ) k = 0; // process solid segment at a time
134  size_type base = segment_base( k );
135  __TBB_ASSERT( base <= start, NULL );
136  finish -= base; start -= base; // rebase as offsets from segment k
137  sz = k ? base : segment_size( first_block ); // sz==base for k>0
138  }
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:169
static size_type segment_size(segment_index_t k)
static segment_index_t segment_base(segment_index_t k)

References __TBB_ASSERT, finish, first_block, k, tbb::internal::concurrent_vector_base_v3::segment_base(), tbb::internal::concurrent_vector_base_v3::segment_size(), start, and sz.

Referenced by apply().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ get_segment_value()

segment_value_t tbb::internal::concurrent_vector_base_v3::helper::get_segment_value ( size_type  index,
bool  wait 
)
inline

Definition at line 155 of file concurrent_vector.cpp.

155  {
156  segment_t &s = table[index];
157  if( wait && (s.load<acquire>() == segment_not_used()) ) {
158  ITT_NOTIFY(sync_prepare, &s);
159  spin_wait_while(segment_not_used_predicate(s));
160  ITT_NOTIFY(sync_acquired, &s);
161  }
162  return s.load<relaxed>();
163  }
No ordering.
Definition: atomic.h:51
Acquire.
Definition: atomic.h:47
#define ITT_NOTIFY(name, obj)
Definition: itt_notify.h:120
void const char const char int ITT_FORMAT __itt_group_sync s
void spin_wait_while(predicate_type condition)
Definition: tbb_machine.h:409

References tbb::acquire, ITT_NOTIFY, tbb::relaxed, s, tbb::internal::spin_wait_while(), and table.

Here is the call graph for this function:

◆ incompact_predicate()

static bool tbb::internal::concurrent_vector_base_v3::helper::incompact_predicate ( size_type  size)
inlinestatic

Definition at line 49 of file concurrent_vector.cpp.

49  { // assert size != 0, see source/test/test_vector_layout.cpp
50  return size < page_size || ((size-1)%page_size < page_size/2 && size < page_size * 128); // for more details
51  }
static const size_type page_size
memory page size
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t size

References page_size, and size.

Referenced by tbb::internal::concurrent_vector_base_v3::internal_compact().

Here is the caller graph for this function:

◆ next_segment()

void tbb::internal::concurrent_vector_base_v3::helper::next_segment ( )
throw (
)
inline

Definition at line 139 of file concurrent_vector.cpp.

References finish, first_block, k, tbb::internal::concurrent_vector_base_v3::segment_size(), start, and sz.

Referenced by apply().

Here is the call graph for this function:
Here is the caller graph for this function:

◆ publish_segment()

template<typename argument_type >
static void tbb::internal::concurrent_vector_base_v3::helper::publish_segment ( segment_t &  s,
argument_type  rhs 
)
inlinestatic

Publish segment so other threads can see it.

Definition at line 87 of file concurrent_vector.cpp.

 87  {
 88  // see also itt_store_pointer_with_release_v3()
 89  ITT_NOTIFY( sync_releasing, &s );
 90  s.store<release>(rhs);
 91  }
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p sync_releasing
#define ITT_NOTIFY(name, obj)
Definition: itt_notify.h:120
void const char const char int ITT_FORMAT __itt_group_sync s
Release.
Definition: atomic.h:49

References ITT_NOTIFY, tbb::release, s, and sync_releasing.

Member Data Documentation

◆ element_size

size_type tbb::internal::concurrent_vector_base_v3::helper::element_size

Definition at line 127 of file concurrent_vector.cpp.

Referenced by acquire_segment(), and apply().

◆ finish

size_type tbb::internal::concurrent_vector_base_v3::helper::finish

Definition at line 127 of file concurrent_vector.cpp.

Referenced by apply(), first_segment(), next_segment(), and ~helper().

◆ first_block

size_type tbb::internal::concurrent_vector_base_v3::helper::first_block

◆ k

size_type tbb::internal::concurrent_vector_base_v3::helper::k

◆ page_size

const size_type tbb::internal::concurrent_vector_base_v3::helper::page_size = 4096
static

memory page size

Definition at line 47 of file concurrent_vector.cpp.

Referenced by incompact_predicate().

◆ start

size_type tbb::internal::concurrent_vector_base_v3::helper::start

◆ sz

size_type tbb::internal::concurrent_vector_base_v3::helper::sz

Definition at line 127 of file concurrent_vector.cpp.

Referenced by apply(), first_segment(), next_segment(), and ~helper().

◆ table

segment_t* tbb::internal::concurrent_vector_base_v3::helper::table

Definition at line 126 of file concurrent_vector.cpp.

Referenced by apply(), and get_segment_value().


The documentation for this class was generated from the following file:

Copyright © 2005-2019 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.