#if !defined( BOLT_AMP_SYNCVIEW_H )
#define BOLT_AMP_SYNCVIEW_H
// NOTE: the class head below is reconstructed; the extracted source drops the
// declaration between the template header and the typedef. The include guard
// suggests a view deriving from concurrency::array_view that bypasses the base
// class's implicit host/device synchronization on element access.
template <typename _Value_type, int _Rank = 1>
class synchronized_view : public concurrency::array_view<_Value_type, _Rank>
{
public:
    typedef _Value_type value_type;
    // Construct a two-dimensional view over a contiguous container such as
    // std::vector<_Value_type>. The constructor signatures are reconstructed;
    // only the base-class initializers survived extraction.
    template <typename _Container>
    synchronized_view(int _E0, int _E1, _Container& _Src)
        : concurrency::array_view<_Value_type, _Rank>(_E0, _E1, _Src)
    {
    }

    // Construct a two-dimensional view over raw host data.
    synchronized_view(int _E0, int _E1, _Value_type * _Src)
        : concurrency::array_view<_Value_type, _Rank>(_E0, _E1, _Src)
    {
    }
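    // Usage sketch (hypothetical, for illustration; assumes the reconstructed
    // class head and constructors above):
    //
    //   std::vector<float> host(rows * cols);
    //   synchronized_view<float, 2> view(rows, cols, host);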
    // Host-side element access. Computes the element address directly from the
    // buffer descriptor instead of delegating to the base class, bypassing the
    // synchronization the base class would perform; _Requested_mode is ignored
    // for the same reason.
    _Ret_ void * _Access(_Access_mode _Requested_mode,
                         const concurrency::index<_Rank>& _Index) const __CPU_ONLY
    {
        static_assert(_Rank == 2,
            "value_type& array_view::operator()(int,int) is only permissible on array_view<T, 2>");
        int * _Ptr = reinterpret_cast<int *>(_M_buffer_descriptor._M_data_ptr);
        return &_Ptr[_M_total_linear_offset +
                     ((sizeof(_Value_type) / sizeof(int)) *
                      (_M_array_multiplier[0] * _Index[0] + _Index[1]))];
    }
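    // Address arithmetic, spelled out: the buffer is addressed in int-sized
    // units, so the 2D index is first linearized as
    // _M_array_multiplier[0] * _Index[0] + _Index[1] (row pitch times row,
    // plus column, assuming the multiplier holds the row pitch in elements),
    // then scaled by sizeof(_Value_type)/sizeof(int). For _Value_type = double
    // and a row pitch of 100, element (2, 5) lands
    // 2 * (100 * 2 + 5) = 410 ints past _M_total_linear_offset.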
    // Device-side element access: the same address computation, compiled for
    // restrict(amp) contexts.
    _Ret_ void * _Access(_Access_mode _Requested_mode,
                         const concurrency::index<_Rank>& _Index) const __GPU_ONLY
    {
        UNREFERENCED_PARAMETER(_Requested_mode);
        int * _Ptr = reinterpret_cast<int *>(_M_buffer_descriptor._M_data_ptr);
        return &_Ptr[_M_total_linear_offset +
                     ((sizeof(_Value_type) / sizeof(int)) *
                      (_M_array_multiplier[0] * _Index[0] + _Index[1]))];
    }
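    // The two _Access overloads differ only in their restriction specifier:
    // __CPU_ONLY (restrict(cpu)) is selected for host call sites, __GPU_ONLY
    // (restrict(amp)) for kernel call sites, so the __GPU operator() below can
    // forward to whichever overload matches the calling context.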
    // Element access through a concurrency::index: fetch the raw address in
    // read/write mode and reinterpret it as the element type.
    value_type& operator() (const concurrency::index<_Rank>& _Index) const __GPU
    {
        void * _Ptr = _Access(_Read_write_access, _Index);
        return *reinterpret_cast<_Value_type*>(_Ptr);
    }
    // Convenience two-dimensional access: view(row, col).
    _Value_type& operator() (int _I0, int _I1) const __GPU
    {
        static_assert(_Rank == 2,
            "value_type& array_view::operator()(int,int) is only permissible on array_view<T, 2>");
        return this->operator()(concurrency::index<2>(_I0, _I1));
    }
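};  // closing brace and guard reconstructed to match the opening above

// Usage sketch (hypothetical, for illustration): because operator() is __GPU
// (restrict(amp,cpu)), the same indexing syntax works on both sides. Host-side
// reads should synchronize explicitly, since this class's CPU _Access does not.
//
//   synchronized_view<float, 2> view(rows, cols, host);
//   concurrency::parallel_for_each(view.extent,
//       [=](concurrency::index<2> idx) restrict(amp)
//       {
//           view(idx) += 1.0f;                  // device-side access
//       });
//   view.synchronize();                         // bring data back to the host
//   float f = view(0, 0);                       // host-side access

#endif // BOLT_AMP_SYNCVIEW_H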