// LLVM OpenMP* Runtime Library
// kmp_gsupport.c
1 /*
2  * kmp_gsupport.c
3  */
4 
5 
6 //===----------------------------------------------------------------------===//
7 //
8 // The LLVM Compiler Infrastructure
9 //
10 // This file is dual licensed under the MIT and the University of Illinois Open
11 // Source Licenses. See LICENSE.txt for details.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 
16 #if defined(__x86_64) || defined (__powerpc64__) || defined(__aarch64__)
17 # define KMP_I8
18 #endif
19 #include "kmp.h"
20 #include "kmp_atomic.h"
21 
22 #if OMPT_SUPPORT
23 #include "ompt-specific.h"
24 #endif
25 
26 #ifdef __cplusplus
27  extern "C" {
28 #endif // __cplusplus
29 
// MKLOC(loc, routine): declare a static ident_t source-location record to
// pass to the __kmpc_* entry points. GOMP entry points carry no real source
// location, so the string encodes "unknown".
#define MKLOC(loc,routine) \
    static ident_t (loc) = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;" };
32 
33 #include "kmp_ftn_os.h"
34 
35 void
36 xexpand(KMP_API_NAME_GOMP_BARRIER)(void)
37 {
38  int gtid = __kmp_entry_gtid();
39  MKLOC(loc, "GOMP_barrier");
40  KA_TRACE(20, ("GOMP_barrier: T#%d\n", gtid));
41  __kmpc_barrier(&loc, gtid);
42 }
43 
44 
//
// Mutual exclusion
//

//
// The symbol that icc/ifort generates for unnamed critical sections
// - .gomp_critical_user_ - is defined using .comm in any object that
// references it. We can't reference it directly here in C code, as the
// symbol contains a ".".
//
// The RTL contains an assembly language definition of .gomp_critical_user_
// with another symbol __kmp_unnamed_critical_addr initialized with its
// address.
//
59 extern kmp_critical_name *__kmp_unnamed_critical_addr;
60 
61 
62 void
63 xexpand(KMP_API_NAME_GOMP_CRITICAL_START)(void)
64 {
65  int gtid = __kmp_entry_gtid();
66  MKLOC(loc, "GOMP_critical_start");
67  KA_TRACE(20, ("GOMP_critical_start: T#%d\n", gtid));
68  __kmpc_critical(&loc, gtid, __kmp_unnamed_critical_addr);
69 }
70 
71 
72 void
73 xexpand(KMP_API_NAME_GOMP_CRITICAL_END)(void)
74 {
75  int gtid = __kmp_get_gtid();
76  MKLOC(loc, "GOMP_critical_end");
77  KA_TRACE(20, ("GOMP_critical_end: T#%d\n", gtid));
78  __kmpc_end_critical(&loc, gtid, __kmp_unnamed_critical_addr);
79 }
80 
81 
82 void
83 xexpand(KMP_API_NAME_GOMP_CRITICAL_NAME_START)(void **pptr)
84 {
85  int gtid = __kmp_entry_gtid();
86  MKLOC(loc, "GOMP_critical_name_start");
87  KA_TRACE(20, ("GOMP_critical_name_start: T#%d\n", gtid));
88  __kmpc_critical(&loc, gtid, (kmp_critical_name *)pptr);
89 }
90 
91 
92 void
93 xexpand(KMP_API_NAME_GOMP_CRITICAL_NAME_END)(void **pptr)
94 {
95  int gtid = __kmp_get_gtid();
96  MKLOC(loc, "GOMP_critical_name_end");
97  KA_TRACE(20, ("GOMP_critical_name_end: T#%d\n", gtid));
98  __kmpc_end_critical(&loc, gtid, (kmp_critical_name *)pptr);
99 }
100 
101 
102 //
103 // The Gnu codegen tries to use locked operations to perform atomic updates
104 // inline. If it can't, then it calls GOMP_atomic_start() before performing
105 // the update and GOMP_atomic_end() afterward, regardless of the data type.
106 //
107 
108 void
109 xexpand(KMP_API_NAME_GOMP_ATOMIC_START)(void)
110 {
111  int gtid = __kmp_entry_gtid();
112  KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));
113 
114 #if OMPT_SUPPORT
115  __ompt_thread_assign_wait_id(0);
116 #endif
117 
118  __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
119 }
120 
121 
122 void
123 xexpand(KMP_API_NAME_GOMP_ATOMIC_END)(void)
124 {
125  int gtid = __kmp_get_gtid();
126  KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));
127  __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
128 }
129 
130 
131 int
132 xexpand(KMP_API_NAME_GOMP_SINGLE_START)(void)
133 {
134  int gtid = __kmp_entry_gtid();
135  MKLOC(loc, "GOMP_single_start");
136  KA_TRACE(20, ("GOMP_single_start: T#%d\n", gtid));
137 
138  if (! TCR_4(__kmp_init_parallel))
139  __kmp_parallel_initialize();
140 
141  //
142  // 3rd parameter == FALSE prevents kmp_enter_single from pushing a
143  // workshare when USE_CHECKS is defined. We need to avoid the push,
144  // as there is no corresponding GOMP_single_end() call.
145  //
146  return __kmp_enter_single(gtid, &loc, FALSE);
147 }
148 
149 
// GOMP entry for the start of "#pragma omp single copyprivate(...)".
// Returns NULL on the thread that executes the single region; every other
// thread blocks until the copyprivate data pointer has been published
// (by GOMP_single_copy_end) and then returns that pointer.
// NOTE: the two barriers below are order-critical; see comments inline.
void *
xexpand(KMP_API_NAME_GOMP_SINGLE_COPY_START)(void)
{
    void *retval;
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_single_copy_start");
    KA_TRACE(20, ("GOMP_single_copy_start: T#%d\n", gtid));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    //
    // If this is the first thread to enter, return NULL. The generated
    // code will then call GOMP_single_copy_end() for this thread only,
    // with the copyprivate data pointer as an argument.
    //
    if (__kmp_enter_single(gtid, &loc, FALSE))
        return NULL;

    //
    // Wait for the first thread to set the copyprivate data pointer,
    // and for all other threads to reach this point.
    //
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

    //
    // Retrieve the value of the copyprivate data pointer, and wait for all
    // threads to do likewise, then return.
    //
    retval = __kmp_team_from_gtid(gtid)->t.t_copypriv_data;
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
    return retval;
}
183 
184 
// GOMP entry for the end of "single copyprivate": called only by the thread
// that executed the single region, with its copyprivate data pointer.
void
xexpand(KMP_API_NAME_GOMP_SINGLE_COPY_END)(void *data)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_single_copy_end: T#%d\n", gtid));

    //
    // Set the copyprivate data pointer for the team, then hit the barrier
    // so that the other threads will continue on and read it. Hit another
    // barrier before continuing, so that we know that the copyprivate
    // data pointer has been propagated to all threads before trying to
    // reuse the t_copypriv_data field.
    //
    __kmp_team_from_gtid(gtid)->t.t_copypriv_data = data;
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
}
202 
203 
204 void
205 xexpand(KMP_API_NAME_GOMP_ORDERED_START)(void)
206 {
207  int gtid = __kmp_entry_gtid();
208  MKLOC(loc, "GOMP_ordered_start");
209  KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
210  __kmpc_ordered(&loc, gtid);
211 }
212 
213 
214 void
215 xexpand(KMP_API_NAME_GOMP_ORDERED_END)(void)
216 {
217  int gtid = __kmp_get_gtid();
218  MKLOC(loc, "GOMP_ordered_end");
219  KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
220  __kmpc_end_ordered(&loc, gtid);
221 }
222 
223 
224 //
225 // Dispatch macro defs
226 //
227 // They come in two flavors: 64-bit unsigned, and either 32-bit signed
228 // (IA-32 architecture) or 64-bit signed (Intel(R) 64).
229 //
230 
231 #if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS || KMP_ARCH_MIPS64
232 # define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_4
233 # define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_4
234 # define KMP_DISPATCH_NEXT __kmpc_dispatch_next_4
235 #else
236 # define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_8
237 # define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_8
238 # define KMP_DISPATCH_NEXT __kmpc_dispatch_next_8
239 #endif /* KMP_ARCH_X86 */
240 
241 # define KMP_DISPATCH_INIT_ULL __kmp_aux_dispatch_init_8u
242 # define KMP_DISPATCH_FINI_CHUNK_ULL __kmp_aux_dispatch_fini_chunk_8u
243 # define KMP_DISPATCH_NEXT_ULL __kmpc_dispatch_next_8u
244 
245 
246 //
// The parallel construct
248 //
249 
#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
//
// Microtask invoked by the KMP fork machinery for a plain GOMP parallel
// region: runs the GOMP outlined function 'task' on 'data', bracketed by
// OMPT state and frame bookkeeping when event tracking is enabled.
// 'npr' (number of threads in the team) is unused here.
//
void
__kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *),
    void *data)
{
#if OMPT_SUPPORT
    kmp_info_t *thr;
    ompt_frame_t *ompt_frame;
    ompt_state_t enclosing_state;

    if (ompt_status & ompt_status_track) {
        // get pointer to thread data structure
        thr = __kmp_threads[*gtid];

        // save enclosing task state; set current state for task
        enclosing_state = thr->th.ompt_thread_info.state;
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;

        // set task frame so tools can reconstruct the call stack boundary
        ompt_frame = __ompt_get_task_frame_internal(0);
        ompt_frame->exit_runtime_frame = __builtin_frame_address(0);
    }
#endif

    // Run the compiler-outlined parallel region body.
    task(data);

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        // clear task frame
        ompt_frame->exit_runtime_frame = NULL;

        // restore enclosing state
        thr->th.ompt_thread_info.state = enclosing_state;
    }
#endif
}
288 
289 
#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
//
// Microtask for combined parallel + loop constructs: initializes the loop
// worksharing construct on this thread, then runs the GOMP outlined
// function, with OMPT state/frame bookkeeping around the invocation.
// The loop bounds arrive pre-adjusted to KMP's inclusive-upper-bound form.
//
void
__kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr,
    void (*task)(void *), void *data, unsigned num_threads, ident_t *loc,
    enum sched_type schedule, long start, long end, long incr, long chunk_size)
{
    //
    // Initialize the loop worksharing construct.
    //
    KMP_DISPATCH_INIT(loc, *gtid, schedule, start, end, incr, chunk_size,
        schedule != kmp_sch_static);

#if OMPT_SUPPORT
    kmp_info_t *thr;
    ompt_frame_t *ompt_frame;
    ompt_state_t enclosing_state;

    if (ompt_status & ompt_status_track) {
        thr = __kmp_threads[*gtid];
        // save enclosing task state; set current state for task
        enclosing_state = thr->th.ompt_thread_info.state;
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;

        // set task frame
        ompt_frame = __ompt_get_task_frame_internal(0);
        ompt_frame->exit_runtime_frame = __builtin_frame_address(0);
    }
#endif

    //
    // Now invoke the microtask.
    //
    task(data);

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        // clear task frame
        ompt_frame->exit_runtime_frame = NULL;

        // reset enclosing state
        thr->th.ompt_thread_info.state = enclosing_state;
    }
#endif
}
336 
337 
#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
//
// Shared fork helper for the GOMP_parallel_* entry points: forwards to
// __kmp_fork_call() with the given microtask wrapper and the trailing
// varargs, then runs the pre-invocation hook on the master thread when a
// real fork happened (rc != 0). 'unwrapped_task' is the original GOMP
// outlined function, passed through for OMPT identification.
//
void
__kmp_GOMP_fork_call(ident_t *loc, int gtid, void (*unwrapped_task)(void *), microtask_t wrapper, int argc,...)
{
    int rc;
    kmp_info_t *thr = __kmp_threads[gtid];
    kmp_team_t *team = thr->th.th_team;
    int tid = __kmp_tid_from_gtid(gtid);

    va_list ap;
    va_start(ap, argc);

    rc = __kmp_fork_call(loc, gtid, fork_context_gnu, argc,
#if OMPT_SUPPORT
        VOLATILE_CAST(void *) unwrapped_task,
#endif
        wrapper, __kmp_invoke_task_func,
// NOTE(review): presumably __kmp_fork_call expects va_list by reference on
// these ABIs and by value elsewhere — confirm against its declaration.
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
        &ap
#else
        ap
#endif
        );

    va_end(ap);

    if (rc) {
        // Fork produced a real team: run the master-side pre-invoke hook.
        __kmp_run_before_invoked_task(gtid, tid, thr, team);
    }

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
#if OMPT_TRACE
        ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
        ompt_task_info_t *task_info = __ompt_get_taskinfo(0);

        // implicit task callback
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
                team_info->parallel_id, task_info->task_id);
        }
#endif
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;
    }
#endif
}
387 
//
// Serialized (team-of-one) fallback used when forking is not possible or a
// single thread was requested: enter a serialized parallel region and,
// under OMPT, build the lightweight task team and fire the parallel-begin
// and implicit-task-begin callbacks.
//
static void
__kmp_GOMP_serialized_parallel(ident_t *loc, kmp_int32 gtid, void (*task)(void *))
{
    __kmp_serialized_parallel(loc, gtid);

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        ompt_task_id_t ompt_task_id = __ompt_get_task_id_internal(0);
        ompt_frame_t *ompt_frame = __ompt_get_task_frame_internal(0);
        kmp_info_t *thr = __kmp_threads[gtid];

        ompt_parallel_id_t ompt_parallel_id = __ompt_parallel_id_new(gtid);
        ompt_task_id_t my_ompt_task_id = __ompt_task_id_new(gtid);

        ompt_frame->exit_runtime_frame = NULL;

        // parallel region callback
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_parallel_begin)) {
            int team_size = 1;
            ompt_callbacks.ompt_callback(ompt_event_parallel_begin)(
                ompt_task_id, ompt_frame, ompt_parallel_id,
                team_size, (void *) task);
        }

        // set up lightweight task; heap-allocated because the GOMP entry
        // points can't keep it on the stack (freed in GOMP_parallel_end
        // after unlinking)
        ompt_lw_taskteam_t *lwt = (ompt_lw_taskteam_t *)
            __kmp_allocate(sizeof(ompt_lw_taskteam_t));
        __ompt_lw_taskteam_init(lwt, thr, gtid, (void *) task, ompt_parallel_id);
        lwt->ompt_task_info.task_id = my_ompt_task_id;
        lwt->ompt_task_info.frame.exit_runtime_frame = 0;
        __ompt_lw_taskteam_link(lwt, thr);

#if OMPT_TRACE
        // implicit task callback
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
                ompt_parallel_id, my_ompt_task_id);
        }
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;
#endif
    }
#endif
}
433 
434 
//
// GOMP entry for the start of "#pragma omp parallel": forks a team running
// 'task' via the microtask wrapper, or falls back to a serialized region.
// num_threads == 0 means "use the default"; 1 forces the serialized path.
// The parent's reenter frame is recorded for OMPT around the fork.
//
void
xexpand(KMP_API_NAME_GOMP_PARALLEL_START)(void (*task)(void *), void *data, unsigned num_threads)
{
    int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
    ompt_frame_t *parent_frame;

    if (ompt_status & ompt_status_track) {
        parent_frame = __ompt_get_task_frame_internal(0);
        parent_frame->reenter_runtime_frame = __builtin_frame_address(0);
    }
#endif

    MKLOC(loc, "GOMP_parallel_start");
    KA_TRACE(20, ("GOMP_parallel_start: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
            (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, data);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        parent_frame->reenter_runtime_frame = NULL;
    }
#endif
}
469 
470 
471 void
472 xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(void)
473 {
474  int gtid = __kmp_get_gtid();
475  kmp_info_t *thr;
476 
477  thr = __kmp_threads[gtid];
478 
479  MKLOC(loc, "GOMP_parallel_end");
480  KA_TRACE(20, ("GOMP_parallel_end: T#%d\n", gtid));
481 
482 
483 #if OMPT_SUPPORT
484  ompt_parallel_id_t parallel_id;
485  ompt_frame_t *ompt_frame = NULL;
486 
487  if (ompt_status & ompt_status_track) {
488  ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
489  parallel_id = team_info->parallel_id;
490 
491  ompt_frame = __ompt_get_task_frame_internal(0);
492  ompt_frame->exit_runtime_frame = __builtin_frame_address(0);
493 
494 #if OMPT_TRACE
495  if ((ompt_status == ompt_status_track_callback) &&
496  ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
497  ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
498  ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)(
499  parallel_id, task_info->task_id);
500  }
501 #endif
502 
503  // unlink if necessary. no-op if there is not a lightweight task.
504  ompt_lw_taskteam_t *lwt = __ompt_lw_taskteam_unlink(thr);
505  // GOMP allocates/frees lwt since it can't be kept on the stack
506  if (lwt) __kmp_free(lwt);
507  }
508 #endif
509 
510  if (! __kmp_threads[gtid]->th.th_team->t.t_serialized) {
511  kmp_info_t *thr = __kmp_threads[gtid];
512  __kmp_run_after_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr,
513  thr->th.th_team);
514  __kmp_join_call(&loc, gtid);
515  }
516  else {
517  __kmpc_end_serialized_parallel(&loc, gtid);
518 
519 #if OMPT_SUPPORT
520  if (ompt_status & ompt_status_track) {
521  if ((ompt_status == ompt_status_track_callback) &&
522  ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
523  ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
524  ompt_callbacks.ompt_callback(ompt_event_parallel_end)(
525  parallel_id, task_info->task_id);
526  }
527 
528  thr->th.ompt_thread_info.state =
529  (((thr->th.th_team)->t.t_serialized) ?
530  ompt_state_work_serial : ompt_state_work_parallel);
531  }
532 #endif
533 
534  }
535 }
536 
537 
538 //
539 // Loop worksharing constructs
540 //
541 
//
// The Gnu codegen passes in an exclusive upper bound for the overall range,
// but the libguide dispatch code expects an inclusive upper bound, hence the
// "end - incr" 5th argument to KMP_DISPATCH_INIT (and the " ub - str" 11th
// argument to __kmp_GOMP_fork_call).
//
// Conversely, KMP_DISPATCH_NEXT returns an inclusive upper bound in *p_ub,
// but the Gnu codegen expects an exclusive upper bound, so the adjustment
// "*p_ub += stride" compensates for the discrepancy.
//
// Correction: the gnu codegen always adjusts the upper bound by +-1, not the
// stride value. We adjust the dispatch parameters accordingly (by +-1), but
// we still adjust p_ub by the actual stride value.
//
// The "runtime" versions do not take a chunk_sz parameter.
//
// The profile lib cannot support construct checking of unordered loops that
// are predetermined by the compiler to be statically scheduled, as the gcc
// codegen will not always emit calls to GOMP_loop_static_next() to get the
// next iteration. Instead, it emits inline code to call omp_get_thread_num()
// and calculate the iteration space using the result. It doesn't do this
// with ordered static loops, so they can be checked.
//
565 
//
// LOOP_START(func, schedule): emits a GOMP_loop_<sched>_start entry point
// for signed-long loops. Converts Gnu's exclusive upper bound to the
// inclusive bound KMP dispatch expects (ub -/+ 1), initializes dispatch,
// and fetches the first chunk into [*p_lb, *p_ub). Returns nonzero iff a
// chunk was assigned; *p_ub is converted back to exclusive via +/-1.
// An empty range skips dispatch entirely and returns 0.
//
#define LOOP_START(func,schedule) \
    int func (long lb, long ub, long str, long chunk_sz, long *p_lb, \
        long *p_ub) \
    { \
        int status; \
        long stride; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
            gtid, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                (schedule) != kmp_sch_static); \
            status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                (kmp_int *)p_ub, (kmp_int *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
            gtid, *p_lb, *p_ub, status)); \
        return status; \
    }
596 
597 
//
// LOOP_RUNTIME_START(func, schedule): like LOOP_START but for the
// "runtime" schedule, which carries no chunk_sz parameter (chunk_sz is
// fixed at 0 and the "push workshare" flag is always TRUE).
//
#define LOOP_RUNTIME_START(func,schedule) \
    int func (long lb, long ub, long str, long *p_lb, long *p_ub) \
    { \
        int status; \
        long stride; \
        long chunk_sz = 0; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %d\n", \
            gtid, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \
            status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                (kmp_int *)p_ub, (kmp_int *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
            gtid, *p_lb, *p_ub, status)); \
        return status; \
    }
627 
628 
//
// LOOP_NEXT(func, fini_code): emits a GOMP_loop_<sched>_next entry point.
// 'fini_code' runs first (the ordered variants use it to close the
// previous chunk's ordered window); then the next chunk is fetched and
// *p_ub converted back to Gnu's exclusive form by +/-1 of the stride sign.
//
#define LOOP_NEXT(func,fini_code) \
    int func(long *p_lb, long *p_ub) \
    { \
        int status; \
        long stride; \
        int gtid = __kmp_get_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d\n", gtid)); \
        \
        fini_code \
        status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
            (kmp_int *)p_ub, (kmp_int *)&stride); \
        if (status) { \
            *p_ub += (stride > 0) ? 1 : -1; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, stride 0x%lx, " \
            "returning %d\n", gtid, *p_lb, *p_ub, stride, status)); \
        return status; \
    }
649 
650 
//
// Instantiate GOMP_loop_*_start/next for each schedule kind. The ordered
// variants pass KMP_DISPATCH_FINI_CHUNK as fini_code so the previous
// chunk's ordered section is released before the next chunk is fetched.
//
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_STATIC_START), kmp_sch_static)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT), {})
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START), kmp_sch_dynamic_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT), {})
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_GUIDED_START), kmp_sch_guided_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT), {})
LOOP_RUNTIME_START(xexpand(KMP_API_NAME_GOMP_LOOP_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT), {})

LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START), kmp_ord_static)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START), kmp_ord_dynamic_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START), kmp_ord_guided_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_RUNTIME_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START), kmp_ord_runtime)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
672 
673 
674 void
675 xexpand(KMP_API_NAME_GOMP_LOOP_END)(void)
676 {
677  int gtid = __kmp_get_gtid();
678  KA_TRACE(20, ("GOMP_loop_end: T#%d\n", gtid))
679 
680  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
681 
682  KA_TRACE(20, ("GOMP_loop_end exit: T#%d\n", gtid))
683 }
684 
685 
// GOMP entry for a "nowait" loop end: no barrier, trace only.
// NOTE(review): the __kmp_get_gtid() call sits inside the KA_TRACE argument
// list, so presumably it is elided when tracing is compiled out — confirm
// against the KA_TRACE definition before hoisting it.
void
xexpand(KMP_API_NAME_GOMP_LOOP_END_NOWAIT)(void)
{
    KA_TRACE(20, ("GOMP_loop_end_nowait: T#%d\n", __kmp_get_gtid()))
}
691 
692 
693 //
694 // Unsigned long long loop worksharing constructs
695 //
696 // These are new with gcc 4.4
697 //
698 
//
// LOOP_START_ULL(func, schedule): 64-bit unsigned variant of LOOP_START
// (gcc 4.4+). 'up' selects loop direction and 'str' is the unsigned step
// magnitude; str2 is the signed stride actually passed to dispatch.
// NOTE(review): the emptiness/adjustment tests use (str > 0) on the
// unsigned magnitude, not str2 — presumably intentional for the GOMP ABI;
// confirm for down-counting loops.
//
#define LOOP_START_ULL(func,schedule) \
    int func (int up, unsigned long long lb, unsigned long long ub, \
        unsigned long long str, unsigned long long chunk_sz, \
        unsigned long long *p_lb, unsigned long long *p_ub) \
    { \
        int status; \
        long long str2 = up ? ((long long)str) : -((long long)str); \
        long long stride; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        \
        KA_TRACE(20, ( #func ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
            gtid, up, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \
                (schedule) != kmp_sch_static); \
            status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, \
                (kmp_uint64 *)p_lb, (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str2); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
            gtid, *p_lb, *p_ub, status)); \
        return status; \
    }
732 
733 
//
// LOOP_RUNTIME_START_ULL(func, schedule): 64-bit unsigned variant of
// LOOP_RUNTIME_START — "runtime" schedule, no chunk_sz parameter
// (chunk_sz fixed at 0, push flag TRUE).
//
#define LOOP_RUNTIME_START_ULL(func,schedule) \
    int func (int up, unsigned long long lb, unsigned long long ub, \
        unsigned long long str, unsigned long long *p_lb, \
        unsigned long long *p_ub) \
    { \
        int status; \
        long long str2 = up ? ((long long)str) : -((long long)str); \
        unsigned long long stride; \
        unsigned long long chunk_sz = 0; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        \
        KA_TRACE(20, ( #func ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
            gtid, up, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, TRUE); \
            status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, \
                (kmp_uint64 *)p_lb, (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT((long long)stride == str2); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
            gtid, *p_lb, *p_ub, status)); \
        return status; \
    }
767 
768 
//
// LOOP_NEXT_ULL(func, fini_code): 64-bit unsigned variant of LOOP_NEXT.
// 'fini_code' runs first (ordered variants close the previous chunk's
// ordered window); *p_ub is converted back to exclusive form by the sign
// of the returned stride.
//
#define LOOP_NEXT_ULL(func,fini_code) \
    int func(unsigned long long *p_lb, unsigned long long *p_ub) \
    { \
        int status; \
        long long stride; \
        int gtid = __kmp_get_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d\n", gtid)); \
        \
        fini_code \
        status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
            (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
        if (status) { \
            *p_ub += (stride > 0) ? 1 : -1; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, stride 0x%llx, " \
            "returning %d\n", gtid, *p_lb, *p_ub, stride, status)); \
        return status; \
    }
789 
790 
//
// Instantiate the unsigned-long-long GOMP_loop_ull_* entry points for each
// schedule kind; ordered variants release the previous ordered chunk in
// their fini_code.
//
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START), kmp_sch_static)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT), {})
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START), kmp_sch_dynamic_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT), {})
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START), kmp_sch_guided_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT), {})
LOOP_RUNTIME_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT), {})

LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START), kmp_ord_static)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START), kmp_ord_dynamic_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START), kmp_ord_guided_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_RUNTIME_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START), kmp_ord_runtime)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
812 
813 
814 //
815 // Combined parallel / loop worksharing constructs
816 //
817 // There are no ull versions (yet).
818 //
819 
//
// PARALLEL_LOOP_START(func, schedule): emits a combined
// GOMP_parallel_loop_<sched>_start entry point. Forks a team running the
// parallel-loop microtask wrapper (which initializes dispatch on the
// workers), then initializes dispatch for the master thread itself via
// the trailing KMP_DISPATCH_INIT — both use the inclusive-bound form
// (ub -/+ 1).
//
#define PARALLEL_LOOP_START(func, schedule) \
    void func (void (*task) (void *), void *data, unsigned num_threads, \
        long lb, long ub, long str, long chunk_sz) \
    { \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
            gtid, lb, ub, str, chunk_sz )); \
        \
        if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \
            if (num_threads != 0) { \
                __kmp_push_num_threads(&loc, gtid, num_threads); \
            } \
            __kmp_GOMP_fork_call(&loc, gtid, task, \
                (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, \
                task, data, num_threads, &loc, (schedule), lb, \
                (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
        } \
        else { \
            __kmp_GOMP_serialized_parallel(&loc, gtid, task); \
        } \
        \
        KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
            (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
            (schedule) != kmp_sch_static); \
        \
        KA_TRACE(20, ( #func " exit: T#%d\n", gtid)); \
    }
848 
849 
// Instantiate the combined parallel+loop start entry points (long only;
// there are no ull versions of these).
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START), kmp_sch_static)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START), kmp_sch_dynamic_chunked)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START), kmp_sch_guided_chunked)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START), kmp_sch_runtime)
854 
855 
856 //
857 // Tasking constructs
858 //
859 
//
// GOMP entry for "#pragma omp task". Allocates a KMP task for 'func',
// copies (and aligns) the firstprivate blob described by arg_size /
// arg_align from 'data' into the task's shareds area, then either enqueues
// the task (if_cond true) or executes it immediately inline.
//
void
xexpand(KMP_API_NAME_GOMP_TASK)(void (*func)(void *), void *data, void (*copy_func)(void *, void *),
    long arg_size, long arg_align, int if_cond, unsigned gomp_flags)
{
    MKLOC(loc, "GOMP_task");
    int gtid = __kmp_entry_gtid();
    kmp_int32 flags = 0;
    // View the flat flags word through the tasking-flags bitfield struct.
    kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *) & flags;

    KA_TRACE(20, ("GOMP_task: T#%d\n", gtid));

    // The low-order bit is the "tied" flag
    if (gomp_flags & 1) {
        input_flags->tiedness = 1;
    }
    input_flags->native = 1;
    // __kmp_task_alloc() sets up all other flags

    if (! if_cond) {
        // Undeferred task, run inline below: no shareds copy is needed.
        arg_size = 0;
    }

    // Over-allocate by arg_align - 1 so shareds can be aligned manually.
    kmp_task_t *task = __kmp_task_alloc(&loc, gtid, input_flags,
        sizeof(kmp_task_t), arg_size ? arg_size + arg_align - 1 : 0,
        (kmp_routine_entry_t)func);

    if (arg_size > 0) {
        if (arg_align > 0) {
            // Round shareds up to the next arg_align boundary.
            task->shareds = (void *)((((size_t)task->shareds)
                + arg_align - 1) / arg_align * arg_align);
        }
        //else error??

        if (copy_func) {
            (*copy_func)(task->shareds, data);
        }
        else {
            KMP_MEMCPY(task->shareds, data, arg_size);
        }
    }

    if (if_cond) {
        // Deferred: hand the task to the scheduler.
        __kmpc_omp_task(&loc, gtid, task);
    }
    else {
#if OMPT_SUPPORT
        ompt_thread_info_t oldInfo;
        kmp_info_t *thread;
        kmp_taskdata_t *taskdata;
        if (ompt_status & ompt_status_track) {
            // Store the threads states and restore them after the task
            thread = __kmp_threads[ gtid ];
            taskdata = KMP_TASK_TO_TASKDATA(task);
            oldInfo = thread->th.ompt_thread_info;
            thread->th.ompt_thread_info.wait_id = 0;
            thread->th.ompt_thread_info.state = ompt_state_work_parallel;
            taskdata->ompt_task_info.frame.exit_runtime_frame =
                __builtin_frame_address(0);
        }
#endif

        // Undeferred: run the task body here, bracketed by the if0 calls.
        __kmpc_omp_task_begin_if0(&loc, gtid, task);
        func(data);
        __kmpc_omp_task_complete_if0(&loc, gtid, task);

#if OMPT_SUPPORT
        if (ompt_status & ompt_status_track) {
            thread->th.ompt_thread_info = oldInfo;
            taskdata->ompt_task_info.frame.exit_runtime_frame = 0;
        }
#endif
    }

    KA_TRACE(20, ("GOMP_task exit: T#%d\n", gtid));
}
935 
936 
937 void
938 xexpand(KMP_API_NAME_GOMP_TASKWAIT)(void)
939 {
940  MKLOC(loc, "GOMP_taskwait");
941  int gtid = __kmp_entry_gtid();
942 
943  KA_TRACE(20, ("GOMP_taskwait: T#%d\n", gtid));
944 
945  __kmpc_omp_taskwait(&loc, gtid);
946 
947  KA_TRACE(20, ("GOMP_taskwait exit: T#%d\n", gtid));
948 }
949 
950 
951 //
952 // Sections worksharing constructs
953 //
954 
955 //
956 // For the sections construct, we initialize a dynamically scheduled loop
957 // worksharing construct with lb 1 and stride 1, and use the iteration #'s
// that it returns as section ids.
959 //
960 // There are no special entry points for ordered sections, so we always use
961 // the dynamically scheduled workshare, even if the sections aren't ordered.
962 //
963 
964 unsigned
965 xexpand(KMP_API_NAME_GOMP_SECTIONS_START)(unsigned count)
966 {
967  int status;
968  kmp_int lb, ub, stride;
969  int gtid = __kmp_entry_gtid();
970  MKLOC(loc, "GOMP_sections_start");
971  KA_TRACE(20, ("GOMP_sections_start: T#%d\n", gtid));
972 
973  KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);
974 
975  status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
976  if (status) {
977  KMP_DEBUG_ASSERT(stride == 1);
978  KMP_DEBUG_ASSERT(lb > 0);
979  KMP_ASSERT(lb == ub);
980  }
981  else {
982  lb = 0;
983  }
984 
985  KA_TRACE(20, ("GOMP_sections_start exit: T#%d returning %u\n", gtid,
986  (unsigned)lb));
987  return (unsigned)lb;
988 }
989 
990 
991 unsigned
992 xexpand(KMP_API_NAME_GOMP_SECTIONS_NEXT)(void)
993 {
994  int status;
995  kmp_int lb, ub, stride;
996  int gtid = __kmp_get_gtid();
997  MKLOC(loc, "GOMP_sections_next");
998  KA_TRACE(20, ("GOMP_sections_next: T#%d\n", gtid));
999 
1000  status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
1001  if (status) {
1002  KMP_DEBUG_ASSERT(stride == 1);
1003  KMP_DEBUG_ASSERT(lb > 0);
1004  KMP_ASSERT(lb == ub);
1005  }
1006  else {
1007  lb = 0;
1008  }
1009 
1010  KA_TRACE(20, ("GOMP_sections_next exit: T#%d returning %u\n", gtid,
1011  (unsigned)lb));
1012  return (unsigned)lb;
1013 }
1014 
1015 
//
// Start a combined parallel+sections construct: fork a team (or serialize),
// then initialize the dynamic workshare that hands out section ids
// (iterations 1..count, stride 1) to the calling thread.
//
void
xexpand(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START)(void (*task) (void *), void *data,
  unsigned num_threads, unsigned count)
{
    int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
    ompt_frame_t *parent_frame;

    if (ompt_status & ompt_status_track) {
        // Record the frame that re-enters the runtime so OMPT tools can
        // reconstruct the user call stack across the fork.
        parent_frame = __ompt_get_task_frame_internal(0);
        parent_frame->reenter_runtime_frame = __builtin_frame_address(0);
    }
#endif

    MKLOC(loc, "GOMP_parallel_sections_start");
    KA_TRACE(20, ("GOMP_parallel_sections_start: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            // Only forward an explicit (nonzero) thread count request.
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, task, data,
          num_threads, &loc, kmp_nm_dynamic_chunked, (kmp_int)1,
          (kmp_int)count, (kmp_int)1, (kmp_int)1);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        // Fork is complete; clear the re-enter frame recorded above.
        parent_frame->reenter_runtime_frame = NULL;
    }
#endif

    // Set up dynamic dispatch of section ids for the calling thread too.
    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    KA_TRACE(20, ("GOMP_parallel_sections_start exit: T#%d\n", gtid));
}
1057 
1058 
1059 void
1060 xexpand(KMP_API_NAME_GOMP_SECTIONS_END)(void)
1061 {
1062  int gtid = __kmp_get_gtid();
1063  KA_TRACE(20, ("GOMP_sections_end: T#%d\n", gtid))
1064 
1065  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
1066 
1067  KA_TRACE(20, ("GOMP_sections_end exit: T#%d\n", gtid))
1068 }
1069 
1070 
1071 void
1072 xexpand(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT)(void)
1073 {
1074  KA_TRACE(20, ("GOMP_sections_end_nowait: T#%d\n", __kmp_get_gtid()))
1075 }
1076 
1077 // libgomp has an empty function for GOMP_taskyield as of 2013-10-10
1078 void
1079 xexpand(KMP_API_NAME_GOMP_TASKYIELD)(void)
1080 {
1081  KA_TRACE(20, ("GOMP_taskyield: T#%d\n", __kmp_get_gtid()))
1082  return;
1083 }
1084 
1085 #if OMP_40_ENABLED // these are new GOMP_4.0 entry points
1086 
//
// GOMP_4.0 combined parallel entry point: fork a team (honoring the
// requested num_threads and proc-bind flags), run the outlined task in
// the calling (master) thread as well, then join via GOMP_parallel_end.
//
void
xexpand(KMP_API_NAME_GOMP_PARALLEL)(void (*task)(void *), void *data, unsigned num_threads, unsigned int flags)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_parallel");
    KA_TRACE(20, ("GOMP_parallel: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            // Only forward an explicit (nonzero) thread count request.
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        if(flags != 0) {
            // Nonzero flags carry the GOMP proc-bind policy.
            __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, data);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }
    // The calling thread executes the parallel region body too.
    task(data);
    xexpand(KMP_API_NAME_GOMP_PARALLEL_END)();
}
1110 
//
// GOMP_4.0 combined parallel sections entry point: fork a team (honoring
// num_threads and proc-bind flags), set up the dynamic workshare that
// hands out section ids (iterations 1..count), run the outlined task in
// the calling thread, then join via GOMP_parallel_end.
//
void
xexpand(KMP_API_NAME_GOMP_PARALLEL_SECTIONS)(void (*task) (void *), void *data,
  unsigned num_threads, unsigned count, unsigned flags)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_parallel_sections");
    KA_TRACE(20, ("GOMP_parallel_sections: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            // Only forward an explicit (nonzero) thread count request.
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        if(flags != 0) {
            // Nonzero flags carry the GOMP proc-bind policy.
            __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, task, data,
          num_threads, &loc, kmp_nm_dynamic_chunked, (kmp_int)1,
          (kmp_int)count, (kmp_int)1, (kmp_int)1);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }

    // Initialize section-id dispatch for the calling thread as well.
    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    task(data);
    xexpand(KMP_API_NAME_GOMP_PARALLEL_END)();
    KA_TRACE(20, ("GOMP_parallel_sections exit: T#%d\n", gtid));
}
1141 
//
// PARALLEL_LOOP(func, schedule) expands into a GOMP_4.0 combined
// parallel-loop entry point for the given schedule: fork a team
// (honoring num_threads and proc-bind flags), initialize loop dispatch
// over lb..ub (GOMP's ub is exclusive, hence the ub-1 / ub+1 adjustment
// by stride sign) with stride str and chunk chunk_sz, run the outlined
// task in the calling thread, then join via GOMP_parallel_end.
//
#define PARALLEL_LOOP(func, schedule) \
    void func (void (*task) (void *), void *data, unsigned num_threads, \
      long lb, long ub, long str, long chunk_sz, unsigned flags) \
    { \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \
            if (num_threads != 0) { \
                __kmp_push_num_threads(&loc, gtid, num_threads); \
            } \
            if (flags != 0) { \
                __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags); \
            } \
            __kmp_GOMP_fork_call(&loc, gtid, task, \
              (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, \
              task, data, num_threads, &loc, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
        } \
        else { \
            __kmp_GOMP_serialized_parallel(&loc, gtid, task); \
        } \
        \
        KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
          (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
          (schedule) != kmp_sch_static); \
        task(data); \
        xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(); \
        \
        KA_TRACE(20, ( #func " exit: T#%d\n", gtid)); \
    }

// One entry point per GOMP loop schedule.
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC), kmp_sch_static)
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC), kmp_sch_dynamic_chunked)
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED), kmp_sch_guided_chunked)
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME), kmp_sch_runtime)
1180 
1181 
1182 void
1183 xexpand(KMP_API_NAME_GOMP_TASKGROUP_START)(void)
1184 {
1185  int gtid = __kmp_get_gtid();
1186  MKLOC(loc, "GOMP_taskgroup_start");
1187  KA_TRACE(20, ("GOMP_taskgroup_start: T#%d\n", gtid));
1188 
1189  __kmpc_taskgroup(&loc, gtid);
1190 
1191  return;
1192 }
1193 
1194 void
1195 xexpand(KMP_API_NAME_GOMP_TASKGROUP_END)(void)
1196 {
1197  int gtid = __kmp_get_gtid();
1198  MKLOC(loc, "GOMP_taskgroup_end");
1199  KA_TRACE(20, ("GOMP_taskgroup_end: T#%d\n", gtid));
1200 
1201  __kmpc_end_taskgroup(&loc, gtid);
1202 
1203  return;
1204 }
1205 
1206 #ifndef KMP_DEBUG
1207 static
1208 #endif /* KMP_DEBUG */
1209 kmp_int32 __kmp_gomp_to_omp_cancellation_kind(int gomp_kind) {
1210  kmp_int32 cncl_kind = 0;
1211  switch(gomp_kind) {
1212  case 1:
1213  cncl_kind = cancel_parallel;
1214  break;
1215  case 2:
1216  cncl_kind = cancel_loop;
1217  break;
1218  case 4:
1219  cncl_kind = cancel_sections;
1220  break;
1221  case 8:
1222  cncl_kind = cancel_taskgroup;
1223  break;
1224  }
1225  return cncl_kind;
1226 }
1227 
1228 bool
1229 xexpand(KMP_API_NAME_GOMP_CANCELLATION_POINT)(int which)
1230 {
1231  if(__kmp_omp_cancellation) {
1232  KMP_FATAL(NoGompCancellation);
1233  }
1234  int gtid = __kmp_get_gtid();
1235  MKLOC(loc, "GOMP_cancellation_point");
1236  KA_TRACE(20, ("GOMP_cancellation_point: T#%d\n", gtid));
1237 
1238  kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);
1239 
1240  return __kmpc_cancellationpoint(&loc, gtid, cncl_kind);
1241 }
1242 
1243 bool
1244 xexpand(KMP_API_NAME_GOMP_BARRIER_CANCEL)(void)
1245 {
1246  if(__kmp_omp_cancellation) {
1247  KMP_FATAL(NoGompCancellation);
1248  }
1249  KMP_FATAL(NoGompCancellation);
1250  int gtid = __kmp_get_gtid();
1251  MKLOC(loc, "GOMP_barrier_cancel");
1252  KA_TRACE(20, ("GOMP_barrier_cancel: T#%d\n", gtid));
1253 
1254  return __kmpc_cancel_barrier(&loc, gtid);
1255 }
1256 
//
// GOMP_4.0 cancel entry point.
//
// NOTE(review): the leading if/else either fatals (cancellation enabled)
// or returns FALSE, so everything after it is unreachable — presumably
// GOMP-style cancellation is deliberately unsupported here; confirm
// against upstream intent before "fixing".
//
bool
xexpand(KMP_API_NAME_GOMP_CANCEL)(int which, bool do_cancel)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    } else {
        return FALSE;
    }

    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_cancel");
    KA_TRACE(20, ("GOMP_cancel: T#%d\n", gtid));

    kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);

    if(do_cancel == FALSE) {
        // cancel(false) degenerates to a plain cancellation point.
        return xexpand(KMP_API_NAME_GOMP_CANCELLATION_POINT)(which);
    } else {
        return __kmpc_cancel(&loc, gtid, cncl_kind);
    }
}
1278 
1279 bool
1280 xexpand(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL)(void)
1281 {
1282  if(__kmp_omp_cancellation) {
1283  KMP_FATAL(NoGompCancellation);
1284  }
1285  int gtid = __kmp_get_gtid();
1286  MKLOC(loc, "GOMP_sections_end_cancel");
1287  KA_TRACE(20, ("GOMP_sections_end_cancel: T#%d\n", gtid));
1288 
1289  return __kmpc_cancel_barrier(&loc, gtid);
1290 }
1291 
1292 bool
1293 xexpand(KMP_API_NAME_GOMP_LOOP_END_CANCEL)(void)
1294 {
1295  if(__kmp_omp_cancellation) {
1296  KMP_FATAL(NoGompCancellation);
1297  }
1298  int gtid = __kmp_get_gtid();
1299  MKLOC(loc, "GOMP_loop_end_cancel");
1300  KA_TRACE(20, ("GOMP_loop_end_cancel: T#%d\n", gtid));
1301 
1302  return __kmpc_cancel_barrier(&loc, gtid);
1303 }
1304 
1305 // All target functions are empty as of 2014-05-29
1306 void
1307 xexpand(KMP_API_NAME_GOMP_TARGET)(int device, void (*fn) (void *), const void *openmp_target,
1308  size_t mapnum, void **hostaddrs, size_t *sizes, unsigned char *kinds)
1309 {
1310  return;
1311 }
1312 
1313 void
1314 xexpand(KMP_API_NAME_GOMP_TARGET_DATA)(int device, const void *openmp_target, size_t mapnum,
1315  void **hostaddrs, size_t *sizes, unsigned char *kinds)
1316 {
1317  return;
1318 }
1319 
1320 void
1321 xexpand(KMP_API_NAME_GOMP_TARGET_END_DATA)(void)
1322 {
1323  return;
1324 }
1325 
1326 void
1327 xexpand(KMP_API_NAME_GOMP_TARGET_UPDATE)(int device, const void *openmp_target, size_t mapnum,
1328  void **hostaddrs, size_t *sizes, unsigned char *kinds)
1329 {
1330  return;
1331 }
1332 
1333 void
1334 xexpand(KMP_API_NAME_GOMP_TEAMS)(unsigned int num_teams, unsigned int thread_limit)
1335 {
1336  return;
1337 }
1338 #endif // OMP_40_ENABLED
1339 
1340 
1341 /*
1342  The following sections of code create aliases for the GOMP_* functions,
1343  then create versioned symbols using the assembler directive .symver.
1344  This is only pertinent for ELF .so library
1345  xaliasify and xversionify are defined in kmp_ftn_os.h
1346 */
1347 
#ifdef KMP_USE_VERSION_SYMBOLS
// xaliasify() creates an internal alias for each GOMP_* entry point;
// xversionify() then binds that alias to the named symbol version via
// the .symver assembler directive (see kmp_ftn_os.h). ELF .so only.

// GOMP_1.0 aliases
xaliasify(KMP_API_NAME_GOMP_ATOMIC_END, 10);
xaliasify(KMP_API_NAME_GOMP_ATOMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_BARRIER, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_END, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_END, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_ORDERED_END, 10);
xaliasify(KMP_API_NAME_GOMP_ORDERED_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_END, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_START, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_END, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_START, 10);
xaliasify(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10);
xaliasify(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10);
xaliasify(KMP_API_NAME_GOMP_SINGLE_START, 10);

// GOMP_2.0 aliases
xaliasify(KMP_API_NAME_GOMP_TASK, 20);
xaliasify(KMP_API_NAME_GOMP_TASKWAIT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20);

// GOMP_3.0 aliases
xaliasify(KMP_API_NAME_GOMP_TASKYIELD, 30);

// GOMP_4.0 aliases
// The GOMP_parallel* entry points below aren't OpenMP 4.0 related.
#if OMP_40_ENABLED
xaliasify(KMP_API_NAME_GOMP_PARALLEL, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40);
xaliasify(KMP_API_NAME_GOMP_TASKGROUP_START, 40);
xaliasify(KMP_API_NAME_GOMP_TASKGROUP_END, 40);
xaliasify(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40);
xaliasify(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET_DATA, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET_END_DATA, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET_UPDATE, 40);
xaliasify(KMP_API_NAME_GOMP_TEAMS, 40);
#endif

// GOMP_1.0 versioned symbols
xversionify(KMP_API_NAME_GOMP_ATOMIC_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_ATOMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_ORDERED_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_ORDERED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SINGLE_START, 10, "GOMP_1.0");

// GOMP_2.0 versioned symbols
xversionify(KMP_API_NAME_GOMP_TASK, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_TASKWAIT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20, "GOMP_2.0");

// GOMP_3.0 versioned symbols
xversionify(KMP_API_NAME_GOMP_TASKYIELD, 30, "GOMP_3.0");

// GOMP_4.0 versioned symbols
#if OMP_40_ENABLED
xversionify(KMP_API_NAME_GOMP_PARALLEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TASKGROUP_START, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TASKGROUP_END, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET_DATA, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET_END_DATA, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET_UPDATE, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TEAMS, 40, "GOMP_4.0");
#endif

#endif // KMP_USE_VERSION_SYMBOLS
1528 
1529 #ifdef __cplusplus
1530  } //extern "C"
1531 #endif // __cplusplus
1532 
1533 
KMP_EXPORT void __kmpc_end_ordered(ident_t *, kmp_int32 global_tid)
Definition: kmp_csupport.c:844
KMP_EXPORT void __kmpc_end_serialized_parallel(ident_t *, kmp_int32 global_tid)
Definition: kmp_csupport.c:445
KMP_EXPORT void __kmpc_ordered(ident_t *, kmp_int32 global_tid)
Definition: kmp_csupport.c:778
KMP_EXPORT void __kmpc_critical(ident_t *, kmp_int32 global_tid, kmp_critical_name *)
Definition: kmp.h:198
KMP_EXPORT kmp_int32 __kmpc_ok_to_fork(ident_t *)
Definition: kmp_csupport.c:161
KMP_EXPORT void __kmpc_barrier(ident_t *, kmp_int32 global_tid)
Definition: kmp_csupport.c:653
KMP_EXPORT void __kmpc_end_critical(ident_t *, kmp_int32 global_tid, kmp_critical_name *)
sched_type
Definition: kmp.h:300