queue 742 ompi/mca/osc/pt2pt/osc_pt2pt_passive_target.c bool queue = false;
queue 749 ompi/mca/osc/pt2pt/osc_pt2pt_passive_target.c queue = true;
queue 759 ompi/mca/osc/pt2pt/osc_pt2pt_passive_target.c queue = !opal_atomic_compare_exchange_strong_32 (&module->lock_status, &_tmp_value, -1);
queue 762 ompi/mca/osc/pt2pt/osc_pt2pt_passive_target.c if (queue) {
queue 541 ompi/mca/pml/ob1/pml_ob1.c static void mca_pml_ob1_dump_frag_list(opal_list_t* queue, bool is_req)
queue 546 ompi/mca/pml/ob1/pml_ob1.c for( item = opal_list_get_first(queue);
queue 547 ompi/mca/pml/ob1/pml_ob1.c item != opal_list_get_end(queue);
queue 575 ompi/mca/pml/ob1/pml_ob1.c void mca_pml_ob1_dump_cant_match(mca_pml_ob1_recv_frag_t* queue)
queue 577 ompi/mca/pml/ob1/pml_ob1.c mca_pml_ob1_recv_frag_t* item = queue;
queue 589 ompi/mca/pml/ob1/pml_ob1.c } while( item != queue );
queue 78 ompi/mca/pml/ob1/pml_ob1_recvfrag.c append_frag_to_list(opal_list_t *queue, mca_btl_base_module_t *btl,
queue 86 ompi/mca/pml/ob1/pml_ob1_recvfrag.c opal_list_append(queue, (opal_list_item_t*)frag);
queue 92 ompi/mca/pml/ob1/pml_ob1_recvfrag.c append_frag_to_umq(custom_match_umq *queue, mca_btl_base_module_t *btl,
queue 100 ompi/mca/pml/ob1/pml_ob1_recvfrag.c custom_match_umq_append(queue, hdr->hdr_tag, hdr->hdr_src, frag);
queue 118 ompi/mca/pml/ob1/pml_ob1_recvfrag.c append_frag_to_ordered_list(mca_pml_ob1_recv_frag_t** queue,
queue 130 ompi/mca/pml/ob1/pml_ob1_recvfrag.c if( NULL == *queue ) { /* no pending fragments yet */
queue 131 ompi/mca/pml/ob1/pml_ob1_recvfrag.c *queue = frag;
queue 135 ompi/mca/pml/ob1/pml_ob1_recvfrag.c prior = *queue;
queue 148 ompi/mca/pml/ob1/pml_ob1_recvfrag.c (d1 > d2) && (prior != *queue) );
queue 192 ompi/mca/pml/ob1/pml_ob1_recvfrag.c if( abs(parent->hdr.hdr_match.hdr_seq - seq) < abs((*queue)->hdr.hdr_match.hdr_seq - seq))
queue 193 ompi/mca/pml/ob1/pml_ob1_recvfrag.c *queue = parent;
queue 228 ompi/mca/pml/ob1/pml_ob1_recvfrag.c if( next == *queue )
queue 229 ompi/mca/pml/ob1/pml_ob1_recvfrag.c *queue = parent;
queue 237 ompi/mca/pml/ob1/pml_ob1_recvfrag.c remove_head_from_ordered_list(mca_pml_ob1_recv_frag_t** queue)
queue 239 ompi/mca/pml/ob1/pml_ob1_recvfrag.c mca_pml_ob1_recv_frag_t* frag = *queue;
queue 241 ompi/mca/pml/ob1/pml_ob1_recvfrag.c if( NULL == *queue )
queue 248 ompi/mca/pml/ob1/pml_ob1_recvfrag.c *queue = NULL;
queue 251 ompi/mca/pml/ob1/pml_ob1_recvfrag.c *queue = (mca_pml_ob1_recv_frag_t*)frag->super.super.opal_list_next;
queue 259 ompi/mca/pml/ob1/pml_ob1_recvfrag.c *queue = (mca_pml_ob1_recv_frag_t*)range;
queue 703 ompi/mca/pml/ob1/pml_ob1_recvfrag.c static inline mca_pml_ob1_recv_request_t* get_posted_recv(opal_list_t *queue)
queue 705 ompi/mca/pml/ob1/pml_ob1_recvfrag.c if(opal_list_get_size(queue) == 0)
queue 708 ompi/mca/pml/ob1/pml_ob1_recvfrag.c return (mca_pml_ob1_recv_request_t*)opal_list_get_first(queue);
queue 712 ompi/mca/pml/ob1/pml_ob1_recvfrag.c opal_list_t *queue,
queue 717 ompi/mca/pml/ob1/pml_ob1_recvfrag.c if(opal_list_get_end(queue) == i)
queue 743 ompi/mca/pml/ob1/pml_ob1_recvfrag.c opal_list_t *queue;
queue 749 ompi/mca/pml/ob1/pml_ob1_recvfrag.c queue = &comm->wild_receives;
queue 753 ompi/mca/pml/ob1/pml_ob1_recvfrag.c queue = &proc->specific_receives;
queue 759 ompi/mca/pml/ob1/pml_ob1_recvfrag.c opal_list_remove_item(queue, (opal_list_item_t*)(*match));
queue 765 ompi/mca/pml/ob1/pml_ob1_recvfrag.c *match = get_next_posted_recv(queue, *match);
queue 178 ompi/mca/pml/ob1/pml_ob1_recvfrag.h void append_frag_to_ordered_list(mca_pml_ob1_recv_frag_t** queue,
queue 182 ompi/mca/pml/ob1/pml_ob1_recvfrag.h extern void mca_pml_ob1_dump_cant_match(mca_pml_ob1_recv_frag_t* queue);
queue 1083 ompi/mca/pml/ob1/pml_ob1_recvreq.c static inline void append_recv_req_to_queue(opal_list_t *queue,
queue 1086 ompi/mca/pml/ob1/pml_ob1_recvreq.c opal_list_append(queue, (opal_list_item_t*)req);
queue 1240 ompi/mca/pml/ob1/pml_ob1_recvreq.c opal_list_t *queue;
queue 1272 ompi/mca/pml/ob1/pml_ob1_recvreq.c queue = &ob1_comm->wild_receives;
queue 1292 ompi/mca/pml/ob1/pml_ob1_recvreq.c queue = &proc->specific_receives;
queue 1310 ompi/mca/pml/ob1/pml_ob1_recvreq.c append_recv_req_to_queue(queue, req);
queue 84 opal/mca/btl/smcuda/btl_smcuda.h volatile void **queue;
queue 269 opal/mca/btl/smcuda/btl_smcuda.h fifo->queue = (volatile void **) VIRTUAL2RELATIVE(fifo->queue_recv);
queue 290 opal/mca/btl/smcuda/btl_smcuda.h volatile void **q = (volatile void **) RELATIVE2VIRTUAL(fifo->queue);
queue 1399 opal/mca/event/libevent2022/libevent/event.c event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr)
queue 1405 opal/mca/event/libevent2022/libevent/event.c while ((cb = TAILQ_FIRST(&queue->deferred_cb_list))) {
queue 1407 opal/mca/event/libevent2022/libevent/event.c TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
queue 1408 opal/mca/event/libevent2022/libevent/event.c --queue->active_count;
queue 1409 opal/mca/event/libevent2022/libevent/event.c UNLOCK_DEFERRED_QUEUE(queue);
queue 1413 opal/mca/event/libevent2022/libevent/event.c LOCK_DEFERRED_QUEUE(queue);
queue 2361 opal/mca/event/libevent2022/libevent/event.c event_deferred_cb_cancel(struct deferred_cb_queue *queue,
queue 2364 opal/mca/event/libevent2022/libevent/event.c if (!queue) {
queue 2366 opal/mca/event/libevent2022/libevent/event.c queue = &current_base->defer_queue;
queue 2371 opal/mca/event/libevent2022/libevent/event.c LOCK_DEFERRED_QUEUE(queue);
queue 2373 opal/mca/event/libevent2022/libevent/event.c TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
queue 2374 opal/mca/event/libevent2022/libevent/event.c --queue->active_count;
queue 2377 opal/mca/event/libevent2022/libevent/event.c UNLOCK_DEFERRED_QUEUE(queue);
queue 2381 opal/mca/event/libevent2022/libevent/event.c event_deferred_cb_schedule(struct deferred_cb_queue *queue,
queue 2384 opal/mca/event/libevent2022/libevent/event.c if (!queue) {
queue 2386 opal/mca/event/libevent2022/libevent/event.c queue = &current_base->defer_queue;
queue 2391 opal/mca/event/libevent2022/libevent/event.c LOCK_DEFERRED_QUEUE(queue);
queue 2394 opal/mca/event/libevent2022/libevent/event.c TAILQ_INSERT_TAIL(&queue->deferred_cb_list, cb, cb_next);
queue 2395 opal/mca/event/libevent2022/libevent/event.c ++queue->active_count;
queue 2396 opal/mca/event/libevent2022/libevent/event.c if (queue->notify_fn)
queue 2397 opal/mca/event/libevent2022/libevent/event.c queue->notify_fn(queue, queue->notify_arg);
queue 2399 opal/mca/event/libevent2022/libevent/event.c UNLOCK_DEFERRED_QUEUE(queue);
queue 2526 opal/mca/event/libevent2022/libevent/event.c event_queue_remove(struct event_base *base, struct event *ev, int queue)
queue 2530 opal/mca/event/libevent2022/libevent/event.c if (!(ev->ev_flags & queue)) {
queue 2532 opal/mca/event/libevent2022/libevent/event.c ev, EV_SOCK_ARG(ev->ev_fd), queue);
queue 2539 opal/mca/event/libevent2022/libevent/event.c ev->ev_flags &= ~queue;
queue 2540 opal/mca/event/libevent2022/libevent/event.c switch (queue) {
queue 2560 opal/mca/event/libevent2022/libevent/event.c event_errx(1, "%s: unknown queue %x", __func__, queue);
queue 2596 opal/mca/event/libevent2022/libevent/event.c event_queue_insert(struct event_base *base, struct event *ev, int queue)
queue 2600 opal/mca/event/libevent2022/libevent/event.c if (ev->ev_flags & queue) {
queue 2602 opal/mca/event/libevent2022/libevent/event.c if (queue & EVLIST_ACTIVE)
queue 2606 opal/mca/event/libevent2022/libevent/event.c ev, EV_SOCK_ARG(ev->ev_fd), queue);
queue 2613 opal/mca/event/libevent2022/libevent/event.c ev->ev_flags |= queue;
queue 2614 opal/mca/event/libevent2022/libevent/event.c switch (queue) {
queue 2633 opal/mca/event/libevent2022/libevent/event.c event_errx(1, "%s: unknown queue %x", __func__, queue);
queue 414 opal/mca/event/libevent2022/libevent/test/regress_thread.c struct deferred_cb_queue *queue;
queue 438 opal/mca/event/libevent2022/libevent/test/regress_thread.c event_deferred_cb_schedule(data->queue, &data->cbs[i]);
queue 467 opal/mca/event/libevent2022/libevent/test/regress_thread.c struct deferred_cb_queue *queue;
queue 471 opal/mca/event/libevent2022/libevent/test/regress_thread.c queue = event_base_get_deferred_cb_queue(data->base);
queue 472 opal/mca/event/libevent2022/libevent/test/regress_thread.c tt_assert(queue);
queue 475 opal/mca/event/libevent2022/libevent/test/regress_thread.c deferred_data[i].queue = queue;
queue 628 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c pmix_ptl_queue_t *queue = (pmix_ptl_queue_t*)cbdata;
queue 632 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c PMIX_ACQUIRE_OBJECT(queue);
queue 634 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c if (NULL == queue->peer || queue->peer->sd < 0 ||
queue 635 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c NULL == queue->peer->info || NULL == queue->peer->nptr) {
queue 637 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c if (NULL != queue->buf) {
queue 638 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c PMIX_RELEASE(queue->buf);
queue 640 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c PMIX_RELEASE(queue);
queue 647 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c (queue->peer)->info->pname.nspace,
queue 648 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c (queue->peer)->info->pname.rank, (queue->tag));
queue 650 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c if (NULL == queue->buf) {
queue 652 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c PMIX_RELEASE(queue);
queue 658 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c snd->hdr.tag = htonl(queue->tag);
queue 659 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c snd->hdr.nbytes = htonl((queue->buf)->bytes_used);
queue 660 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c snd->data = (queue->buf);
queue 666 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c if (NULL == (queue->peer)->send_msg) {
queue 667 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c (queue->peer)->send_msg = snd;
queue 670 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c pmix_list_append(&(queue->peer)->send_queue, &snd->super);
queue 673 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c if (!(queue->peer)->send_ev_active) {
queue 674 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c (queue->peer)->send_ev_active = true;
queue 675 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c PMIX_POST_OBJECT(queue->peer);
queue 676 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c pmix_event_add(&(queue->peer)->send_event, 0);
queue 678 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/base/ptl_base_sendrecv.c PMIX_RELEASE(queue);
queue 900 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c pmix_ptl_queue_t *queue = (pmix_ptl_queue_t*)cbdata;
queue 904 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c PMIX_ACQUIRE_OBJECT(queue);
queue 906 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c if (NULL == queue->peer || queue->peer->sd < 0 ||
queue 907 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c NULL == queue->peer->info || NULL == queue->peer->nptr) {
queue 909 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c PMIX_RELEASE(queue);
queue 912 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c PMIX_POST_OBJECT(queue);
queue 919 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c (queue->peer)->info->pname.nspace,
queue 920 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c (queue->peer)->info->pname.rank, (queue->tag));
queue 924 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c snd->hdr.tag = htonl(queue->tag);
queue 925 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c snd->hdr.nbytes = htonl((queue->buf)->bytes_used);
queue 926 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c snd->data = (queue->buf);
queue 932 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c if (NULL == (queue->peer)->send_msg) {
queue 933 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c (queue->peer)->send_msg = snd;
queue 936 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c pmix_list_append(&(queue->peer)->send_queue, &snd->super);
queue 939 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c if (!(queue->peer)->send_ev_active) {
queue 940 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c (queue->peer)->send_ev_active = true;
queue 941 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c PMIX_POST_OBJECT(queue->peer);
queue 942 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c pmix_event_add(&(queue->peer)->send_event, 0);
queue 944 opal/mca/pmix/pmix4x/pmix/src/mca/ptl/usock/ptl_usock.c PMIX_RELEASE(queue);
queue 67 orte/mca/ras/gridengine/ras_gridengine_module.c char buf[1024], *tok, *num, *queue, *arch, *ptr, *tmp;
queue 98 orte/mca/ras/gridengine/ras_gridengine_module.c queue = strtok_r(NULL, " \n", &tok);
queue 164 orte/mca/ras/gridengine/ras_gridengine_module.c char buf[1024], *tok, *name, *num, *queue, *arch;
queue 179 orte/mca/ras/gridengine/ras_gridengine_module.c queue = strtok_r(NULL, " \n", &tok);
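
The osc/pt2pt hits at lines 742-762 of osc_pt2pt_passive_target.c show a recurring shape in this tree: try to take an exclusive lock by compare-and-swapping a status word and, if the swap fails, mark the request to be queued instead of spinning. A minimal sketch of that pattern with C11 atomics follows; the names lock_status and must_queue_exclusive are illustrative, not the actual Open MPI symbols.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative lock word: 0 = unlocked, -1 = exclusively held,
     * positive values would count shared holders. */
    static _Atomic int32_t lock_status = 0;

    /* Try to move the lock word from "unlocked" to "exclusively held".
     * Returns true when the caller must queue the request instead. */
    static bool must_queue_exclusive(void)
    {
        int32_t expected = 0;
        return !atomic_compare_exchange_strong(&lock_status, &expected, -1);
    }

    int main(void)
    {
        if (must_queue_exclusive()) {
            /* real code would append the lock request to a pending list here */
            puts("lock busy: request queued");
        } else {
            puts("exclusive lock acquired");
            atomic_store(&lock_status, 0);   /* release */
        }
        return 0;
    }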
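The pml/ob1 references around pml_ob1_recvreq.c:1240-1310 and pml_ob1_recvfrag.c:743-765 pick the posted-receive queue by source: wildcard receives go on the communicator-wide wild_receives list, everything else on the per-peer specific_receives list. The toy version below keeps only that queue selection, using a hand-rolled singly linked list rather than opal_list_t; the helper names are made up for the example.

    #include <stdio.h>

    #define ANY_SOURCE (-1)             /* stands in for MPI_ANY_SOURCE */
    #define NUM_PEERS  4

    typedef struct recv_req {
        int source;
        struct recv_req *next;
    } recv_req_t;

    typedef struct { recv_req_t *head, *tail; } req_list_t;

    static req_list_t wild_receives;                /* communicator-wide wildcard queue */
    static req_list_t specific_receives[NUM_PEERS]; /* one queue per peer               */

    static void list_append(req_list_t *queue, recv_req_t *req)
    {
        req->next = NULL;
        if (queue->tail) queue->tail->next = req;
        else queue->head = req;
        queue->tail = req;
    }

    /* Pick the queue the way ob1 does: wildcard receives go to the shared
     * list, everything else to the per-peer list for its source rank. */
    static void post_receive(recv_req_t *req)
    {
        req_list_t *queue = (ANY_SOURCE == req->source)
                          ? &wild_receives
                          : &specific_receives[req->source];
        list_append(queue, req);
    }

    int main(void)
    {
        recv_req_t a = { .source = ANY_SOURCE }, b = { .source = 2 };
        post_receive(&a);
        post_receive(&b);
        printf("wildcard queued: %d, peer 2 queued: %d\n",
               wild_receives.head != NULL, specific_receives[2].head != NULL);
        return 0;
    }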
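The btl_smcuda.h lines store the FIFO queue pointer as an offset (VIRTUAL2RELATIVE / RELATIVE2VIRTUAL) so that processes mapping the same shared-memory segment at different virtual addresses can still exchange pointers. A rough sketch of that conversion follows; unlike the real macros, which pull the segment base out of the BTL component state, the base is passed explicitly here.

    #include <stdint.h>
    #include <stdio.h>

    /* Convert between a locally mapped virtual address and an offset that is
     * meaningful to every process attached to the same shared segment. */
    #define VIRTUAL2RELATIVE(base, ptr) ((uintptr_t)(ptr) - (uintptr_t)(base))
    #define RELATIVE2VIRTUAL(base, off) ((void *)((uintptr_t)(base) + (off)))

    int main(void)
    {
        char segment[64];              /* stands in for the mmap'ed region */
        void *item = &segment[40];     /* some object inside the segment   */

        uintptr_t off = VIRTUAL2RELATIVE(segment, item);
        void *back    = RELATIVE2VIRTUAL(segment, off);

        printf("offset=%zu round-trip ok=%d\n", (size_t)off, back == item);
        return 0;
    }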
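The event.c hits between lines 1399 and 2399 are libevent 2.0's deferred-callback queue: scheduling locks the queue, appends to a TAILQ, bumps active_count, and calls an optional notify_fn so the owning loop wakes up, while processing pops entries under the lock and runs each callback with the lock dropped. The stand-alone imitation below follows that shape with a pthread mutex and <sys/queue.h>; the struct layout is simplified and is not the libevent API.

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/queue.h>

    struct deferred_cb {
        TAILQ_ENTRY(deferred_cb) cb_next;
        void (*cb_fn)(void *arg);
        void *cb_arg;
    };

    struct deferred_cb_queue {
        pthread_mutex_t lock;
        int active_count;
        TAILQ_HEAD(, deferred_cb) deferred_cb_list;
        void (*notify_fn)(struct deferred_cb_queue *q, void *arg); /* optional wakeup */
        void *notify_arg;
    };

    /* Mirrors the schedule pattern: lock, append, count, notify, unlock. */
    static void cb_schedule(struct deferred_cb_queue *q, struct deferred_cb *cb)
    {
        pthread_mutex_lock(&q->lock);
        TAILQ_INSERT_TAIL(&q->deferred_cb_list, cb, cb_next);
        ++q->active_count;
        if (q->notify_fn)
            q->notify_fn(q, q->notify_arg);
        pthread_mutex_unlock(&q->lock);
    }

    /* Mirrors the processing loop: pop under the lock, run the callback unlocked. */
    static void cb_process(struct deferred_cb_queue *q)
    {
        struct deferred_cb *cb;
        pthread_mutex_lock(&q->lock);
        while ((cb = TAILQ_FIRST(&q->deferred_cb_list)) != NULL) {
            TAILQ_REMOVE(&q->deferred_cb_list, cb, cb_next);
            --q->active_count;
            pthread_mutex_unlock(&q->lock);
            cb->cb_fn(cb->cb_arg);          /* run without holding the lock */
            pthread_mutex_lock(&q->lock);
        }
        pthread_mutex_unlock(&q->lock);
    }

    static void hello(void *arg) { printf("deferred: %s\n", (const char *)arg); }

    int main(void)
    {
        struct deferred_cb_queue q;
        memset(&q, 0, sizeof(q));
        pthread_mutex_init(&q.lock, NULL);
        TAILQ_INIT(&q.deferred_cb_list);

        struct deferred_cb cb = { .cb_fn = hello, .cb_arg = "hi" };
        cb_schedule(&q, &cb);
        cb_process(&q);
        return 0;
    }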
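ptl_base_sendrecv.c and ptl_usock.c repeat the same send path: header fields are byte-swapped with htonl(), and the new message either becomes the peer's in-flight send_msg or is appended to the peer's send_queue, after which the send event is armed if it is not already active. The reduced sketch below keeps only that "one message in flight, rest queued" logic; the types and helper names are invented for illustration.

    #include <arpa/inet.h>   /* htonl */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the PMIx send message and peer objects. */
    typedef struct send_msg {
        uint32_t tag_be;          /* header fields stored big-endian, as on the wire */
        uint32_t nbytes_be;
        const void *data;
        struct send_msg *next;
    } send_msg_t;

    typedef struct peer {
        send_msg_t *send_msg;     /* message currently being written */
        send_msg_t *queue_head;   /* waiting messages, FIFO          */
        send_msg_t *queue_tail;
        bool send_ev_active;      /* would correspond to the armed send event */
    } peer_t;

    /* Queue a message: take the "in flight" slot if free, else append. */
    static void queue_send(peer_t *p, uint32_t tag, const void *buf, uint32_t len)
    {
        send_msg_t *snd = calloc(1, sizeof(*snd));
        if (NULL == snd) return;
        snd->tag_be = htonl(tag);
        snd->nbytes_be = htonl(len);
        snd->data = buf;

        if (NULL == p->send_msg) {
            p->send_msg = snd;                            /* nothing in flight: send now */
        } else {
            if (p->queue_tail) p->queue_tail->next = snd; /* else append to FIFO */
            else p->queue_head = snd;
            p->queue_tail = snd;
        }
        if (!p->send_ev_active) {
            p->send_ev_active = true;                     /* real code arms the send event */
            printf("send event armed for tag %u\n", tag);
        }
    }

    int main(void)
    {
        peer_t peer = {0};
        queue_send(&peer, 1, "first", 5);   /* becomes the in-flight message */
        queue_send(&peer, 2, "second", 6);  /* lands on the FIFO behind it   */
        return 0;
    }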
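Finally, the ras_gridengine_module.c lines tokenize each $PE_HOSTFILE entry with strtok_r(), peeling off the host name, slot count, queue instance, and architecture columns in turn. A small stand-alone parser in the same spirit, assuming the usual "host slots queue arch" column layout and with error handling trimmed:

    #define _POSIX_C_SOURCE 200809L
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        /* An example $PE_HOSTFILE entry: "<host> <slots> <queue>@<host> <arch>" */
        char buf[1024] = "node001 4 all.q@node001 lx-amd64\n";
        char *tok = NULL;

        char *name  = strtok_r(buf,  " \n", &tok);   /* host name      */
        char *num   = strtok_r(NULL, " \n", &tok);   /* slot count     */
        char *queue = strtok_r(NULL, " \n", &tok);   /* queue instance */
        char *arch  = strtok_r(NULL, " \n", &tok);   /* architecture   */

        if (name && num && queue && arch) {
            printf("host=%s slots=%d queue=%s arch=%s\n",
                   name, atoi(num), queue, arch);
        }
        return 0;
    }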