root/opal/mca/event/libevent2022/libevent/buffer.c


DEFINITIONS

This source file includes the following definitions.
  1. evbuffer_chain_new
  2. evbuffer_chain_free
  3. evbuffer_free_all_chains
  4. evbuffer_chains_all_empty
  5. evbuffer_chains_all_empty
  6. evbuffer_free_trailing_empty_chains
  7. evbuffer_chain_insert
  8. evbuffer_chain_insert_new
  9. _evbuffer_chain_pin
  10. _evbuffer_chain_unpin
  11. evbuffer_new
  12. evbuffer_set_flags
  13. evbuffer_clear_flags
  14. _evbuffer_incref
  15. _evbuffer_incref_and_lock
  16. evbuffer_defer_callbacks
  17. evbuffer_enable_locking
  18. evbuffer_set_parent
  19. evbuffer_run_callbacks
  20. evbuffer_invoke_callbacks
  21. evbuffer_deferred_callback
  22. evbuffer_remove_all_callbacks
  23. _evbuffer_decref_and_unlock
  24. evbuffer_free
  25. evbuffer_lock
  26. evbuffer_unlock
  27. evbuffer_get_length
  28. evbuffer_get_contiguous_space
  29. evbuffer_reserve_space
  30. advance_last_with_data
  31. evbuffer_commit_space
  32. HAS_PINNED_R
  33. ZERO_CHAIN
  34. PRESERVE_PINNED
  35. RESTORE_PINNED
  36. COPY_CHAIN
  37. APPEND_CHAIN
  38. PREPEND_CHAIN
  39. evbuffer_add_buffer
  40. evbuffer_prepend_buffer
  41. evbuffer_drain
  42. evbuffer_remove
  43. evbuffer_copyout
  44. evbuffer_remove_buffer
  45. evbuffer_pullup
  46. evbuffer_readline
  47. evbuffer_strchr
  48. find_eol_char
  49. evbuffer_find_eol_char
  50. evbuffer_strspn
  51. evbuffer_getchr
  52. evbuffer_search_eol
  53. evbuffer_readln
  54. evbuffer_add
  55. evbuffer_prepend
  56. evbuffer_chain_align
  57. evbuffer_chain_should_realign
  58. evbuffer_expand_singlechain
  59. _evbuffer_expand_fast
  60. evbuffer_expand
  61. _evbuffer_read_setup_vecs
  62. get_n_bytes_readable_on_socket
  63. evbuffer_read
  64. evbuffer_readfile
  65. evbuffer_write_iovec
  66. evbuffer_write_sendfile
  67. evbuffer_write_atmost
  68. evbuffer_write
  69. evbuffer_find
  70. evbuffer_ptr_set
  71. evbuffer_ptr_memcmp
  72. evbuffer_search
  73. evbuffer_search_range
  74. evbuffer_peek
  75. evbuffer_add_vprintf
  76. evbuffer_add_printf
  77. evbuffer_add_reference
  78. evbuffer_add_file
  79. evbuffer_setcb
  80. evbuffer_add_cb
  81. evbuffer_remove_cb_entry
  82. evbuffer_remove_cb
  83. evbuffer_cb_set_flags
  84. evbuffer_cb_clear_flags
  85. evbuffer_freeze
  86. evbuffer_unfreeze
  87. evbuffer_cb_suspend
  88. evbuffer_cb_unsuspend
  89. _evbuffer_testing_use_sendfile
  90. _evbuffer_testing_use_mmap
  91. _evbuffer_testing_use_linear_file_access
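
EXAMPLE

As a quick orientation, the sketch below shows how a caller typically uses the
public evbuffer API implemented in this file.  It is not part of buffer.c: a
minimal, hypothetical demo that assumes only the installed libevent 2.x header
<event2/buffer.h> and a link against the event library (e.g. -levent).

    #include <stdio.h>
    #include <event2/buffer.h>

    int
    main(void)
    {
            struct evbuffer *buf = evbuffer_new();    /* see evbuffer_new below */
            char out[16];
            int n;

            if (buf == NULL)
                    return 1;

            /* Append bytes to the end of the buffer (evbuffer_add). */
            evbuffer_add(buf, "hello world\n", 12);
            printf("buffered: %zu bytes\n", evbuffer_get_length(buf));

            /* Copy out and drain the bytes just read (evbuffer_remove). */
            n = evbuffer_remove(buf, out, sizeof(out) - 1);
            if (n >= 0) {
                    out[n] = '\0';
                    printf("read back: %s", out);
            }

            evbuffer_free(buf);                       /* see evbuffer_free below */
            return 0;
    }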

   1 /*
   2  * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
   3  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
   4  *
   5  * Redistribution and use in source and binary forms, with or without
   6  * modification, are permitted provided that the following conditions
   7  * are met:
   8  * 1. Redistributions of source code must retain the above copyright
   9  *    notice, this list of conditions and the following disclaimer.
  10  * 2. Redistributions in binary form must reproduce the above copyright
  11  *    notice, this list of conditions and the following disclaimer in the
  12  *    documentation and/or other materials provided with the distribution.
  13  * 3. The name of the author may not be used to endorse or promote products
  14  *    derived from this software without specific prior written permission.
  15  *
  16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  26  */
  27 
  28 #include "event2/event-config.h"
  29 
  30 #ifdef WIN32
  31 #include <winsock2.h>
  32 #include <windows.h>
  33 #include <io.h>
  34 #endif
  35 
  36 #ifdef _EVENT_HAVE_VASPRINTF
  37 /* If we have vasprintf, we need to define this before we include stdio.h. */
  38 #define _GNU_SOURCE
  39 #endif
  40 
  41 #include <sys/types.h>
  42 
  43 #ifdef _EVENT_HAVE_SYS_TIME_H
  44 #include <sys/time.h>
  45 #endif
  46 
  47 #ifdef _EVENT_HAVE_SYS_SOCKET_H
  48 #include <sys/socket.h>
  49 #endif
  50 
  51 #ifdef _EVENT_HAVE_SYS_UIO_H
  52 #include <sys/uio.h>
  53 #endif
  54 
  55 #ifdef _EVENT_HAVE_SYS_IOCTL_H
  56 #include <sys/ioctl.h>
  57 #endif
  58 
  59 #ifdef _EVENT_HAVE_SYS_MMAN_H
  60 #include <sys/mman.h>
  61 #endif
  62 
  63 #ifdef _EVENT_HAVE_SYS_SENDFILE_H
  64 #include <sys/sendfile.h>
  65 #endif
  66 
  67 #include <errno.h>
  68 #include <stdio.h>
  69 #include <stdlib.h>
  70 #include <string.h>
  71 #ifdef _EVENT_HAVE_STDARG_H
  72 #include <stdarg.h>
  73 #endif
  74 #ifdef _EVENT_HAVE_UNISTD_H
  75 #include <unistd.h>
  76 #endif
  77 #include <limits.h>
  78 
  79 #include "event2/event.h"
  80 #include "event2/buffer.h"
  81 #include "event2/buffer_compat.h"
  82 #include "event2/bufferevent.h"
  83 #include "event2/bufferevent_compat.h"
  84 #include "event2/bufferevent_struct.h"
  85 #include "event2/thread.h"
  86 #include "event2/event-config.h"
  87 #include "log-internal.h"
  88 #include "mm-internal.h"
  89 #include "util-internal.h"
  90 #include "evthread-internal.h"
  91 #include "evbuffer-internal.h"
  92 #include "bufferevent-internal.h"
  93 
  94 /* some systems do not have MAP_FAILED */
  95 #ifndef MAP_FAILED
  96 #define MAP_FAILED      ((void *)-1)
  97 #endif
  98 
  99 /* send file support */
 100 #if defined(_EVENT_HAVE_SYS_SENDFILE_H) && defined(_EVENT_HAVE_SENDFILE) && defined(__linux__)
 101 #define USE_SENDFILE            1
 102 #define SENDFILE_IS_LINUX       1
 103 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__FreeBSD__)
 104 #define USE_SENDFILE            1
 105 #define SENDFILE_IS_FREEBSD     1
 106 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__APPLE__)
 107 #define USE_SENDFILE            1
 108 #define SENDFILE_IS_MACOSX      1
 109 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
 110 #define USE_SENDFILE            1
 111 #define SENDFILE_IS_SOLARIS     1
 112 #endif
 113 
 114 #ifdef USE_SENDFILE
 115 static int use_sendfile = 1;
 116 #endif
 117 #ifdef _EVENT_HAVE_MMAP
 118 static int use_mmap = 1;
 119 #endif
 120 
 121 
 122 /* Mask of user-selectable callback flags. */
 123 #define EVBUFFER_CB_USER_FLAGS      0xffff
 124 /* Mask of all internal-use-only flags. */
 125 #define EVBUFFER_CB_INTERNAL_FLAGS  0xffff0000
 126 
 127 /* Flag set if the callback is using the cb_obsolete function pointer  */
 128 #define EVBUFFER_CB_OBSOLETE           0x00040000
 129 
 130 /* evbuffer_chain support */
 131 #define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
 132 #define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
 133             0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
 134 
 135 #define CHAIN_PINNED(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
 136 #define CHAIN_PINNED_R(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)
 137 
 138 static void evbuffer_chain_align(struct evbuffer_chain *chain);
 139 static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
 140     size_t datalen);
 141 static void evbuffer_deferred_callback(struct deferred_cb *cb, void *arg);
 142 static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
 143     const struct evbuffer_ptr *pos, const char *mem, size_t len);
 144 static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
 145     size_t datlen);
 146 
 147 #ifdef WIN32
 148 static int evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd,
 149     ev_ssize_t howmuch);
 150 #else
 151 #define evbuffer_readfile evbuffer_read
 152 #endif
 153 
 154 static struct evbuffer_chain *
 155 evbuffer_chain_new(size_t size)
 156 {
 157         struct evbuffer_chain *chain;
 158         size_t to_alloc;
 159 
 160         if (size > EVBUFFER_CHAIN_MAX - EVBUFFER_CHAIN_SIZE)
 161                 return (NULL);
 162 
 163         size += EVBUFFER_CHAIN_SIZE;
 164 
 165         /* get the next largest memory that can hold the buffer */
 166         if (size < EVBUFFER_CHAIN_MAX / 2) {
 167                 to_alloc = MIN_BUFFER_SIZE;
 168                 while (to_alloc < size) {
 169                         to_alloc <<= 1;
 170                 }
 171         } else {
 172                 to_alloc = size;
 173         }
 174 
 175         /* we get everything in one chunk */
 176         if ((chain = mm_malloc(to_alloc)) == NULL)
 177                 return (NULL);
 178 
 179         memset(chain, 0, EVBUFFER_CHAIN_SIZE);
 180 
 181         chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;
 182 
 183         /* this way we can manipulate the buffer to different addresses,
 184          * which is required for mmap for example.
 185          */
 186         chain->buffer = EVBUFFER_CHAIN_EXTRA(u_char, chain);
 187 
 188         return (chain);
 189 }
 190 
 191 static inline void
 192 evbuffer_chain_free(struct evbuffer_chain *chain)
 193 {
 194         if (CHAIN_PINNED(chain)) {
 195                 chain->flags |= EVBUFFER_DANGLING;
 196                 return;
 197         }
 198         if (chain->flags & (EVBUFFER_MMAP|EVBUFFER_SENDFILE|
 199                 EVBUFFER_REFERENCE)) {
 200                 if (chain->flags & EVBUFFER_REFERENCE) {
 201                         struct evbuffer_chain_reference *info =
 202                             EVBUFFER_CHAIN_EXTRA(
 203                                     struct evbuffer_chain_reference,
 204                                     chain);
 205                         if (info->cleanupfn)
 206                                 (*info->cleanupfn)(chain->buffer,
 207                                     chain->buffer_len,
 208                                     info->extra);
 209                 }
 210 #ifdef _EVENT_HAVE_MMAP
 211                 if (chain->flags & EVBUFFER_MMAP) {
 212                         struct evbuffer_chain_fd *info =
 213                             EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
 214                                 chain);
 215                         if (munmap(chain->buffer, chain->buffer_len) == -1)
 216                                 event_warn("%s: munmap failed", __func__);
 217                         if (close(info->fd) == -1)
 218                                 event_warn("%s: close(%d) failed",
 219                                     __func__, info->fd);
 220                 }
 221 #endif
 222 #ifdef USE_SENDFILE
 223                 if (chain->flags & EVBUFFER_SENDFILE) {
 224                         struct evbuffer_chain_fd *info =
 225                             EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
 226                                 chain);
 227                         if (close(info->fd) == -1)
 228                                 event_warn("%s: close(%d) failed",
 229                                     __func__, info->fd);
 230                 }
 231 #endif
 232         }
 233 
 234         mm_free(chain);
 235 }
 236 
 237 static void
 238 evbuffer_free_all_chains(struct evbuffer_chain *chain)
 239 {
 240         struct evbuffer_chain *next;
 241         for (; chain; chain = next) {
 242                 next = chain->next;
 243                 evbuffer_chain_free(chain);
 244         }
 245 }
 246 
 247 #ifndef NDEBUG
 248 static int
 249 evbuffer_chains_all_empty(struct evbuffer_chain *chain)
 250 {
 251         for (; chain; chain = chain->next) {
 252                 if (chain->off)
 253                         return 0;
 254         }
 255         return 1;
 256 }
 257 #else
 258 /* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
 259 "unused variable" warnings. */
 260 static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
 261         return 1;
 262 }
 263 #endif
 264 
 265 /* Free all trailing chains in 'buf' that are neither pinned nor empty, prior
 266  * to replacing them all with a new chain.  Return a pointer to the place
 267  * where the new chain will go.
 268  *
 269  * Internal; requires lock.  The caller must fix up buf->last and buf->first
 270  * as needed; they might have been freed.
 271  */
 272 static struct evbuffer_chain **
 273 evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
 274 {
 275         struct evbuffer_chain **ch = buf->last_with_datap;
 276         /* Find the first victim chain.  It might be *last_with_datap */
 277         while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
 278                 ch = &(*ch)->next;
 279         if (*ch) {
 280                 EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
 281                 evbuffer_free_all_chains(*ch);
 282                 *ch = NULL;
 283         }
 284         return ch;
 285 }
 286 
 287 /* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
 288  * chains as necessary.  Requires lock.  Does not schedule callbacks.
 289  */
 290 static void
 291 evbuffer_chain_insert(struct evbuffer *buf,
 292     struct evbuffer_chain *chain)
 293 {
 294         ASSERT_EVBUFFER_LOCKED(buf);
 295         if (*buf->last_with_datap == NULL) {
 296                 /* There are no chains with data on the buffer at all. */
 297                 EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
 298                 EVUTIL_ASSERT(buf->first == NULL);
 299                 buf->first = buf->last = chain;
 300         } else {
 301                 struct evbuffer_chain **ch = buf->last_with_datap;
 302                 /* Find the first victim chain.  It might be *last_with_datap */
 303                 while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
 304                         ch = &(*ch)->next;
 305                 if (*ch == NULL) {
 306                         /* There is no victim; just append this new chain. */
 307                         buf->last->next = chain;
 308                         if (chain->off)
 309                                 buf->last_with_datap = &buf->last->next;
 310                 } else {
 311                         /* Replace all victim chains with this chain. */
 312                         EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
 313                         evbuffer_free_all_chains(*ch);
 314                         *ch = chain;
 315                 }
 316                 buf->last = chain;
 317         }
 318         buf->total_len += chain->off;
 319 }
 320 
 321 static inline struct evbuffer_chain *
 322 evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
 323 {
 324         struct evbuffer_chain *chain;
 325         if ((chain = evbuffer_chain_new(datlen)) == NULL)
 326                 return NULL;
 327         evbuffer_chain_insert(buf, chain);
 328         return chain;
 329 }
 330 
 331 void
 332 _evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag)
 333 {
 334         EVUTIL_ASSERT((chain->flags & flag) == 0);
 335         chain->flags |= flag;
 336 }
 337 
 338 void
 339 _evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag)
 340 {
 341         EVUTIL_ASSERT((chain->flags & flag) != 0);
 342         chain->flags &= ~flag;
 343         if (chain->flags & EVBUFFER_DANGLING)
 344                 evbuffer_chain_free(chain);
 345 }
 346 
 347 struct evbuffer *
 348 evbuffer_new(void)
 349 {
 350         struct evbuffer *buffer;
 351 
 352         buffer = mm_calloc(1, sizeof(struct evbuffer));
 353         if (buffer == NULL)
 354                 return (NULL);
 355 
 356         TAILQ_INIT(&buffer->callbacks);
 357         buffer->refcnt = 1;
 358         buffer->last_with_datap = &buffer->first;
 359 
 360         return (buffer);
 361 }
 362 
 363 int
 364 evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
 365 {
 366         EVBUFFER_LOCK(buf);
 367         buf->flags |= (ev_uint32_t)flags;
 368         EVBUFFER_UNLOCK(buf);
 369         return 0;
 370 }
 371 
 372 int
 373 evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
 374 {
 375         EVBUFFER_LOCK(buf);
 376         buf->flags &= ~(ev_uint32_t)flags;
 377         EVBUFFER_UNLOCK(buf);
 378         return 0;
 379 }
 380 
 381 void
 382 _evbuffer_incref(struct evbuffer *buf)
 383 {
 384         EVBUFFER_LOCK(buf);
 385         ++buf->refcnt;
 386         EVBUFFER_UNLOCK(buf);
 387 }
 388 
 389 void
 390 _evbuffer_incref_and_lock(struct evbuffer *buf)
 391 {
 392         EVBUFFER_LOCK(buf);
 393         ++buf->refcnt;
 394 }
 395 
 396 int
 397 evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
 398 {
 399         EVBUFFER_LOCK(buffer);
 400         buffer->cb_queue = event_base_get_deferred_cb_queue(base);
 401         buffer->deferred_cbs = 1;
 402         event_deferred_cb_init(&buffer->deferred,
 403             evbuffer_deferred_callback, buffer);
 404         EVBUFFER_UNLOCK(buffer);
 405         return 0;
 406 }
 407 
 408 int
 409 evbuffer_enable_locking(struct evbuffer *buf, void *lock)
 410 {
 411 #ifdef _EVENT_DISABLE_THREAD_SUPPORT
 412         return -1;
 413 #else
 414         if (buf->lock)
 415                 return -1;
 416 
 417         if (!lock) {
 418                 EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
 419                 if (!lock)
 420                         return -1;
 421                 buf->lock = lock;
 422                 buf->own_lock = 1;
 423         } else {
 424                 buf->lock = lock;
 425                 buf->own_lock = 0;
 426         }
 427 
 428         return 0;
 429 #endif
 430 }
 431 
 432 void
 433 evbuffer_set_parent(struct evbuffer *buf, struct bufferevent *bev)
 434 {
 435         EVBUFFER_LOCK(buf);
 436         buf->parent = bev;
 437         EVBUFFER_UNLOCK(buf);
 438 }
 439 
 440 static void
 441 evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
 442 {
 443         struct evbuffer_cb_entry *cbent, *next;
 444         struct evbuffer_cb_info info;
 445         size_t new_size;
 446         ev_uint32_t mask, masked_val;
 447         int clear = 1;
 448 
 449         if (running_deferred) {
 450                 mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
 451                 masked_val = EVBUFFER_CB_ENABLED;
 452         } else if (buffer->deferred_cbs) {
 453                 mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
 454                 masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
 455                 /* Don't zero-out n_add/n_del, since the deferred callbacks
 456                    will want to see them. */
 457                 clear = 0;
 458         } else {
 459                 mask = EVBUFFER_CB_ENABLED;
 460                 masked_val = EVBUFFER_CB_ENABLED;
 461         }
 462 
 463         ASSERT_EVBUFFER_LOCKED(buffer);
 464 
 465         if (TAILQ_EMPTY(&buffer->callbacks)) {
 466                 buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
 467                 return;
 468         }
 469         if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
 470                 return;
 471 
 472         new_size = buffer->total_len;
 473         info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
 474         info.n_added = buffer->n_add_for_cb;
 475         info.n_deleted = buffer->n_del_for_cb;
 476         if (clear) {
 477                 buffer->n_add_for_cb = 0;
 478                 buffer->n_del_for_cb = 0;
 479         }
 480         for (cbent = TAILQ_FIRST(&buffer->callbacks);
 481              cbent != TAILQ_END(&buffer->callbacks);
 482              cbent = next) {
 483                 /* Get the 'next' pointer now in case this callback decides
 484                  * to remove itself or something. */
 485                 next = TAILQ_NEXT(cbent, next);
 486 
 487                 if ((cbent->flags & mask) != masked_val)
 488                         continue;
 489 
 490                 if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
 491                         cbent->cb.cb_obsolete(buffer,
 492                             info.orig_size, new_size, cbent->cbarg);
 493                 else
 494                         cbent->cb.cb_func(buffer, &info, cbent->cbarg);
 495         }
 496 }
 497 
 498 void
 499 evbuffer_invoke_callbacks(struct evbuffer *buffer)
 500 {
 501         if (TAILQ_EMPTY(&buffer->callbacks)) {
 502                 buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
 503                 return;
 504         }
 505 
 506         if (buffer->deferred_cbs) {
 507                 if (buffer->deferred.queued)
 508                         return;
 509                 _evbuffer_incref_and_lock(buffer);
 510                 if (buffer->parent)
 511                         bufferevent_incref(buffer->parent);
 512                 EVBUFFER_UNLOCK(buffer);
 513                 event_deferred_cb_schedule(buffer->cb_queue, &buffer->deferred);
 514         }
 515 
 516         evbuffer_run_callbacks(buffer, 0);
 517 }
 518 
 519 static void
 520 evbuffer_deferred_callback(struct deferred_cb *cb, void *arg)
 521 {
 522         struct bufferevent *parent = NULL;
 523         struct evbuffer *buffer = arg;
 524 
 525         /* XXXX It would be better to run these callbacks without holding the
 526          * lock */
 527         EVBUFFER_LOCK(buffer);
 528         parent = buffer->parent;
 529         evbuffer_run_callbacks(buffer, 1);
 530         _evbuffer_decref_and_unlock(buffer);
 531         if (parent)
 532                 bufferevent_decref(parent);
 533 }
 534 
 535 static void
 536 evbuffer_remove_all_callbacks(struct evbuffer *buffer)
 537 {
 538         struct evbuffer_cb_entry *cbent;
 539 
 540         while ((cbent = TAILQ_FIRST(&buffer->callbacks))) {
 541             TAILQ_REMOVE(&buffer->callbacks, cbent, next);
 542             mm_free(cbent);
 543         }
 544 }
 545 
 546 void
 547 _evbuffer_decref_and_unlock(struct evbuffer *buffer)
 548 {
 549         struct evbuffer_chain *chain, *next;
 550         ASSERT_EVBUFFER_LOCKED(buffer);
 551 
 552         EVUTIL_ASSERT(buffer->refcnt > 0);
 553 
 554         if (--buffer->refcnt > 0) {
 555                 EVBUFFER_UNLOCK(buffer);
 556                 return;
 557         }
 558 
 559         for (chain = buffer->first; chain != NULL; chain = next) {
 560                 next = chain->next;
 561                 evbuffer_chain_free(chain);
 562         }
 563         evbuffer_remove_all_callbacks(buffer);
 564         if (buffer->deferred_cbs)
 565                 event_deferred_cb_cancel(buffer->cb_queue, &buffer->deferred);
 566 
 567         EVBUFFER_UNLOCK(buffer);
 568         if (buffer->own_lock)
 569                 EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
 570         mm_free(buffer);
 571 }
 572 
 573 void
 574 evbuffer_free(struct evbuffer *buffer)
 575 {
 576         EVBUFFER_LOCK(buffer);
 577         _evbuffer_decref_and_unlock(buffer);
 578 }
 579 
 580 void
 581 evbuffer_lock(struct evbuffer *buf)
 582 {
 583         EVBUFFER_LOCK(buf);
 584 }
 585 
 586 void
 587 evbuffer_unlock(struct evbuffer *buf)
 588 {
 589         EVBUFFER_UNLOCK(buf);
 590 }
 591 
 592 size_t
 593 evbuffer_get_length(const struct evbuffer *buffer)
 594 {
 595         size_t result;
 596 
 597         EVBUFFER_LOCK(buffer);
 598 
 599         result = (buffer->total_len);
 600 
 601         EVBUFFER_UNLOCK(buffer);
 602 
 603         return result;
 604 }
 605 
 606 size_t
 607 evbuffer_get_contiguous_space(const struct evbuffer *buf)
 608 {
 609         struct evbuffer_chain *chain;
 610         size_t result;
 611 
 612         EVBUFFER_LOCK(buf);
 613         chain = buf->first;
 614         result = (chain != NULL ? chain->off : 0);
 615         EVBUFFER_UNLOCK(buf);
 616 
 617         return result;
 618 }
 619 
 620 int
 621 evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
 622     struct evbuffer_iovec *vec, int n_vecs)
 623 {
 624         struct evbuffer_chain *chain, **chainp;
 625         int n = -1;
 626 
 627         EVBUFFER_LOCK(buf);
 628         if (buf->freeze_end)
 629                 goto done;
 630         if (n_vecs < 1)
 631                 goto done;
 632         if (n_vecs == 1) {
 633                 if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
 634                         goto done;
 635 
 636                 vec[0].iov_base = CHAIN_SPACE_PTR(chain);
 637                 vec[0].iov_len = (size_t) CHAIN_SPACE_LEN(chain);
 638                 EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
 639                 n = 1;
 640         } else {
 641                 if (_evbuffer_expand_fast(buf, size, n_vecs)<0)
 642                         goto done;
 643                 n = _evbuffer_read_setup_vecs(buf, size, vec, n_vecs,
 644                                 &chainp, 0);
 645         }
 646 
 647 done:
 648         EVBUFFER_UNLOCK(buf);
 649         return n;
 650 
 651 }
 652 
 653 static int
 654 advance_last_with_data(struct evbuffer *buf)
 655 {
 656         int n = 0;
 657         ASSERT_EVBUFFER_LOCKED(buf);
 658 
 659         if (!*buf->last_with_datap)
 660                 return 0;
 661 
 662         while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
 663                 buf->last_with_datap = &(*buf->last_with_datap)->next;
 664                 ++n;
 665         }
 666         return n;
 667 }
 668 
 669 int
 670 evbuffer_commit_space(struct evbuffer *buf,
 671     struct evbuffer_iovec *vec, int n_vecs)
 672 {
 673         struct evbuffer_chain *chain, **firstchainp, **chainp;
 674         int result = -1;
 675         size_t added = 0;
 676         int i;
 677 
 678         EVBUFFER_LOCK(buf);
 679 
 680         if (buf->freeze_end)
 681                 goto done;
 682         if (n_vecs == 0) {
 683                 result = 0;
 684                 goto done;
 685         } else if (n_vecs == 1 &&
 686             (buf->last && vec[0].iov_base == (void*)CHAIN_SPACE_PTR(buf->last))) {
 687                 /* The user only got or used one chain; it might not
 688                  * be the first one with space in it. */
 689                 if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
 690                         goto done;
 691                 buf->last->off += vec[0].iov_len;
 692                 added = vec[0].iov_len;
 693                 if (added)
 694                         advance_last_with_data(buf);
 695                 goto okay;
 696         }
 697 
 698         /* Advance 'firstchain' to the first chain with space in it. */
 699         firstchainp = buf->last_with_datap;
 700         if (!*firstchainp)
 701                 goto done;
 702         if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
 703                 firstchainp = &(*firstchainp)->next;
 704         }
 705 
 706         chain = *firstchainp;
 707         /* pass 1: make sure that the pointers and lengths of vecs[] are in
 708          * bounds before we try to commit anything. */
 709         for (i=0; i<n_vecs; ++i) {
 710                 if (!chain)
 711                         goto done;
 712                 if (vec[i].iov_base != (void*)CHAIN_SPACE_PTR(chain) ||
 713                     (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
 714                         goto done;
 715                 chain = chain->next;
 716         }
 717         /* pass 2: actually adjust all the chains. */
 718         chainp = firstchainp;
 719         for (i=0; i<n_vecs; ++i) {
 720                 (*chainp)->off += vec[i].iov_len;
 721                 added += vec[i].iov_len;
 722                 if (vec[i].iov_len) {
 723                         buf->last_with_datap = chainp;
 724                 }
 725                 chainp = &(*chainp)->next;
 726         }
 727 
 728 okay:
 729         buf->total_len += added;
 730         buf->n_add_for_cb += added;
 731         result = 0;
 732         evbuffer_invoke_callbacks(buf);
 733 
 734 done:
 735         EVBUFFER_UNLOCK(buf);
 736         return result;
 737 }
 738 
 739 static inline int
 740 HAS_PINNED_R(struct evbuffer *buf)
 741 {
 742         return (buf->last && CHAIN_PINNED_R(buf->last));
 743 }
 744 
 745 static inline void
 746 ZERO_CHAIN(struct evbuffer *dst)
 747 {
 748         ASSERT_EVBUFFER_LOCKED(dst);
 749         dst->first = NULL;
 750         dst->last = NULL;
 751         dst->last_with_datap = &(dst)->first;
 752         dst->total_len = 0;
 753 }
 754 
 755 /* Prepares the contents of src to be moved to another buffer by removing
 756  * read-pinned chains. The first pinned chain is saved in first, and the
 757  * last in last. If src has no read-pinned chains, first and last are set
 758  * to NULL. */
 759 static int
 760 PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
 761                 struct evbuffer_chain **last)
 762 {
 763         struct evbuffer_chain *chain, **pinned;
 764 
 765         ASSERT_EVBUFFER_LOCKED(src);
 766 
 767         if (!HAS_PINNED_R(src)) {
 768                 *first = *last = NULL;
 769                 return 0;
 770         }
 771 
 772         pinned = src->last_with_datap;
 773         if (!CHAIN_PINNED_R(*pinned))
 774                 pinned = &(*pinned)->next;
 775         EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
 776         chain = *first = *pinned;
 777         *last = src->last;
 778 
 779         /* If there's data in the first pinned chain, we need to allocate
 780          * a new chain and copy the data over. */
 781         if (chain->off) {
 782                 struct evbuffer_chain *tmp;
 783 
 784                 EVUTIL_ASSERT(pinned == src->last_with_datap);
 785                 tmp = evbuffer_chain_new(chain->off);
 786                 if (!tmp)
 787                         return -1;
 788                 memcpy(tmp->buffer, chain->buffer + chain->misalign,
 789                         chain->off);
 790                 tmp->off = chain->off;
 791                 *src->last_with_datap = tmp;
 792                 src->last = tmp;
 793                 chain->misalign += chain->off;
 794                 chain->off = 0;
 795         } else {
 796                 src->last = *src->last_with_datap;
 797                 *pinned = NULL;
 798         }
 799 
 800         return 0;
 801 }
 802 
 803 static inline void
 804 RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
 805                 struct evbuffer_chain *last)
 806 {
 807         ASSERT_EVBUFFER_LOCKED(src);
 808 
 809         if (!pinned) {
 810                 ZERO_CHAIN(src);
 811                 return;
 812         }
 813 
 814         src->first = pinned;
 815         src->last = last;
 816         src->last_with_datap = &src->first;
 817         src->total_len = 0;
 818 }
 819 
 820 static inline void
 821 COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
 822 {
 823         ASSERT_EVBUFFER_LOCKED(dst);
 824         ASSERT_EVBUFFER_LOCKED(src);
 825         dst->first = src->first;
 826         if (src->last_with_datap == &src->first)
 827                 dst->last_with_datap = &dst->first;
 828         else
 829                 dst->last_with_datap = src->last_with_datap;
 830         dst->last = src->last;
 831         dst->total_len = src->total_len;
 832 }
 833 
 834 static void
 835 APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
 836 {
 837         ASSERT_EVBUFFER_LOCKED(dst);
 838         ASSERT_EVBUFFER_LOCKED(src);
 839         dst->last->next = src->first;
 840         if (src->last_with_datap == &src->first)
 841                 dst->last_with_datap = &dst->last->next;
 842         else
 843                 dst->last_with_datap = src->last_with_datap;
 844         dst->last = src->last;
 845         dst->total_len += src->total_len;
 846 }
 847 
 848 static void
 849 PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
 850 {
 851         ASSERT_EVBUFFER_LOCKED(dst);
 852         ASSERT_EVBUFFER_LOCKED(src);
 853         src->last->next = dst->first;
 854         dst->first = src->first;
 855         dst->total_len += src->total_len;
 856         if (*dst->last_with_datap == NULL) {
 857                 if (src->last_with_datap == &(src)->first)
 858                         dst->last_with_datap = &dst->first;
 859                 else
 860                         dst->last_with_datap = src->last_with_datap;
 861         } else if (dst->last_with_datap == &dst->first) {
 862                 dst->last_with_datap = &src->last->next;
 863         }
 864 }
 865 
 866 int
 867 evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
 868 {
 869         struct evbuffer_chain *pinned, *last;
 870         size_t in_total_len, out_total_len;
 871         int result = 0;
 872 
 873         EVBUFFER_LOCK2(inbuf, outbuf);
 874         in_total_len = inbuf->total_len;
 875         out_total_len = outbuf->total_len;
 876 
 877         if (in_total_len == 0 || outbuf == inbuf)
 878                 goto done;
 879 
 880         if (outbuf->freeze_end || inbuf->freeze_start) {
 881                 result = -1;
 882                 goto done;
 883         }
 884 
 885         if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
 886                 result = -1;
 887                 goto done;
 888         }
 889 
 890         if (out_total_len == 0) {
 891                 /* There might be an empty chain at the start of outbuf; free
 892                  * it. */
 893                 evbuffer_free_all_chains(outbuf->first);
 894                 COPY_CHAIN(outbuf, inbuf);
 895         } else {
 896                 APPEND_CHAIN(outbuf, inbuf);
 897         }
 898 
 899         RESTORE_PINNED(inbuf, pinned, last);
 900 
 901         inbuf->n_del_for_cb += in_total_len;
 902         outbuf->n_add_for_cb += in_total_len;
 903 
 904         evbuffer_invoke_callbacks(inbuf);
 905         evbuffer_invoke_callbacks(outbuf);
 906 
 907 done:
 908         EVBUFFER_UNLOCK2(inbuf, outbuf);
 909         return result;
 910 }
 911 
 912 int
 913 evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
 914 {
 915         struct evbuffer_chain *pinned, *last;
 916         size_t in_total_len, out_total_len;
 917         int result = 0;
 918 
 919         EVBUFFER_LOCK2(inbuf, outbuf);
 920 
 921         in_total_len = inbuf->total_len;
 922         out_total_len = outbuf->total_len;
 923 
 924         if (!in_total_len || inbuf == outbuf)
 925                 goto done;
 926 
 927         if (outbuf->freeze_start || inbuf->freeze_start) {
 928                 result = -1;
 929                 goto done;
 930         }
 931 
 932         if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
 933                 result = -1;
 934                 goto done;
 935         }
 936 
 937         if (out_total_len == 0) {
 938                 /* There might be an empty chain at the start of outbuf; free
 939                  * it. */
 940                 evbuffer_free_all_chains(outbuf->first);
 941                 COPY_CHAIN(outbuf, inbuf);
 942         } else {
 943                 PREPEND_CHAIN(outbuf, inbuf);
 944         }
 945 
 946         RESTORE_PINNED(inbuf, pinned, last);
 947 
 948         inbuf->n_del_for_cb += in_total_len;
 949         outbuf->n_add_for_cb += in_total_len;
 950 
 951         evbuffer_invoke_callbacks(inbuf);
 952         evbuffer_invoke_callbacks(outbuf);
 953 done:
 954         EVBUFFER_UNLOCK2(inbuf, outbuf);
 955         return result;
 956 }
 957 
 958 int
 959 evbuffer_drain(struct evbuffer *buf, size_t len)
 960 {
 961         struct evbuffer_chain *chain, *next;
 962         size_t remaining, old_len;
 963         int result = 0;
 964 
 965         EVBUFFER_LOCK(buf);
 966         old_len = buf->total_len;
 967 
 968         if (old_len == 0)
 969                 goto done;
 970 
 971         if (buf->freeze_start) {
 972                 result = -1;
 973                 goto done;
 974         }
 975 
 976         if (len >= old_len && !HAS_PINNED_R(buf)) {
 977                 len = old_len;
 978                 for (chain = buf->first; chain != NULL; chain = next) {
 979                         next = chain->next;
 980                         evbuffer_chain_free(chain);
 981                 }
 982 
 983                 ZERO_CHAIN(buf);
 984         } else {
 985                 if (len >= old_len)
 986                         len = old_len;
 987 
 988                 buf->total_len -= len;
 989                 remaining = len;
 990                 for (chain = buf->first;
 991                      remaining >= chain->off;
 992                      chain = next) {
 993                         next = chain->next;
 994                         remaining -= chain->off;
 995 
 996                         if (chain == *buf->last_with_datap) {
 997                                 buf->last_with_datap = &buf->first;
 998                         }
 999                         if (&chain->next == buf->last_with_datap)
1000                                 buf->last_with_datap = &buf->first;
1001 
1002                         if (CHAIN_PINNED_R(chain)) {
1003                                 EVUTIL_ASSERT(remaining == 0);
1004                                 chain->misalign += chain->off;
1005                                 chain->off = 0;
1006                                 break;
1007                         } else
1008                                 evbuffer_chain_free(chain);
1009                 }
1010 
1011                 buf->first = chain;
1012                 if (chain) {
1013                         EVUTIL_ASSERT(remaining <= chain->off);
1014                         chain->misalign += remaining;
1015                         chain->off -= remaining;
1016                 }
1017         }
1018 
1019         buf->n_del_for_cb += len;
1020         /* Tell someone about changes in this buffer */
1021         evbuffer_invoke_callbacks(buf);
1022 
1023 done:
1024         EVBUFFER_UNLOCK(buf);
1025         return result;
1026 }
1027 
1028 /* Reads data from an event buffer and drains the bytes read */
1029 int
1030 evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
1031 {
1032         ev_ssize_t n;
1033         EVBUFFER_LOCK(buf);
1034         n = evbuffer_copyout(buf, data_out, datlen);
1035         if (n > 0) {
1036                 if (evbuffer_drain(buf, n)<0)
1037                         n = -1;
1038         }
1039         EVBUFFER_UNLOCK(buf);
1040         return (int)n;
1041 }
1042 
1043 ev_ssize_t
1044 evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
1045 {
1046         /*XXX fails badly on sendfile case. */
1047         struct evbuffer_chain *chain;
1048         char *data = data_out;
1049         size_t nread;
1050         ev_ssize_t result = 0;
1051 
1052         EVBUFFER_LOCK(buf);
1053 
1054         chain = buf->first;
1055 
1056         if (datlen >= buf->total_len)
1057                 datlen = buf->total_len;
1058 
1059         if (datlen == 0)
1060                 goto done;
1061 
1062         if (buf->freeze_start) {
1063                 result = -1;
1064                 goto done;
1065         }
1066 
1067         nread = datlen;
1068 
1069         while (datlen && datlen >= chain->off) {
1070                 memcpy(data, chain->buffer + chain->misalign, chain->off);
1071                 data += chain->off;
1072                 datlen -= chain->off;
1073 
1074                 chain = chain->next;
1075                 EVUTIL_ASSERT(chain || datlen==0);
1076         }
1077 
1078         if (datlen) {
1079                 EVUTIL_ASSERT(chain);
1080                 EVUTIL_ASSERT(datlen <= chain->off);
1081                 memcpy(data, chain->buffer + chain->misalign, datlen);
1082         }
1083 
1084         result = nread;
1085 done:
1086         EVBUFFER_UNLOCK(buf);
1087         return result;
1088 }
1089 
 1090 /* Reads data from the src buffer to the dst buffer, avoiding memcpy
 1091  * where possible. */
1092 /*  XXXX should return ev_ssize_t */
1093 int
1094 evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
1095     size_t datlen)
1096 {
1097         /*XXX We should have an option to force this to be zero-copy.*/
1098 
1099         /*XXX can fail badly on sendfile case. */
1100         struct evbuffer_chain *chain, *previous;
1101         size_t nread = 0;
1102         int result;
1103 
1104         EVBUFFER_LOCK2(src, dst);
1105 
1106         chain = previous = src->first;
1107 
1108         if (datlen == 0 || dst == src) {
1109                 result = 0;
1110                 goto done;
1111         }
1112 
1113         if (dst->freeze_end || src->freeze_start) {
1114                 result = -1;
1115                 goto done;
1116         }
1117 
1118         /* short-cut if there is no more data buffered */
1119         if (datlen >= src->total_len) {
1120                 datlen = src->total_len;
1121                 evbuffer_add_buffer(dst, src);
1122                 result = (int)datlen; /*XXXX should return ev_ssize_t*/
1123                 goto done;
1124         }
1125 
1126         /* removes chains if possible */
1127         while (chain->off <= datlen) {
1128                 /* We can't remove the last with data from src unless we
1129                  * remove all chains, in which case we would have done the if
1130                  * block above */
1131                 EVUTIL_ASSERT(chain != *src->last_with_datap);
1132                 nread += chain->off;
1133                 datlen -= chain->off;
1134                 previous = chain;
1135                 if (src->last_with_datap == &chain->next)
1136                         src->last_with_datap = &src->first;
1137                 chain = chain->next;
1138         }
1139 
1140         if (nread) {
1141                 /* we can remove the chain */
1142                 struct evbuffer_chain **chp;
1143                 chp = evbuffer_free_trailing_empty_chains(dst);
1144 
1145                 if (dst->first == NULL) {
1146                         dst->first = src->first;
1147                 } else {
1148                         *chp = src->first;
1149                 }
1150                 dst->last = previous;
1151                 previous->next = NULL;
1152                 src->first = chain;
1153                 advance_last_with_data(dst);
1154 
1155                 dst->total_len += nread;
1156                 dst->n_add_for_cb += nread;
1157         }
1158 
1159         /* we know that there is more data in the src buffer than
1160          * we want to read, so we manually drain the chain */
1161         evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
1162         chain->misalign += datlen;
1163         chain->off -= datlen;
1164         nread += datlen;
1165 
1166         /* You might think we would want to increment dst->n_add_for_cb
1167          * here too.  But evbuffer_add above already took care of that.
1168          */
1169         src->total_len -= nread;
1170         src->n_del_for_cb += nread;
1171 
1172         if (nread) {
1173                 evbuffer_invoke_callbacks(dst);
1174                 evbuffer_invoke_callbacks(src);
1175         }
1176         result = (int)nread;/*XXXX should change return type */
1177 
1178 done:
1179         EVBUFFER_UNLOCK2(src, dst);
1180         return result;
1181 }
1182 
1183 unsigned char *
1184 evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
1185 {
1186         struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
1187         unsigned char *buffer, *result = NULL;
1188         ev_ssize_t remaining;
1189         int removed_last_with_data = 0;
1190         int removed_last_with_datap = 0;
1191 
1192         EVBUFFER_LOCK(buf);
1193 
1194         chain = buf->first;
1195 
1196         if (size < 0)
1197                 size = buf->total_len;
1198         /* if size > buf->total_len, we cannot guarantee to the user that she
1199          * is going to have a long enough buffer afterwards; so we return
1200          * NULL */
1201         if (size == 0 || (size_t)size > buf->total_len)
1202                 goto done;
1203 
1204         /* No need to pull up anything; the first size bytes are
1205          * already here. */
1206         if (chain->off >= (size_t)size) {
1207                 result = chain->buffer + chain->misalign;
1208                 goto done;
1209         }
1210 
1211         /* Make sure that none of the chains we need to copy from is pinned. */
1212         remaining = size - chain->off;
1213         EVUTIL_ASSERT(remaining >= 0);
1214         for (tmp=chain->next; tmp; tmp=tmp->next) {
1215                 if (CHAIN_PINNED(tmp))
1216                         goto done;
1217                 if (tmp->off >= (size_t)remaining)
1218                         break;
1219                 remaining -= tmp->off;
1220         }
1221 
1222         if (CHAIN_PINNED(chain)) {
1223                 size_t old_off = chain->off;
1224                 if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
1225                         /* not enough room at end of chunk. */
1226                         goto done;
1227                 }
1228                 buffer = CHAIN_SPACE_PTR(chain);
1229                 tmp = chain;
1230                 tmp->off = size;
1231                 size -= old_off;
1232                 chain = chain->next;
1233         } else if (chain->buffer_len - chain->misalign >= (size_t)size) {
1234                 /* already have enough space in the first chain */
1235                 size_t old_off = chain->off;
1236                 buffer = chain->buffer + chain->misalign + chain->off;
1237                 tmp = chain;
1238                 tmp->off = size;
1239                 size -= old_off;
1240                 chain = chain->next;
1241         } else {
1242                 if ((tmp = evbuffer_chain_new(size)) == NULL) {
1243                         event_warn("%s: out of memory", __func__);
1244                         goto done;
1245                 }
1246                 buffer = tmp->buffer;
1247                 tmp->off = size;
1248                 buf->first = tmp;
1249         }
1250 
1251         /* TODO(niels): deal with buffers that point to NULL like sendfile */
1252 
1253         /* Copy and free every chunk that will be entirely pulled into tmp */
1254         last_with_data = *buf->last_with_datap;
1255         for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
1256                 next = chain->next;
1257 
1258                 memcpy(buffer, chain->buffer + chain->misalign, chain->off);
1259                 size -= chain->off;
1260                 buffer += chain->off;
1261                 if (chain == last_with_data)
1262                         removed_last_with_data = 1;
1263                 if (&chain->next == buf->last_with_datap)
1264                         removed_last_with_datap = 1;
1265 
1266                 evbuffer_chain_free(chain);
1267         }
1268 
1269         if (chain != NULL) {
1270                 memcpy(buffer, chain->buffer + chain->misalign, size);
1271                 chain->misalign += size;
1272                 chain->off -= size;
1273         } else {
1274                 buf->last = tmp;
1275         }
1276 
1277         tmp->next = chain;
1278 
1279         if (removed_last_with_data) {
1280                 buf->last_with_datap = &buf->first;
1281         } else if (removed_last_with_datap) {
1282                 if (buf->first->next && buf->first->next->off)
1283                         buf->last_with_datap = &buf->first->next;
1284                 else
1285                         buf->last_with_datap = &buf->first;
1286         }
1287 
1288         result = (tmp->buffer + tmp->misalign);
1289 
1290 done:
1291         EVBUFFER_UNLOCK(buf);
1292         return result;
1293 }
1294 
1295 /*
1296  * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
 1297  * The returned buffer needs to be freed by the caller.
1298  */
1299 char *
1300 evbuffer_readline(struct evbuffer *buffer)
1301 {
1302         return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
1303 }
1304 
1305 static inline ev_ssize_t
1306 evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
1307 {
1308         struct evbuffer_chain *chain = it->_internal.chain;
1309         size_t i = it->_internal.pos_in_chain;
1310         while (chain != NULL) {
1311                 char *buffer = (char *)chain->buffer + chain->misalign;
1312                 char *cp = memchr(buffer+i, chr, chain->off-i);
1313                 if (cp) {
1314                         it->_internal.chain = chain;
1315                         it->_internal.pos_in_chain = cp - buffer;
1316                         it->pos += (cp - buffer - i);
1317                         return it->pos;
1318                 }
1319                 it->pos += chain->off - i;
1320                 i = 0;
1321                 chain = chain->next;
1322         }
1323 
1324         return (-1);
1325 }
1326 
1327 static inline char *
1328 find_eol_char(char *s, size_t len)
1329 {
1330 #define CHUNK_SZ 128
1331         /* Lots of benchmarking found this approach to be faster in practice
 1332          * than doing two memchrs over the whole buffer, doing a memchr on each
1333          * char of the buffer, or trying to emulate memchr by hand. */
1334         char *s_end, *cr, *lf;
1335         s_end = s+len;
1336         while (s < s_end) {
1337                 size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s);
1338                 cr = memchr(s, '\r', chunk);
1339                 lf = memchr(s, '\n', chunk);
1340                 if (cr) {
1341                         if (lf && lf < cr)
1342                                 return lf;
1343                         return cr;
1344                 } else if (lf) {
1345                         return lf;
1346                 }
1347                 s += CHUNK_SZ;
1348         }
1349 
1350         return NULL;
1351 #undef CHUNK_SZ
1352 }
1353 
1354 static ev_ssize_t
1355 evbuffer_find_eol_char(struct evbuffer_ptr *it)
1356 {
1357         struct evbuffer_chain *chain = it->_internal.chain;
1358         size_t i = it->_internal.pos_in_chain;
1359         while (chain != NULL) {
1360                 char *buffer = (char *)chain->buffer + chain->misalign;
1361                 char *cp = find_eol_char(buffer+i, chain->off-i);
1362                 if (cp) {
1363                         it->_internal.chain = chain;
1364                         it->_internal.pos_in_chain = cp - buffer;
1365                         it->pos += (cp - buffer) - i;
1366                         return it->pos;
1367                 }
1368                 it->pos += chain->off - i;
1369                 i = 0;
1370                 chain = chain->next;
1371         }
1372 
1373         return (-1);
1374 }
1375 
1376 static inline int
1377 evbuffer_strspn(
1378         struct evbuffer_ptr *ptr, const char *chrset)
1379 {
1380         int count = 0;
1381         struct evbuffer_chain *chain = ptr->_internal.chain;
1382         size_t i = ptr->_internal.pos_in_chain;
1383 
1384         if (!chain)
1385                 return -1;
1386 
1387         while (1) {
1388                 char *buffer = (char *)chain->buffer + chain->misalign;
1389                 for (; i < chain->off; ++i) {
1390                         const char *p = chrset;
1391                         while (*p) {
1392                                 if (buffer[i] == *p++)
1393                                         goto next;
1394                         }
1395                         ptr->_internal.chain = chain;
1396                         ptr->_internal.pos_in_chain = i;
1397                         ptr->pos += count;
1398                         return count;
1399                 next:
1400                         ++count;
1401                 }
1402                 i = 0;
1403 
1404                 if (! chain->next) {
1405                         ptr->_internal.chain = chain;
1406                         ptr->_internal.pos_in_chain = i;
1407                         ptr->pos += count;
1408                         return count;
1409                 }
1410 
1411                 chain = chain->next;
1412         }
1413 }
1414 
1415 
1416 static inline char
1417 evbuffer_getchr(struct evbuffer_ptr *it)
1418 {
1419         struct evbuffer_chain *chain = it->_internal.chain;
1420         size_t off = it->_internal.pos_in_chain;
1421 
1422         return chain->buffer[chain->misalign + off];
1423 }
1424 
1425 struct evbuffer_ptr
1426 evbuffer_search_eol(struct evbuffer *buffer,
1427     struct evbuffer_ptr *start, size_t *eol_len_out,
1428     enum evbuffer_eol_style eol_style)
1429 {
1430         struct evbuffer_ptr it, it2;
1431         size_t extra_drain = 0;
1432         int ok = 0;
1433 
1434         EVBUFFER_LOCK(buffer);
1435 
1436         if (start) {
1437                 memcpy(&it, start, sizeof(it));
1438         } else {
1439                 it.pos = 0;
1440                 it._internal.chain = buffer->first;
1441                 it._internal.pos_in_chain = 0;
1442         }
1443 
1444         /* the eol_style determines our first stop character and how many
1445          * characters we are going to drain afterwards. */
1446         switch (eol_style) {
1447         case EVBUFFER_EOL_ANY:
1448                 if (evbuffer_find_eol_char(&it) < 0)
1449                         goto done;
1450                 memcpy(&it2, &it, sizeof(it));
1451                 extra_drain = evbuffer_strspn(&it2, "\r\n");
1452                 break;
1453         case EVBUFFER_EOL_CRLF_STRICT: {
1454                 it = evbuffer_search(buffer, "\r\n", 2, &it);
1455                 if (it.pos < 0)
1456                         goto done;
1457                 extra_drain = 2;
1458                 break;
1459         }
1460         case EVBUFFER_EOL_CRLF:
1461                 while (1) {
1462                         if (evbuffer_find_eol_char(&it) < 0)
1463                                 goto done;
1464                         if (evbuffer_getchr(&it) == '\n') {
1465                                 extra_drain = 1;
1466                                 break;
1467                         } else if (!evbuffer_ptr_memcmp(
1468                                     buffer, &it, "\r\n", 2)) {
1469                                 extra_drain = 2;
1470                                 break;
1471                         } else {
1472                                 if (evbuffer_ptr_set(buffer, &it, 1,
1473                                         EVBUFFER_PTR_ADD)<0)
1474                                         goto done;
1475                         }
1476                 }
1477                 break;
1478         case EVBUFFER_EOL_LF:
1479                 if (evbuffer_strchr(&it, '\n') < 0)
1480                         goto done;
1481                 extra_drain = 1;
1482                 break;
1483         default:
1484                 goto done;
1485         }
1486 
1487         ok = 1;
1488 done:
1489         EVBUFFER_UNLOCK(buffer);
1490 
1491         if (!ok) {
1492                 it.pos = -1;
1493         }
1494         if (eol_len_out)
1495                 *eol_len_out = extra_drain;
1496 
1497         return it;
1498 }
1499 
1500 char *
1501 evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
1502                 enum evbuffer_eol_style eol_style)
1503 {
1504         struct evbuffer_ptr it;
1505         char *line;
1506         size_t n_to_copy=0, extra_drain=0;
1507         char *result = NULL;
1508 
1509         EVBUFFER_LOCK(buffer);
1510 
1511         if (buffer->freeze_start) {
1512                 goto done;
1513         }
1514 
1515         it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
1516         if (it.pos < 0)
1517                 goto done;
1518         n_to_copy = it.pos;
1519 
1520         if ((line = mm_malloc(n_to_copy+1)) == NULL) {
1521                 event_warn("%s: out of memory", __func__);
1522                 goto done;
1523         }
1524 
1525         evbuffer_remove(buffer, line, n_to_copy);
1526         line[n_to_copy] = '\0';
1527 
1528         evbuffer_drain(buffer, extra_drain);
1529         result = line;
1530 done:
1531         EVBUFFER_UNLOCK(buffer);
1532 
1533         if (n_read_out)
1534                 *n_read_out = result ? n_to_copy : 0;
1535 
1536         return result;
1537 }
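
     /* Illustrative sketch, not part of the original file: one way a caller
      * might consume complete lines with evbuffer_readln().  Only the
      * evbuffer_* calls are real API; the function name and variables are
      * hypothetical.
      */
     #if 0
     static void
     example_drain_lines(struct evbuffer *input)
     {
             char *line;
             size_t len;

             /* EVBUFFER_EOL_CRLF accepts "\r\n" or a bare "\n"; the
              * terminator is drained but not copied into the result. */
             while ((line = evbuffer_readln(input, &len, EVBUFFER_EOL_CRLF))) {
                     printf("line (%lu bytes): %s\n", (unsigned long)len, line);
                     free(line);
             }
     }
     #endif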
1538 
1539 #define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096
1540 
1541 /* Adds data to an event buffer */
1542 
1543 int
1544 evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
1545 {
1546         struct evbuffer_chain *chain, *tmp;
1547         const unsigned char *data = data_in;
1548         size_t remain, to_alloc;
1549         int result = -1;
1550 
1551         EVBUFFER_LOCK(buf);
1552 
1553         if (buf->freeze_end) {
1554                 goto done;
1555         }
1556         /* Prevent buf->total_len overflow */
1557         if (datlen > EV_SIZE_MAX - buf->total_len) {
1558                 goto done;
1559         }
1560 
1561         chain = buf->last;
1562 
1563         /* If there are no chains allocated for this buffer, allocate one
1564          * big enough to hold all the data. */
1565         if (chain == NULL) {
1566                 chain = evbuffer_chain_new(datlen);
1567                 if (!chain)
1568                         goto done;
1569                 evbuffer_chain_insert(buf, chain);
1570         }
1571 
1572         if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
1573                 /* Always true for mutable buffers */
1574                 EVUTIL_ASSERT(chain->misalign >= 0 &&
1575                     (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
1576                 remain = chain->buffer_len - (size_t)chain->misalign - chain->off;
1577                 if (remain >= datlen) {
1578                         /* there's enough space to hold all the data in the
1579                          * current last chain */
1580                         memcpy(chain->buffer + chain->misalign + chain->off,
1581                             data, datlen);
1582                         chain->off += datlen;
1583                         buf->total_len += datlen;
1584                         buf->n_add_for_cb += datlen;
1585                         goto out;
1586                 } else if (!CHAIN_PINNED(chain) &&
1587                     evbuffer_chain_should_realign(chain, datlen)) {
1588                         /* we can fit the data into the misalignment */
1589                         evbuffer_chain_align(chain);
1590 
1591                         memcpy(chain->buffer + chain->off, data, datlen);
1592                         chain->off += datlen;
1593                         buf->total_len += datlen;
1594                         buf->n_add_for_cb += datlen;
1595                         goto out;
1596                 }
1597         } else {
1598                 /* we cannot write any data to the last chain */
1599                 remain = 0;
1600         }
1601 
1602         /* we need to add another chain */
1603         to_alloc = chain->buffer_len;
1604         if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
1605                 to_alloc <<= 1;
1606         if (datlen > to_alloc)
1607                 to_alloc = datlen;
1608         tmp = evbuffer_chain_new(to_alloc);
1609         if (tmp == NULL)
1610                 goto done;
1611 
1612         if (remain) {
1613                 memcpy(chain->buffer + chain->misalign + chain->off,
1614                     data, remain);
1615                 chain->off += remain;
1616                 buf->total_len += remain;
1617                 buf->n_add_for_cb += remain;
1618         }
1619 
1620         data += remain;
1621         datlen -= remain;
1622 
1623         memcpy(tmp->buffer, data, datlen);
1624         tmp->off = datlen;
1625         evbuffer_chain_insert(buf, tmp);
1626         buf->n_add_for_cb += datlen;
1627 
1628 out:
1629         evbuffer_invoke_callbacks(buf);
1630         result = 0;
1631 done:
1632         EVBUFFER_UNLOCK(buf);
1633         return result;
1634 }
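
     /* Illustrative sketch, not part of the original file: typical use of
      * evbuffer_add() to append small pieces of data; the chain logic above
      * decides whether the last chain is reused, realigned, or extended.
      * The helper name is hypothetical.
      */
     #if 0
     static int
     example_append_kv(struct evbuffer *out, const char *key, const char *val)
     {
             if (evbuffer_add(out, key, strlen(key)) < 0 ||
                 evbuffer_add(out, ": ", 2) < 0 ||
                 evbuffer_add(out, val, strlen(val)) < 0 ||
                 evbuffer_add(out, "\r\n", 2) < 0)
                     return -1;
             return 0;
     }
     #endif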
1635 
1636 int
1637 evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
1638 {
1639         struct evbuffer_chain *chain, *tmp;
1640         int result = -1;
1641 
1642         EVBUFFER_LOCK(buf);
1643 
1644         if (buf->freeze_start) {
1645                 goto done;
1646         }
1647         if (datlen > EV_SIZE_MAX - buf->total_len) {
1648                 goto done;
1649         }
1650 
1651         chain = buf->first;
1652 
1653         if (chain == NULL) {
1654                 chain = evbuffer_chain_new(datlen);
1655                 if (!chain)
1656                         goto done;
1657                 evbuffer_chain_insert(buf, chain);
1658         }
1659 
1660         /* we cannot touch immutable buffers */
1661         if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
1662                 /* Always true for mutable buffers */
1663                 EVUTIL_ASSERT(chain->misalign >= 0 &&
1664                     (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
1665 
1666                 /* If this chain is empty, we can treat it as
1667                  * 'empty at the beginning' rather than 'empty at the end' */
1668                 if (chain->off == 0)
1669                         chain->misalign = chain->buffer_len;
1670 
1671                 if ((size_t)chain->misalign >= datlen) {
1672                         /* we have enough space to fit everything */
1673                         memcpy(chain->buffer + chain->misalign - datlen,
1674                             data, datlen);
1675                         chain->off += datlen;
1676                         chain->misalign -= datlen;
1677                         buf->total_len += datlen;
1678                         buf->n_add_for_cb += datlen;
1679                         goto out;
1680                 } else if (chain->misalign) {
1681                         /* we can only fit some of the data. */
1682                         memcpy(chain->buffer,
1683                             (char*)data + datlen - chain->misalign,
1684                             (size_t)chain->misalign);
1685                         chain->off += (size_t)chain->misalign;
1686                         buf->total_len += (size_t)chain->misalign;
1687                         buf->n_add_for_cb += (size_t)chain->misalign;
1688                         datlen -= (size_t)chain->misalign;
1689                         chain->misalign = 0;
1690                 }
1691         }
1692 
1693         /* we need to add another chain */
1694         if ((tmp = evbuffer_chain_new(datlen)) == NULL)
1695                 goto done;
1696         buf->first = tmp;
1697         if (buf->last_with_datap == &buf->first)
1698                 buf->last_with_datap = &tmp->next;
1699 
1700         tmp->next = chain;
1701 
1702         tmp->off = datlen;
1703         EVUTIL_ASSERT(datlen <= tmp->buffer_len);
1704         tmp->misalign = tmp->buffer_len - datlen;
1705 
1706         memcpy(tmp->buffer + tmp->misalign, data, datlen);
1707         buf->total_len += datlen;
1708         buf->n_add_for_cb += datlen;
1709 
1710 out:
1711         evbuffer_invoke_callbacks(buf);
1712         result = 0;
1713 done:
1714         EVBUFFER_UNLOCK(buf);
1715         return result;
1716 }
1717 
1718 /** Helper: realigns the memory in chain->buffer so that misalign is 0. */
1719 static void
1720 evbuffer_chain_align(struct evbuffer_chain *chain)
1721 {
1722         EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
1723         EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
1724         memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
1725         chain->misalign = 0;
1726 }
1727 
1728 #define MAX_TO_COPY_IN_EXPAND 4096
1729 #define MAX_TO_REALIGN_IN_EXPAND 2048
1730 
1731 /** Helper: return true iff we should realign chain to fit datalen bytes of
1732     data in it. */
1733 static int
1734 evbuffer_chain_should_realign(struct evbuffer_chain *chain,
1735     size_t datlen)
1736 {
1737         return chain->buffer_len - chain->off >= datlen &&
1738             (chain->off < chain->buffer_len / 2) &&
1739             (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
1740 }
1741 
1742 /* Expands the available space in the event buffer to at least datlen, all in
1743  * a single chunk.  Return that chunk. */
1744 static struct evbuffer_chain *
1745 evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
1746 {
1747         struct evbuffer_chain *chain, **chainp;
1748         struct evbuffer_chain *result = NULL;
1749         ASSERT_EVBUFFER_LOCKED(buf);
1750 
1751         chainp = buf->last_with_datap;
1752 
1753         /* XXX If *chainp is no longer writeable, but has enough space in its
1754          * misalign, this might be a bad idea: we could still use *chainp, not
1755          * (*chainp)->next. */
1756         if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
1757                 chainp = &(*chainp)->next;
1758 
1759         /* 'chain' now points to the first chain with writable space (if any)
1760          * We will either use it, realign it, replace it, or resize it. */
1761         chain = *chainp;
1762 
1763         if (chain == NULL ||
1764             (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
1765                 /* We can't use the last_with_data chain at all.  Just add a
1766                  * new one that's big enough. */
1767                 goto insert_new;
1768         }
1769 
1770         /* If we can fit all the data, then we don't have to do anything */
1771         if (CHAIN_SPACE_LEN(chain) >= datlen) {
1772                 result = chain;
1773                 goto ok;
1774         }
1775 
1776         /* If the chain is completely empty, just replace it by adding a new
1777          * empty chain. */
1778         if (chain->off == 0) {
1779                 goto insert_new;
1780         }
1781 
1782         /* If the misalignment plus the remaining space fulfills our data
1783          * needs, we could just force an alignment to happen.  Afterwards, we
1784          * have enough space.  But only do this if we're saving a lot of space
1785          * and not moving too much data.  Otherwise the space savings are
1786          * probably offset by the time lost in copying.
1787          */
1788         if (evbuffer_chain_should_realign(chain, datlen)) {
1789                 evbuffer_chain_align(chain);
1790                 result = chain;
1791                 goto ok;
1792         }
1793 
1794         /* At this point, we can either resize the last chunk with space in
1795          * it, use the next chunk after it, or add a new chunk.  If we add a new chunk, we waste
1796          * CHAIN_SPACE_LEN(chain) bytes in the former last chunk.  If we
1797          * resize, we have to copy chain->off bytes.
1798          */
1799 
1800         /* Would expanding this chunk be affordable and worthwhile? */
1801         if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
1802             chain->off > MAX_TO_COPY_IN_EXPAND ||
1803             (datlen >= EVBUFFER_CHAIN_MAX ||
1804                 EVBUFFER_CHAIN_MAX - datlen < chain->off)) {
1805                 /* It's not worth resizing this chain. Can the next one be
1806                  * used? */
1807                 if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
1808                         /* Yes, we can just use the next chain (which should
1809                          * be empty.) */
1810                         result = chain->next;
1811                         goto ok;
1812                 } else {
1813                         /* No; append a new chain (which will free all
1814                          * terminal empty chains.) */
1815                         goto insert_new;
1816                 }
1817         } else {
1818                 /* Okay, we're going to try to resize this chain: Not doing so
1819                  * would waste at least 1/8 of its current allocation, and we
1820                  * can do so without having to copy more than
1821                  * MAX_TO_COPY_IN_EXPAND bytes. */
1822                 /* figure out how much space we need */
1823                 size_t length = chain->off + datlen;
1824                 struct evbuffer_chain *tmp = evbuffer_chain_new(length);
1825                 if (tmp == NULL)
1826                         goto err;
1827 
1828                 /* copy the data over that we had so far */
1829                 tmp->off = chain->off;
1830                 memcpy(tmp->buffer, chain->buffer + chain->misalign,
1831                     chain->off);
1832                 /* fix up the list */
1833                 EVUTIL_ASSERT(*chainp == chain);
1834                 result = *chainp = tmp;
1835 
1836                 if (buf->last == chain)
1837                         buf->last = tmp;
1838 
1839                 tmp->next = chain->next;
1840                 evbuffer_chain_free(chain);
1841                 goto ok;
1842         }
1843 
1844 insert_new:
1845         result = evbuffer_chain_insert_new(buf, datlen);
1846         if (!result)
1847                 goto err;
1848 ok:
1849         EVUTIL_ASSERT(result);
1850         EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
1851 err:
1852         return result;
1853 }
1854 
1855 /* Make sure that datlen bytes are available for writing in the last n
1856  * chains.  Never copies or moves data. */
1857 int
1858 _evbuffer_expand_fast(struct evbuffer *buf, size_t datlen, int n)
1859 {
1860         struct evbuffer_chain *chain = buf->last, *tmp, *next;
1861         size_t avail;
1862         int used;
1863 
1864         ASSERT_EVBUFFER_LOCKED(buf);
1865         EVUTIL_ASSERT(n >= 2);
1866 
1867         if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
1868                 /* There is no last chunk, or we can't touch the last chunk.
1869                  * Just add a new chunk. */
1870                 chain = evbuffer_chain_new(datlen);
1871                 if (chain == NULL)
1872                         return (-1);
1873 
1874                 evbuffer_chain_insert(buf, chain);
1875                 return (0);
1876         }
1877 
1878         used = 0; /* number of chains we're using space in. */
1879         avail = 0; /* how much space they have. */
1880         /* How many bytes can we stick at the end of buffer as it is?  Iterate
1881          * over the chains at the end of the buffer, trying to see how much
1882          * space we have in the first n. */
1883         for (chain = *buf->last_with_datap; chain; chain = chain->next) {
1884                 if (chain->off) {
1885                         size_t space = (size_t) CHAIN_SPACE_LEN(chain);
1886                         EVUTIL_ASSERT(chain == *buf->last_with_datap);
1887                         if (space) {
1888                                 avail += space;
1889                                 ++used;
1890                         }
1891                 } else {
1892                         /* No data in chain; realign it. */
1893                         chain->misalign = 0;
1894                         avail += chain->buffer_len;
1895                         ++used;
1896                 }
1897                 if (avail >= datlen) {
1898                         /* There is already enough space.  Just return */
1899                         return (0);
1900                 }
1901                 if (used == n)
1902                         break;
1903         }
1904 
1905         /* There wasn't enough space in the first n chains with space in
1906          * them. Either add a new chain with enough space, or replace all
1907          * empty chains with one that has enough space, depending on n. */
1908         if (used < n) {
1909                 /* The loop ran off the end of the chains before it hit n
1910                  * chains; we can add another. */
1911                 EVUTIL_ASSERT(chain == NULL);
1912 
1913                 tmp = evbuffer_chain_new(datlen - avail);
1914                 if (tmp == NULL)
1915                         return (-1);
1916 
1917                 buf->last->next = tmp;
1918                 buf->last = tmp;
1919                 /* (we would only set last_with_data if we added the first
1920                  * chain. But if the buffer had no chains, we would have
1921                  * just allocated a new chain earlier) */
1922                 return (0);
1923         } else {
1924                 /* Nuke _all_ the empty chains. */
1925                 int rmv_all = 0; /* True iff we removed last_with_data. */
1926                 chain = *buf->last_with_datap;
1927                 if (!chain->off) {
1928                         EVUTIL_ASSERT(chain == buf->first);
1929                         rmv_all = 1;
1930                         avail = 0;
1931                 } else {
1932                         /* can't overflow, since only mutable chains have
1933                          * huge misaligns. */
1934                         avail = (size_t) CHAIN_SPACE_LEN(chain);
1935                         chain = chain->next;
1936                 }
1937 
1938 
1939                 for (; chain; chain = next) {
1940                         next = chain->next;
1941                         EVUTIL_ASSERT(chain->off == 0);
1942                         evbuffer_chain_free(chain);
1943                 }
1944                 EVUTIL_ASSERT(datlen >= avail);
1945                 tmp = evbuffer_chain_new(datlen - avail);
1946                 if (tmp == NULL) {
1947                         if (rmv_all) {
1948                                 ZERO_CHAIN(buf);
1949                         } else {
1950                                 buf->last = *buf->last_with_datap;
1951                                 (*buf->last_with_datap)->next = NULL;
1952                         }
1953                         return (-1);
1954                 }
1955 
1956                 if (rmv_all) {
1957                         buf->first = buf->last = tmp;
1958                         buf->last_with_datap = &buf->first;
1959                 } else {
1960                         (*buf->last_with_datap)->next = tmp;
1961                         buf->last = tmp;
1962                 }
1963                 return (0);
1964         }
1965 }
1966 
1967 int
1968 evbuffer_expand(struct evbuffer *buf, size_t datlen)
1969 {
1970         struct evbuffer_chain *chain;
1971 
1972         EVBUFFER_LOCK(buf);
1973         chain = evbuffer_expand_singlechain(buf, datlen);
1974         EVBUFFER_UNLOCK(buf);
1975         return chain ? 0 : -1;
1976 }
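
     /* Illustrative sketch, not part of the original file: callers normally
      * obtain writable space through evbuffer_reserve_space() and
      * evbuffer_commit_space(), which sit on top of the expansion helpers
      * above.  The helper name and the 4096-byte request are hypothetical.
      */
     #if 0
     static int
     example_reserve_and_fill(struct evbuffer *buf)
     {
             struct evbuffer_iovec v[2];
             int n, i;

             n = evbuffer_reserve_space(buf, 4096, v, 2);
             if (n <= 0)
                     return -1;
             for (i = 0; i < n; ++i)
                     memset(v[i].iov_base, 0, v[i].iov_len);
             /* Only committed extents become readable data. */
             return evbuffer_commit_space(buf, v, n);
     }
     #endif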
1977 
1978 /*
1979  * Reads data from a file descriptor into a buffer.
1980  */
1981 
1982 #if defined(_EVENT_HAVE_SYS_UIO_H) || defined(WIN32)
1983 #define USE_IOVEC_IMPL
1984 #endif
1985 
1986 #ifdef USE_IOVEC_IMPL
1987 
1988 #ifdef _EVENT_HAVE_SYS_UIO_H
1989 /* Number of iovecs we use for writev; fragmentation determines how much
1990  * we actually end up writing. */
1991 
1992 #define DEFAULT_WRITE_IOVEC 128
1993 
1994 #if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
1995 #define NUM_WRITE_IOVEC UIO_MAXIOV
1996 #elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
1997 #define NUM_WRITE_IOVEC IOV_MAX
1998 #else
1999 #define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
2000 #endif
2001 
2002 #define IOV_TYPE struct iovec
2003 #define IOV_PTR_FIELD iov_base
2004 #define IOV_LEN_FIELD iov_len
2005 #define IOV_LEN_TYPE size_t
2006 #else
2007 #define NUM_WRITE_IOVEC 16
2008 #define IOV_TYPE WSABUF
2009 #define IOV_PTR_FIELD buf
2010 #define IOV_LEN_FIELD len
2011 #define IOV_LEN_TYPE unsigned long
2012 #endif
2013 #endif
2014 #define NUM_READ_IOVEC 4
2015 
2016 #define EVBUFFER_MAX_READ       4096
2017 
2018 /** Helper function to figure out which space to use for reading data into
2019     an evbuffer.  Internal use only.
2020 
2021     @param buf The buffer to read into
2022     @param howmuch How much we want to read.
2023     @param vecs An array of two or more iovecs or WSABUFs.
2024     @param n_vecs_avail The length of vecs
2025     @param chainp A pointer to a variable to hold the first chain we're
2026       reading into.
2027     @param exact Boolean: if true, we do not provide more than 'howmuch'
2028       space in the vectors, even if more space is available.
2029     @return The number of buffers we're using.
2030  */
2031 int
2032 _evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
2033     struct evbuffer_iovec *vecs, int n_vecs_avail,
2034     struct evbuffer_chain ***chainp, int exact)
2035 {
2036         struct evbuffer_chain *chain;
2037         struct evbuffer_chain **firstchainp;
2038         size_t so_far;
2039         int i;
2040         ASSERT_EVBUFFER_LOCKED(buf);
2041 
2042         if (howmuch < 0)
2043                 return -1;
2044 
2045         so_far = 0;
2046         /* Let firstchain be the first chain with any space on it */
2047         firstchainp = buf->last_with_datap;
2048         if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
2049                 firstchainp = &(*firstchainp)->next;
2050         }
2051 
2052         chain = *firstchainp;
2053         for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) {
2054                 size_t avail = (size_t) CHAIN_SPACE_LEN(chain);
2055                 if (avail > (howmuch - so_far) && exact)
2056                         avail = howmuch - so_far;
2057                 vecs[i].iov_base = CHAIN_SPACE_PTR(chain);
2058                 vecs[i].iov_len = avail;
2059                 so_far += avail;
2060                 chain = chain->next;
2061         }
2062 
2063         *chainp = firstchainp;
2064         return i;
2065 }
2066 
2067 static int
2068 get_n_bytes_readable_on_socket(evutil_socket_t fd)
2069 {
2070 #if defined(FIONREAD) && defined(WIN32)
2071         unsigned long lng = EVBUFFER_MAX_READ;
2072         if (ioctlsocket(fd, FIONREAD, &lng) < 0)
2073                 return -1;
2074         /* Can overflow, but mostly harmlessly. XXXX */
2075         return (int)lng;
2076 #elif defined(FIONREAD)
2077         int n = EVBUFFER_MAX_READ;
2078         if (ioctl(fd, FIONREAD, &n) < 0)
2079                 return -1;
2080         return n;
2081 #else
2082         return EVBUFFER_MAX_READ;
2083 #endif
2084 }
2085 
2086 /* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
2087  * as howmuch? */
2088 int
2089 evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
2090 {
2091         struct evbuffer_chain **chainp;
2092         int n;
2093         int result;
2094 
2095 #ifdef USE_IOVEC_IMPL
2096         int nvecs, i, remaining;
2097 #else
2098         struct evbuffer_chain *chain;
2099         unsigned char *p;
2100 #endif
2101 
2102         EVBUFFER_LOCK(buf);
2103 
2104         if (buf->freeze_end) {
2105                 result = -1;
2106                 goto done;
2107         }
2108 
2109         n = get_n_bytes_readable_on_socket(fd);
2110         if (n <= 0 || n > EVBUFFER_MAX_READ)
2111                 n = EVBUFFER_MAX_READ;
2112         if (howmuch < 0 || howmuch > n)
2113                 howmuch = n;
2114 
2115 #ifdef USE_IOVEC_IMPL
2116         /* Since we can use iovecs, we're willing to use the last
2117          * NUM_READ_IOVEC chains. */
2118         if (_evbuffer_expand_fast(buf, howmuch, NUM_READ_IOVEC) == -1) {
2119                 result = -1;
2120                 goto done;
2121         } else {
2122                 IOV_TYPE vecs[NUM_READ_IOVEC];
2123 #ifdef _EVBUFFER_IOVEC_IS_NATIVE
2124                 nvecs = _evbuffer_read_setup_vecs(buf, howmuch, vecs,
2125                     NUM_READ_IOVEC, &chainp, 1);
2126 #else
2127                 /* We aren't using the native struct iovec.  Therefore,
2128                    we are on win32. */
2129                 struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
2130                 nvecs = _evbuffer_read_setup_vecs(buf, howmuch, ev_vecs, 2,
2131                     &chainp, 1);
2132 
2133                 for (i=0; i < nvecs; ++i)
2134                         WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
2135 #endif
2136 
2137 #ifdef WIN32
2138                 {
2139                         DWORD bytesRead;
2140                         DWORD flags=0;
2141                         if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
2142                                 /* The read failed. It might be a close,
2143                                  * or it might be an error. */
2144                                 if (WSAGetLastError() == WSAECONNABORTED)
2145                                         n = 0;
2146                                 else
2147                                         n = -1;
2148                         } else
2149                                 n = bytesRead;
2150                 }
2151 #else
2152                 n = readv(fd, vecs, nvecs);
2153 #endif
2154         }
2155 
2156 #else /*!USE_IOVEC_IMPL*/
2157         /* If we don't have FIONREAD, we might waste some space here */
2158         /* XXX we _will_ waste some space here if there is any space left
2159          * over on buf->last. */
2160         if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
2161                 result = -1;
2162                 goto done;
2163         }
2164 
2165         /* We can append new data at this point */
2166         p = chain->buffer + chain->misalign + chain->off;
2167 
2168 #ifndef WIN32
2169         n = read(fd, p, howmuch);
2170 #else
2171         n = recv(fd, p, howmuch, 0);
2172 #endif
2173 #endif /* USE_IOVEC_IMPL */
2174 
2175         if (n == -1) {
2176                 result = -1;
2177                 goto done;
2178         }
2179         if (n == 0) {
2180                 result = 0;
2181                 goto done;
2182         }
2183 
2184 #ifdef USE_IOVEC_IMPL
2185         remaining = n;
2186         for (i=0; i < nvecs; ++i) {
2187                 /* can't overflow, since only mutable chains have
2188                  * huge misaligns. */
2189                 size_t space = (size_t) CHAIN_SPACE_LEN(*chainp);
2190                 /* XXXX This is a kludge that can waste space in perverse
2191                  * situations. */
2192                 if (space > EVBUFFER_CHAIN_MAX)
2193                         space = EVBUFFER_CHAIN_MAX;
2194                 if ((ev_ssize_t)space < remaining) {
2195                         (*chainp)->off += space;
2196                         remaining -= (int)space;
2197                 } else {
2198                         (*chainp)->off += remaining;
2199                         buf->last_with_datap = chainp;
2200                         break;
2201                 }
2202                 chainp = &(*chainp)->next;
2203         }
2204 #else
2205         chain->off += n;
2206         advance_last_with_data(buf);
2207 #endif
2208         buf->total_len += n;
2209         buf->n_add_for_cb += n;
2210 
2211         /* Tell someone about changes in this buffer */
2212         evbuffer_invoke_callbacks(buf);
2213         result = n;
2214 done:
2215         EVBUFFER_UNLOCK(buf);
2216         return result;
2217 }
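
     /* Illustrative sketch, not part of the original file: pulling whatever
      * is currently readable on a socket into an evbuffer.  Each call to
      * evbuffer_read() is capped at EVBUFFER_MAX_READ.  The helper name is
      * hypothetical.
      */
     #if 0
     static int
     example_read_available(struct evbuffer *buf, evutil_socket_t fd)
     {
             /* howmuch == -1 means "as much as looks readable right now" */
             return evbuffer_read(buf, fd, -1);
     }
     #endif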
2218 
2219 #ifdef WIN32
2220 static int
2221 evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd, ev_ssize_t howmuch)
2222 {
2223         int result;
2224         int nchains, n;
2225         struct evbuffer_iovec v[2];
2226 
2227         EVBUFFER_LOCK(buf);
2228 
2229         if (buf->freeze_end) {
2230                 result = -1;
2231                 goto done;
2232         }
2233 
2234         if (howmuch < 0)
2235                 howmuch = 16384;
2236 
2237 
2238         /* XXX we _will_ waste some space here if there is any space left
2239          * over on buf->last. */
2240         nchains = evbuffer_reserve_space(buf, howmuch, v, 2);
2241         if (nchains < 1 || nchains > 2) {
2242                 result = -1;
2243                 goto done;
2244         }
2245         n = read((int)fd, v[0].iov_base, (unsigned int)v[0].iov_len);
2246         if (n <= 0) {
2247                 result = n;
2248                 goto done;
2249         }
2250         v[0].iov_len = (IOV_LEN_TYPE) n; /* XXXX another problem with big n.*/
2251         if (nchains > 1) {
2252                 n = read((int)fd, v[1].iov_base, (unsigned int)v[1].iov_len);
2253                 if (n <= 0) {
2254                         result = (unsigned long) v[0].iov_len;
2255                         evbuffer_commit_space(buf, v, 1);
2256                         goto done;
2257                 }
2258                 v[1].iov_len = n;
2259         }
2260         evbuffer_commit_space(buf, v, nchains);
2261 
2262         result = n;
2263 done:
2264         EVBUFFER_UNLOCK(buf);
2265         return result;
2266 }
2267 #else
     /* On non-Windows platforms read()/readv() work on regular files, so the
      * generic file path below can simply reuse evbuffer_read(). */
     #define evbuffer_readfile evbuffer_read
     #endif
2268 
2269 #ifdef USE_IOVEC_IMPL
2270 static inline int
2271 evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
2272     ev_ssize_t howmuch)
2273 {
2274         IOV_TYPE iov[NUM_WRITE_IOVEC];
2275         struct evbuffer_chain *chain = buffer->first;
2276         int n, i = 0;
2277 
2278         if (howmuch < 0)
2279                 return -1;
2280 
2281         ASSERT_EVBUFFER_LOCKED(buffer);
2282         /* XXX make this top out at some maximal data length?  if the
2283          * buffer has (say) 1MB in it, split over 128 chains, there's
2284          * no way it all gets written in one go. */
2285         while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
2286 #ifdef USE_SENDFILE
2287                 /* we cannot write the file info via writev */
2288                 if (chain->flags & EVBUFFER_SENDFILE)
2289                         break;
2290 #endif
2291                 iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
2292                 if ((size_t)howmuch >= chain->off) {
2293                         /* XXXcould be problematic when windows supports mmap*/
2294                         iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
2295                         howmuch -= chain->off;
2296                 } else {
2297                         /* XXXcould be problematic when windows supports mmap*/
2298                         iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
2299                         break;
2300                 }
2301                 chain = chain->next;
2302         }
2303         if (! i)
2304                 return 0;
2305 #ifdef WIN32
2306         {
2307                 DWORD bytesSent;
2308                 if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
2309                         n = -1;
2310                 else
2311                         n = bytesSent;
2312         }
2313 #else
2314         n = writev(fd, iov, i);
2315 #endif
2316         return (n);
2317 }
2318 #endif
2319 
2320 #ifdef USE_SENDFILE
2321 static inline int
2322 evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t fd,
2323     ev_ssize_t howmuch)
2324 {
2325         struct evbuffer_chain *chain = buffer->first;
2326         struct evbuffer_chain_fd *info =
2327             EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
2328 #if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
2329         int res;
2330         off_t len = chain->off;
2331 #elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
2332         ev_ssize_t res;
2333         off_t offset = chain->misalign;
2334 #endif
2335 
2336         ASSERT_EVBUFFER_LOCKED(buffer);
2337 
2338 #if defined(SENDFILE_IS_MACOSX)
2339         res = sendfile(info->fd, fd, chain->misalign, &len, NULL, 0);
2340         if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
2341                 return (-1);
2342 
2343         return (len);
2344 #elif defined(SENDFILE_IS_FREEBSD)
2345         res = sendfile(info->fd, fd, chain->misalign, chain->off, NULL, &len, 0);
2346         if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
2347                 return (-1);
2348 
2349         return (len);
2350 #elif defined(SENDFILE_IS_LINUX)
2351         /* TODO(niels): implement splice */
2352         res = sendfile(fd, info->fd, &offset, chain->off);
2353         if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
2354                 /* if this is EAGAIN or EINTR return 0; otherwise, -1 */
2355                 return (0);
2356         }
2357         return (res);
2358 #elif defined(SENDFILE_IS_SOLARIS)
2359         {
2360                 const off_t offset_orig = offset;
2361                 res = sendfile(fd, info->fd, &offset, chain->off);
2362                 if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
2363                         if (offset - offset_orig)
2364                                 return offset - offset_orig;
2365                         /* if this is EAGAIN or EINTR and no bytes were
2366                          * written, return 0 */
2367                         return (0);
2368                 }
2369                 return (res);
2370         }
2371 #endif
2372 }
2373 #endif
2374 
2375 int
2376 evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
2377     ev_ssize_t howmuch)
2378 {
2379         int n = -1;
2380 
2381         EVBUFFER_LOCK(buffer);
2382 
2383         if (buffer->freeze_start) {
2384                 goto done;
2385         }
2386 
2387         if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
2388                 howmuch = buffer->total_len;
2389 
2390         if (howmuch > 0) {
2391 #ifdef USE_SENDFILE
2392                 struct evbuffer_chain *chain = buffer->first;
2393                 if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
2394                         n = evbuffer_write_sendfile(buffer, fd, howmuch);
2395                 else {
2396 #endif
2397 #ifdef USE_IOVEC_IMPL
2398                 n = evbuffer_write_iovec(buffer, fd, howmuch);
2399 #elif defined(WIN32)
2400                 /* XXX(nickm) Don't disable this code until we know if
2401                  * the WSARecv code above works. */
2402                 void *p = evbuffer_pullup(buffer, howmuch);
2403                 EVUTIL_ASSERT(p || !howmuch);
2404                 n = send(fd, p, howmuch, 0);
2405 #else
2406                 void *p = evbuffer_pullup(buffer, howmuch);
2407                 EVUTIL_ASSERT(p || !howmuch);
2408                 n = write(fd, p, howmuch);
2409 #endif
2410 #ifdef USE_SENDFILE
2411                 }
2412 #endif
2413         }
2414 
2415         if (n > 0)
2416                 evbuffer_drain(buffer, n);
2417 
2418 done:
2419         EVBUFFER_UNLOCK(buffer);
2420         return (n);
2421 }
2422 
2423 int
2424 evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
2425 {
2426         return evbuffer_write_atmost(buffer, fd, -1);
2427 }
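
     /* Illustrative sketch, not part of the original file: flushing part of
      * a buffer to a socket with evbuffer_write_atmost(); whatever is
      * written is drained, and the rest stays queued for a later attempt.
      * The helper name and the 16384-byte cap are hypothetical.
      */
     #if 0
     static int
     example_flush_some(struct evbuffer *out, evutil_socket_t fd)
     {
             int n = evbuffer_write_atmost(out, fd, 16384);
             if (n < 0)
                     return -1;      /* write error on fd */
             /* evbuffer_get_length(out) is how much is still pending. */
             return n;
     }
     #endif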
2428 
2429 unsigned char *
2430 evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
2431 {
2432         unsigned char *search;
2433         struct evbuffer_ptr ptr;
2434 
2435         EVBUFFER_LOCK(buffer);
2436 
2437         ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
2438         if (ptr.pos < 0) {
2439                 search = NULL;
2440         } else {
2441                 search = evbuffer_pullup(buffer, ptr.pos + len);
2442                 if (search)
2443                         search += ptr.pos;
2444         }
2445         EVBUFFER_UNLOCK(buffer);
2446         return search;
2447 }
2448 
2449 int
2450 evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
2451     size_t position, enum evbuffer_ptr_how how)
2452 {
2453         size_t left = position;
2454         struct evbuffer_chain *chain = NULL;
2455 
2456         EVBUFFER_LOCK(buf);
2457 
2458         switch (how) {
2459         case EVBUFFER_PTR_SET:
2460                 chain = buf->first;
2461                 pos->pos = position;
2462                 position = 0;
2463                 break;
2464         case EVBUFFER_PTR_ADD:
2465                 /* this avoids iterating over all previous chains if
2466                    we just want to advance the position */
2467                 if (pos->pos < 0 || EV_SIZE_MAX - position < (size_t)pos->pos) {
2468                         EVBUFFER_UNLOCK(buf);
2469                         return -1;
2470                 }
2471                 chain = pos->_internal.chain;
2472                 pos->pos += position;
2473                 position = pos->_internal.pos_in_chain;
2474                 break;
2475         }
2476 
2477         EVUTIL_ASSERT(EV_SIZE_MAX - left >= position);
2478         while (chain && position + left >= chain->off) {
2479                 left -= chain->off - position;
2480                 chain = chain->next;
2481                 position = 0;
2482         }
2483         if (chain) {
2484                 pos->_internal.chain = chain;
2485                 pos->_internal.pos_in_chain = position + left;
2486         } else {
2487                 pos->_internal.chain = NULL;
2488                 pos->pos = -1;
2489         }
2490 
2491         EVBUFFER_UNLOCK(buf);
2492 
2493         return chain != NULL ? 0 : -1;
2494 }
2495 
2496 /**
2497    Compare the bytes in buf at position pos to the len bytes in mem.  Return
2498    less than 0, 0, or greater than 0 as memcmp.
2499  */
2500 static int
2501 evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
2502     const char *mem, size_t len)
2503 {
2504         struct evbuffer_chain *chain;
2505         size_t position;
2506         int r;
2507 
2508         ASSERT_EVBUFFER_LOCKED(buf);
2509 
2510         if (pos->pos < 0 ||
2511             EV_SIZE_MAX - len < (size_t)pos->pos ||
2512             pos->pos + len > buf->total_len)
2513                 return -1;
2514 
2515         chain = pos->_internal.chain;
2516         position = pos->_internal.pos_in_chain;
2517         while (len && chain) {
2518                 size_t n_comparable;
2519                 if (len + position > chain->off)
2520                         n_comparable = chain->off - position;
2521                 else
2522                         n_comparable = len;
2523                 r = memcmp(chain->buffer + chain->misalign + position, mem,
2524                     n_comparable);
2525                 if (r)
2526                         return r;
2527                 mem += n_comparable;
2528                 len -= n_comparable;
2529                 position = 0;
2530                 chain = chain->next;
2531         }
2532 
2533         return 0;
2534 }
2535 
2536 struct evbuffer_ptr
2537 evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
2538 {
2539         return evbuffer_search_range(buffer, what, len, start, NULL);
2540 }
2541 
2542 struct evbuffer_ptr
2543 evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
2544 {
2545         struct evbuffer_ptr pos;
2546         struct evbuffer_chain *chain, *last_chain = NULL;
2547         const unsigned char *p;
2548         char first;
2549 
2550         EVBUFFER_LOCK(buffer);
2551 
2552         if (start) {
2553                 memcpy(&pos, start, sizeof(pos));
2554                 chain = pos._internal.chain;
2555         } else {
2556                 pos.pos = 0;
2557                 chain = pos._internal.chain = buffer->first;
2558                 pos._internal.pos_in_chain = 0;
2559         }
2560 
2561         if (end)
2562                 last_chain = end->_internal.chain;
2563 
2564         if (!len || len > EV_SSIZE_MAX)
2565                 goto done;
2566 
2567         first = what[0];
2568 
2569         while (chain) {
2570                 const unsigned char *start_at =
2571                     chain->buffer + chain->misalign +
2572                     pos._internal.pos_in_chain;
2573                 p = memchr(start_at, first,
2574                     chain->off - pos._internal.pos_in_chain);
2575                 if (p) {
2576                         pos.pos += p - start_at;
2577                         pos._internal.pos_in_chain += p - start_at;
2578                         if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
2579                                 if (end && pos.pos + (ev_ssize_t)len > end->pos)
2580                                         goto not_found;
2581                                 else
2582                                         goto done;
2583                         }
2584                         ++pos.pos;
2585                         ++pos._internal.pos_in_chain;
2586                         if (pos._internal.pos_in_chain == chain->off) {
2587                                 chain = pos._internal.chain = chain->next;
2588                                 pos._internal.pos_in_chain = 0;
2589                         }
2590                 } else {
2591                         if (chain == last_chain)
2592                                 goto not_found;
2593                         pos.pos += chain->off - pos._internal.pos_in_chain;
2594                         chain = pos._internal.chain = chain->next;
2595                         pos._internal.pos_in_chain = 0;
2596                 }
2597         }
2598 
2599 not_found:
2600         pos.pos = -1;
2601         pos._internal.chain = NULL;
2602 done:
2603         EVBUFFER_UNLOCK(buffer);
2604         return pos;
2605 }
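
     /* Illustrative sketch, not part of the original file: counting
      * occurrences of a pattern by restarting evbuffer_search() from just
      * past each match, advancing with evbuffer_ptr_set(..., EVBUFFER_PTR_ADD)
      * between matches.  Names are hypothetical.
      */
     #if 0
     static int
     example_count_crlf(struct evbuffer *buf)
     {
             int count = 0;
             struct evbuffer_ptr p = evbuffer_search(buf, "\r\n", 2, NULL);

             while (p.pos >= 0) {
                     ++count;
                     if (evbuffer_ptr_set(buf, &p, 1, EVBUFFER_PTR_ADD) < 0)
                             break;
                     p = evbuffer_search(buf, "\r\n", 2, &p);
             }
             return count;
     }
     #endif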
2606 
2607 int
2608 evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
2609     struct evbuffer_ptr *start_at,
2610     struct evbuffer_iovec *vec, int n_vec)
2611 {
2612         struct evbuffer_chain *chain;
2613         int idx = 0;
2614         ev_ssize_t len_so_far = 0;
2615 
2616         EVBUFFER_LOCK(buffer);
2617 
2618         if (start_at) {
2619                 chain = start_at->_internal.chain;
2620                 len_so_far = chain->off
2621                     - start_at->_internal.pos_in_chain;
2622                 idx = 1;
2623                 if (n_vec > 0) {
2624                         vec[0].iov_base = chain->buffer + chain->misalign
2625                             + start_at->_internal.pos_in_chain;
2626                         vec[0].iov_len = len_so_far;
2627                 }
2628                 chain = chain->next;
2629         } else {
2630                 chain = buffer->first;
2631         }
2632 
2633         if (n_vec == 0 && len < 0) {
2634                 /* If no vectors are provided and they asked for "everything",
2635                  * pretend they asked for the actual available amount. */
2636                 len = buffer->total_len;
2637                 if (start_at) {
2638                         len -= start_at->pos;
2639                 }
2640         }
2641 
2642         while (chain) {
2643                 if (len >= 0 && len_so_far >= len)
2644                         break;
2645                 if (idx<n_vec) {
2646                         vec[idx].iov_base = chain->buffer + chain->misalign;
2647                         vec[idx].iov_len = chain->off;
2648                 } else if (len<0) {
2649                         break;
2650                 }
2651                 ++idx;
2652                 len_so_far += chain->off;
2653                 chain = chain->next;
2654         }
2655 
2656         EVBUFFER_UNLOCK(buffer);
2657 
2658         return idx;
2659 }
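
     /* Illustrative sketch, not part of the original file: inspecting
      * buffered data in place with evbuffer_peek(); nothing is copied or
      * drained.  The helper name and the 4-extent limit are hypothetical.
      */
     #if 0
     static size_t
     example_peek_bytes(struct evbuffer *buf)
     {
             struct evbuffer_iovec v[4];
             size_t seen = 0;
             int n, i;

             /* len == -1 asks for everything; the return value is how many
              * extents were filled in (at most 4 here). */
             n = evbuffer_peek(buf, -1, NULL, v, 4);
             for (i = 0; i < n && i < 4; ++i)
                     seen += v[i].iov_len;
             return seen;
     }
     #endif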
2660 
2661 
2662 int
2663 evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
2664 {
2665         char *buffer;
2666         size_t space;
2667         int sz, result = -1;
2668         va_list aq;
2669         struct evbuffer_chain *chain;
2670 
2671 
2672         EVBUFFER_LOCK(buf);
2673 
2674         if (buf->freeze_end) {
2675                 goto done;
2676         }
2677 
2678         /* make sure that at least some space is available */
2679         if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL)
2680                 goto done;
2681 
2682         for (;;) {
2683 #if 0
2684                 size_t used = chain->misalign + chain->off;
2685                 buffer = (char *)chain->buffer + chain->misalign + chain->off;
2686                 EVUTIL_ASSERT(chain->buffer_len >= used);
2687                 space = chain->buffer_len - used;
2688 #endif
2689                 buffer = (char*) CHAIN_SPACE_PTR(chain);
2690                 space = (size_t) CHAIN_SPACE_LEN(chain);
2691 
2692 #ifndef va_copy
2693 #define va_copy(dst, src)       memcpy(&(dst), &(src), sizeof(va_list))
2694 #endif
2695                 va_copy(aq, ap);
2696 
2697                 sz = evutil_vsnprintf(buffer, space, fmt, aq);
2698 
2699                 va_end(aq);
2700 
2701                 if (sz < 0)
2702                         goto done;
2703                 if (INT_MAX >= EVBUFFER_CHAIN_MAX &&
2704                     (size_t)sz >= EVBUFFER_CHAIN_MAX)
2705                         goto done;
2706                 if ((size_t)sz < space) {
2707                         chain->off += sz;
2708                         buf->total_len += sz;
2709                         buf->n_add_for_cb += sz;
2710 
2711                         advance_last_with_data(buf);
2712                         evbuffer_invoke_callbacks(buf);
2713                         result = sz;
2714                         goto done;
2715                 }
2716                 if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)
2717                         goto done;
2718         }
2719         /* NOTREACHED */
2720 
2721 done:
2722         EVBUFFER_UNLOCK(buf);
2723         return result;
2724 }
2725 
2726 int
2727 evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
2728 {
2729         int res = -1;
2730         va_list ap;
2731 
2732         va_start(ap, fmt);
2733         res = evbuffer_add_vprintf(buf, fmt, ap);
2734         va_end(ap);
2735 
2736         return (res);
2737 }
2738 
2739 int
2740 evbuffer_add_reference(struct evbuffer *outbuf,
2741     const void *data, size_t datlen,
2742     evbuffer_ref_cleanup_cb cleanupfn, void *extra)
2743 {
2744         struct evbuffer_chain *chain;
2745         struct evbuffer_chain_reference *info;
2746         int result = -1;
2747 
2748         chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
2749         if (!chain)
2750                 return (-1);
2751         chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
2752         chain->buffer = (u_char *)data;
2753         chain->buffer_len = datlen;
2754         chain->off = datlen;
2755 
2756         info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
2757         info->cleanupfn = cleanupfn;
2758         info->extra = extra;
2759 
2760         EVBUFFER_LOCK(outbuf);
2761         if (outbuf->freeze_end) {
2762                 /* don't call chain_free; we do not want to actually invoke
2763                  * the cleanup function */
2764                 mm_free(chain);
2765                 goto done;
2766         }
2767         evbuffer_chain_insert(outbuf, chain);
2768         outbuf->n_add_for_cb += datlen;
2769 
2770         evbuffer_invoke_callbacks(outbuf);
2771 
2772         result = 0;
2773 done:
2774         EVBUFFER_UNLOCK(outbuf);
2775 
2776         return result;
2777 }
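
     /* Illustrative sketch, not part of the original file: adding a large,
      * long-lived blob by reference so it is never copied; the cleanup
      * callback fires when the chain holding it is released.  Names are
      * hypothetical.
      */
     #if 0
     static void
     example_ref_cleanup(const void *data, size_t datalen, void *extra)
     {
             (void)data;
             (void)datalen;
             (void)extra;
             /* e.g. unpin or free the memory behind 'data' here */
     }

     static int
     example_add_blob(struct evbuffer *out, const void *blob, size_t len)
     {
             return evbuffer_add_reference(out, blob, len,
                 example_ref_cleanup, NULL);
     }
     #endif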
2778 
2779 /* TODO(niels): maybe we don't want to own the fd, however, in that
2780  * case, we should dup it - dup is cheap.  Perhaps, we should use a
2781  * callback instead?
2782  */
2783 /* TODO(niels): we may want to add to automagically convert to mmap, in
2784  * case evbuffer_remove() or evbuffer_pullup() are being used.
2785  */
2786 int
2787 evbuffer_add_file(struct evbuffer *outbuf, int fd,
2788     ev_off_t offset, ev_off_t length)
2789 {
2790 #if defined(USE_SENDFILE) || defined(_EVENT_HAVE_MMAP)
2791         struct evbuffer_chain *chain;
2792         struct evbuffer_chain_fd *info;
2793 #endif
2794 #if defined(USE_SENDFILE)
2795         int sendfile_okay = 1;
2796 #endif
2797         int ok = 1;
2798 
2799         if (offset < 0 || length < 0 ||
2800             ((ev_uint64_t)length > EVBUFFER_CHAIN_MAX) ||
2801             (ev_uint64_t)offset > (ev_uint64_t)(EVBUFFER_CHAIN_MAX - length))
2802                 return (-1);
2803 
2804 #if defined(USE_SENDFILE)
2805         if (use_sendfile) {
2806                 EVBUFFER_LOCK(outbuf);
2807                 sendfile_okay = outbuf->flags & EVBUFFER_FLAG_DRAINS_TO_FD;
2808                 EVBUFFER_UNLOCK(outbuf);
2809         }
2810 
2811         if (use_sendfile && sendfile_okay) {
2812                 chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
2813                 if (chain == NULL) {
2814                         event_warn("%s: out of memory", __func__);
2815                         return (-1);
2816                 }
2817 
2818                 chain->flags |= EVBUFFER_SENDFILE | EVBUFFER_IMMUTABLE;
2819                 chain->buffer = NULL;   /* no reading possible */
2820                 chain->buffer_len = length + offset;
2821                 chain->off = length;
2822                 chain->misalign = offset;
2823 
2824                 info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
2825                 info->fd = fd;
2826 
2827                 EVBUFFER_LOCK(outbuf);
2828                 if (outbuf->freeze_end) {
2829                         mm_free(chain);
2830                         ok = 0;
2831                 } else {
2832                         outbuf->n_add_for_cb += length;
2833                         evbuffer_chain_insert(outbuf, chain);
2834                 }
2835         } else
2836 #endif
2837 #if defined(_EVENT_HAVE_MMAP)
2838         if (use_mmap) {
2839                 void *mapped = mmap(NULL, length + offset, PROT_READ,
2840 #ifdef MAP_NOCACHE
2841                     MAP_NOCACHE |
2842 #endif
2843 #ifdef MAP_FILE
2844                     MAP_FILE |
2845 #endif
2846                     MAP_PRIVATE,
2847                     fd, 0);
2848                 /* some mmap implementations require offset to be a multiple of
2849                  * the page size.  Most users of this API are likely to use 0,
2850                  * so mapping everything is not likely to be a problem.
2851                  * TODO(niels): determine page size and round offset to that
2852                  * page size to avoid mapping too much memory.
2853                  */
2854                 if (mapped == MAP_FAILED) {
2855                         event_warn("%s: mmap(%d, %d, %zu) failed",
2856                             __func__, fd, 0, (size_t)(offset + length));
2857                         return (-1);
2858                 }
2859                 chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
2860                 if (chain == NULL) {
2861                         event_warn("%s: out of memory", __func__);
2862                         munmap(mapped, length);
2863                         return (-1);
2864                 }
2865 
2866                 chain->flags |= EVBUFFER_MMAP | EVBUFFER_IMMUTABLE;
2867                 chain->buffer = mapped;
2868                 chain->buffer_len = length + offset;
2869                 chain->off = length + offset;
2870 
2871                 info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
2872                 info->fd = fd;
2873 
2874                 EVBUFFER_LOCK(outbuf);
2875                 if (outbuf->freeze_end) {
2876                         info->fd = -1;
2877                         evbuffer_chain_free(chain);
2878                         ok = 0;
2879                 } else {
2880                         outbuf->n_add_for_cb += length;
2881 
2882                         evbuffer_chain_insert(outbuf, chain);
2883 
2884                         /* we need to subtract whatever we don't need */
2885                         evbuffer_drain(outbuf, offset);
2886                 }
2887         } else
2888 #endif
2889         {
2890                 /* the default implementation */
2891                 struct evbuffer *tmp = evbuffer_new();
2892                 ev_ssize_t read;
2893 
2894                 if (tmp == NULL)
2895                         return (-1);
2896 
2897 #ifdef WIN32
2898 #define lseek _lseeki64
2899 #endif
2900                 if (lseek(fd, offset, SEEK_SET) == -1) {
2901                         evbuffer_free(tmp);
2902                         return (-1);
2903                 }
2904 
2905                 /* we add everything to a temporary buffer, so that we
2906                  * can abort without side effects if the read fails.
2907                  */
2908                 while (length) {
2909                         ev_ssize_t to_read = length > EV_SSIZE_MAX ? EV_SSIZE_MAX : (ev_ssize_t)length;
2910                         read = evbuffer_readfile(tmp, fd, to_read);
2911                         if (read == -1) {
2912                                 evbuffer_free(tmp);
2913                                 return (-1);
2914                         }
2915 
2916                         length -= read;
2917                 }
2918 
2919                 EVBUFFER_LOCK(outbuf);
2920                 if (outbuf->freeze_end) {
2921                         evbuffer_free(tmp);
2922                         ok = 0;
2923                 } else {
2924                         evbuffer_add_buffer(outbuf, tmp);
2925                         evbuffer_free(tmp);
2926 
2927 #ifdef WIN32
2928 #define close _close
2929 #endif
2930                         close(fd);
2931                 }
2932         }
2933 
2934         if (ok)
2935                 evbuffer_invoke_callbacks(outbuf);
2936         EVBUFFER_UNLOCK(outbuf);
2937 
2938         return ok ? 0 : -1;
2939 }
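
     /* Illustrative sketch, not part of the original file: queueing a whole
      * file for output.  On success the descriptor belongs to the evbuffer
      * and is closed for the caller, regardless of whether the sendfile,
      * mmap, or linear path above was taken.  Names are hypothetical;
      * assumes <fcntl.h> and <sys/stat.h>.
      */
     #if 0
     static int
     example_queue_file(struct evbuffer *out, const char *path)
     {
             struct stat st;
             int fd = open(path, O_RDONLY);

             if (fd < 0)
                     return -1;
             if (fstat(fd, &st) < 0 ||
                 evbuffer_add_file(out, fd, 0, st.st_size) < 0) {
                     close(fd);
                     return -1;
             }
             return 0;
     }
     #endif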
2940 
2941 
2942 void
2943 evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
2944 {
2945         EVBUFFER_LOCK(buffer);
2946 
2947         if (!TAILQ_EMPTY(&buffer->callbacks))
2948                 evbuffer_remove_all_callbacks(buffer);
2949 
2950         if (cb) {
2951                 struct evbuffer_cb_entry *ent =
2952                     evbuffer_add_cb(buffer, NULL, cbarg);
2953                 ent->cb.cb_obsolete = cb;
2954                 ent->flags |= EVBUFFER_CB_OBSOLETE;
2955         }
2956         EVBUFFER_UNLOCK(buffer);
2957 }
2958 
2959 struct evbuffer_cb_entry *
2960 evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
2961 {
2962         struct evbuffer_cb_entry *e;
2963         if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
2964                 return NULL;
2965         EVBUFFER_LOCK(buffer);
2966         e->cb.cb_func = cb;
2967         e->cbarg = cbarg;
2968         e->flags = EVBUFFER_CB_ENABLED;
2969         TAILQ_INSERT_HEAD(&buffer->callbacks, e, next);
2970         EVBUFFER_UNLOCK(buffer);
2971         return e;
2972 }
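
     /* Illustrative sketch, not part of the original file: a callback
      * registered with evbuffer_add_cb() that tracks how many bytes have
      * been appended.  The callback signature and the evbuffer_cb_info
      * fields are real API; the names below are hypothetical.
      */
     #if 0
     static void
     example_count_cb(struct evbuffer *buffer,
         const struct evbuffer_cb_info *info, void *arg)
     {
             size_t *total_added = arg;
             (void)buffer;
             *total_added += info->n_added;  /* info->n_deleted counts drains */
     }

     /* registration:  evbuffer_add_cb(buf, example_count_cb, &total); */
     #endif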
2973 
2974 int
2975 evbuffer_remove_cb_entry(struct evbuffer *buffer,
2976                          struct evbuffer_cb_entry *ent)
2977 {
2978         EVBUFFER_LOCK(buffer);
2979         TAILQ_REMOVE(&buffer->callbacks, ent, next);
2980         EVBUFFER_UNLOCK(buffer);
2981         mm_free(ent);
2982         return 0;
2983 }
2984 
2985 int
2986 evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
2987 {
2988         struct evbuffer_cb_entry *cbent;
2989         int result = -1;
2990         EVBUFFER_LOCK(buffer);
2991         TAILQ_FOREACH(cbent, &buffer->callbacks, next) {
2992                 if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
2993                         result = evbuffer_remove_cb_entry(buffer, cbent);
2994                         goto done;
2995                 }
2996         }
2997 done:
2998         EVBUFFER_UNLOCK(buffer);
2999         return result;
3000 }
3001 
3002 int
3003 evbuffer_cb_set_flags(struct evbuffer *buffer,
3004                       struct evbuffer_cb_entry *cb, ev_uint32_t flags)
3005 {
3006         /* the user isn't allowed to mess with these. */
3007         flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
3008         EVBUFFER_LOCK(buffer);
3009         cb->flags |= flags;
3010         EVBUFFER_UNLOCK(buffer);
3011         return 0;
3012 }
3013 
3014 int
3015 evbuffer_cb_clear_flags(struct evbuffer *buffer,
3016                       struct evbuffer_cb_entry *cb, ev_uint32_t flags)
3017 {
3018         /* the user isn't allowed to mess with these. */
3019         flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
3020         EVBUFFER_LOCK(buffer);
3021         cb->flags &= ~flags;
3022         EVBUFFER_UNLOCK(buffer);
3023         return 0;
3024 }
3025 
3026 int
3027 evbuffer_freeze(struct evbuffer *buffer, int start)
3028 {
3029         EVBUFFER_LOCK(buffer);
3030         if (start)
3031                 buffer->freeze_start = 1;
3032         else
3033                 buffer->freeze_end = 1;
3034         EVBUFFER_UNLOCK(buffer);
3035         return 0;
3036 }
3037 
3038 int
3039 evbuffer_unfreeze(struct evbuffer *buffer, int start)
3040 {
3041         EVBUFFER_LOCK(buffer);
3042         if (start)
3043                 buffer->freeze_start = 0;
3044         else
3045                 buffer->freeze_end = 0;
3046         EVBUFFER_UNLOCK(buffer);
3047         return 0;
3048 }
3049 
3050 #if 0
3051 void
3052 evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
3053 {
3054         if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) {
3055                 cb->size_before_suspend = evbuffer_get_length(buffer);
3056                 cb->flags |= EVBUFFER_CB_SUSPENDED;
3057         }
3058 }
3059 
3060 void
3061 evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
3062 {
3063         if ((cb->flags & EVBUFFER_CB_SUSPENDED)) {
3064                 unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND);
3065                 size_t sz = cb->size_before_suspend;
3066                 cb->flags &= ~(EVBUFFER_CB_SUSPENDED|
3067                                EVBUFFER_CB_CALL_ON_UNSUSPEND);
3068                 cb->size_before_suspend = 0;
3069                 if (call && (cb->flags & EVBUFFER_CB_ENABLED)) {
3070                         cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg);
3071                 }
3072         }
3073 }
3074 #endif
3075 
3076 /* These hooks are exposed so that the unit tests can temporarily disable
3077  * sendfile support in order to test mmap, or both to test linear
3078  * access.  Don't use them; if we need to add a way to disable sendfile support
3079  * in the future, it will probably be via an alternate version of
3080  * evbuffer_add_file() with a 'flags' argument.
3081  */
3082 int _evbuffer_testing_use_sendfile(void);
3083 int _evbuffer_testing_use_mmap(void);
3084 int _evbuffer_testing_use_linear_file_access(void);
3085 
3086 int
3087 _evbuffer_testing_use_sendfile(void)
3088 {
3089         int ok = 0;
3090 #ifdef USE_SENDFILE
3091         use_sendfile = 1;
3092         ok = 1;
3093 #endif
3094 #ifdef _EVENT_HAVE_MMAP
3095         use_mmap = 0;
3096 #endif
3097         return ok;
3098 }
3099 int
3100 _evbuffer_testing_use_mmap(void)
3101 {
3102         int ok = 0;
3103 #ifdef USE_SENDFILE
3104         use_sendfile = 0;
3105 #endif
3106 #ifdef _EVENT_HAVE_MMAP
3107         use_mmap = 1;
3108         ok = 1;
3109 #endif
3110         return ok;
3111 }
3112 int
3113 _evbuffer_testing_use_linear_file_access(void)
3114 {
3115 #ifdef USE_SENDFILE
3116         use_sendfile = 0;
3117 #endif
3118 #ifdef _EVENT_HAVE_MMAP
3119         use_mmap = 0;
3120 #endif
3121         return 1;
3122 }
