diff -ruN curl-7.65.1/lib/hash.h curl-7.65.1-fix-dns-segfaults/lib/hash.h
--- curl-7.65.1/lib/hash.h	2019-03-25 09:42:46.000000000 +0100
+++ curl-7.65.1-fix-dns-segfaults/lib/hash.h	2019-06-28 17:01:29.205878100 +0200
@@ -7,7 +7,7 @@
  * | (__| |_| | _ <| |___
  *  \___|\___/|_| \_\_____|
  *
- * Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2019, Daniel Stenberg, <daniel@haxx.se>, et al.
  *
  * This software is licensed as described in the file COPYING, which
  * you should have received as part of this distribution. The terms
@@ -80,7 +80,7 @@
 void *Curl_hash_pick(struct curl_hash *, void *key, size_t key_len);
 void Curl_hash_apply(struct curl_hash *h, void *user,
                      void (*cb)(void *user, void *ptr));
-int Curl_hash_count(struct curl_hash *h);
+#define Curl_hash_count(h) ((h)->size)
 void Curl_hash_destroy(struct curl_hash *h);
 void Curl_hash_clean(struct curl_hash *h);
 void Curl_hash_clean_with_criterium(struct curl_hash *h, void *user,
diff -ruN curl-7.65.1/lib/multi.c curl-7.65.1-fix-dns-segfaults/lib/multi.c
--- curl-7.65.1/lib/multi.c	2019-06-02 23:06:16.000000000 +0200
+++ curl-7.65.1-fix-dns-segfaults/lib/multi.c	2019-06-28 17:01:35.917879673 +0200
@@ -189,7 +189,7 @@
  */
 
 struct Curl_sh_entry {
-  struct curl_llist list; /* list of easy handles using this socket */
+  struct curl_hash transfers; /* hash of transfers using this socket */
   unsigned int action;  /* what combined action READ/WRITE this socket waits
                            for */
   void *socketp; /* settable by users with curl_multi_assign() */
@@ -206,12 +206,36 @@
 static struct Curl_sh_entry *sh_getentry(struct curl_hash *sh,
                                          curl_socket_t s)
 {
-  if(s != CURL_SOCKET_BAD)
+  if(s != CURL_SOCKET_BAD) {
     /* only look for proper sockets */
     return Curl_hash_pick(sh, (char *)&s, sizeof(curl_socket_t));
+  }
   return NULL;
 }
 
+#define TRHASH_SIZE 13
+static size_t trhash(void *key, size_t key_length, size_t slots_num)
+{
+  size_t keyval = (size_t)*(struct Curl_easy **)key;
+  (void) key_length;
+
+  return (keyval % slots_num);
+}
+
+static size_t trhash_compare(void *k1, size_t k1_len, void *k2, size_t k2_len)
+{
+  (void)k1_len;
+  (void)k2_len;
+
+  return *(struct Curl_easy **)k1 == *(struct Curl_easy **)k2;
+}
+
+static void trhash_dtor(void *nada)
+{
+  (void)nada;
+}
+
+
 /* make sure this socket is present in the hash for this handle */
 static struct Curl_sh_entry *sh_addentry(struct curl_hash *sh,
                                          curl_socket_t s)
@@ -219,16 +243,21 @@
   struct Curl_sh_entry *there = sh_getentry(sh, s);
   struct Curl_sh_entry *check;
 
-  if(there)
+  if(there) {
     /* it is present, return fine */
     return there;
+  }
 
   /* not present, add it */
   check = calloc(1, sizeof(struct Curl_sh_entry));
   if(!check)
     return NULL; /* major failure */
 
-  Curl_llist_init(&check->list, NULL);
+  if(Curl_hash_init(&check->transfers, TRHASH_SIZE, trhash,
+                    trhash_compare, trhash_dtor)) {
+    free(check);
+    return NULL;
+  }
 
   /* make/add new hash entry */
   if(!Curl_hash_add(sh, (char *)&s, sizeof(curl_socket_t), check)) {
@@ -244,14 +273,8 @@
 static void sh_delentry(struct Curl_sh_entry *entry,
                         struct curl_hash *sh, curl_socket_t s)
 {
-  struct curl_llist *list = &entry->list;
-  struct curl_llist_element *e;
-  /* clear the list of transfers first */
-  for(e = list->head; e; e = list->head) {
-    struct Curl_easy *dta = e->ptr;
-    Curl_llist_remove(&entry->list, e, NULL);
-    dta->sh_entry = NULL;
-  }
+  Curl_hash_destroy(&entry->transfers);
+
   /* We remove the hash entry. This will end up in a call to
      sh_freeentry(). */
   Curl_hash_delete(sh, (char *)&s, sizeof(curl_socket_t));
@@ -320,17 +343,6 @@
   return CURLM_OK;
 }
 
-/*
- * multi_freeamsg()
- *
- * Callback used by the llist system when a single list entry is destroyed.
- */
-static void multi_freeamsg(void *a, void *b)
-{
-  (void)a;
-  (void)b;
-}
-
 struct Curl_multi *Curl_multi_handle(int hashsize, /* socket hash */
                                      int chashsize) /* connection hash */
 {
@@ -350,8 +362,8 @@
   if(Curl_conncache_init(&multi->conn_cache, chashsize))
     goto error;
 
-  Curl_llist_init(&multi->msglist, multi_freeamsg);
-  Curl_llist_init(&multi->pending, multi_freeamsg);
+  Curl_llist_init(&multi->msglist, NULL);
+  Curl_llist_init(&multi->pending, NULL);
 
   /* -1 means it not set by user, use the default value */
   multi->maxconnects = -1;
@@ -789,11 +801,6 @@
 static void detach_connnection(struct Curl_easy *data)
 {
   struct connectdata *conn = data->conn;
-  if(data->sh_entry) {
-    /* still listed as a user of a socket hash entry, remove it */
-    Curl_llist_remove(&data->sh_entry->list, &data->sh_queue, NULL);
-    data->sh_entry = NULL;
-  }
   if(conn)
     Curl_llist_remove(&conn->easyq, &data->conn_queue, NULL);
   data->conn = NULL;
@@ -1266,6 +1273,9 @@
     bool stream_error = FALSE;
     rc = CURLM_OK;
 
+    DEBUGASSERT((data->mstate <= CURLM_STATE_CONNECT) ||
+                (data->mstate >= CURLM_STATE_DONE) ||
+                data->conn);
     if(!data->conn &&
        data->mstate > CURLM_STATE_CONNECT &&
        data->mstate < CURLM_STATE_DONE) {
@@ -2287,30 +2297,22 @@
       if(action & CURL_POLL_OUT)
         entry->writers++;
 
-      /* add 'data' to the list of handles using this socket! */
-      Curl_llist_insert_next(&entry->list, entry->list.tail,
-                             data, &data->sh_queue);
-      data->sh_entry = entry;
+      /* add 'data' to the transfer hash on this socket! */
+      if(!Curl_hash_add(&entry->transfers, (char *)&data, /* hash key */
+                        sizeof(struct Curl_easy *), data))
+        return CURLM_OUT_OF_MEMORY;
     }
 
     comboaction = (entry->writers? CURL_POLL_OUT : 0) |
                    (entry->readers ? CURL_POLL_IN : 0);
 
-#if 0
-    infof(data, "--- Comboaction: %u readers %u writers\n",
-          entry->readers, entry->writers);
-#endif
-    /* check if it has the same action set */
-    if(entry->action == comboaction)
+    /* socket existed before and has the same action set as before */
+    if(sincebefore && (entry->action == comboaction))
      /* same, continue */
      continue;
 
-    /* we know (entry != NULL) at this point, see the logic above */
    if(multi->socket_cb)
-      multi->socket_cb(data,
-                       s,
-                       comboaction,
-                       multi->socket_userp,
+      multi->socket_cb(data, s, comboaction, multi->socket_userp,
                        entry->socketp);
 
    entry->action = comboaction; /* store the current action state */
@@ -2352,6 +2354,13 @@
                          entry->socketp);
        sh_delentry(entry, &multi->sockhash, s);
      }
+      else {
+        /* still users, but remove this handle as a user of this socket */
+        if(Curl_hash_delete(&entry->transfers, (char *)&data,
+                            sizeof(struct Curl_easy *))) {
+          DEBUGASSERT(NULL);
+        }
+      }
    }
  } /* for loop over numsocks */
 
@@ -2495,19 +2504,14 @@
         and just move on.
      */
      ;
    else {
-      struct curl_llist *list = &entry->list;
-      struct curl_llist_element *e;
-      struct curl_llist_element *enext;
-      SIGPIPE_VARIABLE(pipe_st);
+      struct curl_hash_iterator iter;
+      struct curl_hash_element *he;
 
      /* the socket can be shared by many transfers, iterate */
-      for(e = list->head; e; e = enext) {
-        data = (struct Curl_easy *)e->ptr;
-
-        /* assign 'enext' here since the 'e' struct might be cleared
-           further down in the singlesocket() call */
-        enext = e->next;
-
+      Curl_hash_start_iterate(&entry->transfers, &iter);
+      for(he = Curl_hash_next_element(&iter); he;
+          he = Curl_hash_next_element(&iter)) {
+        data = (struct Curl_easy *)he->ptr;
        DEBUGASSERT(data);
        DEBUGASSERT(data->magic == CURLEASY_MAGIC_NUMBER);
@@ -2515,21 +2519,7 @@
        /* set socket event bitmask if they're not locked */
        data->conn->cselect_bits = ev_bitmask;
 
-        sigpipe_ignore(data, &pipe_st);
-        result = multi_runsingle(multi, now, data);
-        sigpipe_restore(&pipe_st);
-
-        if(data->conn && !(data->conn->handler->flags & PROTOPT_DIRLOCK))
-          /* clear the bitmask only if not locked */
-          data->conn->cselect_bits = 0;
-
-        if(CURLM_OK >= result) {
-          /* get the socket(s) and check if the state has been changed since
-             last */
-          result = singlesocket(multi, data);
-          if(result)
-            return result;
-        }
+        Curl_expire(data, 0, EXPIRE_RUN_NOW);
      }
 
    /* Now we fall-through and do the timer-based stuff, since we don't want
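
Note on the keying scheme (not part of the patch itself): the new per-socket
"transfers" hash keys each entry by the address of the easy handle, so
trhash() derives the bucket from the pointer value and trhash_compare() only
matches the very same pointer. Below is a minimal standalone C sketch of that
idea, assuming nothing from libcurl; the demo_* names are made up for this
illustration.

/* Sketch only: pointer-keyed hashing, mirroring trhash()/trhash_compare() */
#include <stdio.h>
#include <stddef.h>

#define DEMO_SLOTS 13              /* same bucket count as TRHASH_SIZE */

struct demo_easy { int id; };      /* stand-in for struct Curl_easy */

/* pick a bucket from the pointer value, as trhash() does in the patch */
static size_t demo_slot(struct demo_easy *h)
{
  return ((size_t)h) % DEMO_SLOTS;
}

/* two keys are equal only if they are the very same handle pointer,
   mirroring trhash_compare() */
static int demo_same(struct demo_easy *a, struct demo_easy *b)
{
  return a == b;
}

int main(void)
{
  struct demo_easy h1 = {1}, h2 = {2};

  printf("h1 in bucket %u, h2 in bucket %u\n",
         (unsigned int)demo_slot(&h1), (unsigned int)demo_slot(&h2));
  printf("same handle? h1/h1: %d, h1/h2: %d\n",
         demo_same(&h1, &h1), demo_same(&h1, &h2));
  return 0;
}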
In such a case, curl_multi_remove_handle() can + * access free'd data, if the connection is free'd and the handle + * removed before we perform the processing in CURLM_STATE_COMPLETED + */ + if(data->conn) + detach_connnection(data); + } + +#ifndef CURL_DISABLE_FTP + if(data->state.wildcardmatch) { + if(data->wildcard.state != CURLWC_DONE) { + /* if a wildcard is set and we are not ending -> lets start again + with CURLM_STATE_INIT */ + multistate(data, CURLM_STATE_INIT); + break; + } + } +#endif + /* after we have DONE what we're supposed to do, go COMPLETED, and + it doesn't matter what the multi_done() returned! */ + multistate(data, CURLM_STATE_COMPLETED); + break; + + case CURLM_STATE_COMPLETED: + break; + + case CURLM_STATE_MSGSENT: + data->result = result; + return CURLM_OK; /* do nothing */ + + default: + return CURLM_INTERNAL_ERROR; + } + statemachine_end: + + if(data->mstate < CURLM_STATE_COMPLETED) { + if(result) { + /* + * If an error was returned, and we aren't in completed state now, + * then we go to completed and consider this transfer aborted. + */ + + /* NOTE: no attempt to disconnect connections must be made + in the case blocks above - cleanup happens only here */ + + /* Check if we can move pending requests to send pipe */ + process_pending_handles(multi); /* connection */ + + if(data->conn) { + if(stream_error) { + /* Don't attempt to send data over a connection that timed out */ + bool dead_connection = result == CURLE_OPERATION_TIMEDOUT; + /* disconnect properly */ + Curl_disconnect(data, data->conn, dead_connection); + + /* This is where we make sure that the conn pointer is reset. + We don't have to do this in every case block above where a + failure is detected */ + detach_connnection(data); + } + } + else if(data->mstate == CURLM_STATE_CONNECT) { + /* Curl_connect() failed */ + (void)Curl_posttransfer(data); + } + + multistate(data, CURLM_STATE_COMPLETED); + rc = CURLM_CALL_MULTI_PERFORM; + } + /* if there's still a connection to use, call the progress function */ + else if(data->conn && Curl_pgrsUpdate(data->conn)) { + /* aborted due to progress callback return code must close the + connection */ + result = CURLE_ABORTED_BY_CALLBACK; + streamclose(data->conn, "Aborted by callback"); + + /* if not yet in DONE state, go there, otherwise COMPLETED */ + multistate(data, (data->mstate < CURLM_STATE_DONE)? 
+ CURLM_STATE_DONE: CURLM_STATE_COMPLETED); + rc = CURLM_CALL_MULTI_PERFORM; + } + } + + if(CURLM_STATE_COMPLETED == data->mstate) { + if(data->set.fmultidone) { + /* signal via callback instead */ + data->set.fmultidone(data, result); + } + else { + /* now fill in the Curl_message with this info */ + msg = &data->msg; + + msg->extmsg.msg = CURLMSG_DONE; + msg->extmsg.easy_handle = data; + msg->extmsg.data.result = result; + + rc = multi_addmsg(multi, msg); + DEBUGASSERT(!data->conn); + } + multistate(data, CURLM_STATE_MSGSENT); + } + } while((rc == CURLM_CALL_MULTI_PERFORM) || multi_ischanged(multi, FALSE)); + + data->result = result; + return rc; +} + + +CURLMcode curl_multi_perform(struct Curl_multi *multi, int *running_handles) +{ + struct Curl_easy *data; + CURLMcode returncode = CURLM_OK; + struct Curl_tree *t; + struct curltime now = Curl_now(); + + if(!GOOD_MULTI_HANDLE(multi)) + return CURLM_BAD_HANDLE; + + if(multi->in_callback) + return CURLM_RECURSIVE_API_CALL; + + data = multi->easyp; + while(data) { + CURLMcode result; + SIGPIPE_VARIABLE(pipe_st); + + sigpipe_ignore(data, &pipe_st); + result = multi_runsingle(multi, now, data); + sigpipe_restore(&pipe_st); + + if(result) + returncode = result; + + data = data->next; /* operate on next handle */ + } + + /* + * Simply remove all expired timers from the splay since handles are dealt + * with unconditionally by this function and curl_multi_timeout() requires + * that already passed/handled expire times are removed from the splay. + * + * It is important that the 'now' value is set at the entry of this function + * and not for the current time as it may have ticked a little while since + * then and then we risk this loop to remove timers that actually have not + * been handled! + */ + do { + multi->timetree = Curl_splaygetbest(now, multi->timetree, &t); + if(t) + /* the removed may have another timeout in queue */ + (void)add_next_timeout(now, multi, t->payload); + + } while(t); + + *running_handles = multi->num_alive; + + if(CURLM_OK >= returncode) + update_timer(multi); + + return returncode; +} + +CURLMcode curl_multi_cleanup(struct Curl_multi *multi) +{ + struct Curl_easy *data; + struct Curl_easy *nextdata; + + if(GOOD_MULTI_HANDLE(multi)) { + if(multi->in_callback) + return CURLM_RECURSIVE_API_CALL; + + multi->type = 0; /* not good anymore */ + + /* Firsrt remove all remaining easy handles */ + data = multi->easyp; + while(data) { + nextdata = data->next; + if(!data->state.done && data->conn) + /* if DONE was never called for this handle */ + (void)multi_done(data, CURLE_OK, TRUE); + if(data->dns.hostcachetype == HCACHE_MULTI) { + /* clear out the usage of the shared DNS cache */ + Curl_hostcache_clean(data, data->dns.hostcache); + data->dns.hostcache = NULL; + data->dns.hostcachetype = HCACHE_NONE; + } + + /* Clear the pointer to the connection cache */ + data->state.conn_cache = NULL; + data->multi = NULL; /* clear the association */ + +#ifdef USE_LIBPSL + if(data->psl == &multi->psl) + data->psl = NULL; +#endif + + data = nextdata; + } + + /* Close all the connections in the connection cache */ + Curl_conncache_close_all_connections(&multi->conn_cache); + + Curl_hash_destroy(&multi->sockhash); + Curl_conncache_destroy(&multi->conn_cache); + Curl_llist_destroy(&multi->msglist, NULL); + Curl_llist_destroy(&multi->pending, NULL); + + Curl_hash_destroy(&multi->hostcache); + Curl_psl_destroy(&multi->psl); + free(multi); + + return CURLM_OK; + } + return CURLM_BAD_HANDLE; +} + +/* + * curl_multi_info_read() + * + * This 
function is the primary way for a multi/multi_socket application to + * figure out if a transfer has ended. We MUST make this function as fast as + * possible as it will be polled frequently and we MUST NOT scan any lists in + * here to figure out things. We must scale fine to thousands of handles and + * beyond. The current design is fully O(1). + */ + +CURLMsg *curl_multi_info_read(struct Curl_multi *multi, int *msgs_in_queue) +{ + struct Curl_message *msg; + + *msgs_in_queue = 0; /* default to none */ + + if(GOOD_MULTI_HANDLE(multi) && + !multi->in_callback && + Curl_llist_count(&multi->msglist)) { + /* there is one or more messages in the list */ + struct curl_llist_element *e; + + /* extract the head of the list to return */ + e = multi->msglist.head; + + msg = e->ptr; + + /* remove the extracted entry */ + Curl_llist_remove(&multi->msglist, e, NULL); + + *msgs_in_queue = curlx_uztosi(Curl_llist_count(&multi->msglist)); + + return &msg->extmsg; + } + return NULL; +} + +/* + * singlesocket() checks what sockets we deal with and their "action state" + * and if we have a different state in any of those sockets from last time we + * call the callback accordingly. + */ +static CURLMcode singlesocket(struct Curl_multi *multi, + struct Curl_easy *data) +{ + curl_socket_t socks[MAX_SOCKSPEREASYHANDLE]; + int i; + struct Curl_sh_entry *entry; + curl_socket_t s; + int num; + unsigned int curraction; + int actions[MAX_SOCKSPEREASYHANDLE]; + + for(i = 0; i< MAX_SOCKSPEREASYHANDLE; i++) + socks[i] = CURL_SOCKET_BAD; + + /* Fill in the 'current' struct with the state as it is now: what sockets to + supervise and for what actions */ + curraction = multi_getsock(data, socks, MAX_SOCKSPEREASYHANDLE); + + /* We have 0 .. N sockets already and we get to know about the 0 .. M + sockets we should have from now on. Detect the differences, remove no + longer supervised ones and add new ones */ + + /* walk over the sockets we got right now */ + for(i = 0; (i< MAX_SOCKSPEREASYHANDLE) && + (curraction & (GETSOCK_READSOCK(i) | GETSOCK_WRITESOCK(i))); + i++) { + unsigned int action = CURL_POLL_NONE; + unsigned int prevaction = 0; + unsigned int comboaction; + bool sincebefore = FALSE; + + s = socks[i]; + + /* get it from the hash */ + entry = sh_getentry(&multi->sockhash, s); + + if(curraction & GETSOCK_READSOCK(i)) + action |= CURL_POLL_IN; + if(curraction & GETSOCK_WRITESOCK(i)) + action |= CURL_POLL_OUT; + + actions[i] = action; + if(entry) { + /* check if new for this transfer */ + int j; + for(j = 0; j< data->numsocks; j++) { + if(s == data->sockets[j]) { + prevaction = data->actions[j]; + sincebefore = TRUE; + break; + } + } + } + else { + /* this is a socket we didn't have before, add it to the hash! */ + entry = sh_addentry(&multi->sockhash, s); + if(!entry) + /* fatal */ + return CURLM_OUT_OF_MEMORY; + } + if(sincebefore && (prevaction != action)) { + /* Socket was used already, but different action now */ + if(prevaction & CURL_POLL_IN) + entry->readers--; + if(prevaction & CURL_POLL_OUT) + entry->writers--; + if(action & CURL_POLL_IN) + entry->readers++; + if(action & CURL_POLL_OUT) + entry->writers++; + } + else if(!sincebefore) { + /* a new user */ + entry->users++; + if(action & CURL_POLL_IN) + entry->readers++; + if(action & CURL_POLL_OUT) + entry->writers++; + + /* add 'data' to the transfer hash on this socket! */ + if(!Curl_hash_add(&entry->transfers, (char *)&data, /* hash key */ + sizeof(struct Curl_easy *), data)) + return CURLM_OUT_OF_MEMORY; + } + + comboaction = (entry->writers? 
CURL_POLL_OUT : 0) | + (entry->readers ? CURL_POLL_IN : 0); + + /* socket existed before and has the same action set as before */ + if(sincebefore && (entry->action == comboaction)) + /* same, continue */ + continue; + + if(multi->socket_cb) + multi->socket_cb(data, s, comboaction, multi->socket_userp, + entry->socketp); + + entry->action = comboaction; /* store the current action state */ + } + + num = i; /* number of sockets */ + + /* when we've walked over all the sockets we should have right now, we must + make sure to detect sockets that are removed */ + for(i = 0; i< data->numsocks; i++) { + int j; + bool stillused = FALSE; + s = data->sockets[i]; + for(j = 0; j < num; j++) { + if(s == socks[j]) { + /* this is still supervised */ + stillused = TRUE; + break; + } + } + if(stillused) + continue; + + entry = sh_getentry(&multi->sockhash, s); + /* if this is NULL here, the socket has been closed and notified so + already by Curl_multi_closed() */ + if(entry) { + int oldactions = data->actions[i]; + /* this socket has been removed. Decrease user count */ + entry->users--; + if(oldactions & CURL_POLL_OUT) + entry->writers--; + if(oldactions & CURL_POLL_IN) + entry->readers--; + if(!entry->users) { + if(multi->socket_cb) + multi->socket_cb(data, s, CURL_POLL_REMOVE, + multi->socket_userp, + entry->socketp); + sh_delentry(entry, &multi->sockhash, s); + } + else { + /* still users, but remove this handle as a user of this socket */ + if(Curl_hash_delete(&entry->transfers, (char *)&data, + sizeof(struct Curl_easy *))) { + DEBUGASSERT(NULL); + } + } + } + } /* for loop over numsocks */ + + memcpy(data->sockets, socks, num*sizeof(curl_socket_t)); + memcpy(data->actions, actions, num*sizeof(int)); + data->numsocks = num; + return CURLM_OK; +} + +void Curl_updatesocket(struct Curl_easy *data) +{ + singlesocket(data->multi, data); +} + + +/* + * Curl_multi_closed() + * + * Used by the connect code to tell the multi_socket code that one of the + * sockets we were using is about to be closed. This function will then + * remove it from the sockethash for this handle to make the multi_socket API + * behave properly, especially for the case when libcurl will create another + * socket again and it gets the same file descriptor number. + */ + +void Curl_multi_closed(struct Curl_easy *data, curl_socket_t s) +{ + if(data) { + /* if there's still an easy handle associated with this connection */ + struct Curl_multi *multi = data->multi; + if(multi) { + /* this is set if this connection is part of a handle that is added to + a multi handle, and only then this is necessary */ + struct Curl_sh_entry *entry = sh_getentry(&multi->sockhash, s); + + if(entry) { + if(multi->socket_cb) + multi->socket_cb(data, s, CURL_POLL_REMOVE, + multi->socket_userp, + entry->socketp); + + /* now remove it from the socket hash */ + sh_delentry(entry, &multi->sockhash, s); + } + } + } +} + +/* + * add_next_timeout() + * + * Each Curl_easy has a list of timeouts. The add_next_timeout() is called + * when it has just been removed from the splay tree because the timeout has + * expired. This function is then to advance in the list to pick the next + * timeout to use (skip the already expired ones) and add this node back to + * the splay tree again. + * + * The splay tree only has each sessionhandle as a single node and the nearest + * timeout is used to sort it on. 
+ */ +static CURLMcode add_next_timeout(struct curltime now, + struct Curl_multi *multi, + struct Curl_easy *d) +{ + struct curltime *tv = &d->state.expiretime; + struct curl_llist *list = &d->state.timeoutlist; + struct curl_llist_element *e; + struct time_node *node = NULL; + + /* move over the timeout list for this specific handle and remove all + timeouts that are now passed tense and store the next pending + timeout in *tv */ + for(e = list->head; e;) { + struct curl_llist_element *n = e->next; + timediff_t diff; + node = (struct time_node *)e->ptr; + diff = Curl_timediff(node->time, now); + if(diff <= 0) + /* remove outdated entry */ + Curl_llist_remove(list, e, NULL); + else + /* the list is sorted so get out on the first mismatch */ + break; + e = n; + } + e = list->head; + if(!e) { + /* clear the expire times within the handles that we remove from the + splay tree */ + tv->tv_sec = 0; + tv->tv_usec = 0; + } + else { + /* copy the first entry to 'tv' */ + memcpy(tv, &node->time, sizeof(*tv)); + + /* Insert this node again into the splay. Keep the timer in the list in + case we need to recompute future timers. */ + multi->timetree = Curl_splayinsert(*tv, multi->timetree, + &d->state.timenode); + } + return CURLM_OK; +} + +static CURLMcode multi_socket(struct Curl_multi *multi, + bool checkall, + curl_socket_t s, + int ev_bitmask, + int *running_handles) +{ + CURLMcode result = CURLM_OK; + struct Curl_easy *data = NULL; + struct Curl_tree *t; + struct curltime now = Curl_now(); + + if(checkall) { + /* *perform() deals with running_handles on its own */ + result = curl_multi_perform(multi, running_handles); + + /* walk through each easy handle and do the socket state change magic + and callbacks */ + if(result != CURLM_BAD_HANDLE) { + data = multi->easyp; + while(data && !result) { + result = singlesocket(multi, data); + data = data->next; + } + } + + /* or should we fall-through and do the timer-based stuff? */ + return result; + } + if(s != CURL_SOCKET_TIMEOUT) { + struct Curl_sh_entry *entry = sh_getentry(&multi->sockhash, s); + + if(!entry) + /* Unmatched socket, we can't act on it but we ignore this fact. In + real-world tests it has been proved that libevent can in fact give + the application actions even though the socket was just previously + asked to get removed, so thus we better survive stray socket actions + and just move on. */ + ; + else { + struct curl_hash_iterator iter; + struct curl_hash_element *he; + + /* the socket can be shared by many transfers, iterate */ + Curl_hash_start_iterate(&entry->transfers, &iter); + for(he = Curl_hash_next_element(&iter); he; + he = Curl_hash_next_element(&iter)) { + data = (struct Curl_easy *)he->ptr; + DEBUGASSERT(data); + DEBUGASSERT(data->magic == CURLEASY_MAGIC_NUMBER); + + if(data->conn && !(data->conn->handler->flags & PROTOPT_DIRLOCK)) + /* set socket event bitmask if they're not locked */ + data->conn->cselect_bits = ev_bitmask; + + Curl_expire(data, 0, EXPIRE_RUN_NOW); + } + + /* Now we fall-through and do the timer-based stuff, since we don't want + to force the user to have to deal with timeouts as long as at least + one connection in fact has traffic. */ + + data = NULL; /* set data to NULL again to avoid calling + multi_runsingle() in case there's no need to */ + now = Curl_now(); /* get a newer time since the multi_runsingle() loop + may have taken some time */ + } + } + else { + /* Asked to run due to time-out. 
Clear the 'lastcall' variable to force + update_timer() to trigger a callback to the app again even if the same + timeout is still the one to run after this call. That handles the case + when the application asks libcurl to run the timeout prematurely. */ + memset(&multi->timer_lastcall, 0, sizeof(multi->timer_lastcall)); + } + + /* + * The loop following here will go on as long as there are expire-times left + * to process in the splay and 'data' will be re-assigned for every expired + * handle we deal with. + */ + do { + /* the first loop lap 'data' can be NULL */ + if(data) { + SIGPIPE_VARIABLE(pipe_st); + + sigpipe_ignore(data, &pipe_st); + result = multi_runsingle(multi, now, data); + sigpipe_restore(&pipe_st); + + if(CURLM_OK >= result) { + /* get the socket(s) and check if the state has been changed since + last */ + result = singlesocket(multi, data); + if(result) + return result; + } + } + + /* Check if there's one (more) expired timer to deal with! This function + extracts a matching node if there is one */ + + multi->timetree = Curl_splaygetbest(now, multi->timetree, &t); + if(t) { + data = t->payload; /* assign this for next loop */ + (void)add_next_timeout(now, multi, t->payload); + } + + } while(t); + + *running_handles = multi->num_alive; + return result; +} + +#undef curl_multi_setopt +CURLMcode curl_multi_setopt(struct Curl_multi *multi, + CURLMoption option, ...) +{ + CURLMcode res = CURLM_OK; + va_list param; + + if(!GOOD_MULTI_HANDLE(multi)) + return CURLM_BAD_HANDLE; + + if(multi->in_callback) + return CURLM_RECURSIVE_API_CALL; + + va_start(param, option); + + switch(option) { + case CURLMOPT_SOCKETFUNCTION: + multi->socket_cb = va_arg(param, curl_socket_callback); + break; + case CURLMOPT_SOCKETDATA: + multi->socket_userp = va_arg(param, void *); + break; + case CURLMOPT_PUSHFUNCTION: + multi->push_cb = va_arg(param, curl_push_callback); + break; + case CURLMOPT_PUSHDATA: + multi->push_userp = va_arg(param, void *); + break; + case CURLMOPT_PIPELINING: + multi->multiplexing = va_arg(param, long) & CURLPIPE_MULTIPLEX; + break; + case CURLMOPT_TIMERFUNCTION: + multi->timer_cb = va_arg(param, curl_multi_timer_callback); + break; + case CURLMOPT_TIMERDATA: + multi->timer_userp = va_arg(param, void *); + break; + case CURLMOPT_MAXCONNECTS: + multi->maxconnects = va_arg(param, long); + break; + case CURLMOPT_MAX_HOST_CONNECTIONS: + multi->max_host_connections = va_arg(param, long); + break; + case CURLMOPT_MAX_TOTAL_CONNECTIONS: + multi->max_total_connections = va_arg(param, long); + break; + /* options formerly used for pipelining */ + case CURLMOPT_MAX_PIPELINE_LENGTH: + break; + case CURLMOPT_CONTENT_LENGTH_PENALTY_SIZE: + break; + case CURLMOPT_CHUNK_LENGTH_PENALTY_SIZE: + break; + case CURLMOPT_PIPELINING_SITE_BL: + break; + case CURLMOPT_PIPELINING_SERVER_BL: + break; + default: + res = CURLM_UNKNOWN_OPTION; + break; + } + va_end(param); + return res; +} + +/* we define curl_multi_socket() in the public multi.h header */ +#undef curl_multi_socket + +CURLMcode curl_multi_socket(struct Curl_multi *multi, curl_socket_t s, + int *running_handles) +{ + CURLMcode result; + if(multi->in_callback) + return CURLM_RECURSIVE_API_CALL; + result = multi_socket(multi, FALSE, s, 0, running_handles); + if(CURLM_OK >= result) + update_timer(multi); + return result; +} + +CURLMcode curl_multi_socket_action(struct Curl_multi *multi, curl_socket_t s, + int ev_bitmask, int *running_handles) +{ + CURLMcode result; + if(multi->in_callback) + return CURLM_RECURSIVE_API_CALL; + result = 
multi_socket(multi, FALSE, s, ev_bitmask, running_handles); + if(CURLM_OK >= result) + update_timer(multi); + return result; +} + +CURLMcode curl_multi_socket_all(struct Curl_multi *multi, int *running_handles) + +{ + CURLMcode result; + if(multi->in_callback) + return CURLM_RECURSIVE_API_CALL; + result = multi_socket(multi, TRUE, CURL_SOCKET_BAD, 0, running_handles); + if(CURLM_OK >= result) + update_timer(multi); + return result; +} + +static CURLMcode multi_timeout(struct Curl_multi *multi, + long *timeout_ms) +{ + static struct curltime tv_zero = {0, 0}; + + if(multi->timetree) { + /* we have a tree of expire times */ + struct curltime now = Curl_now(); + + /* splay the lowest to the bottom */ + multi->timetree = Curl_splay(tv_zero, multi->timetree); + + if(Curl_splaycomparekeys(multi->timetree->key, now) > 0) { + /* some time left before expiration */ + timediff_t diff = Curl_timediff(multi->timetree->key, now); + if(diff <= 0) + /* + * Since we only provide millisecond resolution on the returned value + * and the diff might be less than one millisecond here, we don't + * return zero as that may cause short bursts of busyloops on fast + * processors while the diff is still present but less than one + * millisecond! instead we return 1 until the time is ripe. + */ + *timeout_ms = 1; + else + /* this should be safe even on 64 bit archs, as we don't use that + overly long timeouts */ + *timeout_ms = (long)diff; + } + else + /* 0 means immediately */ + *timeout_ms = 0; + } + else + *timeout_ms = -1; + + return CURLM_OK; +} + +CURLMcode curl_multi_timeout(struct Curl_multi *multi, + long *timeout_ms) +{ + /* First, make some basic checks that the CURLM handle is a good handle */ + if(!GOOD_MULTI_HANDLE(multi)) + return CURLM_BAD_HANDLE; + + if(multi->in_callback) + return CURLM_RECURSIVE_API_CALL; + + return multi_timeout(multi, timeout_ms); +} + +/* + * Tell the application it should update its timers, if it subscribes to the + * update timer callback. + */ +static int update_timer(struct Curl_multi *multi) +{ + long timeout_ms; + + if(!multi->timer_cb) + return 0; + if(multi_timeout(multi, &timeout_ms)) { + return -1; + } + if(timeout_ms < 0) { + static const struct curltime none = {0, 0}; + if(Curl_splaycomparekeys(none, multi->timer_lastcall)) { + multi->timer_lastcall = none; + /* there's no timeout now but there was one previously, tell the app to + disable it */ + return multi->timer_cb(multi, -1, multi->timer_userp); + } + return 0; + } + + /* When multi_timeout() is done, multi->timetree points to the node with the + * timeout we got the (relative) time-out time for. We can thus easily check + * if this is the same (fixed) time as we got in a previous call and then + * avoid calling the callback again. */ + if(Curl_splaycomparekeys(multi->timetree->key, multi->timer_lastcall) == 0) + return 0; + + multi->timer_lastcall = multi->timetree->key; + + return multi->timer_cb(multi, timeout_ms, multi->timer_userp); +} + +/* + * multi_deltimeout() + * + * Remove a given timestamp from the list of timeouts. + */ +static void +multi_deltimeout(struct Curl_easy *data, expire_id eid) +{ + struct curl_llist_element *e; + struct curl_llist *timeoutlist = &data->state.timeoutlist; + /* find and remove the specific node from the list */ + for(e = timeoutlist->head; e; e = e->next) { + struct time_node *n = (struct time_node *)e->ptr; + if(n->eid == eid) { + Curl_llist_remove(timeoutlist, e, NULL); + return; + } + } +} + +/* + * multi_addtimeout() + * + * Add a timestamp to the list of timeouts. 
Keep the list sorted so that head + * of list is always the timeout nearest in time. + * + */ +static CURLMcode +multi_addtimeout(struct Curl_easy *data, + struct curltime *stamp, + expire_id eid) +{ + struct curl_llist_element *e; + struct time_node *node; + struct curl_llist_element *prev = NULL; + size_t n; + struct curl_llist *timeoutlist = &data->state.timeoutlist; + + node = &data->state.expires[eid]; + + /* copy the timestamp and id */ + memcpy(&node->time, stamp, sizeof(*stamp)); + node->eid = eid; /* also marks it as in use */ + + n = Curl_llist_count(timeoutlist); + if(n) { + /* find the correct spot in the list */ + for(e = timeoutlist->head; e; e = e->next) { + struct time_node *check = (struct time_node *)e->ptr; + timediff_t diff = Curl_timediff(check->time, node->time); + if(diff > 0) + break; + prev = e; + } + + } + /* else + this is the first timeout on the list */ + + Curl_llist_insert_next(timeoutlist, prev, node, &node->list); + return CURLM_OK; +} + +/* + * Curl_expire() + * + * given a number of milliseconds from now to use to set the 'act before + * this'-time for the transfer, to be extracted by curl_multi_timeout() + * + * The timeout will be added to a queue of timeouts if it defines a moment in + * time that is later than the current head of queue. + * + * Expire replaces a former timeout using the same id if already set. + */ +void Curl_expire(struct Curl_easy *data, time_t milli, expire_id id) +{ + struct Curl_multi *multi = data->multi; + struct curltime *nowp = &data->state.expiretime; + struct curltime set; + + /* this is only interesting while there is still an associated multi struct + remaining! */ + if(!multi) + return; + + DEBUGASSERT(id < EXPIRE_LAST); + + set = Curl_now(); + set.tv_sec += milli/1000; + set.tv_usec += (unsigned int)(milli%1000)*1000; + + if(set.tv_usec >= 1000000) { + set.tv_sec++; + set.tv_usec -= 1000000; + } + + /* Remove any timer with the same id just in case. */ + multi_deltimeout(data, id); + + /* Add it to the timer list. It must stay in the list until it has expired + in case we need to recompute the minimum timer later. */ + multi_addtimeout(data, &set, id); + + if(nowp->tv_sec || nowp->tv_usec) { + /* This means that the struct is added as a node in the splay tree. + Compare if the new time is earlier, and only remove-old/add-new if it + is. */ + timediff_t diff = Curl_timediff(set, *nowp); + int rc; + + if(diff > 0) { + /* The current splay tree entry is sooner than this new expiry time. + We don't need to update our splay tree entry. */ + return; + } + + /* Since this is an updated time, we must remove the previous entry from + the splay tree first and then re-add the new value */ + rc = Curl_splayremovebyaddr(multi->timetree, + &data->state.timenode, + &multi->timetree); + if(rc) + infof(data, "Internal error removing splay node = %d\n", rc); + } + + /* Indicate that we are in the splay tree and insert the new timer expiry + value since it is our local minimum. */ + *nowp = set; + data->state.timenode.payload = data; + multi->timetree = Curl_splayinsert(*nowp, multi->timetree, + &data->state.timenode); +} + +/* + * Curl_expire_done() + * + * Removes the expire timer. Marks it as done. + * + */ +void Curl_expire_done(struct Curl_easy *data, expire_id id) +{ + /* remove the timer, if there */ + multi_deltimeout(data, id); +} + +/* + * Curl_expire_clear() + * + * Clear ALL timeout values for this handle. 
+ */ +void Curl_expire_clear(struct Curl_easy *data) +{ + struct Curl_multi *multi = data->multi; + struct curltime *nowp = &data->state.expiretime; + + /* this is only interesting while there is still an associated multi struct + remaining! */ + if(!multi) + return; + + if(nowp->tv_sec || nowp->tv_usec) { + /* Since this is an cleared time, we must remove the previous entry from + the splay tree */ + struct curl_llist *list = &data->state.timeoutlist; + int rc; + + rc = Curl_splayremovebyaddr(multi->timetree, + &data->state.timenode, + &multi->timetree); + if(rc) + infof(data, "Internal error clearing splay node = %d\n", rc); + + /* flush the timeout list too */ + while(list->size > 0) { + Curl_llist_remove(list, list->tail, NULL); + } + +#ifdef DEBUGBUILD + infof(data, "Expire cleared (transfer %p)\n", data); +#endif + nowp->tv_sec = 0; + nowp->tv_usec = 0; + } +} + + + + +CURLMcode curl_multi_assign(struct Curl_multi *multi, curl_socket_t s, + void *hashp) +{ + struct Curl_sh_entry *there = NULL; + + if(multi->in_callback) + return CURLM_RECURSIVE_API_CALL; + + there = sh_getentry(&multi->sockhash, s); + + if(!there) + return CURLM_BAD_SOCKET; + + there->socketp = hashp; + + return CURLM_OK; +} + +size_t Curl_multi_max_host_connections(struct Curl_multi *multi) +{ + return multi ? multi->max_host_connections : 0; +} + +size_t Curl_multi_max_total_connections(struct Curl_multi *multi) +{ + return multi ? multi->max_total_connections : 0; +} + +/* + * When information about a connection has appeared, call this! + */ + +void Curl_multiuse_state(struct connectdata *conn, + int bundlestate) /* use BUNDLE_* defines */ +{ + DEBUGASSERT(conn); + DEBUGASSERT(conn->bundle); + DEBUGASSERT(conn->data); + DEBUGASSERT(conn->data->multi); + + conn->bundle->multiuse = bundlestate; + process_pending_handles(conn->data->multi); +} + +static void process_pending_handles(struct Curl_multi *multi) +{ + struct curl_llist_element *e = multi->pending.head; + if(e) { + struct Curl_easy *data = e->ptr; + + DEBUGASSERT(data->mstate == CURLM_STATE_CONNECT_PEND); + + multistate(data, CURLM_STATE_CONNECT); + + /* Remove this node from the list */ + Curl_llist_remove(&multi->pending, e, NULL); + + /* Make sure that the handle will be processed soonish. */ + Curl_expire(data, 0, EXPIRE_RUN_NOW); + + /* mark this as having been in the pending queue */ + data->state.previouslypending = TRUE; + } +} + +void Curl_set_in_callback(struct Curl_easy *data, bool value) +{ + /* might get called when there is no data pointer! 
*/ + if(data) { + if(data->multi_easy) + data->multi_easy->in_callback = value; + else if(data->multi) + data->multi->in_callback = value; + } +} + +bool Curl_is_in_callback(struct Curl_easy *easy) +{ + return ((easy->multi && easy->multi->in_callback) || + (easy->multi_easy && easy->multi_easy->in_callback)); +} + +#ifdef DEBUGBUILD +void Curl_multi_dump(struct Curl_multi *multi) +{ + struct Curl_easy *data; + int i; + fprintf(stderr, "* Multi status: %d handles, %d alive\n", + multi->num_easy, multi->num_alive); + for(data = multi->easyp; data; data = data->next) { + if(data->mstate < CURLM_STATE_COMPLETED) { + /* only display handles that are not completed */ + fprintf(stderr, "handle %p, state %s, %d sockets\n", + (void *)data, + statename[data->mstate], data->numsocks); + for(i = 0; i < data->numsocks; i++) { + curl_socket_t s = data->sockets[i]; + struct Curl_sh_entry *entry = sh_getentry(&multi->sockhash, s); + + fprintf(stderr, "%d ", (int)s); + if(!entry) { + fprintf(stderr, "INTERNAL CONFUSION\n"); + continue; + } + fprintf(stderr, "[%s %s] ", + (entry->action&CURL_POLL_IN)?"RECVING":"", + (entry->action&CURL_POLL_OUT)?"SENDING":""); + } + if(data->numsocks) + fprintf(stderr, "\n"); + } + } +} +#endif diff -ruN curl-7.65.1/lib/url.c curl-7.65.1-fix-dns-segfaults/lib/url.c --- curl-7.65.1/lib/url.c 2019-06-02 16:55:05.000000000 +0200 +++ curl-7.65.1-fix-dns-segfaults/lib/url.c 2019-06-28 17:01:20.685876110 +0200 @@ -1673,13 +1673,6 @@ #endif } -static void llist_dtor(void *user, void *element) -{ - (void)user; - (void)element; - /* Do nothing */ -} - /* * Allocate and initialize a new connectdata object. */ @@ -1791,7 +1784,7 @@ #endif /* Initialize the easy handle list */ - Curl_llist_init(&conn->easyq, (curl_llist_dtor) llist_dtor); + Curl_llist_init(&conn->easyq, NULL); #ifdef HAVE_GSSAPI conn->data_prot = PROT_CLEAR; diff -ruN curl-7.65.1/lib/urldata.h curl-7.65.1-fix-dns-segfaults/lib/urldata.h --- curl-7.65.1/lib/urldata.h 2019-06-04 22:28:08.000000000 +0200 +++ curl-7.65.1-fix-dns-segfaults/lib/urldata.h 2019-06-28 17:01:20.687876111 +0200 @@ -1778,8 +1778,6 @@ struct connectdata *conn; struct curl_llist_element connect_queue; - struct curl_llist_element sh_queue; /* list per Curl_sh_entry */ - struct Curl_sh_entry *sh_entry; /* the socket hash this was added to */ struct curl_llist_element conn_queue; /* list per connectdata */ CURLMstate mstate; /* the handle's state */
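The patch above leans on a handful of small techniques that are easier to see in isolation. The CURLM_STATE_TOOFAST and CURLM_STATE_PERFORM blocks, for instance, ask Curl_pgrsLimitWaitTime() how long a rate-capped transfer has to stay paused and then arm an EXPIRE_TOOFAST timer with the larger of the send-side and receive-side waits. Below is a standalone sketch of that kind of calculation, assuming the wait is simply the time the moved bytes are allowed to take at the cap minus the time already spent; it is an illustration with invented names, not curl's own implementation.

#include <stdio.h>

typedef long long ms_t;

/* bytes moved since the rate window started, the cap in bytes/second and the
   milliseconds the window has been open; returns how long to pause before
   transferring again, 0 when the average rate is already within the cap */
static ms_t limit_wait_ms(long long bytes, long long max_bps, ms_t elapsed_ms)
{
  ms_t minimum_ms;

  if(max_bps <= 0 || bytes <= 0)
    return 0;                       /* no cap set, or nothing moved yet */

  /* time this many bytes are allowed to take at the capped rate */
  minimum_ms = (ms_t)((double)bytes * 1000.0 / (double)max_bps);

  return (minimum_ms > elapsed_ms) ? (minimum_ms - elapsed_ms) : 0;
}

int main(void)
{
  ms_t send_ms, recv_ms;

  /* 3 MB moved while only 1 s has passed, with a 1 MB/s cap:
     the transfer has to pause for roughly another 2 s */
  printf("%lld ms\n", limit_wait_ms(3000000, 1000000, 1000));

  /* one wait per direction; arm the timer with the larger of the two,
     the way the state machine does with EXPIRE_TOOFAST */
  send_ms = limit_wait_ms(500000, 1000000, 600);   /* within the cap: 0 */
  recv_ms = limit_wait_ms(900000, 1000000, 600);   /* over the cap: 300 */
  printf("arm timer for %lld ms\n", send_ms > recv_ms ? send_ms : recv_ms);
  return 0;
}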
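curl_multi_info_read() is documented in the code above as fully O(1): the CURLMSG_DONE message is not allocated when a transfer finishes, it already lives inside the easy handle (note the msg = &data->msg assignment in multi_runsingle()), and completion merely links that embedded node into the multi handle's msglist. A minimal standalone model of the embedded-node queue pattern, with hypothetical names:

#include <stdio.h>
#include <stddef.h>

struct qnode {
  struct qnode *next;
};

struct queue {
  struct qnode *head, *tail;
  size_t count;
};

/* the completion message is a member of the transfer itself, so finishing a
   transfer never allocates: we only link the embedded node into the queue */
struct transfer {
  int id;
  int result;
  struct qnode msgnode;             /* embedded queue node */
};

static void enqueue(struct queue *q, struct qnode *n)
{
  n->next = NULL;
  if(q->tail)
    q->tail->next = n;
  else
    q->head = n;
  q->tail = n;
  q->count++;
}

static struct transfer *dequeue(struct queue *q)
{
  struct qnode *n = q->head;
  if(!n)
    return NULL;
  q->head = n->next;
  if(!q->head)
    q->tail = NULL;
  q->count--;
  /* recover the enclosing transfer from its embedded node */
  return (struct transfer *)((char *)n - offsetof(struct transfer, msgnode));
}

int main(void)
{
  struct queue q = {NULL, NULL, 0};
  struct transfer a = {1, 0, {NULL}}, b = {2, 7, {NULL}};

  enqueue(&q, &a.msgnode);
  enqueue(&q, &b.msgnode);

  for(;;) {
    struct transfer *t = dequeue(&q);
    if(!t)
      break;
    printf("transfer %d done, result %d, %zu still queued\n",
           t->id, t->result, q.count);
  }
  return 0;
}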
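singlesocket() registers each transfer in the per-socket hash with the easy-handle pointer itself as the key (Curl_hash_add(&entry->transfers, (char *)&data, sizeof(struct Curl_easy *), data)), removes it the same way when the socket loses a user, and multi_socket() iterates that hash to wake every transfer sharing the socket. A self-contained sketch of such a pointer-keyed set, with a small fixed bucket count and invented names:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 13                 /* small fixed table, plenty per socket */

struct xfer { int id; };            /* stand-in for a transfer handle */

struct node {
  struct xfer *key;                 /* the pointer value is the key */
  struct node *next;
};

struct ptrset {
  struct node *bucket[NBUCKETS];
};

static size_t slot(const struct xfer *p)
{
  /* hash the address itself; any stable mixing of the pointer bits works */
  return (size_t)((uintptr_t)p % NBUCKETS);
}

static int ptrset_add(struct ptrset *s, struct xfer *p)
{
  struct node *n = malloc(sizeof(*n));
  if(!n)
    return 0;
  n->key = p;
  n->next = s->bucket[slot(p)];
  s->bucket[slot(p)] = n;
  return 1;
}

static int ptrset_remove(struct ptrset *s, struct xfer *p)
{
  struct node **pp;
  for(pp = &s->bucket[slot(p)]; *pp; pp = &(*pp)->next) {
    if((*pp)->key == p) {
      struct node *dead = *pp;
      *pp = dead->next;
      free(dead);
      return 1;                     /* was present, now removed */
    }
  }
  return 0;                         /* unknown pointer */
}

int main(void)
{
  struct ptrset set = { {0} };
  struct xfer a = {1}, b = {2};

  ptrset_add(&set, &a);
  ptrset_add(&set, &b);
  printf("remove a: %d\n", ptrset_remove(&set, &a));   /* 1 */
  printf("remove a: %d\n", ptrset_remove(&set, &a));   /* 0, already gone */
  ptrset_remove(&set, &b);
  return 0;
}

Keying on the address keeps adding and removing a user cheap however many transfers share one socket, which matters once multiplexed connections put many transfers on the same file descriptor.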
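The same function keeps per-socket counters (users, readers, writers), folds them into a combined CURL_POLL_IN/CURL_POLL_OUT action, and skips the socket callback when a known transfer reports the same combined action as before (the sincebefore && entry->action == comboaction test). A small standalone model of that bookkeeping, names invented here:

#include <stdio.h>

#define POLL_IN  1
#define POLL_OUT 2

struct sockentry {
  unsigned int users;     /* transfers using this socket */
  unsigned int readers;   /* how many of them wait for readability */
  unsigned int writers;   /* how many of them wait for writability */
  unsigned int action;    /* last combined action reported to the app */
};

/* record one transfer's change of interest in the socket (pass prev = 0 and
   newuser = 1 the first time a transfer shows up) and return whether the
   combined action changed, i.e. whether the application must be told */
static int update_interest(struct sockentry *e,
                           unsigned int prev, unsigned int now, int newuser)
{
  unsigned int combo;

  if(newuser)
    e->users++;
  if(prev & POLL_IN)
    e->readers--;
  if(prev & POLL_OUT)
    e->writers--;
  if(now & POLL_IN)
    e->readers++;
  if(now & POLL_OUT)
    e->writers++;

  combo = (e->writers ? POLL_OUT : 0) | (e->readers ? POLL_IN : 0);
  if(!newuser && combo == e->action)
    return 0;               /* same as before, no callback needed */
  e->action = combo;
  return 1;
}

int main(void)
{
  struct sockentry e = {0, 0, 0, 0};

  printf("%d\n", update_interest(&e, 0, POLL_IN, 1));        /* 1: new user */
  printf("%d\n", update_interest(&e, POLL_IN, POLL_IN, 0));  /* 0: unchanged */
  printf("%d\n", update_interest(&e, POLL_IN, POLL_OUT, 0)); /* 1: changed */
  return 0;
}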
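multi_timeout() deliberately never reports 0 for a timer that has not actually expired: when the remaining time would truncate to less than one millisecond it reports 1, so a fast event loop sleeps briefly instead of busy-looping until the expiry is really due. The same rounding rule in isolation, working in microseconds purely for illustration:

#include <stdio.h>

/* convert a sub-millisecond-accurate "time until next expiry" into the
   millisecond value handed to the application: a positive remainder that
   would truncate to 0 is reported as 1, so the caller sleeps instead of
   spinning; 0 therefore always means "act right now" */
static long app_timeout_ms(long long until_expiry_us)
{
  if(until_expiry_us <= 0)
    return 0;                      /* already due */
  if(until_expiry_us < 1000)
    return 1;                      /* round up, never report 0 early */
  return (long)(until_expiry_us / 1000);
}

int main(void)
{
  printf("%ld %ld %ld\n",
         app_timeout_ms(-50), app_timeout_ms(400), app_timeout_ms(2500));
  /* prints: 0 1 2 */
  return 0;
}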
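multi_addtimeout() keeps each handle's timeout list sorted so the head is always the nearest expiry, and because every expire id has its own preallocated node (data->state.expires[eid]) that multi_deltimeout() unlinks first, re-arming an id replaces the old entry instead of accumulating duplicates. A standalone sketch of that scheme, with hypothetical names:

#include <stdio.h>
#include <string.h>

#define ID_MAX 4                    /* a handful of timer ids per handle */

struct tnode {
  long long when_ms;                /* absolute expiry time */
  int id;
  struct tnode *next;
};

struct timers {
  struct tnode *head;               /* sorted: head is the nearest expiry */
  struct tnode slot[ID_MAX];        /* one preallocated node per id */
};

/* unlink the node carrying this id, if it is currently queued */
static void timer_del(struct timers *t, int id)
{
  struct tnode **pp;
  for(pp = &t->head; *pp; pp = &(*pp)->next) {
    if((*pp)->id == id) {
      *pp = (*pp)->next;
      return;
    }
  }
}

/* (re)arm the timer with this id; keeping the list sorted means the caller
   can always read the nearest expiry from the head without scanning */
static void timer_set(struct timers *t, int id, long long when_ms)
{
  struct tnode *n = &t->slot[id];
  struct tnode **pp;

  timer_del(t, id);                 /* an id may be queued only once */
  n->when_ms = when_ms;
  n->id = id;

  for(pp = &t->head; *pp && (*pp)->when_ms <= when_ms; pp = &(*pp)->next)
    ;                               /* skip everything that expires earlier */
  n->next = *pp;
  *pp = n;
}

int main(void)
{
  struct timers t;
  memset(&t, 0, sizeof(t));

  timer_set(&t, 0, 500);
  timer_set(&t, 1, 200);
  timer_set(&t, 2, 900);
  timer_set(&t, 1, 700);            /* re-arming id 1 replaces its old entry */

  printf("next expiry: %lld ms (id %d)\n", t.head->when_ms, t.head->id);
  return 0;                         /* prints 500 ms, id 0 */
}

With the list kept this way, add_next_timeout() above only has to pop already-expired heads and re-insert the handle into the shared splay tree keyed on whatever remains at the head.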
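Finally, Curl_expire() touches the shared splay tree only when the newly armed timer is earlier than the time already registered for the handle; the diff > 0 branch returns without removing and re-inserting the node. The decision on its own, as a tiny illustrative helper:

#include <stdio.h>

/* decide whether a newly armed per-handle timer requires re-inserting the
   handle into the shared tree of expiry times: only an earlier expiry can
   change the handle's position, a later one is already covered by the
   sooner time that is currently registered */
static int need_tree_update(long long registered_ms, long long new_ms)
{
  if(!registered_ms)
    return 1;                 /* not in the tree yet */
  return new_ms < registered_ms;
}

int main(void)
{
  printf("%d %d %d\n",
         need_tree_update(0, 800),     /* 1: first timer, insert */
         need_tree_update(500, 800),   /* 0: later than what is armed */
         need_tree_update(500, 200));  /* 1: sooner, must move the node */
  return 0;
}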