diff --git a/device-src/s3.c b/device-src/s3.c
index 3fbb3b4..584a47d 100644
--- a/device-src/s3.c
+++ b/device-src/s3.c
@@ -1,21 +1,21 @@
 /*
- * Copyright (c) 2005-2008 Zmanda Inc. All Rights Reserved.
- *
- * This library is free software; you can redistribute it and/or modify it
- * under the terms of the GNU Lesser General Public License version 2.1 as
- * published by the Free Software Foundation.
- *
- * This library is distributed in the hope that it will be useful, but
+ * Copyright (c) 2008, 2009, 2010 Zmanda, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
- * License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * Contact information: Zmanda Inc., 465 S Mathlida Ave, Suite 300
- * Sunnyvale, CA 94086, USA, or: http://www.zmanda.com
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contact information: Zmanda Inc., 465 S. Mathilda Ave., Suite 300
+ * Sunnyvale, CA 94085, USA, or: http://www.zmanda.com
  */

 /* TODO
@@ -92,6 +92,10 @@
 %s\n\
 "

+#define AMAZON_STORAGE_CLASS_HEADER "x-amz-storage-class"
+
+#define AMAZON_WILDCARD_LOCATION "*"
+
 /* parameters for exponential backoff in the face of retriable errors */

 /* start at 0.01s */
@@ -106,18 +110,19 @@

 /* Results which should always be retried */
 #define RESULT_HANDLING_ALWAYS_RETRY \
-    { 400, S3_ERROR_RequestTimeout, 0, S3_RESULT_RETRY }, \
-    { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_RETRY }, \
-    { 409, S3_ERROR_OperationAborted, 0, S3_RESULT_RETRY }, \
-    { 412, S3_ERROR_PreconditionFailed, 0, S3_RESULT_RETRY }, \
-    { 500, S3_ERROR_InternalError, 0, S3_RESULT_RETRY }, \
-    { 501, S3_ERROR_NotImplemented, 0, S3_RESULT_RETRY }, \
-    { 0, 0, CURLE_COULDNT_CONNECT, S3_RESULT_RETRY }, \
-    { 0, 0, CURLE_PARTIAL_FILE, S3_RESULT_RETRY }, \
-    { 0, 0, CURLE_OPERATION_TIMEOUTED, S3_RESULT_RETRY }, \
-    { 0, 0, CURLE_SEND_ERROR, S3_RESULT_RETRY }, \
-    { 0, 0, CURLE_RECV_ERROR, S3_RESULT_RETRY }, \
-    { 0, 0, CURLE_GOT_NOTHING, S3_RESULT_RETRY }
+    { 400, S3_ERROR_RequestTimeout, 0, S3_RESULT_RETRY }, \
+    { 403, S3_ERROR_RequestTimeTooSkewed, 0, S3_RESULT_RETRY }, \
+    { 409, S3_ERROR_OperationAborted, 0, S3_RESULT_RETRY }, \
+    { 412, S3_ERROR_PreconditionFailed, 0, S3_RESULT_RETRY }, \
+    { 500, S3_ERROR_InternalError, 0, S3_RESULT_RETRY }, \
+    { 501, S3_ERROR_NotImplemented, 0, S3_RESULT_RETRY }, \
+    { 0, 0, CURLE_COULDNT_CONNECT, S3_RESULT_RETRY }, \
+    { 0, 0, CURLE_COULDNT_RESOLVE_HOST, S3_RESULT_RETRY }, \
+    { 0, 0, CURLE_PARTIAL_FILE, S3_RESULT_RETRY }, \
+    { 0, 0, CURLE_OPERATION_TIMEOUTED, S3_RESULT_RETRY }, \
+    { 0, 0, CURLE_SEND_ERROR, S3_RESULT_RETRY }, \
+    { 0, 0, CURLE_RECV_ERROR, S3_RESULT_RETRY }, \
+    { 0, 0, CURLE_GOT_NOTHING, S3_RESULT_RETRY }

 /*
  * Data structures and associated functions
@@ -130,13 +135,20 @@ struct S3Handle {
     char *secret_key;
     char *user_token;

+    /* attributes for new objects */
     char *bucket_location;
+    char *storage_class;
+
+    char *ca_info;

     CURL *curl;

     gboolean verbose;
     gboolean use_ssl;
+    guint64 max_send_speed;
+    guint64 max_recv_speed;
+
     /* information from the last request */
     char *last_message;
     guint last_response_code;
@@ -145,6 +157,9 @@ struct S3Handle {
     guint last_num_retries;
     void *last_response_body;
     guint last_response_body_size;
+
+    /* clock offset between the local host and S3, in seconds */
+    time_t time_offset_with_s3;
 };

 typedef struct {
@@ -156,10 +171,12 @@ typedef struct {
     gboolean headers_done;
     gboolean int_write_done;
     char *etag;
+    /* back-pointer to the handle, so the header callback can record the S3 clock offset */
+    struct S3Handle *hdl;
 } S3InternalData;

 /* Callback function to examine headers one-at-a-time
- * 
+ *
  * @note this is the same as CURLOPT_HEADERFUNCTION
  *
  * @param data: The pointer to read data from
@@ -247,12 +264,20 @@ lookup_result(const result_handling_t *result_handling,
 /*
  * Precompiled regular expressions
  */
 static regex_t etag_regex, error_name_regex, message_regex, subdomain_regex,
-    location_con_regex;
+    location_con_regex, date_sync_regex;
+

 /*
  * Utility functions
  */

+/* Check if a string is non-empty
+ *
+ * @param str: string to check
+ * @returns: true iff str is non-NULL and not "\0"
+ */
+static gboolean is_non_empty_string(const char *str);
+
 /* Construct the URL for an Amazon S3 REST request.
  *
  * A new string is allocated and returned; it is the responsibility of the caller.
@@ -307,7 +332,7 @@ authenticate_request(S3Handle *hdl,
  * @param body: the response body
  * @param body_len: the length of the response body
  * @param etag: The response's ETag header
- * @param content_md5: The hex-encoded MD5 hash of the request body, 
+ * @param content_md5: The hex-encoded MD5 hash of the request body,
  *   which will be checked against the response's ETag header.
  *   If NULL, the header is not checked.
  *   If non-NULL, then the body should have the response headers at its beginning.
@@ -324,11 +349,11 @@ interpret_response(S3Handle *hdl,

 /* Perform an S3 operation. This function handles all of the details
  * of retrying requests and so on.
- * 
+ *
  * The concepts of bucket and keys are defined by the Amazon S3 API.
  * See: "Components of Amazon S3" - API Version 2006-03-01 pg. 8
  *
- * Individual sub-resources are defined in several places. In the REST API, 
+ * Individual sub-resources are defined in several places. In the REST API,
  * they are represented by a "flag" in the "query string".
  * See: "Constructing the CanonicalizedResource Element" - API Version 2006-03-01 pg. 60
  *
@@ -375,7 +400,7 @@ perform_request(S3Handle *hdl,
                 const result_handling_t *result_handling);

 /*
- * a CURLOPT_WRITEFUNCTION to save part of the response in memory and 
+ * a CURLOPT_WRITEFUNCTION to save part of the response in memory and
  * call an external function if one was provided.
  */
 static size_t
@@ -408,7 +433,7 @@ s3_error_code_from_name(char *s3_error_name)

     /* do a brute-force search through the list, since it's not sorted */
     for (i = 0; i < S3_ERROR_END; i++) {
-        if (g_strcasecmp(s3_error_name, s3_error_code_names[i]) == 0)
+        if (g_ascii_strcasecmp(s3_error_name, s3_error_code_names[i]) == 0)
             return i;
     }

@@ -443,6 +468,21 @@ s3_curl_supports_ssl(void)
     return supported;
 }

+static gboolean
+s3_curl_throttling_compat(void)
+{
+/* CURLOPT_MAX_SEND_SPEED_LARGE added in 7.15.5 */
+#if LIBCURL_VERSION_NUM >= 0x070f05
+    curl_version_info_data *info;
+
+    /* check the runtime version too */
+    info = curl_version_info(CURLVERSION_NOW);
+    return info->version_num >= 0x070f05;
+#else
+    return FALSE;
+#endif
+}
+
 static s3_result_t
 lookup_result(const result_handling_t *result_handling,
               guint response_code,
@@ -450,7 +490,7 @@ lookup_result(const result_handling_t *result_handling,
               CURLcode curl_code)
 {
     while (result_handling->response_code
-           || result_handling->s3_error_code 
+           || result_handling->s3_error_code
            || result_handling->curl_code) {
         if ((result_handling->response_code && result_handling->response_code != response_code)
             || (result_handling->s3_error_code && result_handling->s3_error_code != s3_error_code)
@@ -466,6 +506,12 @@ lookup_result(const result_handling_t *result_handling,
     return result_handling->result;
 }

+static gboolean
+is_non_empty_string(const char *str)
+{
+    return str && str[0] != '\0';
+}
+
 static char *
 build_url(const char *bucket,
           const char *key,
@@ -489,7 +535,7 @@ build_url(const char *bucket,
         g_string_append_printf(url, "%s.s3.amazonaws.com/", bucket);
     else
         g_string_append(url, "s3.amazonaws.com/");
-    
+
     /* path */
     if (!use_subdomain && bucket) {
         esc_bucket = curl_escape(bucket, 0);
@@ -532,11 +578,11 @@ authenticate_request(S3Handle *hdl,
                      const char *key,
                      const char *subresource,
                      const char *md5_hash,
-                     gboolean use_subdomain) 
+                     gboolean use_subdomain)
 {
     time_t t;
     struct tm tmp;
-    char date[100];
+    char *date = NULL;
     char *buf = NULL;
     HMAC_CTX ctx;
     GByteArray *md = NULL;
@@ -545,14 +591,21 @@ authenticate_request(S3Handle *hdl,
     char *esc_bucket = NULL, *esc_key = NULL;
     GString *auth_string = NULL;
+    /* From RFC 2616 */
+    static const char *wkday[] = {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"};
+    static const char *month[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
+        "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
+
+
+
     /* Build the string to sign, per the S3 spec.
      * See: "Authenticating REST Requests" - API Version 2006-03-01 pg 58
      */
-    
+
     /* verb */
     auth_string = g_string_new(verb);
     g_string_append(auth_string, "\n");
-    
+
     /* Content-MD5 header */
     if (md5_hash)
         g_string_append(auth_string, md5_hash);
@@ -564,18 +617,26 @@ authenticate_request(S3Handle *hdl,

     /* calculate the date */
     t = time(NULL);
+
+    /* sync clock with Amazon S3 */
+    t = t + hdl->time_offset_with_s3;
+
 #ifdef _WIN32
-    if (!localtime_s(&tmp, &t)) g_debug("localtime error");
+    if (gmtime_s(&tmp, &t)) g_debug("gmtime error");
 #else
-    if (!localtime_r(&t, &tmp)) perror("localtime");
+    if (!gmtime_r(&t, &tmp)) perror("gmtime");
 #endif
-    if (!strftime(date, sizeof(date), "%a, %d %b %Y %H:%M:%S %Z", &tmp))
-        perror("strftime");
+
+
+    date = g_strdup_printf("%s, %02d %s %04d %02d:%02d:%02d GMT",
+        wkday[tmp.tm_wday], tmp.tm_mday, month[tmp.tm_mon], 1900+tmp.tm_year,
+        tmp.tm_hour, tmp.tm_min, tmp.tm_sec);

     g_string_append(auth_string, date);
     g_string_append(auth_string, "\n");

-    if (hdl->user_token) {
+    /* CanonicalizedAmzHeaders, sorted lexicographically */
+    if (is_non_empty_string(hdl->user_token)) {
         g_string_append(auth_string, AMAZON_SECURITY_HEADER);
         g_string_append(auth_string, ":");
         g_string_append(auth_string, hdl->user_token);
@@ -584,6 +645,13 @@ authenticate_request(S3Handle *hdl,
         g_string_append(auth_string, "\n");
     }

+    if (is_non_empty_string(hdl->storage_class)) {
+        g_string_append(auth_string, AMAZON_STORAGE_CLASS_HEADER);
+        g_string_append(auth_string, ":");
+        g_string_append(auth_string, hdl->storage_class);
+        g_string_append(auth_string, "\n");
+    }
+
     /* CanonicalizedResource */
     g_string_append(auth_string, "/");
     if (bucket) {
@@ -620,7 +688,7 @@ authenticate_request(S3Handle *hdl,

     auth_base64 = s3_base64_encode(md);

     /* append the new headers */
-    if (hdl->user_token) {
+    if (is_non_empty_string(hdl->user_token)) {
         /* DevPay headers are included in the hash. */
         buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s", hdl->user_token);
         headers = curl_slist_append(headers, buf);
@@ -631,11 +699,18 @@ authenticate_request(S3Handle *hdl,
         g_free(buf);
     }

+    if (is_non_empty_string(hdl->storage_class)) {
+        buf = g_strdup_printf(AMAZON_STORAGE_CLASS_HEADER ": %s", hdl->storage_class);
+        headers = curl_slist_append(headers, buf);
+        g_free(buf);
+    }
+
+
     buf = g_strdup_printf("Authorization: AWS %s:%s", hdl->access_key, auth_base64);
     headers = curl_slist_append(headers, buf);
     g_free(buf);
-    
+
     if (md5_hash && '\0' != md5_hash[0]) {
         buf = g_strdup_printf("Content-MD5: %s", md5_hash);
         headers = curl_slist_append(headers, buf);
@@ -646,6 +721,7 @@ authenticate_request(S3Handle *hdl,
     headers = curl_slist_append(headers, buf);
     g_free(buf);
 cleanup:
+    g_free(date);
     g_free(esc_bucket);
     g_free(esc_key);
     g_byte_array_free(md, TRUE);
@@ -688,7 +764,7 @@ interpret_response(S3Handle *hdl,

     /* check ETag, if present */
     if (etag && content_md5 && 200 == response_code) {
-        if (etag && g_strcasecmp(etag, content_md5))
+        if (etag && g_ascii_strcasecmp(etag, content_md5))
             hdl->last_message = g_strdup("S3 Error: Possible data corruption (ETag returned by Amazon did not match the MD5 hash of the data sent)");
         else
             ret = FALSE;
@@ -841,7 +917,7 @@ size_t
 s3_counter_write_func(G_GNUC_UNUSED void *ptr, size_t size, size_t nmemb, void *stream)
 {
     gint64 *count = (gint64*) stream, inc = nmemb*size;
-    
+
     if (count) *count += inc;
     return inc;
 }
@@ -892,7 +968,7 @@ s3_file_md5_func(void *stream)
     g_assert(INVALID_SET_FILE_POINTER != SetFilePointer(hFile, 0, NULL, FILE_BEGIN));

     ret = g_byte_array_sized_new(S3_MD5_HASH_BYTE_LEN);
-    g_byte_array_set_size(ret, S3_MD5_HASH_BYTE_LEN); 
+    g_byte_array_set_size(ret, S3_MD5_HASH_BYTE_LEN);
     MD5_Init(&md5_ctx);

     while (ReadFile(hFile, buf, S3_MD5_BUF_SIZE, &bytes_read, NULL)) {
@@ -923,11 +999,11 @@ s3_file_write_func(void *ptr, size_t size, size_t nmemb, void *stream)
 }
 #endif

-static int 
-curl_debug_message(CURL *curl G_GNUC_UNUSED, 
-                   curl_infotype type, 
-                   char *s, 
-                   size_t len, 
+static int
+curl_debug_message(CURL *curl G_GNUC_UNUSED,
+                   curl_infotype type,
+                   char *s,
+                   size_t len,
                    void *unused G_GNUC_UNUSED)
 {
     char *lineprefix;
@@ -992,7 +1068,8 @@ perform_request(S3Handle *hdl,
     CURLcode curl_code = CURLE_OK;
     char curl_error_buffer[CURL_ERROR_SIZE] = "";
     struct curl_slist *headers = NULL;
-    S3InternalData int_writedata = {{NULL, 0, 0, MAX_ERROR_RESPONSE_LEN}, NULL, NULL, NULL, FALSE, FALSE, NULL};
+    /* set up the internal write data, including a back-pointer to hdl */
+    S3InternalData int_writedata = {{NULL, 0, 0, MAX_ERROR_RESPONSE_LEN}, NULL, NULL, NULL, FALSE, FALSE, NULL, hdl};
     gboolean should_retry;
     guint retries = 0;
     gulong backoff = EXPONENTIAL_BACKOFF_START_USEC;
@@ -1009,7 +1086,7 @@ perform_request(S3Handle *hdl,

     s3_reset(hdl);

-    use_subdomain = hdl->bucket_location? TRUE : FALSE;
+    use_subdomain = is_non_empty_string(hdl->bucket_location);

     url = build_url(bucket, key, subresource, query, use_subdomain, hdl->use_ssl);
     if (!url) goto cleanup;
@@ -1068,12 +1145,17 @@ perform_request(S3Handle *hdl,

         /* set up the request */
         headers = authenticate_request(hdl, verb, bucket, key, subresource,
-            md5_hash_b64, hdl->bucket_location? TRUE : FALSE);
+            md5_hash_b64, is_non_empty_string(hdl->bucket_location));
+
+        if (hdl->use_ssl && hdl->ca_info) {
+            if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_CAINFO, hdl->ca_info)))
+                goto curl_error;
+        }

         if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_VERBOSE, hdl->verbose)))
             goto curl_error;

         if (hdl->verbose) {
-            if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_DEBUGFUNCTION, 
+            if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_DEBUGFUNCTION,
                               curl_debug_message)))
                 goto curl_error;
         }
@@ -1089,28 +1171,41 @@ perform_request(S3Handle *hdl,
         if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HTTPHEADER,
                                           headers)))
             goto curl_error;

-        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_WRITEFUNCTION, s3_internal_write_func))) 
+        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_WRITEFUNCTION, s3_internal_write_func)))
            goto curl_error;

-        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_WRITEDATA, &int_writedata))) 
+        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_WRITEDATA, &int_writedata)))
            goto curl_error;

        /* Note: we always have to set this apparently, for consistent "end of header" detection */
-        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HEADERFUNCTION, s3_internal_header_func))) 
+        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HEADERFUNCTION, s3_internal_header_func)))
            goto curl_error;

        /* Note: if set, CURLOPT_HEADERDATA seems to also be used for CURLOPT_WRITEDATA ? */
-        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HEADERDATA, &int_writedata))) 
+        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HEADERDATA, &int_writedata)))
            goto curl_error;

-        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSFUNCTION, progress_func))) 
+        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSFUNCTION, progress_func)))
            goto curl_error;

-        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSDATA, progress_data))) 
+        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSDATA, progress_data)))
            goto curl_error;

-#ifdef CURLOPT_INFILESIZE_LARGE
+/* CURLOPT_INFILESIZE_LARGE added in 7.11.0 */
+#if LIBCURL_VERSION_NUM >= 0x070b00
        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_INFILESIZE_LARGE, (curl_off_t)request_body_size)))
            goto curl_error;
 #else
        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_INFILESIZE, (long)request_body_size)))
            goto curl_error;
 #endif

+/* CURLOPT_MAX_{RECV,SEND}_SPEED_LARGE added in 7.15.5 */
+#if LIBCURL_VERSION_NUM >= 0x070f05
+        if (s3_curl_throttling_compat()) {
+            if (hdl->max_send_speed)
+                if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_MAX_SEND_SPEED_LARGE, (curl_off_t)hdl->max_send_speed)))
+                    goto curl_error;
+
+            if (hdl->max_recv_speed)
+                if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t)hdl->max_recv_speed)))
+                    goto curl_error;
+        }
+#endif

        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HTTPGET, curlopt_httpget)))
            goto curl_error;
@@ -1126,16 +1221,16 @@ perform_request(S3Handle *hdl,


        if (curlopt_upload) {
-            if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READFUNCTION, read_func))) 
+            if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READFUNCTION, read_func)))
                goto curl_error;
-            if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA, read_data))) 
+            if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA, read_data)))
                goto curl_error;
        } else {
            /* Clear request_body options. */
            if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READFUNCTION, NULL)))
                goto curl_error;
-            if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA, 
+            if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA,
                                               NULL)))
                goto curl_error;
        }
@@ -1146,12 +1241,12 @@ perform_request(S3Handle *hdl,

        /* interpret the response into hdl->last* */
    curl_error: /* (label for short-circuiting the curl_easy_perform call) */
-        should_retry = interpret_response(hdl, curl_code, curl_error_buffer, 
+        should_retry = interpret_response(hdl, curl_code, curl_error_buffer,
            int_writedata.resp_buf.buffer, int_writedata.resp_buf.buffer_pos,
            int_writedata.etag, md5_hash_hex);
-        
+
        /* and, unless we know we need to retry, see what we're to do now */
        if (!should_retry) {
-            result = lookup_result(result_handling, hdl->last_response_code, 
+            result = lookup_result(result_handling, hdl->last_response_code,
                hdl->last_s3_error_code, hdl->last_curl_code);

            /* break out of the while(1) unless we're retrying */
@@ -1177,7 +1272,7 @@ perform_request(S3Handle *hdl,
    if (result != S3_RESULT_OK) {
        g_debug(_("%s %s failed with %d/%s"), verb, url,
            hdl->last_response_code,
-            s3_error_name_from_code(hdl->last_s3_error_code)); 
+            s3_error_name_from_code(hdl->last_s3_error_code));
    }

 cleanup:
@@ -1185,7 +1280,7 @@ cleanup:
    if (headers) curl_slist_free_all(headers);
    g_free(md5_hash_b64);
    g_free(md5_hash_hex);
-    
+
    /* we don't deallocate the response body -- we keep it for later */
    hdl->last_response_body = int_writedata.resp_buf.buffer;
    hdl->last_response_body_size = int_writedata.resp_buf.buffer_pos;
@@ -1239,16 +1334,40 @@ static size_t
 s3_internal_header_func(void *ptr, size_t size, size_t nmemb, void * stream)
 {
    static const char *final_header = "\r\n";
+    time_t remote_time_in_sec, local_time;
    char *header;
    regmatch_t pmatch[2];
    S3InternalData *data = (S3InternalData *) stream;

    header = g_strndup((gchar *) ptr, (gsize) size*nmemb);
+
    if (!s3_regexec_wrap(&etag_regex, header, 2, pmatch, 0))
-        data->etag = find_regex_substring(header, pmatch[1]); 
+        data->etag = find_regex_substring(header, pmatch[1]);
    if (!strcmp(final_header, header))
        data->headers_done = TRUE;

+    /* if the Date header is found, record the offset from the local clock */
+    if (!s3_regexec_wrap(&date_sync_regex, header, 2, pmatch, 0)) {
+        char *date = find_regex_substring(header, pmatch[1]);
+
+        /* Remote time is always in GMT: RFC 2616 */
+        /* both curl_getdate and time operate in UTC, so no timezone math is necessary */
+        if ((remote_time_in_sec = curl_getdate(date, NULL)) < 0) {
+            g_debug("Error: Conversion of remote time to seconds failed.");
+            data->hdl->time_offset_with_s3 = 0;
+        } else {
+            local_time = time(NULL);
+            /* offset time */
+            data->hdl->time_offset_with_s3 = remote_time_in_sec - local_time;
+
+            if (data->hdl->verbose)
+                g_debug("Time offset (remote - local): %ld", (long)data->hdl->time_offset_with_s3);
+        }
+
+        g_free(date);
+    }
+
    g_free(header);
    return size*nmemb;
 }
@@ -1262,8 +1381,9 @@ compile_regexes(void)
        {"<Code>[[:space:]]*([^<]*)[[:space:]]*</Code>", REG_EXTENDED | REG_ICASE, &error_name_regex},
        {"^ETag:[[:space:]]*\"([^\"]+)\"[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &etag_regex},
        {"<Message>[[:space:]]*([^<]*)[[:space:]]*</Message>", REG_EXTENDED | REG_ICASE, &message_regex},
-        {"^[a-z0-9]((-*[a-z0-9])|(\\.[a-z0-9])){2,62}$", REG_EXTENDED | REG_NOSUB, &subdomain_regex},
+        {"^[a-z0-9](-*[a-z0-9]){2,62}$", REG_EXTENDED | REG_NOSUB, &subdomain_regex},
        {"(/>)|(>([^<]*))", REG_EXTENDED | REG_ICASE, &location_con_regex},
+        {"^Date:(.*)\r", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &date_sync_regex},
        {NULL, 0, NULL}
    };
    char regmessage[1024];
@@ -1296,6 +1416,9 @@ compile_regexes(void)
        {"(/>)|(>([^<]*))",
         G_REGEX_CASELESS,
         &location_con_regex},
+        {"^Date:(.*)\\r",
+         G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
+         &date_sync_regex},
        {NULL, 0, NULL}
    };
    int i;
@@ -1321,7 +1444,7 @@ gboolean s3_init(void)
 {
    static GStaticMutex mutex = G_STATIC_MUTEX_INIT;
    static gboolean init = FALSE, ret;
-    
+
    /* n.b. curl_global_init is called in common-src/glib-util.c:glib_init() */

    g_static_mutex_lock (&mutex);
@@ -1352,7 +1475,9 @@ S3Handle *
 s3_open(const char *access_key,
        const char *secret_key,
        const char *user_token,
-        const char *bucket_location
+        const char *bucket_location,
+        const char *storage_class,
+        const char *ca_info
        )
 {
    S3Handle *hdl;
@@ -1372,6 +1497,12 @@ s3_open(const char *access_key,
    /* NULL is okay */
    hdl->bucket_location = g_strdup(bucket_location);

+    /* NULL is okay */
+    hdl->storage_class = g_strdup(storage_class);
+
+    /* NULL is okay */
+    hdl->ca_info = g_strdup(ca_info);
+
    hdl->curl = curl_easy_init();
    if (!hdl->curl) goto error;

@@ -1392,6 +1523,8 @@ s3_free(S3Handle *hdl)
        g_free(hdl->secret_key);
        if (hdl->user_token) g_free(hdl->user_token);
        if (hdl->bucket_location) g_free(hdl->bucket_location);
+        if (hdl->storage_class) g_free(hdl->storage_class);
+        if (hdl->ca_info) g_free(hdl->ca_info);

        if (hdl->curl) curl_easy_cleanup(hdl->curl);

        g_free(hdl);
@@ -1457,6 +1589,28 @@ s3_verbose(S3Handle *hdl, gboolean verbose)
    hdl->verbose = verbose;
 }

+gboolean
+s3_set_max_send_speed(S3Handle *hdl, guint64 max_send_speed)
+{
+    if (!s3_curl_throttling_compat())
+        return FALSE;
+
+    hdl->max_send_speed = max_send_speed;
+
+    return TRUE;
+}
+
+gboolean
+s3_set_max_recv_speed(S3Handle *hdl, guint64 max_recv_speed)
+{
+    if (!s3_curl_throttling_compat())
+        return FALSE;
+
+    hdl->max_recv_speed = max_recv_speed;
+
+    return TRUE;
+}
+
 gboolean
 s3_use_ssl(S3Handle *hdl, gboolean use_ssl)
 {
@@ -1485,7 +1639,7 @@ s3_strerror(S3Handle *hdl)

    s3_error(hdl, &message, &response_code, NULL, &s3_error_name, &curl_code, &num_retries);

-    if (!message) 
+    if (!message)
        message = "Unknown S3 error";
    if (s3_error_name)
        g_snprintf(s3_info, sizeof(s3_info), " (%s)", s3_error_name);
@@ -1493,7 +1647,7 @@ s3_strerror(S3Handle *hdl)
        g_snprintf(response_info, sizeof(response_info), " (HTTP %d)", response_code);
    if (curl_code)
        g_snprintf(curl_info, sizeof(curl_info), " (CURLcode %d)", curl_code);
-    if (num_retries) 
+    if (num_retries)
        g_snprintf(retries_info, sizeof(retries_info), " (after %d retries)", num_retries);

    return g_strdup_printf("%s%s%s%s%s", message, s3_info, curl_info, response_info, retries_info);
@@ -1512,7 +1666,7 @@ s3_strerror(S3Handle *hdl)
 gboolean
 s3_upload(S3Handle *hdl,
          const char *bucket,
-          const char *key, 
+          const char *key,
          s3_read_func read_func,
          s3_reset_func reset_func,
          s3_size_func size_func,
@@ -1523,9 +1677,9 @@ s3_upload(S3Handle *hdl,
 {
    s3_result_t result = S3_RESULT_FAIL;
    static result_handling_t result_handling[] = {
-        { 200, 0, 0, S3_RESULT_OK }, 
+        { 200, 0, 0, S3_RESULT_OK },
        RESULT_HANDLING_ALWAYS_RETRY,
-        { 0, 0, 0, /* default: */ S3_RESULT_FAIL } 
+        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
    };

    g_assert(hdl != NULL);
@@ -1551,7 +1705,7 @@ struct list_keys_thunk {
    gchar *next_marker;

    gboolean want_text;
-    
+
    gchar *text;
    gsize text_len;
 };
@@ -1559,53 +1713,53 @@ struct list_keys_thunk {
 /* Functions for a SAX parser to parse the XML from Amazon */

 static void
-list_start_element(GMarkupParseContext *context G_GNUC_UNUSED, 
-                   const gchar *element_name, 
-                   const gchar **attribute_names G_GNUC_UNUSED, 
-                   const gchar **attribute_values G_GNUC_UNUSED, 
-                   gpointer user_data, 
+list_start_element(GMarkupParseContext *context G_GNUC_UNUSED,
+                   const gchar *element_name,
+                   const gchar **attribute_names G_GNUC_UNUSED,
+                   const gchar **attribute_values G_GNUC_UNUSED,
+                   gpointer user_data,
                    GError **error G_GNUC_UNUSED)
 {
    struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;

    thunk->want_text = 0;
-    if (g_strcasecmp(element_name, "contents") == 0) {
+    if (g_ascii_strcasecmp(element_name, "contents") == 0) {
        thunk->in_contents = 1;
-    } else if (g_strcasecmp(element_name, "commonprefixes") == 0) {
+    } else if (g_ascii_strcasecmp(element_name, "commonprefixes") == 0) {
        thunk->in_common_prefixes = 1;
-    } else if (g_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
+    } else if (g_ascii_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
        thunk->want_text = 1;
-    } else if (g_strcasecmp(element_name, "key") == 0 && thunk->in_contents) {
+    } else if (g_ascii_strcasecmp(element_name, "key") == 0 && thunk->in_contents) {
        thunk->want_text = 1;
-    } else if (g_strcasecmp(element_name, "istruncated")) {
+    } else if (g_ascii_strcasecmp(element_name, "istruncated") == 0) {
        thunk->want_text = 1;
-    } else if (g_strcasecmp(element_name, "nextmarker")) {
+    } else if (g_ascii_strcasecmp(element_name, "nextmarker") == 0) {
        thunk->want_text = 1;
    }
 }

 static void
-list_end_element(GMarkupParseContext *context G_GNUC_UNUSED, 
+list_end_element(GMarkupParseContext *context G_GNUC_UNUSED,
                 const gchar *element_name,
-                 gpointer user_data, 
+                 gpointer user_data,
                 GError **error G_GNUC_UNUSED)
 {
    struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;

-    if (g_strcasecmp(element_name, "contents") == 0) {
+    if (g_ascii_strcasecmp(element_name, "contents") == 0) {
        thunk->in_contents = 0;
-    } else if (g_strcasecmp(element_name, "commonprefixes") == 0) {
+    } else if (g_ascii_strcasecmp(element_name, "commonprefixes") == 0) {
        thunk->in_common_prefixes = 0;
-    } else if (g_strcasecmp(element_name, "key") == 0 && thunk->in_contents) {
+    } else if (g_ascii_strcasecmp(element_name, "key") == 0 && thunk->in_contents) {
        thunk->filename_list = g_slist_prepend(thunk->filename_list, thunk->text);
        thunk->text = NULL;
-    } else if (g_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
+    } else if (g_ascii_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
        thunk->filename_list = g_slist_prepend(thunk->filename_list, thunk->text);
        thunk->text = NULL;
-    } else if (g_strcasecmp(element_name, "istruncated") == 0) {
-        if (thunk->text && g_strncasecmp(thunk->text, "false", 5) != 0)
+    } else if (g_ascii_strcasecmp(element_name, "istruncated") == 0) {
+        if (thunk->text && g_ascii_strncasecmp(thunk->text, "false", 5) != 0)
            thunk->is_truncated = TRUE;
-    } else if (g_strcasecmp(element_name, "nextmarker") == 0) {
+    } else if (g_ascii_strcasecmp(element_name, "nextmarker") == 0) {
        if (thunk->next_marker) g_free(thunk->next_marker);
        thunk->next_marker = thunk->text;
        thunk->text = NULL;
@@ -1614,9 +1768,9 @@ list_end_element(GMarkupParseContext *context G_GNUC_UNUSED,

 static void
 list_text(GMarkupParseContext *context G_GNUC_UNUSED,
-          const gchar *text, 
-          gsize text_len, 
-          gpointer user_data, 
+          const gchar *text,
+          gsize text_len,
+          gpointer user_data,
          GError **error G_GNUC_UNUSED)
 {
    struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;
@@ -1632,17 +1786,17 @@ list_text(GMarkupParseContext *context G_GNUC_UNUSED,
 static s3_result_t
 list_fetch(S3Handle *hdl,
           const char *bucket,
-           const char *prefix, 
-           const char *delimiter, 
+           const char *prefix,
+           const char *delimiter,
           const char *marker,
           const char *max_keys,
           CurlBuffer *buf)
 {
-    s3_result_t result = S3_RESULT_FAIL; 
+    s3_result_t result = S3_RESULT_FAIL;
    static result_handling_t result_handling[] = {
-        { 200, 0, 0, S3_RESULT_OK }, 
+        { 200, 0, 0, S3_RESULT_OK },
        RESULT_HANDLING_ALWAYS_RETRY,
-        { 0, 0, 0, /* default: */ S3_RESULT_FAIL } 
+        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
    };
    const char* pos_parts[][2] = {
        {"prefix", prefix},
@@ -1742,7 +1896,7 @@ s3_list_keys(S3Handle *hdl,
            result = S3_RESULT_FAIL;
            goto cleanup;
        }
-        
+
        g_markup_parse_context_free(ctxt);
        ctxt = NULL;
    } while (thunk.next_marker);
@@ -1775,9 +1929,9 @@ s3_read(S3Handle *hdl,
 {
    s3_result_t result = S3_RESULT_FAIL;
    static result_handling_t result_handling[] = {
-        { 200, 0, 0, S3_RESULT_OK }, 
+        { 200, 0, 0, S3_RESULT_OK },
        RESULT_HANDLING_ALWAYS_RETRY,
-        { 0, 0, 0, /* default: */ S3_RESULT_FAIL } 
+        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
    };

    g_assert(hdl != NULL);
@@ -1797,9 +1951,10 @@ s3_delete(S3Handle *hdl,
 {
    s3_result_t result = S3_RESULT_FAIL;
    static result_handling_t result_handling[] = {
-        { 204, 0, 0, S3_RESULT_OK }, 
+        { 204, 0, 0, S3_RESULT_OK },
+        { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_OK },
        RESULT_HANDLING_ALWAYS_RETRY,
-        { 0, 0, 0, /* default: */ S3_RESULT_FAIL } 
+        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
    };

    g_assert(hdl != NULL);
@@ -1818,9 +1973,10 @@ s3_make_bucket(S3Handle *hdl,
    char *body = NULL;
    s3_result_t result = S3_RESULT_FAIL;
    static result_handling_t result_handling[] = {
-        { 200, 0, 0, S3_RESULT_OK }, 
+        { 200, 0, 0, S3_RESULT_OK },
+        { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_RETRY },
        RESULT_HANDLING_ALWAYS_RETRY,
-        { 0, 0, 0, /* default: */ S3_RESULT_FAIL } 
+        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
    };
    regmatch_t pmatch[4];
    char *loc_end_open, *loc_content;
@@ -1831,8 +1987,9 @@ s3_make_bucket(S3Handle *hdl,
    s3_size_func size_func = NULL;

    g_assert(hdl != NULL);
-    
-    if (hdl->bucket_location && hdl->bucket_location[0]) {
+
+    if (is_non_empty_string(hdl->bucket_location) &&
+        0 != strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location)) {
        if (s3_bucket_location_compat(bucket)) {
            ptr = &buf;
            buf.buffer = g_strdup_printf(AMAZON_BUCKET_CONF_TEMPLATE, hdl->bucket_location);
@@ -1856,7 +2013,7 @@ s3_make_bucket(S3Handle *hdl,
                             NULL, NULL, NULL, NULL, NULL, result_handling);

    if (result == S3_RESULT_OK ||
-        (hdl->bucket_location && result != S3_RESULT_OK
+        (is_non_empty_string(hdl->bucket_location) && result != S3_RESULT_OK
         && hdl->last_s3_error_code == S3_ERROR_BucketAlreadyOwnedByYou)) {
        /* verify that the location constraint on the existing bucket matches
         * the one that's configured.
@@ -1865,10 +2022,10 @@ s3_make_bucket(S3Handle *hdl,
                                 NULL, NULL, NULL, NULL, NULL, NULL,
                                 NULL, NULL, NULL, NULL, result_handling);

-        /* note that we can check only one of the three AND conditions above 
+        /* note that we can check only one of the three AND conditions above
         * and infer that the others are true */
-        if (result == S3_RESULT_OK && hdl->bucket_location) {
+        if (result == S3_RESULT_OK && is_non_empty_string(hdl->bucket_location)) {
            /* return to the default state of failure */
            result = S3_RESULT_FAIL;

@@ -1878,8 +2035,11 @@ s3_make_bucket(S3Handle *hdl,
            if (!body) {
                hdl->last_message = g_strdup(_("No body received for location request"));
                goto cleanup;
+            } else if ('\0' == body[0]) {
+                hdl->last_message = g_strdup(_("Empty body received for location request"));
+                goto cleanup;
            }
-            
+
            if (!s3_regexec_wrap(&location_con_regex, body, 4, pmatch, 0)) {
                loc_end_open = find_regex_substring(body, pmatch[1]);
                loc_content = find_regex_substring(body, pmatch[3]);
@@ -1887,11 +2047,13 @@ s3_make_bucket(S3Handle *hdl,

                /* The case of an empty string is special because XML allows
                 * "self-closing" tags
                 */
-                if ('\0' == hdl->bucket_location[0] &&
-                    '/' != loc_end_open[0] && '\0' != hdl->bucket_location[0])
-                    hdl->last_message = g_strdup(_("An empty location constraint is "
+                if (0 == strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location) &&
+                    '/' != loc_end_open[0])
+                    hdl->last_message = g_strdup(_("A wildcard location constraint is "
                        "configured, but the bucket has a non-empty location constraint"));
-                else if (strncmp(loc_content, hdl->bucket_location, strlen(hdl->bucket_location)))
+                else if (strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location)?
+                    strncmp(loc_content, hdl->bucket_location, strlen(hdl->bucket_location)) :
+                    ('\0' != loc_content[0]))
                    hdl->last_message = g_strdup(_("The location constraint configured "
                        "does not match the constraint currently on the bucket"));
                else
@@ -1904,7 +2066,14 @@ s3_make_bucket(S3Handle *hdl,
 cleanup:
    if (body) g_free(body);
-    
+
    return result == S3_RESULT_OK;
 }
+
+gboolean
+s3_delete_bucket(S3Handle *hdl,
+                 const char *bucket)
+{
+    return s3_delete(hdl, bucket, NULL);
+}
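
The two caller-visible additions in this patch are the extended s3_open() signature (storage class and CA-bundle path) and the new send/receive throttling setters, which report failure on a libcurl older than 7.15.5. The sketch below is hypothetical caller code, not part of the patch: the key strings, region, bucket name, and "REDUCED_REDUNDANCY" storage class are placeholder values, and it assumes the prototypes in device-src/s3.h match this diff.

/* Hypothetical usage sketch for the API added by this patch. */
#include <stdio.h>
#include <glib.h>
#include "s3.h"

int main(void)
{
    S3Handle *hdl;

    if (!s3_init())
        return 1;

    hdl = s3_open("ACCESS_KEY", "SECRET_KEY",
                  NULL,                  /* user_token: not using DevPay */
                  "EU",                  /* bucket_location constraint */
                  "REDUCED_REDUNDANCY",  /* storage_class for new objects (new) */
                  NULL);                 /* ca_info: use the default CA bundle (new) */
    if (!hdl)
        return 1;

    /* Returns FALSE, and sets no limit, when the runtime libcurl predates
     * 7.15.5 (see s3_curl_throttling_compat above); limits are bytes/second. */
    if (!s3_set_max_send_speed(hdl, 512 * 1024))
        fprintf(stderr, "libcurl too old for upload throttling\n");

    if (!s3_make_bucket(hdl, "example-bucket")) {
        char *err = s3_strerror(hdl);   /* caller frees the returned string */
        fprintf(stderr, "make_bucket failed: %s\n", err);
        g_free(err);
    }

    s3_free(hdl);
    return 0;
}

Note the retry strategy this patch enables: a 403 RequestTimeTooSkewed is now in RESULT_HANDLING_ALWAYS_RETRY because s3_internal_header_func() records the difference between the Date header S3 returns and the local clock, and authenticate_request() adds that offset before signing, so a retried request carries a timestamp S3 will accept.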