2 * Copyright (c) 2008, 2009, 2010 Zmanda, Inc. All Rights Reserved.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published
6 * by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
10 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 * Contact information: Zmanda Inc., 465 S. Mathilda Ave., Suite 300
18 * Sunnyvale, CA 94085, USA, or: http://www.zmanda.com
22 * - collect speed statistics
27 /* use a relative path here to avoid conflicting with Perl's config.h. */
28 #include "../config/config.h"
36 #ifdef HAVE_SYS_TYPES_H
37 #include <sys/types.h>
39 #ifdef HAVE_SYS_STAT_H
58 #include <curl/curl.h>
60 /* Constant renamed after version 7.10.7 */
61 #ifndef CURLINFO_RESPONSE_CODE
62 #define CURLINFO_RESPONSE_CODE CURLINFO_HTTP_CODE
65 /* We don't need OpenSSL's kerberos support, and it's broken in
67 #define OPENSSL_NO_KRB5
69 #ifdef HAVE_OPENSSL_HMAC_H
70 # include <openssl/hmac.h>
72 # ifdef HAVE_CRYPTO_HMAC_H
73 # include <crypto/hmac.h>
81 #include <openssl/err.h>
82 #include <openssl/ssl.h>
83 #include <openssl/md5.h>
85 /* Maximum key length as specified in the S3 documentation
86 * (*excluding* null terminator) */
87 #define S3_MAX_KEY_LENGTH 1024
89 #define AMAZON_SECURITY_HEADER "x-amz-security-token"
90 #define AMAZON_BUCKET_CONF_TEMPLATE "\
91 <CreateBucketConfiguration>\n\
92 <LocationConstraint>%s</LocationConstraint>\n\
93 </CreateBucketConfiguration>"
95 #define AMAZON_STORAGE_CLASS_HEADER "x-amz-storage-class"
97 #define AMAZON_WILDCARD_LOCATION "*"
99 /* parameters for exponential backoff in the face of retriable errors */
102 #define EXPONENTIAL_BACKOFF_START_USEC G_USEC_PER_SEC/100
103 /* double at each retry */
104 #define EXPONENTIAL_BACKOFF_BASE 2
105 /* retry 14 times (for a total of about 3 minutes spent waiting) */
106 #define EXPONENTIAL_BACKOFF_MAX_RETRIES 14
108 /* general "reasonable size" parameters */
109 #define MAX_ERROR_RESPONSE_LEN (100*1024)
111 /* Results which should always be retried */
112 #define RESULT_HANDLING_ALWAYS_RETRY \
113 { 400, S3_ERROR_RequestTimeout, 0, S3_RESULT_RETRY }, \
114 { 403, S3_ERROR_RequestTimeTooSkewed,0, S3_RESULT_RETRY }, \
115 { 409, S3_ERROR_OperationAborted, 0, S3_RESULT_RETRY }, \
116 { 412, S3_ERROR_PreconditionFailed, 0, S3_RESULT_RETRY }, \
117 { 500, S3_ERROR_InternalError, 0, S3_RESULT_RETRY }, \
118 { 501, S3_ERROR_NotImplemented, 0, S3_RESULT_RETRY }, \
119 { 0, 0, CURLE_COULDNT_CONNECT, S3_RESULT_RETRY }, \
120 { 0, 0, CURLE_COULDNT_RESOLVE_HOST, S3_RESULT_RETRY }, \
121 { 0, 0, CURLE_PARTIAL_FILE, S3_RESULT_RETRY }, \
122 { 0, 0, CURLE_OPERATION_TIMEOUTED, S3_RESULT_RETRY }, \
123 { 0, 0, CURLE_SEND_ERROR, S3_RESULT_RETRY }, \
124 { 0, 0, CURLE_RECV_ERROR, S3_RESULT_RETRY }, \
125 { 0, 0, CURLE_GOT_NOTHING, S3_RESULT_RETRY }
128 * Data structures and associated functions
132 /* (all strings in this struct are freed by s3_free()) */
138 /* attributes for new objects */
139 char *bucket_location;
149 guint64 max_send_speed;
150 guint64 max_recv_speed;
152 /* information from the last request */
154 guint last_response_code;
155 s3_error_code_t last_s3_error_code;
156 CURLcode last_curl_code;
157 guint last_num_retries;
158 void *last_response_body;
159 guint last_response_body_size;
162 time_t time_offset_with_s3;
167 s3_write_func write_func;
168 s3_reset_func reset_func;
171 gboolean headers_done;
172 gboolean int_write_done;
174 /* Points to current handle: Added to get hold of s3 offset */
175 struct S3Handle *hdl;
178 /* Callback function to examine headers one-at-a-time
180 * @note this is the same as CURLOPT_HEADERFUNCTION
182 * @param data: The pointer to read data from
183 * @param size: The size of each "element" of the data buffer in bytes
184 * @param nmemb: The number of elements in the data buffer.
185 * So, the buffer's size is size*nmemb bytes.
186 * @param stream: the header_data (an opaque pointer)
188 * @return The number of bytes written to the buffer or
189 * CURL_WRITEFUNC_PAUSE to pause.
190 * If it's the number of bytes written, it should match the buffer size
192 typedef size_t (*s3_header_func)(void *data, size_t size, size_t nmemb, void *stream);
198 /* (see preprocessor magic in s3.h) */
200 static char * s3_error_code_names[] = {
201 #define S3_ERROR(NAME) #NAME
206 /* Convert an s3 error name to an error code. This function
207 * matches strings case-insensitively, and is appropriate for use
208 * on data from the network.
210 * @param s3_error_code: the error name
211 * @returns: the error code (see constants in s3.h)
213 static s3_error_code_t
214 s3_error_code_from_name(char *s3_error_name);
216 /* Convert an s3 error code to a string
218 * @param s3_error_code: the error code to convert
219 * @returns: statically allocated string
222 s3_error_name_from_code(s3_error_code_t s3_error_code);
228 /* result handling is specified by a static array of result_handling structs,
229 * which match based on response_code (from HTTP) and S3 error code. The result
230 * given for the first match is used. 0 acts as a wildcard for both response_code
231 * and s3_error_code. The list is terminated with a struct containing 0 for both
232 * response_code and s3_error_code; the result for that struct is the default
235 * See RESULT_HANDLING_ALWAYS_RETRY for an example.
238 S3_RESULT_RETRY = -1,
243 typedef struct result_handling {
245 s3_error_code_t s3_error_code;
250 /* Lookup a result in C{result_handling}.
252 * @param result_handling: array of handling specifications
253 * @param response_code: response code from operation
254 * @param s3_error_code: s3 error code from operation, if any
255 * @param curl_code: the CURL error, if any
256 * @returns: the matching result
259 lookup_result(const result_handling_t *result_handling,
261 s3_error_code_t s3_error_code,
265 * Precompiled regular expressions */
266 static regex_t etag_regex, error_name_regex, message_regex, subdomain_regex,
267 location_con_regex, date_sync_regex;
274 /* Check if a string is non-empty
276 * @param str: string to check
277 * @returns: true iff str is non-NULL and not "\0"
279 static gboolean is_non_empty_string(const char *str);
281 /* Construct the URL for an Amazon S3 REST request.
283 * A new string is allocated and returned; it is the responsibility of the caller.
285 * @param hdl: the S3Handle object
286 * @param verb: capitalized verb for this request ('PUT', 'GET', etc.)
287 * @param bucket: the bucket being accessed, or NULL for none
288 * @param key: the key being accessed, or NULL for none
289 * @param subresource: the sub-resource being accessed (e.g. "acl"), or NULL for none
290 * @param use_subdomain: if TRUE, a subdomain of s3.amazonaws.com will be used
293 build_url(const char *bucket,
295 const char *subresource,
297 gboolean use_subdomain,
300 /* Create proper authorization headers for an Amazon S3 REST
301 * request to C{headers}.
303 * @note: C{X-Amz} headers (in C{headers}) must
305 * - be in alphabetical order
306 * - have no spaces around the colon
307 * (don't yell at me -- see the Amazon Developer Guide)
309 * @param hdl: the S3Handle object
310 * @param verb: capitalized verb for this request ('PUT', 'GET', etc.)
311 * @param bucket: the bucket being accessed, or NULL for none
312 * @param key: the key being accessed, or NULL for none
313 * @param subresource: the sub-resource being accessed (e.g. "acl"), or NULL for none
314 * @param md5_hash: the MD5 hash of the request body, or NULL for none
315 * @param use_subdomain: if TRUE, a subdomain of s3.amazonaws.com will be used
317 static struct curl_slist *
318 authenticate_request(S3Handle *hdl,
322 const char *subresource,
323 const char *md5_hash,
324 gboolean use_subdomain);
328 /* Interpret the response to an S3 operation, assuming CURL completed its request
329 * successfully. This function fills in the relevant C{hdl->last*} members.
331 * @param hdl: The S3Handle object
332 * @param body: the response body
333 * @param body_len: the length of the response body
334 * @param etag: The response's ETag header
335 * @param content_md5: The hex-encoded MD5 hash of the request body,
336 * which will be checked against the response's ETag header.
337 * If NULL, the header is not checked.
338 * If non-NULL, then the body should have the response headers at its beginning.
339 * @returns: TRUE if the response should be retried (e.g., network error)
342 interpret_response(S3Handle *hdl,
344 char *curl_error_buffer,
348 const char *content_md5);
350 /* Perform an S3 operation. This function handles all of the details
351 * of retrying requests and so on.
353 * The concepts of bucket and keys are defined by the Amazon S3 API.
354 * See: "Components of Amazon S3" - API Version 2006-03-01 pg. 8
356 * Individual sub-resources are defined in several places. In the REST API,
357 * they are represented by a "flag" in the "query string".
358 * See: "Constructing the CanonicalizedResource Element" - API Version 2006-03-01 pg. 60
360 * @param hdl: the S3Handle object
361 * @param verb: the HTTP request method
362 * @param bucket: the bucket to access, or NULL for none
363 * @param key: the key to access, or NULL for none
364 * @param subresource: the "sub-resource" to request (e.g. "acl") or NULL for none
365 * @param query: the query string to send (not including the initial '?'),
367 * @param read_func: the callback for reading data
368 * Will use s3_empty_read_func if NULL is passed in.
369 * @param read_reset_func: the callback for to reset reading data
370 * @param size_func: the callback to get the number of bytes to upload
371 * @param md5_func: the callback to get the MD5 hash of the data to upload
372 * @param read_data: pointer to pass to the above functions
373 * @param write_func: the callback for writing data.
374 * Will use s3_counter_write_func if NULL is passed in.
375 * @param write_reset_func: the callback for to reset writing data
376 * @param write_data: pointer to pass to C{write_func}
377 * @param progress_func: the callback for progress information
378 * @param progress_data: pointer to pass to C{progress_func}
379 * @param result_handling: instructions for handling the results; see above.
380 * @returns: the result specified by result_handling; details of the response
381 * are then available in C{hdl->last*}
384 perform_request(S3Handle *hdl,
388 const char *subresource,
390 s3_read_func read_func,
391 s3_reset_func read_reset_func,
392 s3_size_func size_func,
393 s3_md5_func md5_func,
395 s3_write_func write_func,
396 s3_reset_func write_reset_func,
398 s3_progress_func progress_func,
399 gpointer progress_data,
400 const result_handling_t *result_handling);
403 * a CURLOPT_WRITEFUNCTION to save part of the response in memory and
404 * call an external function if one was provided.
407 s3_internal_write_func(void *ptr, size_t size, size_t nmemb, void * stream);
410 * a function to reset to our internal buffer
413 s3_internal_reset_func(void * stream);
416 * a CURLOPT_HEADERFUNCTION to save the ETag header only.
419 s3_internal_header_func(void *ptr, size_t size, size_t nmemb, void * stream);
422 compile_regexes(void);
425 * Static function implementations
427 static s3_error_code_t
428 s3_error_code_from_name(char *s3_error_name)
432 if (!s3_error_name) return S3_ERROR_Unknown;
434 /* do a brute-force search through the list, since it's not sorted */
435 for (i = 0; i < S3_ERROR_END; i++) {
436 if (g_ascii_strcasecmp(s3_error_name, s3_error_code_names[i]) == 0)
440 return S3_ERROR_Unknown;
444 s3_error_name_from_code(s3_error_code_t s3_error_code)
446 if (s3_error_code >= S3_ERROR_END)
447 s3_error_code = S3_ERROR_Unknown;
449 return s3_error_code_names[s3_error_code];
453 s3_curl_supports_ssl(void)
455 static int supported = -1;
456 if (supported == -1) {
457 #if defined(CURL_VERSION_SSL)
458 curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);
459 if (info->features & CURL_VERSION_SSL)
472 s3_curl_throttling_compat(void)
474 /* CURLOPT_MAX_SEND_SPEED_LARGE added in 7.15.5 */
475 #if LIBCURL_VERSION_NUM >= 0x070f05
476 curl_version_info_data *info;
478 /* check the runtime version too */
479 info = curl_version_info(CURLVERSION_NOW);
480 return info->version_num >= 0x070f05;
487 lookup_result(const result_handling_t *result_handling,
489 s3_error_code_t s3_error_code,
492 while (result_handling->response_code
493 || result_handling->s3_error_code
494 || result_handling->curl_code) {
495 if ((result_handling->response_code && result_handling->response_code != response_code)
496 || (result_handling->s3_error_code && result_handling->s3_error_code != s3_error_code)
497 || (result_handling->curl_code && result_handling->curl_code != curl_code)) {
502 return result_handling->result;
505 /* return the result for the terminator, as the default */
506 return result_handling->result;
510 is_non_empty_string(const char *str)
512 return str && str[0] != '\0';
516 build_url(const char *bucket,
518 const char *subresource,
520 gboolean use_subdomain,
524 char *esc_bucket = NULL, *esc_key = NULL;
527 url = g_string_new("http");
529 g_string_append(url, "s");
531 g_string_append(url, "://");
534 if (use_subdomain && bucket)
535 g_string_append_printf(url, "%s.s3.amazonaws.com/", bucket);
537 g_string_append(url, "s3.amazonaws.com/");
540 if (!use_subdomain && bucket) {
541 esc_bucket = curl_escape(bucket, 0);
542 if (!esc_bucket) goto cleanup;
543 g_string_append_printf(url, "%s", esc_bucket);
545 g_string_append(url, "/");
549 esc_key = curl_escape(key, 0);
550 if (!esc_key) goto cleanup;
551 g_string_append_printf(url, "%s", esc_key);
555 if (subresource || query)
556 g_string_append(url, "?");
559 g_string_append(url, subresource);
561 if (subresource && query)
562 g_string_append(url, "&");
565 g_string_append(url, query);
568 if (esc_bucket) curl_free(esc_bucket);
569 if (esc_key) curl_free(esc_key);
571 return g_string_free(url, FALSE);
574 static struct curl_slist *
575 authenticate_request(S3Handle *hdl,
579 const char *subresource,
580 const char *md5_hash,
581 gboolean use_subdomain)
588 GByteArray *md = NULL;
589 char *auth_base64 = NULL;
590 struct curl_slist *headers = NULL;
591 char *esc_bucket = NULL, *esc_key = NULL;
592 GString *auth_string = NULL;
595 static const char *wkday[] = {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"};
596 static const char *month[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
597 "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
601 /* Build the string to sign, per the S3 spec.
602 * See: "Authenticating REST Requests" - API Version 2006-03-01 pg 58
606 auth_string = g_string_new(verb);
607 g_string_append(auth_string, "\n");
609 /* Content-MD5 header */
611 g_string_append(auth_string, md5_hash);
612 g_string_append(auth_string, "\n");
614 /* Content-Type is empty*/
615 g_string_append(auth_string, "\n");
618 /* calculate the date */
621 /* sync clock with amazon s3 */
622 t = t + hdl->time_offset_with_s3;
625 if (!gmtime_s(&tmp, &t)) g_debug("localtime error");
627 if (!gmtime_r(&t, &tmp)) perror("localtime");
631 date = g_strdup_printf("%s, %02d %s %04d %02d:%02d:%02d GMT",
632 wkday[tmp.tm_wday], tmp.tm_mday, month[tmp.tm_mon], 1900+tmp.tm_year,
633 tmp.tm_hour, tmp.tm_min, tmp.tm_sec);
635 g_string_append(auth_string, date);
636 g_string_append(auth_string, "\n");
638 /* CanonicalizedAmzHeaders, sorted lexicographically */
639 if (is_non_empty_string(hdl->user_token)) {
640 g_string_append(auth_string, AMAZON_SECURITY_HEADER);
641 g_string_append(auth_string, ":");
642 g_string_append(auth_string, hdl->user_token);
643 g_string_append(auth_string, ",");
644 g_string_append(auth_string, STS_PRODUCT_TOKEN);
645 g_string_append(auth_string, "\n");
648 if (is_non_empty_string(hdl->storage_class)) {
649 g_string_append(auth_string, AMAZON_STORAGE_CLASS_HEADER);
650 g_string_append(auth_string, ":");
651 g_string_append(auth_string, hdl->storage_class);
652 g_string_append(auth_string, "\n");
655 /* CanonicalizedResource */
656 g_string_append(auth_string, "/");
659 g_string_append(auth_string, bucket);
661 esc_bucket = curl_escape(bucket, 0);
662 if (!esc_bucket) goto cleanup;
663 g_string_append(auth_string, esc_bucket);
667 if (bucket && (use_subdomain || key))
668 g_string_append(auth_string, "/");
671 esc_key = curl_escape(key, 0);
672 if (!esc_key) goto cleanup;
673 g_string_append(auth_string, esc_key);
677 g_string_append(auth_string, "?");
678 g_string_append(auth_string, subresource);
681 /* run HMAC-SHA1 on the canonicalized string */
682 md = g_byte_array_sized_new(EVP_MAX_MD_SIZE+1);
684 HMAC_Init_ex(&ctx, hdl->secret_key, (int) strlen(hdl->secret_key), EVP_sha1(), NULL);
685 HMAC_Update(&ctx, (unsigned char*) auth_string->str, auth_string->len);
686 HMAC_Final(&ctx, md->data, &md->len);
687 HMAC_CTX_cleanup(&ctx);
688 auth_base64 = s3_base64_encode(md);
690 /* append the new headers */
691 if (is_non_empty_string(hdl->user_token)) {
692 /* Devpay headers are included in hash. */
693 buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s", hdl->user_token);
694 headers = curl_slist_append(headers, buf);
697 buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s", STS_PRODUCT_TOKEN);
698 headers = curl_slist_append(headers, buf);
702 if (is_non_empty_string(hdl->storage_class)) {
703 buf = g_strdup_printf(AMAZON_STORAGE_CLASS_HEADER ": %s", hdl->storage_class);
704 headers = curl_slist_append(headers, buf);
709 buf = g_strdup_printf("Authorization: AWS %s:%s",
710 hdl->access_key, auth_base64);
711 headers = curl_slist_append(headers, buf);
714 if (md5_hash && '\0' != md5_hash[0]) {
715 buf = g_strdup_printf("Content-MD5: %s", md5_hash);
716 headers = curl_slist_append(headers, buf);
720 buf = g_strdup_printf("Date: %s", date);
721 headers = curl_slist_append(headers, buf);
727 g_byte_array_free(md, TRUE);
729 g_string_free(auth_string, TRUE);
735 interpret_response(S3Handle *hdl,
737 char *curl_error_buffer,
741 const char *content_md5)
743 long response_code = 0;
744 regmatch_t pmatch[2];
745 char *error_name = NULL, *message = NULL;
746 char *body_copy = NULL;
749 if (!hdl) return FALSE;
751 if (hdl->last_message) g_free(hdl->last_message);
752 hdl->last_message = NULL;
754 /* bail out from a CURL error */
755 if (curl_code != CURLE_OK) {
756 hdl->last_curl_code = curl_code;
757 hdl->last_message = g_strdup_printf("CURL error: %s", curl_error_buffer);
761 /* CURL seems to think things were OK, so get its response code */
762 curl_easy_getinfo(hdl->curl, CURLINFO_RESPONSE_CODE, &response_code);
763 hdl->last_response_code = response_code;
765 /* check ETag, if present */
766 if (etag && content_md5 && 200 == response_code) {
767 if (etag && g_ascii_strcasecmp(etag, content_md5))
768 hdl->last_message = g_strdup("S3 Error: Possible data corruption (ETag returned by Amazon did not match the MD5 hash of the data sent)");
774 if (200 <= response_code && response_code < 400) {
775 /* 2xx and 3xx codes won't have a response body we care about */
776 hdl->last_s3_error_code = S3_ERROR_None;
780 /* Now look at the body to try to get the actual Amazon error message. Rather
781 * than parse out the XML, just use some regexes. */
783 /* impose a reasonable limit on body size */
784 if (body_len > MAX_ERROR_RESPONSE_LEN) {
785 hdl->last_message = g_strdup("S3 Error: Unknown (response body too large to parse)");
787 } else if (!body || body_len == 0) {
788 hdl->last_message = g_strdup("S3 Error: Unknown (empty response body)");
789 return TRUE; /* perhaps a network error; retry the request */
792 /* use strndup to get a zero-terminated string */
793 body_copy = g_strndup(body, body_len);
794 if (!body_copy) goto cleanup;
796 if (!s3_regexec_wrap(&error_name_regex, body_copy, 2, pmatch, 0))
797 error_name = find_regex_substring(body_copy, pmatch[1]);
799 if (!s3_regexec_wrap(&message_regex, body_copy, 2, pmatch, 0))
800 message = find_regex_substring(body_copy, pmatch[1]);
803 hdl->last_s3_error_code = s3_error_code_from_name(error_name);
807 hdl->last_message = message;
808 message = NULL; /* steal the reference to the string */
819 /* a CURLOPT_READFUNCTION to read data from a buffer. */
821 s3_buffer_read_func(void *ptr, size_t size, size_t nmemb, void * stream)
823 CurlBuffer *data = stream;
824 guint bytes_desired = (guint) size * nmemb;
826 /* check the number of bytes remaining, just to be safe */
827 if (bytes_desired > data->buffer_len - data->buffer_pos)
828 bytes_desired = data->buffer_len - data->buffer_pos;
830 memcpy((char *)ptr, data->buffer + data->buffer_pos, bytes_desired);
831 data->buffer_pos += bytes_desired;
833 return bytes_desired;
837 s3_buffer_size_func(void *stream)
839 CurlBuffer *data = stream;
840 return data->buffer_len;
844 s3_buffer_md5_func(void *stream)
846 CurlBuffer *data = stream;
847 GByteArray req_body_gba = {(guint8 *)data->buffer, data->buffer_len};
849 return s3_compute_md5_hash(&req_body_gba);
853 s3_buffer_reset_func(void *stream)
855 CurlBuffer *data = stream;
856 data->buffer_pos = 0;
859 /* a CURLOPT_WRITEFUNCTION to write data to a buffer. */
861 s3_buffer_write_func(void *ptr, size_t size, size_t nmemb, void *stream)
863 CurlBuffer * data = stream;
864 guint new_bytes = (guint) size * nmemb;
865 guint bytes_needed = data->buffer_pos + new_bytes;
867 /* error out if the new size is greater than the maximum allowed */
868 if (data->max_buffer_size && bytes_needed > data->max_buffer_size)
871 /* reallocate if necessary. We use exponential sizing to make this
872 * happen less often. */
873 if (bytes_needed > data->buffer_len) {
874 guint new_size = MAX(bytes_needed, data->buffer_len * 2);
875 if (data->max_buffer_size) {
876 new_size = MIN(new_size, data->max_buffer_size);
878 data->buffer = g_realloc(data->buffer, new_size);
879 data->buffer_len = new_size;
882 return 0; /* returning zero signals an error to libcurl */
884 /* actually copy the data to the buffer */
885 memcpy(data->buffer + data->buffer_pos, ptr, new_bytes);
886 data->buffer_pos += new_bytes;
888 /* signal success to curl */
892 /* a CURLOPT_READFUNCTION that writes nothing. */
894 s3_empty_read_func(G_GNUC_UNUSED void *ptr, G_GNUC_UNUSED size_t size, G_GNUC_UNUSED size_t nmemb, G_GNUC_UNUSED void * stream)
900 s3_empty_size_func(G_GNUC_UNUSED void *stream)
906 s3_empty_md5_func(G_GNUC_UNUSED void *stream)
908 static const GByteArray empty = {(guint8 *) "", 0};
910 return s3_compute_md5_hash(&empty);
913 /* a CURLOPT_WRITEFUNCTION to write data that just counts data.
914 * s3_write_data should be NULL or a pointer to an gint64.
917 s3_counter_write_func(G_GNUC_UNUSED void *ptr, size_t size, size_t nmemb, void *stream)
919 gint64 *count = (gint64*) stream, inc = nmemb*size;
921 if (count) *count += inc;
926 s3_counter_reset_func(void *stream)
928 gint64 *count = (gint64*) stream;
930 if (count) *count = 0;
934 /* a CURLOPT_READFUNCTION to read data from a file. */
936 s3_file_read_func(void *ptr, size_t size, size_t nmemb, void * stream)
938 HANDLE *hFile = (HANDLE *) stream;
941 ReadFile(hFile, ptr, (DWORD) size*nmemb, &bytes_read, NULL);
946 s3_file_size_func(void *stream)
948 HANDLE *hFile = (HANDLE *) stream;
949 DWORD size = GetFileSize(hFile, NULL);
951 if (INVALID_FILE_SIZE == size) {
959 s3_file_md5_func(void *stream)
961 #define S3_MD5_BUF_SIZE (10*1024)
962 HANDLE *hFile = (HANDLE *) stream;
963 guint8 buf[S3_MD5_BUF_SIZE];
966 GByteArray *ret = NULL;
968 g_assert(INVALID_SET_FILE_POINTER != SetFilePointer(hFile, 0, NULL, FILE_BEGIN));
970 ret = g_byte_array_sized_new(S3_MD5_HASH_BYTE_LEN);
971 g_byte_array_set_size(ret, S3_MD5_HASH_BYTE_LEN);
974 while (ReadFile(hFile, buf, S3_MD5_BUF_SIZE, &bytes_read, NULL)) {
975 MD5_Update(&md5_ctx, buf, bytes_read);
977 MD5_Final(ret->data, &md5_ctx);
979 g_assert(INVALID_SET_FILE_POINTER != SetFilePointer(hFile, 0, NULL, FILE_BEGIN));
981 #undef S3_MD5_BUF_SIZE
985 s3_file_reset_func(void *stream)
987 g_assert(INVALID_SET_FILE_POINTER != SetFilePointer(hFile, 0, NULL, FILE_BEGIN));
990 /* a CURLOPT_WRITEFUNCTION to write data to a file. */
992 s3_file_write_func(void *ptr, size_t size, size_t nmemb, void *stream)
994 HANDLE *hFile = (HANDLE *) stream;
997 WriteFile(hFile, ptr, (DWORD) size*nmemb, &bytes_written, NULL);
998 return bytes_written;
1003 curl_debug_message(CURL *curl G_GNUC_UNUSED,
1007 void *unused G_GNUC_UNUSED)
1011 char **lines, **line;
1018 case CURLINFO_HEADER_IN:
1019 lineprefix="Hdr In: ";
1022 case CURLINFO_HEADER_OUT:
1023 lineprefix="Hdr Out: ";
1027 /* ignore data in/out -- nobody wants to see that in the
1032 /* split the input into lines */
1033 message = g_strndup(s, (gsize) len);
1034 lines = g_strsplit(message, "\n", -1);
1037 for (line = lines; *line; line++) {
1038 if (**line == '\0') continue; /* skip blank lines */
1039 g_debug("%s%s", lineprefix, *line);
1047 perform_request(S3Handle *hdl,
1051 const char *subresource,
1053 s3_read_func read_func,
1054 s3_reset_func read_reset_func,
1055 s3_size_func size_func,
1056 s3_md5_func md5_func,
1058 s3_write_func write_func,
1059 s3_reset_func write_reset_func,
1060 gpointer write_data,
1061 s3_progress_func progress_func,
1062 gpointer progress_data,
1063 const result_handling_t *result_handling)
1065 gboolean use_subdomain;
1067 s3_result_t result = S3_RESULT_FAIL; /* assume the worst.. */
1068 CURLcode curl_code = CURLE_OK;
1069 char curl_error_buffer[CURL_ERROR_SIZE] = "";
1070 struct curl_slist *headers = NULL;
1071 /* Set S3Internal Data */
1072 S3InternalData int_writedata = {{NULL, 0, 0, MAX_ERROR_RESPONSE_LEN}, NULL, NULL, NULL, FALSE, FALSE, NULL, hdl};
1073 gboolean should_retry;
1075 gulong backoff = EXPONENTIAL_BACKOFF_START_USEC;
1076 /* corresponds to PUT, HEAD, GET, and POST */
1077 int curlopt_upload = 0, curlopt_nobody = 0, curlopt_httpget = 0, curlopt_post = 0;
1078 /* do we want to examine the headers */
1079 const char *curlopt_customrequest = NULL;
1080 /* for MD5 calculation */
1081 GByteArray *md5_hash = NULL;
1082 gchar *md5_hash_hex = NULL, *md5_hash_b64 = NULL;
1083 size_t request_body_size = 0;
1085 g_assert(hdl != NULL && hdl->curl != NULL);
1089 use_subdomain = is_non_empty_string(hdl->bucket_location);
1090 url = build_url(bucket, key, subresource, query, use_subdomain, hdl->use_ssl);
1091 if (!url) goto cleanup;
1093 /* libcurl may behave strangely if these are not set correctly */
1094 if (!strncmp(verb, "PUT", 4)) {
1096 } else if (!strncmp(verb, "GET", 4)) {
1097 curlopt_httpget = 1;
1098 } else if (!strncmp(verb, "POST", 5)) {
1100 } else if (!strncmp(verb, "HEAD", 5)) {
1103 curlopt_customrequest = verb;
1107 request_body_size = size_func(read_data);
1111 md5_hash = md5_func(read_data);
1113 md5_hash_b64 = s3_base64_encode(md5_hash);
1114 md5_hash_hex = s3_hex_encode(md5_hash);
1115 g_byte_array_free(md5_hash, TRUE);
1119 /* Curl will use fread() otherwise */
1120 read_func = s3_empty_read_func;
1124 int_writedata.write_func = write_func;
1125 int_writedata.reset_func = write_reset_func;
1126 int_writedata.write_data = write_data;
1128 /* Curl will use fwrite() otherwise */
1129 int_writedata.write_func = s3_counter_write_func;
1130 int_writedata.reset_func = s3_counter_reset_func;
1131 int_writedata.write_data = NULL;
1137 curl_slist_free_all(headers);
1139 curl_error_buffer[0] = '\0';
1140 if (read_reset_func) {
1141 read_reset_func(read_data);
1143 /* calls write_reset_func */
1144 s3_internal_reset_func(&int_writedata);
1146 /* set up the request */
1147 headers = authenticate_request(hdl, verb, bucket, key, subresource,
1148 md5_hash_b64, is_non_empty_string(hdl->bucket_location));
1150 if (hdl->use_ssl && hdl->ca_info) {
1151 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_CAINFO, hdl->ca_info)))
1155 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_VERBOSE, hdl->verbose)))
1158 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_DEBUGFUNCTION,
1159 curl_debug_message)))
1162 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_ERRORBUFFER,
1163 curl_error_buffer)))
1165 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_NOPROGRESS, 1)))
1167 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_FOLLOWLOCATION, 1)))
1169 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_URL, url)))
1171 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HTTPHEADER,
1174 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_WRITEFUNCTION, s3_internal_write_func)))
1176 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_WRITEDATA, &int_writedata)))
1178 /* Note: we always have to set this apparently, for consistent "end of header" detection */
1179 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HEADERFUNCTION, s3_internal_header_func)))
1181 /* Note: if set, CURLOPT_HEADERDATA seems to also be used for CURLOPT_WRITEDATA ? */
1182 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HEADERDATA, &int_writedata)))
1184 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSFUNCTION, progress_func)))
1186 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSDATA, progress_data)))
1189 /* CURLOPT_INFILESIZE_LARGE added in 7.11.0 */
1190 #if LIBCURL_VERSION_NUM >= 0x070b00
1191 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_INFILESIZE_LARGE, (curl_off_t)request_body_size)))
1194 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_INFILESIZE, (long)request_body_size)))
1197 /* CURLOPT_MAX_{RECV,SEND}_SPEED_LARGE added in 7.15.5 */
1198 #if LIBCURL_VERSION_NUM >= 0x070f05
1199 if (s3_curl_throttling_compat()) {
1200 if (hdl->max_send_speed)
1201 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_MAX_SEND_SPEED_LARGE, (curl_off_t)hdl->max_send_speed)))
1204 if (hdl->max_recv_speed)
1205 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_MAX_SEND_SPEED_LARGE, (curl_off_t)hdl->max_recv_speed)))
1210 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HTTPGET, curlopt_httpget)))
1212 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_UPLOAD, curlopt_upload)))
1214 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_POST, curlopt_post)))
1216 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_NOBODY, curlopt_nobody)))
1218 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_CUSTOMREQUEST,
1219 curlopt_customrequest)))
1223 if (curlopt_upload) {
1224 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READFUNCTION, read_func)))
1226 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA, read_data)))
1229 /* Clear request_body options. */
1230 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READFUNCTION,
1233 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA,
1238 /* Perform the request */
1239 curl_code = curl_easy_perform(hdl->curl);
1242 /* interpret the response into hdl->last* */
1243 curl_error: /* (label for short-circuiting the curl_easy_perform call) */
1244 should_retry = interpret_response(hdl, curl_code, curl_error_buffer,
1245 int_writedata.resp_buf.buffer, int_writedata.resp_buf.buffer_pos, int_writedata.etag, md5_hash_hex);
1247 /* and, unless we know we need to retry, see what we're to do now */
1248 if (!should_retry) {
1249 result = lookup_result(result_handling, hdl->last_response_code,
1250 hdl->last_s3_error_code, hdl->last_curl_code);
1252 /* break out of the while(1) unless we're retrying */
1253 if (result != S3_RESULT_RETRY)
1257 if (retries >= EXPONENTIAL_BACKOFF_MAX_RETRIES) {
1258 /* we're out of retries, so annotate hdl->last_message appropriately and bail
1260 char *m = g_strdup_printf("Too many retries; last message was '%s'", hdl->last_message);
1261 if (hdl->last_message) g_free(hdl->last_message);
1262 hdl->last_message = m;
1263 result = S3_RESULT_FAIL;
1269 backoff *= EXPONENTIAL_BACKOFF_BASE;
1272 if (result != S3_RESULT_OK) {
1273 g_debug(_("%s %s failed with %d/%s"), verb, url,
1274 hdl->last_response_code,
1275 s3_error_name_from_code(hdl->last_s3_error_code));
1280 if (headers) curl_slist_free_all(headers);
1281 g_free(md5_hash_b64);
1282 g_free(md5_hash_hex);
1284 /* we don't deallocate the response body -- we keep it for later */
1285 hdl->last_response_body = int_writedata.resp_buf.buffer;
1286 hdl->last_response_body_size = int_writedata.resp_buf.buffer_pos;
1287 hdl->last_num_retries = retries;
/* CURLOPT_WRITEFUNCTION callback used by perform_request.
 * Tees response-body bytes into the handle's internal response buffer
 * and, when one is configured, into the user-supplied write function.
 * NOTE(review): several lines are elided in this view; commentary
 * covers only the code shown. */
1294 s3_internal_write_func(void *ptr, size_t size, size_t nmemb, void * stream)
1296     S3InternalData *data = (S3InternalData *) stream;
/* ignore body data delivered before the final header --
 * headers_done is raised by s3_internal_header_func */
1299     if (!data->headers_done)
1302     /* call write on internal buffer (if not full) */
1303     if (data->int_write_done) {
1306         bytes_saved = s3_buffer_write_func(ptr, size, nmemb, &data->resp_buf);
/* once the internal buffer rejects data, stop writing to it for the
 * remainder of this response */
1308         data->int_write_done = TRUE;
1311     /* call write on user buffer */
1312     if (data->write_func) {
1313         return data->write_func(ptr, size, nmemb, data->write_data);
/* Reset callback used when a request is retried: clears the internal
 * response buffer and per-response flags, then forwards the reset to
 * the user-supplied reset function, if any. */
1320 s3_internal_reset_func(void * stream)
1322     S3InternalData *data = (S3InternalData *) stream;
1324     s3_buffer_reset_func(&data->resp_buf);
1325     data->headers_done = FALSE;
1326     data->int_write_done = FALSE;
1328     if (data->reset_func) {
1329         data->reset_func(data->write_data);
/* CURLOPT_HEADERFUNCTION callback: scans each response-header line for
 * an ETag (saved into data->etag), detects the blank line that ends the
 * headers, and computes the clock offset between the S3 server's Date
 * header and the local clock (stored in hdl->time_offset_with_s3).
 * NOTE(review): lines are elided in this view; comments cover only the
 * visible code. */
1334 s3_internal_header_func(void *ptr, size_t size, size_t nmemb, void * stream)
1336     static const char *final_header = "\r\n";
1337     time_t remote_time_in_sec,local_time;
1339     regmatch_t pmatch[2];
1340     S3InternalData *data = (S3InternalData *) stream;
/* curl does not NUL-terminate header data, so copy it first */
1342     header = g_strndup((gchar *) ptr, (gsize) size*nmemb);
1344     if (!s3_regexec_wrap(&etag_regex, header, 2, pmatch, 0))
1345         data->etag = find_regex_substring(header, pmatch[1]);
/* a bare CRLF marks the end of the header block */
1346     if (!strcmp(final_header, header))
1347         data->headers_done = TRUE;
1349     /* If date header is found */
1350     if (!s3_regexec_wrap(&date_sync_regex, header, 2, pmatch, 0)){
1351         char *date = find_regex_substring(header, pmatch[1]);
1353         /* Remote time is always in GMT: RFC 2616 */
1354         /* both curl_getdate and time operate in UTC, so no timezone math is necessary */
/* curl_getdate returns -1 on parse failure */
1355         if ( (remote_time_in_sec = curl_getdate(date, NULL)) < 0 ){
1356             g_debug("Error: Conversion of remote time to seconds failed.");
1357             data->hdl->time_offset_with_s3 = 0;
1359             local_time = time(NULL);
1361             data->hdl->time_offset_with_s3 = remote_time_in_sec - local_time;
1363         if (data->hdl->verbose)
1364             g_debug("Time Offset (remote - local) :%ld",(long)data->hdl->time_offset_with_s3);
/* Compile the file-global regexes used to parse S3 responses
 * (error code/message, ETag, location constraint, Date header,
 * bucket-name subdomain check).  Two implementations: POSIX regex
 * when HAVE_REGEX_H, otherwise GLib's PCRE-based GRegex.
 * NOTE(review): the POSIX subdomain pattern does not allow '.' while
 * the GRegex one does -- looks inconsistent; confirm upstream intent. */
1375 compile_regexes(void)
1379     /* using POSIX regular expressions */
1380     struct {const char * str; int flags; regex_t *regex;} regexes[] = {
1381         {"<Code>[[:space:]]*([^<]*)[[:space:]]*</Code>", REG_EXTENDED | REG_ICASE, &error_name_regex},
1382         {"^ETag:[[:space:]]*\"([^\"]+)\"[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &etag_regex},
1383         {"<Message>[[:space:]]*([^<]*)[[:space:]]*</Message>", REG_EXTENDED | REG_ICASE, &message_regex},
1384         {"^[a-z0-9](-*[a-z0-9]){2,62}$", REG_EXTENDED | REG_NOSUB, &subdomain_regex},
1385         {"(/>)|(>([^<]*)</LocationConstraint>)", REG_EXTENDED | REG_ICASE, &location_con_regex},
1386         {"^Date:(.*)\r",REG_EXTENDED | REG_ICASE | REG_NEWLINE, &date_sync_regex},
1389     char regmessage[1024];
1393     for (i = 0; regexes[i].str; i++) {
1394         reg_result = regcomp(regexes[i].regex, regexes[i].str, regexes[i].flags);
1395         if (reg_result != 0) {
1396             size = regerror(reg_result, regexes[i].regex, regmessage, sizeof(regmessage));
/* g_error() aborts; a failed compile here is fatal by design */
1397             g_error(_("Regex error: %s"), regmessage);
1401 #else /* ! HAVE_REGEX_H */
1402     /* using PCRE via GLib */
1403     struct {const char * str; int flags; regex_t *regex;} regexes[] = {
1404         {"<Code>\\s*([^<]*)\\s*</Code>",
1405             G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
1407         {"^ETag:\\s*\"([^\"]+)\"\\s*$",
1408             G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
1410         {"<Message>\\s*([^<]*)\\s*</Message>",
1411             G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
1413         {"^[a-z0-9]((-*[a-z0-9])|(\\.[a-z0-9])){2,62}$",
1414             G_REGEX_OPTIMIZE | G_REGEX_NO_AUTO_CAPTURE,
1416         {"(/>)|(>([^<]*)</LocationConstraint>)",
1418             &location_con_regex},
1420             G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
1427     for (i = 0; regexes[i].str; i++) {
1428         *(regexes[i].regex) = g_regex_new(regexes[i].str, regexes[i].flags, 0, &err);
1430             g_error(_("Regex error: %s"), err->message);
1440 * Public function implementations
/* One-time library initialization: compiles the shared regexes under a
 * mutex so concurrent callers initialize exactly once.  Returns the
 * cached result on subsequent calls. */
1443 gboolean s3_init(void)
1445     static GStaticMutex mutex = G_STATIC_MUTEX_INIT;
/* 'init' latches after the first call; 'ret' caches the outcome */
1446     static gboolean init = FALSE, ret;
1448     /* n.b. curl_global_init is called in common-src/glib-util.c:glib_init() */
1450     g_static_mutex_lock (&mutex);
1452     ret = compile_regexes();
1455     g_static_mutex_unlock(&mutex);
/* TRUE if the linked libcurl is newer than 7.10.2 and thus usable for
 * bucket-location (virtual-hosted style) requests. */
1460 s3_curl_location_compat(void)
1462     curl_version_info_data *info;
1464     info = curl_version_info(CURLVERSION_NOW);
/* 0x070a02 == 7.10.2 */
1465     return info->version_num > 0x070a02;
/* TRUE if BUCKET is usable as a DNS subdomain (required for
 * location-constrained buckets); tests against subdomain_regex. */
1469 s3_bucket_location_compat(const char *bucket)
1471     return !s3_regexec_wrap(&subdomain_regex, bucket, 0, NULL, 0);
/* Allocate and initialize a new S3Handle.  Copies all string
 * parameters (caller keeps ownership of its arguments); access_key and
 * secret_key are required, the rest may be NULL.  SSL defaults to on
 * when the linked curl supports it.
 * NOTE(review): error-path lines are elided in this view; presumably
 * there is an 'error:' label that frees the handle -- confirm. */
1475 s3_open(const char *access_key,
1476 const char *secret_key,
1477 const char *user_token,
1478 const char *bucket_location,
1479 const char *storage_class,
1484     hdl = g_new0(S3Handle, 1);
1485     if (!hdl) goto error;
1487     hdl->verbose = FALSE;
1488     hdl->use_ssl = s3_curl_supports_ssl();
1490     g_assert(access_key);
1491     hdl->access_key = g_strdup(access_key);
1492     g_assert(secret_key);
1493     hdl->secret_key = g_strdup(secret_key);
/* g_strdup(NULL) returns NULL, so optional parameters are safe */
1495     hdl->user_token = g_strdup(user_token);
1498     hdl->bucket_location = g_strdup(bucket_location);
1501     hdl->storage_class = g_strdup(storage_class);
1504     hdl->ca_info = g_strdup(ca_info);
1506     hdl->curl = curl_easy_init();
1507     if (!hdl->curl) goto error;
/* Release an S3Handle and everything it owns.
 * NOTE(review): the 'if (x) g_free(x)' guards are redundant --
 * g_free(NULL) is a no-op -- but harmless. */
1517 s3_free(S3Handle *hdl)
1522     g_free(hdl->access_key);
1523     g_free(hdl->secret_key);
1524     if (hdl->user_token) g_free(hdl->user_token);
1525     if (hdl->bucket_location) g_free(hdl->bucket_location);
1526     if (hdl->storage_class) g_free(hdl->storage_class);
1527     if (hdl->curl) curl_easy_cleanup(hdl->curl);
/* Clear the per-request "last result" state on the handle (message,
 * response/curl/S3 error codes, retry count, response body) ahead of a
 * new request. */
1534 s3_reset(S3Handle *hdl)
1537     /* We don't call curl_easy_reset here, because doing that in curl
1538      * < 7.16 blanks the default CA certificate path, and there's no way
1539      * to get it back. */
1540     if (hdl->last_message) {
1541         g_free(hdl->last_message);
1542         hdl->last_message = NULL;
1545     hdl->last_response_code = 0;
1546     hdl->last_curl_code = 0;
1547     hdl->last_s3_error_code = 0;
1548     hdl->last_num_retries = 0;
1550     if (hdl->last_response_body) {
1551         g_free(hdl->last_response_body);
1552         hdl->last_response_body = NULL;
1555     hdl->last_response_body_size = 0;
/* Report the outcome of the handle's most recent request through any
 * of the optional out-parameters (pass NULL for values you don't
 * want).  Returned strings point into the handle -- do not free them.
 * NOTE(review): elided lines presumably wrap the two halves below in
 * 'if (hdl) { ... } else { ... }' -- the second half is the
 * NULL-handle fallback; confirm against the full source. */
1560 s3_error(S3Handle *hdl,
1561 const char **message,
1562 guint *response_code,
1563 s3_error_code_t *s3_error_code,
1564 const char **s3_error_name,
1565 CURLcode *curl_code,
1569     if (message) *message = hdl->last_message;
1570     if (response_code) *response_code = hdl->last_response_code;
1571     if (s3_error_code) *s3_error_code = hdl->last_s3_error_code;
1572     if (s3_error_name) *s3_error_name = s3_error_name_from_code(hdl->last_s3_error_code);
1573     if (curl_code) *curl_code = hdl->last_curl_code;
1574     if (num_retries) *num_retries = hdl->last_num_retries;
1576     /* no hdl? return something coherent, anyway */
1577     if (message) *message = "NULL S3Handle";
1578     if (response_code) *response_code = 0;
1579     if (s3_error_code) *s3_error_code = 0;
1580     if (s3_error_name) *s3_error_name = NULL;
1581     if (curl_code) *curl_code = 0;
1582     if (num_retries) *num_retries = 0;
/* Enable or disable verbose (debug) logging for this handle. */
1587 s3_verbose(S3Handle *hdl, gboolean verbose)
1589     hdl->verbose = verbose;
/* Set an upload throttle (bytes/sec) on the handle; fails (visible
 * early-out below) when the linked curl lacks throttling support. */
1593 s3_set_max_send_speed(S3Handle *hdl, guint64 max_send_speed)
1595     if (!s3_curl_throttling_compat())
1598     hdl->max_send_speed = max_send_speed;
/* Set a download throttle (bytes/sec) on the handle; fails when the
 * linked curl lacks throttling support. */
1604 s3_set_max_recv_speed(S3Handle *hdl, guint64 max_recv_speed)
1606     if (!s3_curl_throttling_compat())
1609     hdl->max_recv_speed = max_recv_speed;
1615 s3_use_ssl(S3Handle *hdl, gboolean use_ssl)
1617 gboolean ret = TRUE;
1618 if (use_ssl & !s3_curl_supports_ssl()) {
1621 hdl->use_ssl = use_ssl;
/* Build a human-readable, newly-allocated description of the handle's
 * last error, combining the message with optional S3 error name, HTTP
 * code, CURLcode, and retry count.  Caller must g_free() the result. */
1627 s3_strerror(S3Handle *hdl)
1629     const char *message;
1630     guint response_code;
1631     const char *s3_error_name;
/* each optional fragment stays "" unless its value is meaningful */
1635     char s3_info[256] = "";
1636     char response_info[16] = "";
1637     char curl_info[32] = "";
1638     char retries_info[32] = "";
1640     s3_error(hdl, &message, &response_code, NULL, &s3_error_name, &curl_code, &num_retries);
1643         message = "Unknown S3 error";
1645         g_snprintf(s3_info, sizeof(s3_info), " (%s)", s3_error_name);
1647         g_snprintf(response_info, sizeof(response_info), " (HTTP %d)", response_code);
1649         g_snprintf(curl_info, sizeof(curl_info), " (CURLcode %d)", curl_code);
1651         g_snprintf(retries_info, sizeof(retries_info), " (after %d retries)", num_retries);
1653     return g_strdup_printf("%s%s%s%s%s", message, s3_info, curl_info, response_info, retries_info);
1656 /* Perform an upload. When this function returns, KEY and
1657 * BUFFER remain the responsibility of the caller.
1659 * @param hdl: the S3 handle through which to perform the upload
1660 * @param bucket: the bucket to which the upload should be made
1661 * @param key: the key to which the upload should be made
1662 * @param buffer: the data to be uploaded
1663 * @param buffer_len: the length of the data to upload
1664 * @returns: false if an error occurred
/* PUT an object to S3 via perform_request; succeeds only on HTTP 200,
 * with the standard retriable-error handling.  See the comment block
 * above for the parameter contract. */
1667 s3_upload(S3Handle *hdl,
1670 s3_read_func read_func,
1671 s3_reset_func reset_func,
1672 s3_size_func size_func,
1673 s3_md5_func md5_func,
1675 s3_progress_func progress_func,
1676 gpointer progress_data)
1678     s3_result_t result = S3_RESULT_FAIL;
/* response-code -> result mapping consumed by perform_request */
1679     static result_handling_t result_handling[] = {
1680         { 200, 0, 0, S3_RESULT_OK },
1681         RESULT_HANDLING_ALWAYS_RETRY,
1682         { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
1685     g_assert(hdl != NULL);
1687     result = perform_request(hdl, "PUT", bucket, key, NULL, NULL,
1688         read_func, reset_func, size_func, md5_func, read_data,
1689         NULL, NULL, NULL, progress_func, progress_data,
1692     return result == S3_RESULT_OK;
1696 /* Private structure for our "thunk", which tracks where the user is in the list
1698 struct list_keys_thunk {
/* accumulated keys/prefixes, built up by the SAX callbacks */
1699     GSList *filename_list; /* all pending filenames */
1701     gboolean in_contents; /* look for "key" entities in here */
1702     gboolean in_common_prefixes; /* look for "prefix" entities in here */
/* set when the listing response was truncated (more pages follow) */
1704     gboolean is_truncated;
1713 /* Functions for a SAX parser to parse the XML from Amazon */
1716 list_start_element(GMarkupParseContext *context G_GNUC_UNUSED,
1717 const gchar *element_name,
1718 const gchar **attribute_names G_GNUC_UNUSED,
1719 const gchar **attribute_values G_GNUC_UNUSED,
1721 GError **error G_GNUC_UNUSED)
1723 struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;
1725 thunk->want_text = 0;
1726 if (g_ascii_strcasecmp(element_name, "contents") == 0) {
1727 thunk->in_contents = 1;
1728 } else if (g_ascii_strcasecmp(element_name, "commonprefixes") == 0) {
1729 thunk->in_common_prefixes = 1;
1730 } else if (g_ascii_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
1731 thunk->want_text = 1;
1732 } else if (g_ascii_strcasecmp(element_name, "key") == 0 && thunk->in_contents) {
1733 thunk->want_text = 1;
1734 } else if (g_ascii_strcasecmp(element_name, "istruncated")) {
1735 thunk->want_text = 1;
1736 } else if (g_ascii_strcasecmp(element_name, "nextmarker")) {
1737 thunk->want_text = 1;
/* GMarkupParser end_element callback: consumes the text captured by
 * list_text.  Keys/prefixes are prepended to the filename list
 * (ownership of thunk->text transfers to the list); <IsTruncated> sets
 * the pagination flag; <NextMarker> takes ownership of the text as the
 * marker for the next fetch.
 * NOTE(review): elided lines after the prepends presumably NULL out
 * thunk->text to complete the ownership transfer -- confirm. */
1742 list_end_element(GMarkupParseContext *context G_GNUC_UNUSED,
1743 const gchar *element_name,
1745 GError **error G_GNUC_UNUSED)
1747     struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;
1749     if (g_ascii_strcasecmp(element_name, "contents") == 0) {
1750         thunk->in_contents = 0;
1751     } else if (g_ascii_strcasecmp(element_name, "commonprefixes") == 0) {
1752         thunk->in_common_prefixes = 0;
1753     } else if (g_ascii_strcasecmp(element_name, "key") == 0 && thunk->in_contents) {
1754         thunk->filename_list = g_slist_prepend(thunk->filename_list, thunk->text);
1756     } else if (g_ascii_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
1757         thunk->filename_list = g_slist_prepend(thunk->filename_list, thunk->text);
1759     } else if (g_ascii_strcasecmp(element_name, "istruncated") == 0) {
/* anything other than "false" counts as truncated */
1760         if (thunk->text && g_ascii_strncasecmp(thunk->text, "false", 5) != 0)
1761             thunk->is_truncated = TRUE;
1762     } else if (g_ascii_strcasecmp(element_name, "nextmarker") == 0) {
1763         if (thunk->next_marker) g_free(thunk->next_marker);
1764         thunk->next_marker = thunk->text;
/* GMarkupParser text callback: when the current element's text was
 * requested (want_text), replace any previously captured text with a
 * fresh copy of this element's character data. */
1770 list_text(GMarkupParseContext *context G_GNUC_UNUSED,
1774 GError **error G_GNUC_UNUSED)
1776     struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;
1778     if (thunk->want_text) {
1779         if (thunk->text) g_free(thunk->text);
1780         thunk->text = g_strndup(text, text_len);
1784 /* Perform a fetch from S3; several fetches may be involved in a
1785 * single listing operation */
/* Perform one GET-bucket (list) request, building the query string
 * from whichever of prefix/delimiter/marker/max-keys are non-NULL and
 * URL-escaping each value.  The XML response lands in *buf.
 * NOTE(review): lines are elided in this view; the '&' separator logic
 * presumably checks have_prev_part before appending -- confirm. */
1787 list_fetch(S3Handle *hdl,
1790 const char *delimiter,
1792 const char *max_keys,
1795     s3_result_t result = S3_RESULT_FAIL;
1796     static result_handling_t result_handling[] = {
1797         { 200, 0, 0, S3_RESULT_OK },
1798         RESULT_HANDLING_ALWAYS_RETRY,
1799         { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
/* NULL-value entries are simply skipped; list is NULL-terminated */
1801     const char* pos_parts[][2] = {
1803         {"delimiter", delimiter},
1805         {"max-keys", max_keys},
1811     gboolean have_prev_part = FALSE;
1813     /* loop over possible parts to build query string */
1814     query = g_string_new("");
1815     for (i = 0; pos_parts[i][0]; i++) {
1816         if (pos_parts[i][1]) {
1818             g_string_append(query, "&");
1820             have_prev_part = TRUE;
/* curl_escape percent-encodes the value; freed with curl_free */
1821             esc_value = curl_escape(pos_parts[i][1], 0);
1822             g_string_append_printf(query, "%s=%s", pos_parts[i][0], esc_value);
1823             curl_free(esc_value);
1827     /* and perform the request on that URI */
1828     result = perform_request(hdl, "GET", bucket, NULL, NULL, query->str,
1829         NULL, NULL, NULL, NULL, NULL,
1830         S3_BUFFER_WRITE_FUNCS, buf, NULL, NULL,
1833     if (query) g_string_free(query, TRUE);
/* List all keys in a bucket (optionally under a prefix / split by a
 * delimiter), following S3's pagination: list_fetch is called
 * repeatedly, each response is run through the GMarkup SAX parser, and
 * the loop continues while a NextMarker was seen.  On success *list
 * receives the accumulated filename list (caller frees); on failure
 * the partial list is freed here. */
1839 s3_list_keys(S3Handle *hdl,
1842 const char *delimiter,
1846  * max len of XML variables:
1847  * bucket: 255 bytes (p12 API Version 2006-03-01)
1848  * key: 1024 bytes (p15 API Version 2006-03-01)
1849  * size per key: 5GB bytes (p6 API Version 2006-03-01)
1850  * size of size 10 bytes (i.e. 10 decimal digits)
1851  * etag: 44 (observed+assumed)
1852  * owner ID: 64 (observed+assumed)
1853  * owner DisplayName: 255 (assumed)
1854  * StorageClass: const (p18 API Version 2006-03-01)
/* cap on a single response body, derived from the sizes above */
1856     static const guint MAX_RESPONSE_LEN = 1000*2000;
1857     static const char *MAX_KEYS = "1000";
1858     struct list_keys_thunk thunk;
1859     GMarkupParseContext *ctxt = NULL;
1860     static GMarkupParser parser = { list_start_element, list_end_element, list_text, NULL, NULL };
1862     s3_result_t result = S3_RESULT_FAIL;
1863     CurlBuffer buf = {NULL, 0, 0, MAX_RESPONSE_LEN};
1867     thunk.filename_list = NULL;
1869     thunk.next_marker = NULL;
1871     /* Loop until S3 has given us the entire picture */
1873         s3_buffer_reset_func(&buf);
1874         /* get some data from S3 */
1875         result = list_fetch(hdl, bucket, prefix, delimiter, thunk.next_marker, MAX_KEYS, &buf);
1876         if (result != S3_RESULT_OK) goto cleanup;
1878         /* run the parser over it */
1879         thunk.in_contents = FALSE;
1880         thunk.in_common_prefixes = FALSE;
1881         thunk.is_truncated = FALSE;
1882         thunk.want_text = FALSE;
1884         ctxt = g_markup_parse_context_new(&parser, 0, (gpointer)&thunk, NULL);
1886         if (!g_markup_parse_context_parse(ctxt, buf.buffer, buf.buffer_pos, &err)) {
1887             if (hdl->last_message) g_free(hdl->last_message);
1888             hdl->last_message = g_strdup(err->message);
1889             result = S3_RESULT_FAIL;
1893         if (!g_markup_parse_context_end_parse(ctxt, &err)) {
1894             if (hdl->last_message) g_free(hdl->last_message);
1895             hdl->last_message = g_strdup(err->message);
1896             result = S3_RESULT_FAIL;
1900         g_markup_parse_context_free(ctxt);
/* next_marker is non-NULL only when the last page was truncated */
1902     } while (thunk.next_marker);
1905     if (err) g_error_free(err);
1906     if (thunk.text) g_free(thunk.text);
1907     if (thunk.next_marker) g_free(thunk.next_marker);
1908     if (ctxt) g_markup_parse_context_free(ctxt);
1909     if (buf.buffer) g_free(buf.buffer);
1911     if (result != S3_RESULT_OK) {
1912         g_slist_free(thunk.filename_list);
1915         *list = thunk.filename_list;
/* GET an object from S3, streaming the body through write_func.
 * Succeeds only on HTTP 200, with the standard retriable-error
 * handling (reset_func rewinds the consumer on retry). */
1921 s3_read(S3Handle *hdl,
1924 s3_write_func write_func,
1925 s3_reset_func reset_func,
1926 gpointer write_data,
1927 s3_progress_func progress_func,
1928 gpointer progress_data)
1930     s3_result_t result = S3_RESULT_FAIL;
1931     static result_handling_t result_handling[] = {
1932         { 200, 0, 0, S3_RESULT_OK },
1933         RESULT_HANDLING_ALWAYS_RETRY,
1934         { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
1937     g_assert(hdl != NULL);
1938     g_assert(write_func != NULL);
1940     result = perform_request(hdl, "GET", bucket, key, NULL, NULL,
1941         NULL, NULL, NULL, NULL, NULL, write_func, reset_func, write_data,
1942         progress_func, progress_data, result_handling);
1944     return result == S3_RESULT_OK;
/* DELETE an object.  HTTP 204 is success; a 404/NoSuchBucket is also
 * treated as success (the object is certainly gone). */
1948 s3_delete(S3Handle *hdl,
1952     s3_result_t result = S3_RESULT_FAIL;
1953     static result_handling_t result_handling[] = {
1954         { 204, 0, 0, S3_RESULT_OK },
1955         { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_OK },
1956         RESULT_HANDLING_ALWAYS_RETRY,
1957         { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
1960     g_assert(hdl != NULL);
1962     result = perform_request(hdl, "DELETE", bucket, key, NULL, NULL,
1963         NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1966     return result == S3_RESULT_OK;
/* Create a bucket (PUT), sending a CreateBucketConfiguration body when
 * a non-wildcard location constraint is configured.  If the PUT
 * succeeds -- or fails with BucketAlreadyOwnedByYou while a location
 * is configured -- issue a GET ?location and verify that the bucket's
 * existing constraint matches the configured one.
 * NOTE(review): many lines are elided in this view; comments cover
 * only the code shown. */
1970 s3_make_bucket(S3Handle *hdl,
1974     s3_result_t result = S3_RESULT_FAIL;
1975     static result_handling_t result_handling[] = {
1976         { 200, 0, 0, S3_RESULT_OK },
1977         { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_RETRY },
1978         RESULT_HANDLING_ALWAYS_RETRY,
1979         { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
1981     regmatch_t pmatch[4];
1982     char *loc_end_open, *loc_content;
1983     CurlBuffer buf = {NULL, 0, 0, 0}, *ptr = NULL;
/* upload callbacks stay NULL unless a constraint body is needed */
1984     s3_read_func read_func = NULL;
1985     s3_reset_func reset_func = NULL;
1986     s3_md5_func md5_func = NULL;
1987     s3_size_func size_func = NULL;
1989     g_assert(hdl != NULL);
1991     if (is_non_empty_string(hdl->bucket_location) &&
1992         0 != strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location)) {
/* location-constrained buckets must be DNS-subdomain compatible */
1993         if (s3_bucket_location_compat(bucket)) {
1995             buf.buffer = g_strdup_printf(AMAZON_BUCKET_CONF_TEMPLATE, hdl->bucket_location);
1996             buf.buffer_len = (guint) strlen(buf.buffer);
1998             buf.max_buffer_size = buf.buffer_len;
1999             read_func = s3_buffer_read_func;
2000             reset_func = s3_buffer_reset_func;
2001             size_func = s3_buffer_size_func;
2002             md5_func = s3_buffer_md5_func;
2004             hdl->last_message = g_strdup_printf(_(
2005                 "Location constraint given for Amazon S3 bucket, "
2006                 "but the bucket name (%s) is not usable as a subdomain."), bucket);
2011     result = perform_request(hdl, "PUT", bucket, NULL, NULL, NULL,
2012         read_func, reset_func, size_func, md5_func, ptr,
2013         NULL, NULL, NULL, NULL, NULL, result_handling);
2015     if (result == S3_RESULT_OK ||
2016         (is_non_empty_string(hdl->bucket_location) && result != S3_RESULT_OK
2017         && hdl->last_s3_error_code == S3_ERROR_BucketAlreadyOwnedByYou)) {
2018         /* verify that the location constraint on the existing bucket matches
2019          * the one that's configured.
2021         result = perform_request(hdl, "GET", bucket, NULL, "location", NULL,
2022             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2023             NULL, NULL, result_handling);
2025         /* note that we can check only one of the three AND conditions above
2026          * and infer that the others are true
2028         if (result == S3_RESULT_OK && is_non_empty_string(hdl->bucket_location)) {
2029             /* return to the default state of failure */
2030             result = S3_RESULT_FAIL;
2032             if (body) g_free(body);
2033             /* use strndup to get a null-terminated string */
2034             body = g_strndup(hdl->last_response_body, hdl->last_response_body_size);
2036                 hdl->last_message = g_strdup(_("No body received for location request"));
2038             } else if ('\0' == body[0]) {
2039                 hdl->last_message = g_strdup(_("Empty body received for location request"));
2043             if (!s3_regexec_wrap(&location_con_regex, body, 4, pmatch, 0)) {
/* pmatch[1] = self-closing "/>" form; pmatch[3] = element content */
2044                 loc_end_open = find_regex_substring(body, pmatch[1]);
2045                 loc_content = find_regex_substring(body, pmatch[3]);
2047                 /* The case of an empty string is special because XML allows
2048                  * "self-closing" tags
2050                 if (0 == strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location) &&
2051                     '/' != loc_end_open[0])
2052                     hdl->last_message = g_strdup(_("A wildcard location constraint is "
2053                         "configured, but the bucket has a non-empty location constraint"));
2054                 else if (strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location)?
2055                     strncmp(loc_content, hdl->bucket_location, strlen(hdl->bucket_location)) :
2056                     ('\0' != loc_content[0]))
2057                     hdl->last_message = g_strdup(_("The location constraint configured "
2058                         "does not match the constraint currently on the bucket"));
2060                     result = S3_RESULT_OK;
2062                 hdl->last_message = g_strdup(_("Unexpected location response from Amazon S3"));
2068     if (body) g_free(body);
2070     return result == S3_RESULT_OK;
2075 s3_delete_bucket(S3Handle *hdl,
2078 return s3_delete(hdl, bucket, NULL);