2 * Copyright (c) 2008, 2009, 2010 Zmanda, Inc. All Rights Reserved.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published
6 * by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
10 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 * Contact information: Zmanda Inc., 465 S. Mathilda Ave., Suite 300
18 * Sunnyvale, CA 94085, USA, or: http://www.zmanda.com
22 * - collect speed statistics
27 /* use a relative path here to avoid conflicting with Perl's config.h. */
28 #include "../config/config.h"
36 #ifdef HAVE_SYS_TYPES_H
37 #include <sys/types.h>
39 #ifdef HAVE_SYS_STAT_H
58 #include <curl/curl.h>
60 /* Constant renamed after version 7.10.7 */
61 #ifndef CURLINFO_RESPONSE_CODE
62 #define CURLINFO_RESPONSE_CODE CURLINFO_HTTP_CODE
65 /* We don't need OpenSSL's kerberos support, and it's broken in
67 #define OPENSSL_NO_KRB5
69 #ifdef HAVE_OPENSSL_HMAC_H
70 # include <openssl/hmac.h>
72 # ifdef HAVE_CRYPTO_HMAC_H
73 # include <crypto/hmac.h>
81 #include <openssl/err.h>
82 #include <openssl/ssl.h>
83 #include <openssl/md5.h>
/* Maximum key length as specified in the S3 documentation
 * (*excluding* null terminator) */
#define S3_MAX_KEY_LENGTH 1024

/* Header carrying the DevPay / STS security token. */
#define AMAZON_SECURITY_HEADER "x-amz-security-token"

/* Request-body template for bucket creation: first %s is an optional
 * xmlns attribute, second %s is the location constraint name. */
#define AMAZON_BUCKET_CONF_TEMPLATE "\
<CreateBucketConfiguration%s>\n\
  <LocationConstraint>%s</LocationConstraint>\n\
</CreateBucketConfiguration>"

#define AMAZON_STORAGE_CLASS_HEADER "x-amz-storage-class"

#define AMAZON_SERVER_SIDE_ENCRYPTION_HEADER "x-amz-server-side-encryption"

#define AMAZON_WILDCARD_LOCATION "*"

/* parameters for exponential backoff in the face of retriable errors */

/* start at 10ms; parenthesized so the macro is safe inside any expression */
#define EXPONENTIAL_BACKOFF_START_USEC (G_USEC_PER_SEC/100)
/* double at each retry */
#define EXPONENTIAL_BACKOFF_BASE 2
/* retry 14 times (for a total of about 3 minutes spent waiting) */
#define EXPONENTIAL_BACKOFF_MAX_RETRIES 14

/* general "reasonable size" parameters */
#define MAX_ERROR_RESPONSE_LEN (100*1024)

/* Results which should always be retried */
#define RESULT_HANDLING_ALWAYS_RETRY \
        { 400,  S3_ERROR_RequestTimeout,      0,                          S3_RESULT_RETRY }, \
        { 403,  S3_ERROR_RequestTimeTooSkewed,0,                          S3_RESULT_RETRY }, \
        { 409,  S3_ERROR_OperationAborted,    0,                          S3_RESULT_RETRY }, \
        { 412,  S3_ERROR_PreconditionFailed,  0,                          S3_RESULT_RETRY }, \
        { 500,  S3_ERROR_InternalError,       0,                          S3_RESULT_RETRY }, \
        { 501,  S3_ERROR_NotImplemented,      0,                          S3_RESULT_RETRY }, \
        { 0,    0,                            CURLE_COULDNT_CONNECT,      S3_RESULT_RETRY }, \
        { 0,    0,                            CURLE_COULDNT_RESOLVE_HOST, S3_RESULT_RETRY }, \
        { 0,    0,                            CURLE_PARTIAL_FILE,         S3_RESULT_RETRY }, \
        { 0,    0,                            CURLE_OPERATION_TIMEOUTED,  S3_RESULT_RETRY }, \
        { 0,    0,                            CURLE_SSL_CONNECT_ERROR,    S3_RESULT_RETRY }, \
        { 0,    0,                            CURLE_SEND_ERROR,           S3_RESULT_RETRY }, \
        { 0,    0,                            CURLE_RECV_ERROR,           S3_RESULT_RETRY }, \
        { 0,    0,                            CURLE_GOT_NOTHING,          S3_RESULT_RETRY }
131 * Data structures and associated functions
135 /* (all strings in this struct are freed by s3_free()) */
140 char *swift_account_id;
141 char *swift_access_key;
143 /* attributes for new objects */
144 char *bucket_location;
146 char *server_side_encryption;
149 gboolean use_subdomain;
150 gboolean openstack_swift_api;
160 guint64 max_send_speed;
161 guint64 max_recv_speed;
163 /* information from the last request */
165 guint last_response_code;
166 s3_error_code_t last_s3_error_code;
167 CURLcode last_curl_code;
168 guint last_num_retries;
169 void *last_response_body;
170 guint last_response_body_size;
173 time_t time_offset_with_s3;
178 s3_write_func write_func;
179 s3_reset_func reset_func;
182 gboolean headers_done;
183 gboolean int_write_done;
185 /* Points to current handle: Added to get hold of s3 offset */
186 struct S3Handle *hdl;
189 /* Callback function to examine headers one-at-a-time
191 * @note this is the same as CURLOPT_HEADERFUNCTION
193 * @param data: The pointer to read data from
194 * @param size: The size of each "element" of the data buffer in bytes
195 * @param nmemb: The number of elements in the data buffer.
196 * So, the buffer's size is size*nmemb bytes.
197 * @param stream: the header_data (an opaque pointer)
199 * @return The number of bytes written to the buffer or
200 * CURL_WRITEFUNC_PAUSE to pause.
201 * If it's the number of bytes written, it should match the buffer size
203 typedef size_t (*s3_header_func)(void *data, size_t size, size_t nmemb, void *stream);
209 /* (see preprocessor magic in s3.h) */
211 static char * s3_error_code_names[] = {
212 #define S3_ERROR(NAME) #NAME
217 /* Convert an s3 error name to an error code. This function
218 * matches strings case-insensitively, and is appropriate for use
219 * on data from the network.
221 * @param s3_error_code: the error name
222 * @returns: the error code (see constants in s3.h)
224 static s3_error_code_t
225 s3_error_code_from_name(char *s3_error_name);
227 /* Convert an s3 error code to a string
229 * @param s3_error_code: the error code to convert
230 * @returns: statically allocated string
233 s3_error_name_from_code(s3_error_code_t s3_error_code);
239 /* result handling is specified by a static array of result_handling structs,
240 * which match based on response_code (from HTTP) and S3 error code. The result
241 * given for the first match is used. 0 acts as a wildcard for both response_code
242 * and s3_error_code. The list is terminated with a struct containing 0 for both
243 * response_code and s3_error_code; the result for that struct is the default
246 * See RESULT_HANDLING_ALWAYS_RETRY for an example.
249 S3_RESULT_RETRY = -1,
254 typedef struct result_handling {
256 s3_error_code_t s3_error_code;
261 /* Lookup a result in C{result_handling}.
263 * @param result_handling: array of handling specifications
264 * @param response_code: response code from operation
265 * @param s3_error_code: s3 error code from operation, if any
266 * @param curl_code: the CURL error, if any
267 * @returns: the matching result
270 lookup_result(const result_handling_t *result_handling,
272 s3_error_code_t s3_error_code,
276 * Precompiled regular expressions */
277 static regex_t etag_regex, error_name_regex, message_regex, subdomain_regex,
278 location_con_regex, date_sync_regex, x_auth_token_regex,
286 /* Check if a string is non-empty
288 * @param str: string to check
289 * @returns: true iff str is non-NULL and not "\0"
291 static gboolean is_non_empty_string(const char *str);
293 /* Construct the URL for an Amazon S3 REST request.
295 * A new string is allocated and returned; it is the responsiblity of the caller.
297 * @param hdl: the S3Handle object
298 * @param service_path: A path to add in the URL, or NULL for none.
299 * @param bucket: the bucket being accessed, or NULL for none
300 * @param key: the key being accessed, or NULL for none
301 * @param subresource: the sub-resource being accessed (e.g. "acl"), or NULL for none
302 * @param query: the query being accessed (e.g. "acl"), or NULL for none
304 * !use_subdomain: http://host/service_path/bucket/key
305 * use_subdomain : http://bucket.host/service_path/key
313 const char *subresource,
316 /* Create proper authorization headers for an Amazon S3 REST
317 * request to C{headers}.
319 * @note: C{X-Amz} headers (in C{headers}) must
321 * - be in alphabetical order
322 * - have no spaces around the colon
323 * (don't yell at me -- see the Amazon Developer Guide)
325 * @param hdl: the S3Handle object
326 * @param verb: capitalized verb for this request ('PUT', 'GET', etc.)
327 * @param bucket: the bucket being accessed, or NULL for none
328 * @param key: the key being accessed, or NULL for none
329 * @param subresource: the sub-resource being accessed (e.g. "acl"), or NULL for none
330 * @param md5_hash: the MD5 hash of the request body, or NULL for none
332 static struct curl_slist *
333 authenticate_request(S3Handle *hdl,
337 const char *subresource,
338 const char *md5_hash);
342 /* Interpret the response to an S3 operation, assuming CURL completed its request
343 * successfully. This function fills in the relevant C{hdl->last*} members.
345 * @param hdl: The S3Handle object
346 * @param body: the response body
347 * @param body_len: the length of the response body
348 * @param etag: The response's ETag header
349 * @param content_md5: The hex-encoded MD5 hash of the request body,
350 * which will be checked against the response's ETag header.
351 * If NULL, the header is not checked.
352 * If non-NULL, then the body should have the response headers at its beginnning.
353 * @returns: TRUE if the response should be retried (e.g., network error)
356 interpret_response(S3Handle *hdl,
358 char *curl_error_buffer,
362 const char *content_md5);
364 /* Perform an S3 operation. This function handles all of the details
365 * of retryig requests and so on.
367 * The concepts of bucket and keys are defined by the Amazon S3 API.
368 * See: "Components of Amazon S3" - API Version 2006-03-01 pg. 8
370 * Individual sub-resources are defined in several places. In the REST API,
371 * they they are represented by a "flag" in the "query string".
372 * See: "Constructing the CanonicalizedResource Element" - API Version 2006-03-01 pg. 60
374 * @param hdl: the S3Handle object
375 * @param verb: the HTTP request method
376 * @param bucket: the bucket to access, or NULL for none
377 * @param key: the key to access, or NULL for none
378 * @param subresource: the "sub-resource" to request (e.g. "acl") or NULL for none
379 * @param query: the query string to send (not including th initial '?'),
381 * @param read_func: the callback for reading data
382 * Will use s3_empty_read_func if NULL is passed in.
383 * @param read_reset_func: the callback for to reset reading data
384 * @param size_func: the callback to get the number of bytes to upload
385 * @param md5_func: the callback to get the MD5 hash of the data to upload
386 * @param read_data: pointer to pass to the above functions
387 * @param write_func: the callback for writing data.
388 * Will use s3_counter_write_func if NULL is passed in.
389 * @param write_reset_func: the callback for to reset writing data
390 * @param write_data: pointer to pass to C{write_func}
391 * @param progress_func: the callback for progress information
392 * @param progress_data: pointer to pass to C{progress_func}
393 * @param result_handling: instructions for handling the results; see above.
394 * @returns: the result specified by result_handling; details of the response
395 * are then available in C{hdl->last*}
398 perform_request(S3Handle *hdl,
402 const char *subresource,
404 s3_read_func read_func,
405 s3_reset_func read_reset_func,
406 s3_size_func size_func,
407 s3_md5_func md5_func,
409 s3_write_func write_func,
410 s3_reset_func write_reset_func,
412 s3_progress_func progress_func,
413 gpointer progress_data,
414 const result_handling_t *result_handling);
417 * a CURLOPT_WRITEFUNCTION to save part of the response in memory and
418 * call an external function if one was provided.
421 s3_internal_write_func(void *ptr, size_t size, size_t nmemb, void * stream);
424 * a function to reset to our internal buffer
427 s3_internal_reset_func(void * stream);
430 * a CURLOPT_HEADERFUNCTION to save the ETag header only.
433 s3_internal_header_func(void *ptr, size_t size, size_t nmemb, void * stream);
436 compile_regexes(void);
439 * Static function implementations
441 static s3_error_code_t
442 s3_error_code_from_name(char *s3_error_name)
446 if (!s3_error_name) return S3_ERROR_Unknown;
448 /* do a brute-force search through the list, since it's not sorted */
449 for (i = 0; i < S3_ERROR_END; i++) {
450 if (g_ascii_strcasecmp(s3_error_name, s3_error_code_names[i]) == 0)
454 return S3_ERROR_Unknown;
458 s3_error_name_from_code(s3_error_code_t s3_error_code)
460 if (s3_error_code >= S3_ERROR_END)
461 s3_error_code = S3_ERROR_Unknown;
463 return s3_error_code_names[s3_error_code];
467 s3_curl_supports_ssl(void)
469 static int supported = -1;
470 if (supported == -1) {
471 #if defined(CURL_VERSION_SSL)
472 curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);
473 if (info->features & CURL_VERSION_SSL)
486 s3_curl_throttling_compat(void)
488 /* CURLOPT_MAX_SEND_SPEED_LARGE added in 7.15.5 */
489 #if LIBCURL_VERSION_NUM >= 0x070f05
490 curl_version_info_data *info;
492 /* check the runtime version too */
493 info = curl_version_info(CURLVERSION_NOW);
494 return info->version_num >= 0x070f05;
501 lookup_result(const result_handling_t *result_handling,
503 s3_error_code_t s3_error_code,
506 while (result_handling->response_code
507 || result_handling->s3_error_code
508 || result_handling->curl_code) {
509 if ((result_handling->response_code && result_handling->response_code != response_code)
510 || (result_handling->s3_error_code && result_handling->s3_error_code != s3_error_code)
511 || (result_handling->curl_code && result_handling->curl_code != curl_code)) {
516 return result_handling->result;
519 /* return the result for the terminator, as the default */
520 return result_handling->result;
524 is_non_empty_string(const char *str)
526 return str && str[0] != '\0';
/* NOTE(review): this region is a line-numbered extraction with many source
 * lines missing; the leading integers are listing artifacts, not C.  The
 * comments below annotate the surviving fragment of build_url(); the code
 * bytes themselves are untouched.
 *
 * build_url() allocates and returns the request URL; the caller owns the
 * returned string.  Per the header comment earlier in the file:
 *   !use_subdomain: http(s)://host/service_path/bucket/key
 *   use_subdomain : http(s)://bucket.host/service_path/key
 */
534 const char *subresource,
538 char *esc_bucket = NULL, *esc_key = NULL;
/* Swift: the storage URL obtained at auth time replaces scheme+host. */
540 if (hdl->openstack_swift_api && hdl->x_storage_url) {
541 url = g_string_new(hdl->x_storage_url);
542 g_string_append(url, "/");
/* otherwise build "http" [+ "s" when SSL] + "://" piecewise */
545 url = g_string_new("http");
547 g_string_append(url, "s");
549 g_string_append(url, "://");
/* virtual-hosted style puts the bucket in the hostname */
552 if (hdl->use_subdomain && bucket)
553 g_string_append_printf(url, "%s.%s", bucket, hdl->host);
555 g_string_append_printf(url, "%s", hdl->host);
557 if (hdl->service_path) {
558 g_string_append_printf(url, "%s/", hdl->service_path);
560 g_string_append(url, "/");
/* path-style access: the bucket becomes a path segment and must be escaped */
565 if (!hdl->use_subdomain && bucket) {
566 /* curl_easy_escape addeded in 7.15.4 */
567 #if LIBCURL_VERSION_NUM >= 0x070f04
568 curl_version_info_data *info;
569 /* check the runtime version too */
570 info = curl_version_info(CURLVERSION_NOW);
571 if (info->version_num >= 0x070f04)
572 esc_bucket = curl_easy_escape(hdl->curl, bucket, 0);
/* older runtimes fall back to the deprecated curl_escape() */
574 esc_bucket = curl_escape(bucket, 0);
576 esc_bucket = curl_escape(bucket, 0);
578 if (!esc_bucket) goto cleanup;
579 g_string_append_printf(url, "%s", esc_bucket);
581 g_string_append(url, "/");
582 curl_free(esc_bucket);
/* same compile-time/runtime escaping dance for the key */
586 /* curl_easy_escape addeded in 7.15.4 */
587 #if LIBCURL_VERSION_NUM >= 0x070f04
588 curl_version_info_data *info;
589 /* check the runtime version too */
590 info = curl_version_info(CURLVERSION_NOW);
591 if (info->version_num >= 0x070f04)
592 esc_key = curl_easy_escape(hdl->curl, key, 0);
594 esc_key = curl_escape(key, 0);
596 esc_key = curl_escape(key, 0);
598 if (!esc_key) goto cleanup;
599 g_string_append_printf(url, "%s", esc_key);
/* append "?subresource", then "&query" (or "?query" alone) */
604 if (subresource || query)
605 g_string_append(url, "?");
608 g_string_append(url, subresource);
610 if (subresource && query)
611 g_string_append(url, "&");
614 g_string_append(url, query);
/* FALSE: hand ownership of the character buffer to the caller */
618 return g_string_free(url, FALSE);
/* NOTE(review): fragment of authenticate_request() from a line-numbered
 * listing with gaps; code bytes untouched, annotations only.
 *
 * Builds the curl header list for one request: for Swift, the X-Auth-*
 * headers; for S3, the canonical string-to-sign, its HMAC-SHA1 signature,
 * and the resulting Authorization/Date/Content-MD5 headers.
 * Returns a curl_slist the caller must free with curl_slist_free_all().
 */
621 static struct curl_slist *
622 authenticate_request(S3Handle *hdl,
626 const char *subresource,
627 const char *md5_hash)
634 GByteArray *md = NULL;
635 char *auth_base64 = NULL;
636 struct curl_slist *headers = NULL;
637 char *esc_bucket = NULL, *esc_key = NULL;
638 GString *auth_string = NULL;
641 static const char *wkday[] = {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"};
642 static const char *month[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
643 "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
646 /* calculate the date */
649 /* sync clock with amazon s3 */
650 t = t + hdl->time_offset_with_s3;
/* NOTE(review): gmtime_s() returns 0 on success, so `!gmtime_s(...)` is
 * true on the SUCCESS path and this logs "localtime error" spuriously --
 * the condition looks inverted relative to the gmtime_r() branch below;
 * confirm against the MSVC gmtime_s documentation.  The message also says
 * "localtime" although gmtime (UTC) is what is being called. */
653 if (!gmtime_s(&tmp, &t)) g_debug("localtime error");
655 if (!gmtime_r(&t, &tmp)) perror("localtime");
/* RFC-1123 date, built by hand so it is locale-independent */
659 date = g_strdup_printf("%s, %02d %s %04d %02d:%02d:%02d GMT",
660 wkday[tmp.tm_wday], tmp.tm_mday, month[tmp.tm_mon], 1900+tmp.tm_year,
661 tmp.tm_hour, tmp.tm_min, tmp.tm_sec);
/* Swift: no signing -- just pass the account/key or a previously obtained
 * auth token as headers, then (presumably) return early. */
663 if (hdl->openstack_swift_api) {
665 buf = g_strdup_printf("X-Auth-User: %s", hdl->swift_account_id);
666 headers = curl_slist_append(headers, buf);
668 buf = g_strdup_printf("X-Auth-Key: %s", hdl->swift_access_key);
669 headers = curl_slist_append(headers, buf);
672 buf = g_strdup_printf("X-Auth-Token: %s", hdl->x_auth_token);
673 headers = curl_slist_append(headers, buf);
677 /* Build the string to sign, per the S3 spec.
678 * See: "Authenticating REST Requests" - API Version 2006-03-01 pg 58
682 auth_string = g_string_new(verb);
683 g_string_append(auth_string, "\n");
685 /* Content-MD5 header */
687 g_string_append(auth_string, md5_hash);
688 g_string_append(auth_string, "\n");
690 /* Content-Type is empty*/
691 g_string_append(auth_string, "\n");
694 g_string_append(auth_string, date);
695 g_string_append(auth_string, "\n");
697 /* CanonicalizedAmzHeaders, sorted lexicographically */
698 if (is_non_empty_string(hdl->user_token)) {
699 g_string_append(auth_string, AMAZON_SECURITY_HEADER);
700 g_string_append(auth_string, ":");
701 g_string_append(auth_string, hdl->user_token)
702 g_string_append(auth_string, ",");
703 g_string_append(auth_string, STS_PRODUCT_TOKEN);
704 g_string_append(auth_string, "\n");
/* server-side encryption header only participates on PUT requests */
707 if (g_str_equal(verb,"PUT") &&
708 is_non_empty_string(hdl->server_side_encryption)) {
709 g_string_append(auth_string, AMAZON_SERVER_SIDE_ENCRYPTION_HEADER);
710 g_string_append(auth_string, ":");
711 g_string_append(auth_string, hdl->server_side_encryption);
712 g_string_append(auth_string, "\n");
715 if (is_non_empty_string(hdl->storage_class)) {
716 g_string_append(auth_string, AMAZON_STORAGE_CLASS_HEADER);
717 g_string_append(auth_string, ":");
718 g_string_append(auth_string, hdl->storage_class);
719 g_string_append(auth_string, "\n");
722 /* CanonicalizedResource */
723 if (hdl->service_path) {
724 g_string_append(auth_string, hdl->service_path);
726 g_string_append(auth_string, "/");
/* with subdomain addressing the raw bucket name is used; with path-style
 * addressing the escaped form must match what build_url() produced */
728 if (hdl->use_subdomain)
729 g_string_append(auth_string, bucket);
731 esc_bucket = curl_escape(bucket, 0);
732 if (!esc_bucket) goto cleanup;
733 g_string_append(auth_string, esc_bucket);
737 if (bucket && (hdl->use_subdomain || key))
738 g_string_append(auth_string, "/");
741 esc_key = curl_escape(key, 0);
742 if (!esc_key) goto cleanup;
743 g_string_append(auth_string, esc_key);
747 g_string_append(auth_string, "?");
748 g_string_append(auth_string, subresource);
751 /* run HMAC-SHA1 on the canonicalized string */
752 md = g_byte_array_sized_new(EVP_MAX_MD_SIZE+1);
/* NOTE(review): a stack HMAC_CTX with HMAC_CTX_cleanup() is the
 * pre-OpenSSL-1.1.0 API; OpenSSL >= 1.1.0 makes HMAC_CTX opaque and
 * requires HMAC_CTX_new()/HMAC_CTX_free() -- verify build requirements. */
754 HMAC_Init_ex(&ctx, hdl->secret_key, (int) strlen(hdl->secret_key),
756 HMAC_Update(&ctx, (unsigned char*) auth_string->str, auth_string->len);
757 HMAC_Final(&ctx, md->data, &md->len);
758 HMAC_CTX_cleanup(&ctx);
759 auth_base64 = s3_base64_encode(md);
760 /* append the new headers */
761 if (is_non_empty_string(hdl->user_token)) {
762 /* Devpay headers are included in hash. */
763 buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s",
765 headers = curl_slist_append(headers, buf);
768 buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s",
770 headers = curl_slist_append(headers, buf);
774 if (g_str_equal(verb,"PUT") &&
775 is_non_empty_string(hdl->server_side_encryption)) {
776 buf = g_strdup_printf(AMAZON_SERVER_SIDE_ENCRYPTION_HEADER ": %s",
777 hdl->server_side_encryption);
778 headers = curl_slist_append(headers, buf);
782 if (is_non_empty_string(hdl->storage_class)) {
783 buf = g_strdup_printf(AMAZON_STORAGE_CLASS_HEADER ": %s",
785 headers = curl_slist_append(headers, buf);
/* the signature itself: "AWS <access_key>:<base64(hmac-sha1)>" */
789 buf = g_strdup_printf("Authorization: AWS %s:%s",
790 hdl->access_key, auth_base64);
791 headers = curl_slist_append(headers, buf);
/* Content-MD5 is only sent when a non-empty hash was supplied */
795 if (md5_hash && '\0' != md5_hash[0]) {
796 buf = g_strdup_printf("Content-MD5: %s", md5_hash);
797 headers = curl_slist_append(headers, buf);
801 buf = g_strdup_printf("Date: %s", date);
802 headers = curl_slist_append(headers, buf);
/* cleanup path: release scratch allocations; `headers` is returned */
809 if (md) g_byte_array_free(md, TRUE);
811 if (auth_string) g_string_free(auth_string, TRUE);
816 /* Functions for a SAX parser to parse the XML failure from Amazon */
818 /* Private structure for our "thunk", which tracks where the user is in the list
820 struct failure_thunk {
/* NOTE(review): fragment of the GMarkup SAX callbacks from a line-numbered
 * listing with gaps; code bytes untouched.  The thunk records which element
 * we are inside (title/body/code/message vs. "others") and accumulates
 * character data into thunk->text. */
837 failure_start_element(GMarkupParseContext *context G_GNUC_UNUSED,
838 const gchar *element_name,
839 const gchar **attribute_names G_GNUC_UNUSED,
840 const gchar **attribute_values G_GNUC_UNUSED,
842 GError **error G_GNUC_UNUSED)
844 struct failure_thunk *thunk = (struct failure_thunk *)user_data;
/* element names are matched case-insensitively; entering an interesting
 * element clears the "in_others" depth and turns text capture on */
846 if (g_ascii_strcasecmp(element_name, "title") == 0) {
848 thunk->in_others = 0;
849 thunk->want_text = 1;
850 } else if (g_ascii_strcasecmp(element_name, "body") == 0) {
852 thunk->in_others = 0;
853 thunk->want_text = 1;
854 } else if (g_ascii_strcasecmp(element_name, "code") == 0) {
856 thunk->in_others = 0;
857 thunk->want_text = 1;
858 } else if (g_ascii_strcasecmp(element_name, "message") == 0) {
859 thunk->in_message = 1;
860 thunk->in_others = 0;
861 thunk->want_text = 1;
868 failure_end_element(GMarkupParseContext *context G_GNUC_UNUSED,
869 const gchar *element_name,
871 GError **error G_GNUC_UNUSED)
873 struct failure_thunk *thunk = (struct failure_thunk *)user_data;
875 if (g_ascii_strcasecmp(element_name, "title") == 0) {
/* a <title> like "404 Not Found": the text after the first space
 * becomes the error name */
876 char *p = strchr(thunk->text, ' ');
880 thunk->error_name = g_strdup(p);
886 } else if (g_ascii_strcasecmp(element_name, "body") == 0) {
/* ownership of the accumulated text moves to thunk->message */
887 thunk->message = thunk->text;
888 g_strstrip(thunk->message);
891 } else if (g_ascii_strcasecmp(element_name, "code") == 0) {
892 thunk->error_name = thunk->text;
895 } else if (g_ascii_strcasecmp(element_name, "message") == 0) {
896 thunk->message = thunk->text;
898 thunk->in_message = 0;
905 failure_text(GMarkupParseContext *context G_GNUC_UNUSED,
909 GError **error G_GNUC_UNUSED)
911 struct failure_thunk *thunk = (struct failure_thunk *)user_data;
/* only capture character data when inside a wanted element and not
 * nested inside an uninteresting one */
913 if (thunk->want_text && thunk->in_others == 0) {
916 new_text = g_strndup(text, text_len);
/* append to any text already captured for this element */
918 strappend(thunk->text, new_text);
921 thunk->text = new_text;
/* NOTE(review): fragment of interpret_response() from a line-numbered
 * listing with gaps; code bytes untouched.  Fills in hdl->last_* from the
 * completed transfer; per the prototype comment earlier in the file, a
 * TRUE return means "retry the request". */
927 interpret_response(S3Handle *hdl,
929 char *curl_error_buffer,
933 const char *content_md5)
935 long response_code = 0;
937 struct failure_thunk thunk;
938 GMarkupParseContext *ctxt = NULL;
939 static GMarkupParser parser = { failure_start_element, failure_end_element, failure_text, NULL, NULL };
942 if (!hdl) return FALSE;
944 if (hdl->last_message) g_free(hdl->last_message);
945 hdl->last_message = NULL;
947 /* bail out from a CURL error */
948 if (curl_code != CURLE_OK) {
949 hdl->last_curl_code = curl_code;
950 hdl->last_message = g_strdup_printf("CURL error: %s", curl_error_buffer);
954 /* CURL seems to think things were OK, so get its response code */
955 curl_easy_getinfo(hdl->curl, CURLINFO_RESPONSE_CODE, &response_code);
956 hdl->last_response_code = response_code;
958 /* check ETag, if present */
/* NOTE(review): the inner `etag &&` is redundant -- the enclosing
 * condition already guarantees etag is non-NULL. */
959 if (etag && content_md5 && 200 == response_code) {
960 if (etag && g_ascii_strcasecmp(etag, content_md5))
961 hdl->last_message = g_strdup("S3 Error: Possible data corruption (ETag returned by Amazon did not match the MD5 hash of the data sent)");
966 if (200 <= response_code && response_code < 400) {
967 /* 2xx and 3xx codes won't have a response body we care about */
968 hdl->last_s3_error_code = S3_ERROR_None;
972 /* Now look at the body to try to get the actual Amazon error message. */
974 /* impose a reasonable limit on body size */
975 if (body_len > MAX_ERROR_RESPONSE_LEN) {
976 hdl->last_message = g_strdup("S3 Error: Unknown (response body too large to parse)");
978 } else if (!body || body_len == 0) {
979 hdl->last_message = g_strdup("S3 Error: Unknown (empty response body)");
980 return TRUE; /* perhaps a network error; retry the request */
/* reset the parser thunk before each parse (some fields initialized on
 * lines lost in extraction -- e.g. in_others/text; confirm in full source) */
983 thunk.in_title = FALSE;
984 thunk.in_body = FALSE;
985 thunk.in_code = FALSE;
986 thunk.in_message = FALSE;
989 thunk.want_text = FALSE;
991 thunk.message = NULL;
992 thunk.error_name = NULL;
/* Swift errors may be plain text ("<code> <name>\n<message>\n") rather
 * than XML or HTML; parse those by hand */
994 if (hdl->openstack_swift_api &&
995 !g_strstr_len(body, body_len, "xml version") &&
996 !g_strstr_len(body, body_len, "<html>")) {
997 char *body_copy = g_strndup(body, body_len);
999 char *p = strchr(b, '\n');
1001 if (p) { /* first line: error code */
1004 p1 = strchr(b, ' ');
1008 thunk.error_name = g_strdup(p1);
1013 p = strchr(b, '\n');
1014 if (p) { /* second line: error message */
1017 thunk.message = g_strdup(p);
1018 g_strstrip(thunk.message);
1024 /* run the parser over it */
1025 ctxt = g_markup_parse_context_new(&parser, 0, (gpointer)&thunk, NULL);
1026 if (!g_markup_parse_context_parse(ctxt, body, body_len, &err)) {
1027 if (hdl->last_message) g_free(hdl->last_message);
1028 hdl->last_message = g_strdup(err->message);
1032 if (!g_markup_parse_context_end_parse(ctxt, &err)) {
1033 if (hdl->last_message) g_free(hdl->last_message);
1034 hdl->last_message = g_strdup(err->message);
1038 g_markup_parse_context_free(ctxt);
/* translate the parsed error name into an s3_error_code_t */
1042 if (thunk.error_name) {
1043 hdl->last_s3_error_code = s3_error_code_from_name(thunk.error_name);
1044 g_free(thunk.error_name);
1045 thunk.error_name = NULL;
1048 if (thunk.message) {
1049 g_free(hdl->last_message);
1050 hdl->last_message = thunk.message;
1051 thunk.message = NULL; /* steal the reference to the string */
/* cleanup path: free anything the thunk still owns */
1056 g_free(thunk.message);
1057 g_free(thunk.error_name);
1061 /* a CURLOPT_READFUNCTION to read data from a buffer. */
1063 s3_buffer_read_func(void *ptr, size_t size, size_t nmemb, void * stream)
1065 CurlBuffer *data = stream;
1066 guint bytes_desired = (guint) size * nmemb;
1068 /* check the number of bytes remaining, just to be safe */
1069 if (bytes_desired > data->buffer_len - data->buffer_pos)
1070 bytes_desired = data->buffer_len - data->buffer_pos;
1072 memcpy((char *)ptr, data->buffer + data->buffer_pos, bytes_desired);
1073 data->buffer_pos += bytes_desired;
1075 return bytes_desired;
1079 s3_buffer_size_func(void *stream)
1081 CurlBuffer *data = stream;
1082 return data->buffer_len;
1086 s3_buffer_md5_func(void *stream)
1088 CurlBuffer *data = stream;
1089 GByteArray req_body_gba = {(guint8 *)data->buffer, data->buffer_len};
1091 return s3_compute_md5_hash(&req_body_gba);
1095 s3_buffer_reset_func(void *stream)
1097 CurlBuffer *data = stream;
1098 data->buffer_pos = 0;
1101 /* a CURLOPT_WRITEFUNCTION to write data to a buffer. */
1103 s3_buffer_write_func(void *ptr, size_t size, size_t nmemb, void *stream)
1105 CurlBuffer * data = stream;
1106 guint new_bytes = (guint) size * nmemb;
1107 guint bytes_needed = data->buffer_pos + new_bytes;
1109 /* error out if the new size is greater than the maximum allowed */
1110 if (data->max_buffer_size && bytes_needed > data->max_buffer_size)
1113 /* reallocate if necessary. We use exponential sizing to make this
1114 * happen less often. */
1115 if (bytes_needed > data->buffer_len) {
1116 guint new_size = MAX(bytes_needed, data->buffer_len * 2);
1117 if (data->max_buffer_size) {
1118 new_size = MIN(new_size, data->max_buffer_size);
1120 data->buffer = g_realloc(data->buffer, new_size);
1121 data->buffer_len = new_size;
1124 return 0; /* returning zero signals an error to libcurl */
1126 /* actually copy the data to the buffer */
1127 memcpy(data->buffer + data->buffer_pos, ptr, new_bytes);
1128 data->buffer_pos += new_bytes;
1130 /* signal success to curl */
1134 /* a CURLOPT_READFUNCTION that writes nothing. */
1136 s3_empty_read_func(G_GNUC_UNUSED void *ptr, G_GNUC_UNUSED size_t size, G_GNUC_UNUSED size_t nmemb, G_GNUC_UNUSED void * stream)
1142 s3_empty_size_func(G_GNUC_UNUSED void *stream)
1148 s3_empty_md5_func(G_GNUC_UNUSED void *stream)
1150 static const GByteArray empty = {(guint8 *) "", 0};
1152 return s3_compute_md5_hash(&empty);
1155 /* a CURLOPT_WRITEFUNCTION to write data that just counts data.
1156 * s3_write_data should be NULL or a pointer to an gint64.
1159 s3_counter_write_func(G_GNUC_UNUSED void *ptr, size_t size, size_t nmemb, void *stream)
1161 gint64 *count = (gint64*) stream, inc = nmemb*size;
1163 if (count) *count += inc;
1168 s3_counter_reset_func(void *stream)
1170 gint64 *count = (gint64*) stream;
1172 if (count) *count = 0;
1176 /* a CURLOPT_READFUNCTION to read data from a file. */
/* NOTE(review): Win32-only region (fragment of a line-numbered listing;
 * code bytes untouched).  These mirror the buffer callbacks above but
 * operate on a HANDLE. */
1178 s3_file_read_func(void *ptr, size_t size, size_t nmemb, void * stream)
1180 HANDLE *hFile = (HANDLE *) stream;
/* NOTE(review): the ReadFile() return value is ignored; on failure
 * bytes_read may be unreliable -- confirm error handling in full source */
1183 ReadFile(hFile, ptr, (DWORD) size*nmemb, &bytes_read, NULL);
1188 s3_file_size_func(void *stream)
1190 HANDLE *hFile = (HANDLE *) stream;
1191 DWORD size = GetFileSize(hFile, NULL);
1193 if (INVALID_FILE_SIZE == size) {
/* compute the MD5 of an entire file by streaming it in fixed chunks */
1201 s3_file_md5_func(void *stream)
1203 #define S3_MD5_BUF_SIZE (10*1024)
1204 HANDLE *hFile = (HANDLE *) stream;
1205 guint8 buf[S3_MD5_BUF_SIZE];
1208 GByteArray *ret = NULL;
/* rewind to the start before hashing, and again afterwards so the
 * subsequent upload re-reads from offset 0 */
1210 g_assert(INVALID_SET_FILE_POINTER != SetFilePointer(hFile, 0, NULL, FILE_BEGIN));
1212 ret = g_byte_array_sized_new(S3_MD5_HASH_BYTE_LEN);
1213 g_byte_array_set_size(ret, S3_MD5_HASH_BYTE_LEN);
/* NOTE(review): ReadFile() reports EOF as success with bytes_read == 0,
 * so as shown this loop would never terminate; a `&& bytes_read > 0`
 * condition was likely lost in extraction -- confirm in full source */
1216 while (ReadFile(hFile, buf, S3_MD5_BUF_SIZE, &bytes_read, NULL)) {
1217 MD5_Update(&md5_ctx, buf, bytes_read);
1219 MD5_Final(ret->data, &md5_ctx);
1221 g_assert(INVALID_SET_FILE_POINTER != SetFilePointer(hFile, 0, NULL, FILE_BEGIN));
1223 #undef S3_MD5_BUF_SIZE
1227 s3_file_reset_func(void *stream)
1229 g_assert(INVALID_SET_FILE_POINTER != SetFilePointer(hFile, 0, NULL, FILE_BEGIN));
1232 /* a CURLOPT_WRITEFUNCTION to write data to a file. */
1234 s3_file_write_func(void *ptr, size_t size, size_t nmemb, void *stream)
1236 HANDLE *hFile = (HANDLE *) stream;
1237 DWORD bytes_written;
/* returning fewer bytes than requested signals an error to libcurl */
1239 WriteFile(hFile, ptr, (DWORD) size*nmemb, &bytes_written, NULL);
1240 return bytes_written;
1245 curl_debug_message(CURL *curl G_GNUC_UNUSED,
1249 void *unused G_GNUC_UNUSED)
1253 char **lines, **line;
1260 case CURLINFO_HEADER_IN:
1261 lineprefix="Hdr In: ";
1264 case CURLINFO_HEADER_OUT:
1265 lineprefix="Hdr Out: ";
1268 case CURLINFO_DATA_IN:
1269 if (len > 1000) return 0;
1270 lineprefix="Data In: ";
1273 case CURLINFO_DATA_OUT:
1274 if (len > 1000) return 0;
1275 lineprefix="Data Out: ";
1279 /* ignore data in/out -- nobody wants to see that in the
1284 /* split the input into lines */
1285 message = g_strndup(s, (gsize) len);
1286 lines = g_strsplit(message, "\n", -1);
1289 for (line = lines; *line; line++) {
1290 if (**line == '\0') continue; /* skip blank lines */
1291 g_debug("%s%s", lineprefix, *line);
/*
 * Issue one HTTP request against the S3/Swift endpoint and interpret the
 * response, retrying with exponential backoff on transient failures.
 *
 * On return, hdl->last_response_code / last_s3_error_code / last_curl_code /
 * last_message / last_response_body describe the final attempt; the response
 * body is kept in hdl (not freed here).
 *
 * @param hdl: S3Handle with a live curl easy handle
 * @param subresource: S3 subresource (e.g. "location"), or NULL
 * @param read_func/read_reset_func/size_func/md5_func: request-body callbacks
 * @param write_func/write_reset_func/write_data: response-body callbacks
 * @param progress_func/progress_data: curl progress callback, or NULL
 * @param result_handling: table mapping (HTTP code, S3 error, curl code)
 *        tuples to an s3_result_t
 * @returns: result looked up in result_handling (S3_RESULT_FAIL on abort)
 */
perform_request(S3Handle *hdl,
                const char *subresource,
                s3_read_func read_func,
                s3_reset_func read_reset_func,
                s3_size_func size_func,
                s3_md5_func md5_func,
                s3_write_func write_func,
                s3_reset_func write_reset_func,
                gpointer write_data,
                s3_progress_func progress_func,
                gpointer progress_data,
                const result_handling_t *result_handling)
    s3_result_t result = S3_RESULT_FAIL; /* assume the worst.. */
    CURLcode curl_code = CURLE_OK;
    char curl_error_buffer[CURL_ERROR_SIZE] = "";
    struct curl_slist *headers = NULL;
    /* Internal write-side state: response buffer (capped at
     * MAX_ERROR_RESPONSE_LEN), user callbacks, header-parsing flags. */
    S3InternalData int_writedata = {{NULL, 0, 0, MAX_ERROR_RESPONSE_LEN}, NULL, NULL, NULL, FALSE, FALSE, NULL, hdl};
    gboolean should_retry;
    gulong backoff = EXPONENTIAL_BACKOFF_START_USEC;
    /* corresponds to PUT, HEAD, GET, and POST */
    int curlopt_upload = 0, curlopt_nobody = 0, curlopt_httpget = 0, curlopt_post = 0;
    /* do we want to examine the headers */
    const char *curlopt_customrequest = NULL;
    /* for MD5 calculation */
    GByteArray *md5_hash = NULL;
    gchar *md5_hash_hex = NULL, *md5_hash_b64 = NULL;
    size_t request_body_size = 0;

    g_assert(hdl != NULL && hdl->curl != NULL);

    url = build_url(hdl, bucket, key, subresource, query);
    if (!url) goto cleanup;

    /* libcurl may behave strangely if these are not set correctly */
    if (!strncmp(verb, "PUT", 4)) {
    } else if (!strncmp(verb, "GET", 4)) {
        curlopt_httpget = 1;
    } else if (!strncmp(verb, "POST", 5)) {
    } else if (!strncmp(verb, "HEAD", 5)) {
        /* any other verb is passed through to curl verbatim */
        curlopt_customrequest = verb;

    request_body_size = size_func(read_data);

    /* hash the request body so it can be sent as Content-MD5 and
     * verified against the response ETag */
    md5_hash = md5_func(read_data);
    md5_hash_b64 = s3_base64_encode(md5_hash);
    md5_hash_hex = s3_hex_encode(md5_hash);
    g_byte_array_free(md5_hash, TRUE);

    /* Curl will use fread() otherwise */
    read_func = s3_empty_read_func;

    int_writedata.write_func = write_func;
    int_writedata.reset_func = write_reset_func;
    int_writedata.write_data = write_data;

    /* Curl will use fwrite() otherwise */
    int_writedata.write_func = s3_counter_write_func;
    int_writedata.reset_func = s3_counter_reset_func;
    int_writedata.write_data = NULL;

    /* per-attempt (re)initialization: headers are rebuilt every time
     * because the authentication signature includes the current date */
    curl_slist_free_all(headers);
    curl_error_buffer[0] = '\0';
    if (read_reset_func) {
        read_reset_func(read_data);
    /* calls write_reset_func */
    s3_internal_reset_func(&int_writedata);

    /* set up the request */
    headers = authenticate_request(hdl, verb, bucket, key, subresource,

    if (hdl->use_ssl && hdl->ca_info) {
        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_CAINFO, hdl->ca_info)))

    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_VERBOSE, hdl->verbose)))
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_DEBUGFUNCTION,
                                      curl_debug_message)))
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_ERRORBUFFER,
                                      curl_error_buffer)))
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_NOPROGRESS, 1)))
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_FOLLOWLOCATION, 1)))
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_URL, url)))
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HTTPHEADER,
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_WRITEFUNCTION, s3_internal_write_func)))
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_WRITEDATA, &int_writedata)))
    /* Note: we always have to set this apparently, for consistent "end of header" detection */
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HEADERFUNCTION, s3_internal_header_func)))
    /* Note: if set, CURLOPT_HEADERDATA seems to also be used for CURLOPT_WRITEDATA ? */
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HEADERDATA, &int_writedata)))
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSFUNCTION, progress_func)))
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSDATA, progress_data)))

    /* CURLOPT_INFILESIZE_LARGE added in 7.11.0 */
#if LIBCURL_VERSION_NUM >= 0x070b00
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_INFILESIZE_LARGE, (curl_off_t)request_body_size)))
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_INFILESIZE, (long)request_body_size)))

    /* CURLOPT_MAX_{RECV,SEND}_SPEED_LARGE added in 7.15.5 */
#if LIBCURL_VERSION_NUM >= 0x070f05
    if (s3_curl_throttling_compat()) {
        if (hdl->max_send_speed)
            if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_MAX_SEND_SPEED_LARGE, (curl_off_t)hdl->max_send_speed)))
        if (hdl->max_recv_speed)
            if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t)hdl->max_recv_speed)))

    /* exactly one of these options is 1, matching the verb chosen above */
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HTTPGET, curlopt_httpget)))
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_UPLOAD, curlopt_upload)))
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_POST, curlopt_post)))
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_NOBODY, curlopt_nobody)))
    if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_CUSTOMREQUEST,
                                      curlopt_customrequest)))

    if (curlopt_upload) {
        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READFUNCTION, read_func)))
        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA, read_data)))
        /* Clear request_body options. */
        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READFUNCTION,
        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA,

    /* Perform the request */
    curl_code = curl_easy_perform(hdl->curl);

    /* interpret the response into hdl->last* */
curl_error: /* (label for short-circuiting the curl_easy_perform call) */
    should_retry = interpret_response(hdl, curl_code, curl_error_buffer,
                                      int_writedata.resp_buf.buffer, int_writedata.resp_buf.buffer_pos, int_writedata.etag, md5_hash_hex);

    /* and, unless we know we need to retry, see what we're to do now */
    if (!should_retry) {
        result = lookup_result(result_handling, hdl->last_response_code,
                               hdl->last_s3_error_code, hdl->last_curl_code);

        /* break out of the while(1) unless we're retrying */
        if (result != S3_RESULT_RETRY)

    if (retries >= EXPONENTIAL_BACKOFF_MAX_RETRIES) {
        /* we're out of retries, so annotate hdl->last_message appropriately and bail
        char *m = g_strdup_printf("Too many retries; last message was '%s'", hdl->last_message);
        if (hdl->last_message) g_free(hdl->last_message);
        hdl->last_message = m;
        result = S3_RESULT_FAIL;

    /* exponential backoff between attempts */
    backoff *= EXPONENTIAL_BACKOFF_BASE;

    if (result != S3_RESULT_OK) {
        g_debug(_("%s %s failed with %d/%s"), verb, url,
                hdl->last_response_code,
                s3_error_name_from_code(hdl->last_s3_error_code));

    if (headers) curl_slist_free_all(headers);
    g_free(md5_hash_b64);
    g_free(md5_hash_hex);

    /* we don't deallocate the response body -- we keep it for later */
    hdl->last_response_body = int_writedata.resp_buf.buffer;
    hdl->last_response_body_size = int_writedata.resp_buf.buffer_pos;
    hdl->last_num_retries = retries;
/* CURLOPT_WRITEFUNCTION implementation: ignores bytes that arrive before
 * the headers are complete, tees the body into the internal response
 * buffer (until that buffer fills), and forwards it to the user-supplied
 * write callback when one was given. Returns the number of bytes
 * consumed, per the libcurl write-callback contract. */
s3_internal_write_func(void *ptr, size_t size, size_t nmemb, void * stream)
    S3InternalData *data = (S3InternalData *) stream;

    /* header bytes are handled by s3_internal_header_func, not here */
    if (!data->headers_done)

    /* call write on internal buffer (if not full) */
    if (data->int_write_done) {
        bytes_saved = s3_buffer_write_func(ptr, size, nmemb, &data->resp_buf);
        /* internal buffer is full; stop copying into it on later calls */
        data->int_write_done = TRUE;

    /* call write on user buffer */
    if (data->write_func) {
        return data->write_func(ptr, size, nmemb, data->write_data);
/* Reset the per-request internal write state before a (re)attempt:
 * rewind the internal response buffer, clear the header/body progress
 * flags, and give the user's reset callback a chance to rewind its own
 * sink. Called from perform_request at the top of each retry. */
s3_internal_reset_func(void * stream)
    S3InternalData *data = (S3InternalData *) stream;

    s3_buffer_reset_func(&data->resp_buf);
    data->headers_done = FALSE;
    data->int_write_done = FALSE;
    if (data->reset_func) {
        data->reset_func(data->write_data);
/* CURLOPT_HEADERFUNCTION implementation: scans each response header line
 * for the ETag, Swift X-Auth-Token / X-Storage-Url, and Date headers,
 * and flips headers_done when the blank line ending the header block is
 * seen. The Date header is used to compute the clock offset between the
 * local host and the S3 server (for signature timestamps). */
s3_internal_header_func(void *ptr, size_t size, size_t nmemb, void * stream)
    static const char *final_header = "\r\n";
    time_t remote_time_in_sec,local_time;
    regmatch_t pmatch[2];
    S3InternalData *data = (S3InternalData *) stream;

    header = g_strndup((gchar *) ptr, (gsize) size*nmemb);

    /* strip the trailing CRLF, one character at a time.
     * NOTE(review): if libcurl ever delivers a zero-length chunk,
     * header[strlen(header)-1] indexes header[-1] — TODO confirm libcurl
     * guarantees at least the "\r\n" terminator per callback. */
    if (header[strlen(header)-1] == '\n')
        header[strlen(header)-1] = '\0';
    if (header[strlen(header)-1] == '\r')
        header[strlen(header)-1] = '\0';

    if (!s3_regexec_wrap(&etag_regex, header, 2, pmatch, 0))
        data->etag = find_regex_substring(header, pmatch[1]);
    if (!s3_regexec_wrap(&x_auth_token_regex, header, 2, pmatch, 0))
        data->hdl->x_auth_token = find_regex_substring(header, pmatch[1]);

    if (!s3_regexec_wrap(&x_storage_url_regex, header, 2, pmatch, 0))
        data->hdl->x_storage_url = find_regex_substring(header, pmatch[1]);

    /* any of these spellings marks the end of the header block */
    if (strlen(header) == 0)
        data->headers_done = TRUE;
    if (g_str_equal(final_header, header))
        data->headers_done = TRUE;
    if (g_str_equal("\n", header))
        data->headers_done = TRUE;

    /* If date header is found */
    if (!s3_regexec_wrap(&date_sync_regex, header, 2, pmatch, 0)){
        char *date = find_regex_substring(header, pmatch[1]);

        /* Remote time is always in GMT: RFC 2616 */
        /* both curl_getdate and time operate in UTC, so no timezone math is necessary */
        if ( (remote_time_in_sec = curl_getdate(date, NULL)) < 0 ){
            g_debug("Error: Conversion of remote time to seconds failed.");
            data->hdl->time_offset_with_s3 = 0;
            local_time = time(NULL);
            data->hdl->time_offset_with_s3 = remote_time_in_sec - local_time;

            if (data->hdl->verbose)
                g_debug("Time Offset (remote - local) :%ld",(long)data->hdl->time_offset_with_s3);
/* Compile every regex this module uses (error/ETag/header/location
 * patterns) into the file-scope regex globals. Two variants: POSIX
 * <regex.h> when available, otherwise PCRE via GLib's GRegex. Aborts via
 * g_error() on a pattern that fails to compile (a programming error).
 * Called once from s3_init(). */
compile_regexes(void)
    /* using POSIX regular expressions */
    struct {const char * str; int flags; regex_t *regex;} regexes[] = {
        {"<Code>[[:space:]]*([^<]*)[[:space:]]*</Code>", REG_EXTENDED | REG_ICASE, &error_name_regex},
        {"^ETag:[[:space:]]*\"([^\"]+)\"[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &etag_regex},
        {"^X-Auth-Token:[[:space:]]*([^ ]+)[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &x_auth_token_regex},
        {"^X-Storage-Url:[[:space:]]*([^ ]+)[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &x_storage_url_regex},
        {"<Message>[[:space:]]*([^<]*)[[:space:]]*</Message>", REG_EXTENDED | REG_ICASE, &message_regex},
        {"^[a-z0-9](-*[a-z0-9]){2,62}$", REG_EXTENDED | REG_NOSUB, &subdomain_regex},
        {"(/>)|(>([^<]*)</LocationConstraint>)", REG_EXTENDED | REG_ICASE, &location_con_regex},
        {"^Date:(.*)\r",REG_EXTENDED | REG_ICASE | REG_NEWLINE, &date_sync_regex},
    char regmessage[1024];

    for (i = 0; regexes[i].str; i++) {
        reg_result = regcomp(regexes[i].regex, regexes[i].str, regexes[i].flags);
        if (reg_result != 0) {
            regerror(reg_result, regexes[i].regex, regmessage, sizeof(regmessage));
            g_error(_("Regex error: %s"), regmessage);
#else /* ! HAVE_REGEX_H */
    /* using PCRE via GLib */
    struct {const char * str; int flags; regex_t *regex;} regexes[] = {
        {"<Code>\\s*([^<]*)\\s*</Code>",
         G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
        {"^ETag:\\s*\"([^\"]+)\"\\s*$",
         G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
        {"<Message>\\s*([^<]*)\\s*</Message>",
         G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
        {"^[a-z0-9]((-*[a-z0-9])|(\\.[a-z0-9])){2,62}$",
         G_REGEX_OPTIMIZE | G_REGEX_NO_AUTO_CAPTURE,
        {"(/>)|(>([^<]*)</LocationConstraint>)",
         &location_con_regex},
         G_REGEX_OPTIMIZE | G_REGEX_CASELESS,

    for (i = 0; regexes[i].str; i++) {
        *(regexes[i].regex) = g_regex_new(regexes[i].str, regexes[i].flags, 0, &err);
            g_error(_("Regex error: %s"), err->message);
1706 * Public function implementations
/* One-time module initialization: compiles the shared regexes under a
 * mutex so concurrent callers are safe. Returns the (cached) result of
 * compile_regexes(); curl itself is initialized elsewhere (see note). */
gboolean s3_init(void)
    static GStaticMutex mutex = G_STATIC_MUTEX_INIT;
    static gboolean init = FALSE, ret;

    /* n.b. curl_global_init is called in common-src/glib-util.c:glib_init() */
    g_static_mutex_lock (&mutex);
    ret = compile_regexes();
    g_static_mutex_unlock(&mutex);
/* TRUE iff the linked libcurl is newer than 7.10.2 and thus usable for
 * bucket-location (CreateBucketConfiguration) requests. */
s3_curl_location_compat(void)
    curl_version_info_data *info;

    info = curl_version_info(CURLVERSION_NOW);
    return info->version_num > 0x070a02;
/* TRUE iff BUCKET matches subdomain_regex, i.e. the name is usable as a
 * DNS subdomain — a prerequisite for location-constrained buckets. */
s3_bucket_location_compat(const char *bucket)
    return !s3_regexec_wrap(&subdomain_regex, bucket, 0, NULL, 0);
/* Swift only: issue an authentication GET so that s3_internal_header_func
 * captures the X-Storage-Url and X-Auth-Token response headers into HDL.
 * Returns TRUE on HTTP 200 (retrying transient failures). */
get_openstack_swift_api_setting(
    s3_result_t result = S3_RESULT_FAIL;
    static result_handling_t result_handling[] = {
        { 200, 0, 0, S3_RESULT_OK },
        RESULT_HANDLING_ALWAYS_RETRY,
        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }

    result = perform_request(hdl, "GET", NULL, NULL, NULL, NULL,
                             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                             NULL, NULL, result_handling);

    return result == S3_RESULT_OK;
/* Allocate and configure a new S3Handle.
 *
 * For the Amazon S3 API, access_key/secret_key are required; for the
 * OpenStack Swift API, swift_account_id/swift_access_key are required
 * (asserted below). All string arguments are copied; the caller keeps
 * ownership of its own copies. Returns NULL on failure (the error path
 * is reached via goto error). Free the handle with s3_free(). */
s3_open(const char *access_key,
        const char *secret_key,
        const char *swift_account_id,
        const char *swift_access_key,
        const char *service_path,
        const gboolean use_subdomain,
        const char *user_token,
        const char *bucket_location,
        const char *storage_class,
        const char *ca_info,
        const char *server_side_encryption,
        const gboolean openstack_swift_api)
    hdl = g_new0(S3Handle, 1);
    if (!hdl) goto error;

    hdl->verbose = FALSE;
    /* default to SSL whenever the linked curl supports it */
    hdl->use_ssl = s3_curl_supports_ssl();

    if (!openstack_swift_api) {
        g_assert(access_key);
        hdl->access_key = g_strdup(access_key);
        g_assert(secret_key);
        hdl->secret_key = g_strdup(secret_key);
        g_assert(swift_account_id);
        hdl->swift_account_id = g_strdup(swift_account_id);
        g_assert(swift_access_key);
        hdl->swift_access_key = g_strdup(swift_access_key);

    hdl->user_token = g_strdup(user_token);
    hdl->bucket_location = g_strdup(bucket_location);
    hdl->storage_class = g_strdup(storage_class);
    hdl->server_side_encryption = g_strdup(server_side_encryption);
    hdl->ca_info = g_strdup(ca_info);

    if (!is_non_empty_string(host))
        host = "s3.amazonaws.com";
    hdl->host = g_ascii_strdown(host, -1);
    /* subdomain-style addressing is forced when a location constraint is
     * used against the main Amazon endpoint */
    hdl->use_subdomain = use_subdomain ||
        (strcmp(hdl->host, "s3.amazonaws.com") == 0 &&
         is_non_empty_string(hdl->bucket_location));
    hdl->openstack_swift_api = openstack_swift_api;

    /* normalize service_path to "/path" with no trailing slash, or NULL.
     * NOTE(review): strlen(service_path) assumes service_path is non-NULL —
     * TODO confirm all callers pass at least "". */
    if (strlen(service_path) == 0 ||
        (strlen(service_path) == 1 && service_path[0] == '/')) {
        hdl->service_path = NULL;
    } else if (service_path[0] != '/') {
        hdl->service_path = g_strdup_printf("/%s", service_path);
        hdl->service_path = g_strdup(service_path);

    if (hdl->service_path) {
        /* remove trailing '/' */
        size_t len = strlen(hdl->service_path) - 1;
        if (hdl->service_path[len] == '/')
            hdl->service_path[len] = '\0';
        hdl->service_path = NULL;

    hdl->curl = curl_easy_init();
    if (!hdl->curl) goto error;

    if (openstack_swift_api) { /* get the X-Storage-Url and X-Auth-Token */
        get_openstack_swift_api_setting(hdl);
/* Release an S3Handle and everything it owns (credentials, location and
 * class strings, the curl easy handle). Safe ordering: plain g_free for
 * the always-allocated fields, guarded frees for the optional ones.
 * (The "if (x) g_free(x)" guards are redundant — g_free(NULL) is a
 * no-op — but harmless.) */
s3_free(S3Handle *hdl)
    g_free(hdl->access_key);
    g_free(hdl->secret_key);
    g_free(hdl->swift_account_id);
    g_free(hdl->swift_access_key);
    if (hdl->user_token) g_free(hdl->user_token);
    if (hdl->bucket_location) g_free(hdl->bucket_location);
    if (hdl->storage_class) g_free(hdl->storage_class);
    if (hdl->server_side_encryption) g_free(hdl->server_side_encryption);
    if (hdl->host) g_free(hdl->host);
    if (hdl->service_path) g_free(hdl->service_path);
    if (hdl->curl) curl_easy_cleanup(hdl->curl);
/* Clear the hdl->last_* fields that record the previous request's
 * outcome, so a new request starts from a clean slate. Frees the cached
 * response message and body. */
s3_reset(S3Handle *hdl)
    /* We don't call curl_easy_reset here, because doing that in curl
     * < 7.16 blanks the default CA certificate path, and there's no way
     * to get it back. */
    if (hdl->last_message) {
        g_free(hdl->last_message);
        hdl->last_message = NULL;

    hdl->last_response_code = 0;
    hdl->last_curl_code = 0;
    hdl->last_s3_error_code = 0;
    hdl->last_num_retries = 0;

    if (hdl->last_response_body) {
        g_free(hdl->last_response_body);
        hdl->last_response_body = NULL;

    hdl->last_response_body_size = 0;
/* Report the outcome of the last request through any subset of the out
 * parameters (each may be NULL to skip it). Pointers returned via
 * *message remain owned by HDL — callers must not free them. A NULL HDL
 * yields coherent "no handle" values rather than crashing. */
s3_error(S3Handle *hdl,
         const char **message,
         guint *response_code,
         s3_error_code_t *s3_error_code,
         const char **s3_error_name,
         CURLcode *curl_code,
    if (message) *message = hdl->last_message;
    if (response_code) *response_code = hdl->last_response_code;
    if (s3_error_code) *s3_error_code = hdl->last_s3_error_code;
    if (s3_error_name) *s3_error_name = s3_error_name_from_code(hdl->last_s3_error_code);
    if (curl_code) *curl_code = hdl->last_curl_code;
    if (num_retries) *num_retries = hdl->last_num_retries;

    /* no hdl? return something coherent, anyway */
    if (message) *message = "NULL S3Handle";
    if (response_code) *response_code = 0;
    if (s3_error_code) *s3_error_code = 0;
    if (s3_error_name) *s3_error_name = NULL;
    if (curl_code) *curl_code = 0;
    if (num_retries) *num_retries = 0;
/* Enable or disable verbose curl debug logging for this handle. */
s3_verbose(S3Handle *hdl, gboolean verbose)
    hdl->verbose = verbose;
/* Set an upload bandwidth cap (bytes/sec), applied to subsequent
 * requests via CURLOPT_MAX_SEND_SPEED_LARGE. Fails (early return) if the
 * linked libcurl predates throttling support (< 7.15.5). */
s3_set_max_send_speed(S3Handle *hdl, guint64 max_send_speed)
    if (!s3_curl_throttling_compat())

    hdl->max_send_speed = max_send_speed;
/* Set a download bandwidth cap (bytes/sec); mirror of
 * s3_set_max_send_speed, applied via CURLOPT_MAX_RECV_SPEED_LARGE. */
s3_set_max_recv_speed(S3Handle *hdl, guint64 max_recv_speed)
    if (!s3_curl_throttling_compat())

    hdl->max_recv_speed = max_recv_speed;
/* Request SSL for this handle; refused when the linked libcurl lacks SSL
 * support. NOTE(review): '&' here is a bitwise AND where '&&' was surely
 * intended — the result is the same for 0/1 gboolean operands, but the
 * logical operator would be the idiomatic spelling. */
s3_use_ssl(S3Handle *hdl, gboolean use_ssl)
    gboolean ret = TRUE;
    if (use_ssl & !s3_curl_supports_ssl()) {
        hdl->use_ssl = use_ssl;
/* Build a one-line human-readable description of the last error, e.g.
 * "message (S3ErrorName) (CURLcode N) (HTTP 500) (after 3 retries)".
 * Empty components are omitted (their fixed-size buffers stay "").
 * Returns a newly-allocated string the caller must g_free. */
s3_strerror(S3Handle *hdl)
    const char *message;
    guint response_code;
    const char *s3_error_name;

    char s3_info[256] = "";
    char response_info[16] = "";
    char curl_info[32] = "";
    char retries_info[32] = "";

    s3_error(hdl, &message, &response_code, NULL, &s3_error_name, &curl_code, &num_retries);

        message = "Unknown S3 error";
        g_snprintf(s3_info, sizeof(s3_info), " (%s)", s3_error_name);
        g_snprintf(response_info, sizeof(response_info), " (HTTP %d)", response_code);
        g_snprintf(curl_info, sizeof(curl_info), " (CURLcode %d)", curl_code);
        g_snprintf(retries_info, sizeof(retries_info), " (after %d retries)", num_retries);

    return g_strdup_printf("%s%s%s%s%s", message, s3_info, curl_info, response_info, retries_info);
/* Perform an upload (PUT) of a single object. The request body is
 * produced by the supplied callbacks; all callback-owned data remains
 * the responsibility of the caller.
 *
 * @param hdl: the S3Handle to use
 * @param bucket: the bucket to which the upload should be made
 * @param key: the key to which the upload should be made
 * @param read_func/reset_func/size_func/md5_func: request-body callbacks
 *        operating on read_data
 * @param progress_func/progress_data: optional progress callback
 * @returns: FALSE if an error occurred (details in hdl->last_*)
 */
s3_upload(S3Handle *hdl,
          s3_read_func read_func,
          s3_reset_func reset_func,
          s3_size_func size_func,
          s3_md5_func md5_func,
          s3_progress_func progress_func,
          gpointer progress_data)
    s3_result_t result = S3_RESULT_FAIL;
    static result_handling_t result_handling[] = {
        { 200, 0, 0, S3_RESULT_OK },
        { 201, 0, 0, S3_RESULT_OK },
        RESULT_HANDLING_ALWAYS_RETRY,
        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }

    g_assert(hdl != NULL);

    result = perform_request(hdl, "PUT", bucket, key, NULL, NULL,
                             read_func, reset_func, size_func, md5_func, read_data,
                             NULL, NULL, NULL, progress_func, progress_data,

    return result == S3_RESULT_OK;
/* Private structure for our "thunk", which tracks where the user is in the list
 * while the XML listing response is being parsed by the SAX callbacks
 * below (list_start_element / list_end_element / list_text). */
struct list_keys_thunk {
    GSList *filename_list; /* all pending filenames */

    gboolean in_contents; /* look for "key" entities in here */
    gboolean in_common_prefixes; /* look for "prefix" entities in here */

    gboolean is_truncated; /* more results remain after this page */
2053 /* Functions for a SAX parser to parse the XML from Amazon */
2056 list_start_element(GMarkupParseContext *context G_GNUC_UNUSED,
2057 const gchar *element_name,
2058 const gchar **attribute_names G_GNUC_UNUSED,
2059 const gchar **attribute_values G_GNUC_UNUSED,
2061 GError **error G_GNUC_UNUSED)
2063 struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;
2065 thunk->want_text = 0;
2066 if (g_ascii_strcasecmp(element_name, "contents") == 0 ||
2067 g_ascii_strcasecmp(element_name, "object") == 0) {
2068 thunk->in_contents = 1;
2069 } else if (g_ascii_strcasecmp(element_name, "commonprefixes") == 0) {
2070 thunk->in_common_prefixes = 1;
2071 } else if (g_ascii_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
2072 thunk->want_text = 1;
2073 } else if ((g_ascii_strcasecmp(element_name, "key") == 0 ||
2074 g_ascii_strcasecmp(element_name, "name") == 0) &&
2075 thunk->in_contents) {
2076 thunk->want_text = 1;
2077 } else if ((g_ascii_strcasecmp(element_name, "size") == 0 ||
2078 g_ascii_strcasecmp(element_name, "bytes") == 0) &&
2079 thunk->in_contents) {
2080 thunk->want_text = 1;
2081 } else if (g_ascii_strcasecmp(element_name, "istruncated")) {
2082 thunk->want_text = 1;
2083 } else if (g_ascii_strcasecmp(element_name, "nextmarker")) {
2084 thunk->want_text = 1;
/* End-element callback: consume the text captured for the element that is
 * closing. Keys/prefixes are prepended to filename_list (ownership of
 * thunk->text transfers to the list), sizes are summed, and the
 * IsTruncated / NextMarker elements drive the pagination loop in
 * s3_list_keys(). */
list_end_element(GMarkupParseContext *context G_GNUC_UNUSED,
                 const gchar *element_name,
                 GError **error G_GNUC_UNUSED)
    struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;

    if (g_ascii_strcasecmp(element_name, "contents") == 0) {
        thunk->in_contents = 0;
    } else if (g_ascii_strcasecmp(element_name, "commonprefixes") == 0) {
        thunk->in_common_prefixes = 0;
    } else if ((g_ascii_strcasecmp(element_name, "key") == 0 ||
                g_ascii_strcasecmp(element_name, "name") == 0) &&
               thunk->in_contents) {
        /* the list takes ownership of thunk->text */
        thunk->filename_list = g_slist_prepend(thunk->filename_list, thunk->text);
        if (thunk->is_truncated) {
            /* remember the last key seen, as the marker for the next page */
            if (thunk->next_marker) g_free(thunk->next_marker);
            thunk->next_marker = g_strdup(thunk->text);
    } else if ((g_ascii_strcasecmp(element_name, "size") == 0 ||
                g_ascii_strcasecmp(element_name, "bytes") == 0) &&
               thunk->in_contents) {
        thunk->size += g_ascii_strtoull (thunk->text, NULL, 10);
    } else if (g_ascii_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
        thunk->filename_list = g_slist_prepend(thunk->filename_list, thunk->text);
    } else if (g_ascii_strcasecmp(element_name, "istruncated") == 0) {
        /* anything other than "false" counts as truncated */
        if (thunk->text && g_ascii_strncasecmp(thunk->text, "false", 5) != 0)
            thunk->is_truncated = TRUE;
    } else if (g_ascii_strcasecmp(element_name, "nextmarker") == 0) {
        if (thunk->next_marker) g_free(thunk->next_marker);
        /* take ownership of the captured marker text */
        thunk->next_marker = thunk->text;
/* Character-data callback: when the current element was flagged by
 * list_start_element (want_text), stash a copy of its text in
 * thunk->text, replacing any previous capture. */
list_text(GMarkupParseContext *context G_GNUC_UNUSED,
          GError **error G_GNUC_UNUSED)
    struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;

    if (thunk->want_text) {
        if (thunk->text) g_free(thunk->text);
        thunk->text = g_strndup(text, text_len);
/* Perform a fetch from S3; several fetches may be involved in a
 * single listing operation. Builds the query string from the optional
 * prefix/delimiter/marker/max-keys parts (URL-escaped), adds Swift's
 * "format=xml", and GETs the bucket listing into BUF. */
list_fetch(S3Handle *hdl,
           const char *delimiter,
           const char *max_keys,
    s3_result_t result = S3_RESULT_FAIL;
    static result_handling_t result_handling[] = {
        { 200, 0, 0, S3_RESULT_OK },
        { 204, 0, 0, S3_RESULT_OK },
        RESULT_HANDLING_ALWAYS_RETRY,
        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
    /* name/value pairs that may appear in the query string; NULL values
     * are simply skipped */
    const char* pos_parts[][2] = {
        {"delimiter", delimiter},
        {"max-keys", max_keys},

    gboolean have_prev_part = FALSE;

    /* loop over possible parts to build query string */
    query = g_string_new("");
    for (i = 0; pos_parts[i][0]; i++) {
        if (pos_parts[i][1]) {
            const char *keyword;
            /* separate from the previous part */
            g_string_append(query, "&");
            have_prev_part = TRUE;
            esc_value = curl_escape(pos_parts[i][1], 0);
            keyword = pos_parts[i][0];
            /* Swift spells the page-size parameter differently */
            if (hdl->openstack_swift_api && strcmp(keyword, "max-keys") == 0) {
            g_string_append_printf(query, "%s=%s", keyword, esc_value);
            curl_free(esc_value);

    if (hdl->openstack_swift_api) {
        g_string_append(query, "&");
        g_string_append(query, "format=xml");

    /* and perform the request on that URI */
    result = perform_request(hdl, "GET", bucket, NULL, NULL, query->str,
                             NULL, NULL, NULL, NULL, NULL,
                             S3_BUFFER_WRITE_FUNCS, buf, NULL, NULL,

    if (query) g_string_free(query, TRUE);
/* List all keys in BUCKET matching PREFIX/DELIMITER, following the
 * NextMarker pagination until the listing is complete. On success,
 * *list receives a GSList of newly-allocated key names (caller frees)
 * and *total_size the sum of the listed object sizes. Parses each page
 * of XML with the SAX callbacks above. */
s3_list_keys(S3Handle *hdl,
             const char *delimiter,
             guint64 *total_size)
    /*
     * max len of XML variables:
     * bucket: 255 bytes (p12 API Version 2006-03-01)
     * key: 1024 bytes (p15 API Version 2006-03-01)
     * size per key: 5GB bytes (p6 API Version 2006-03-01)
     * size of size 10 bytes (i.e. 10 decimal digits)
     * etag: 44 (observed+assumed)
     * owner ID: 64 (observed+assumed)
     * owner DisplayName: 255 (assumed)
     * StorageClass: const (p18 API Version 2006-03-01)
     */
    static const guint MAX_RESPONSE_LEN = 1000*2000;
    static const char *MAX_KEYS = "1000";
    struct list_keys_thunk thunk;
    GMarkupParseContext *ctxt = NULL;
    static GMarkupParser parser = { list_start_element, list_end_element, list_text, NULL, NULL };
    s3_result_t result = S3_RESULT_FAIL;
    CurlBuffer buf = {NULL, 0, 0, MAX_RESPONSE_LEN};

    thunk.filename_list = NULL;
    thunk.next_marker = NULL;

    /* Loop until S3 has given us the entire picture */
    s3_buffer_reset_func(&buf);
    /* get some data from S3 */
    result = list_fetch(hdl, bucket, prefix, delimiter, thunk.next_marker, MAX_KEYS, &buf);
    if (result != S3_RESULT_OK) goto cleanup;
    if (buf.buffer_pos == 0) goto cleanup; /* no body */

    /* run the parser over it */
    thunk.in_contents = FALSE;
    thunk.in_common_prefixes = FALSE;
    thunk.is_truncated = FALSE;
    if (thunk.next_marker) g_free(thunk.next_marker);
    thunk.next_marker = NULL;
    thunk.want_text = FALSE;

    ctxt = g_markup_parse_context_new(&parser, 0, (gpointer)&thunk, NULL);

    if (!g_markup_parse_context_parse(ctxt, buf.buffer, buf.buffer_pos, &err)) {
        if (hdl->last_message) g_free(hdl->last_message);
        hdl->last_message = g_strdup(err->message);
        result = S3_RESULT_FAIL;

    if (!g_markup_parse_context_end_parse(ctxt, &err)) {
        if (hdl->last_message) g_free(hdl->last_message);
        hdl->last_message = g_strdup(err->message);
        result = S3_RESULT_FAIL;

    g_markup_parse_context_free(ctxt);
    /* list_end_element set next_marker when the page was truncated */
    } while (thunk.next_marker);

    if (err) g_error_free(err);
    if (thunk.text) g_free(thunk.text);
    if (thunk.next_marker) g_free(thunk.next_marker);
    if (ctxt) g_markup_parse_context_free(ctxt);
    if (buf.buffer) g_free(buf.buffer);

    if (result != S3_RESULT_OK) {
        g_slist_free(thunk.filename_list);
        /* success: hand the accumulated results to the caller */
        *list = thunk.filename_list;
        *total_size = thunk.size;
/* Download the object at BUCKET/KEY, streaming the body through
 * write_func/write_data (reset_func rewinds the sink on retries).
 * Returns TRUE on HTTP 200; details of any failure are in hdl->last_*. */
s3_read(S3Handle *hdl,
        s3_write_func write_func,
        s3_reset_func reset_func,
        gpointer write_data,
        s3_progress_func progress_func,
        gpointer progress_data)
    s3_result_t result = S3_RESULT_FAIL;
    static result_handling_t result_handling[] = {
        { 200, 0, 0, S3_RESULT_OK },
        RESULT_HANDLING_ALWAYS_RETRY,
        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }

    g_assert(hdl != NULL);
    g_assert(write_func != NULL);

    result = perform_request(hdl, "GET", bucket, key, NULL, NULL,
                             NULL, NULL, NULL, NULL, NULL, write_func, reset_func, write_data,
                             progress_func, progress_data, result_handling);

    return result == S3_RESULT_OK;
/* Delete BUCKET/KEY. Deleting an already-absent object (404) or an
 * absent bucket counts as success, as does 409 (e.g. in-progress
 * conflicts), so deletes are idempotent.
 * NOTE(review): the unconditional {404,0,...} entry already matches any
 * 404 before the more specific {404, NoSuchBucket, ...} entry is
 * reached — harmless here since both map to S3_RESULT_OK. */
s3_delete(S3Handle *hdl,
    s3_result_t result = S3_RESULT_FAIL;
    static result_handling_t result_handling[] = {
        { 204, 0, 0, S3_RESULT_OK },
        { 404, 0, 0, S3_RESULT_OK },
        { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_OK },
        RESULT_HANDLING_ALWAYS_RETRY,
        { 409, 0, 0, S3_RESULT_OK },
        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }

    g_assert(hdl != NULL);

    result = perform_request(hdl, "DELETE", bucket, key, NULL, NULL,
                             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

    return result == S3_RESULT_OK;
/* Create BUCKET, sending a CreateBucketConfiguration body when a
 * non-wildcard location constraint is configured (the bucket name must
 * then be subdomain-safe). If the bucket already exists and is owned by
 * us, verify that its location constraint matches the configured one by
 * fetching ?location and matching it with location_con_regex. Returns
 * TRUE only when creation (or the constraint check) succeeds. */
s3_make_bucket(S3Handle *hdl,
    s3_result_t result = S3_RESULT_FAIL;
    static result_handling_t result_handling[] = {
        { 200, 0, 0, S3_RESULT_OK },
        { 201, 0, 0, S3_RESULT_OK },
        { 202, 0, 0, S3_RESULT_OK },
        { 204, 0, 0, S3_RESULT_OK },
        { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_RETRY },
        RESULT_HANDLING_ALWAYS_RETRY,
        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
    regmatch_t pmatch[4];
    char *loc_end_open, *loc_content;
    CurlBuffer buf = {NULL, 0, 0, 0}, *ptr = NULL;
    s3_read_func read_func = NULL;
    s3_reset_func reset_func = NULL;
    s3_md5_func md5_func = NULL;
    s3_size_func size_func = NULL;

    g_assert(hdl != NULL);

    if (is_non_empty_string(hdl->bucket_location) &&
        0 != strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location)) {
        if (s3_bucket_location_compat(bucket)) {
            /* request body: CreateBucketConfiguration XML; IIJ GIO needs
             * its own namespace attribute */
            buf.buffer = g_strdup_printf(AMAZON_BUCKET_CONF_TEMPLATE,
                                         g_str_equal(hdl->host, "gss.iijgio.com")?
                                         " xmlns=\"http://acs.iijgio.com/doc/2006-03-01/\"":
                                         hdl->bucket_location);
            buf.buffer_len = (guint) strlen(buf.buffer);
            buf.max_buffer_size = buf.buffer_len;
            read_func = s3_buffer_read_func;
            reset_func = s3_buffer_reset_func;
            size_func = s3_buffer_size_func;
            md5_func = s3_buffer_md5_func;
            hdl->last_message = g_strdup_printf(_(
                "Location constraint given for Amazon S3 bucket, "
                "but the bucket name (%s) is not usable as a subdomain."), bucket);

    result = perform_request(hdl, "PUT", bucket, NULL, NULL, NULL,
                             read_func, reset_func, size_func, md5_func, ptr,
                             NULL, NULL, NULL, NULL, NULL, result_handling);

    if (result == S3_RESULT_OK ||
        (result != S3_RESULT_OK &&
         hdl->last_s3_error_code == S3_ERROR_BucketAlreadyOwnedByYou)) {
        /* verify the that the location constraint on the existing bucket matches
         * the one that's configured.
        if (is_non_empty_string(hdl->bucket_location)) {
            result = perform_request(hdl, "GET", bucket, NULL, "location", NULL,
                                     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                                     NULL, NULL, result_handling);
            result = perform_request(hdl, "GET", bucket, NULL, NULL, NULL,
                                     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                                     NULL, NULL, result_handling);

        if (result == S3_RESULT_OK && is_non_empty_string(hdl->bucket_location)) {
            /* return to the default state of failure */
            result = S3_RESULT_FAIL;

            if (body) g_free(body);
            /* use strndup to get a null-terminated string */
            body = g_strndup(hdl->last_response_body, hdl->last_response_body_size);
                hdl->last_message = g_strdup(_("No body received for location request"));
            } else if ('\0' == body[0]) {
                hdl->last_message = g_strdup(_("Empty body received for location request"));

            if (!s3_regexec_wrap(&location_con_regex, body, 4, pmatch, 0)) {
                /* group 1 = "/>" for a self-closing (empty) constraint,
                 * group 3 = the constraint's text content */
                loc_end_open = find_regex_substring(body, pmatch[1]);
                loc_content = find_regex_substring(body, pmatch[3]);

                /* The case of an empty string is special because XML allows
                 * "self-closing" tags
                if (0 == strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location) &&
                    '/' != loc_end_open[0])
                    hdl->last_message = g_strdup(_("A wildcard location constraint is "
                        "configured, but the bucket has a non-empty location constraint"));
                else if (strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location)?
                         strncmp(loc_content, hdl->bucket_location, strlen(hdl->bucket_location)) :
                         ('\0' != loc_content[0]))
                    hdl->last_message = g_strdup(_("The location constraint configured "
                        "does not match the constraint currently on the bucket"));
                    result = S3_RESULT_OK;
                hdl->last_message = g_strdup(_("Unexpected location response from Amazon S3"));

    if (body) g_free(body);

    return result == S3_RESULT_OK;
/* Probe whether BUCKET exists (and is accessible) by requesting a
 * single-entry listing — "max-keys=1" for S3, "limit=1" for Swift.
 * Returns TRUE on HTTP 200/204. */
s3_is_bucket_exists(S3Handle *hdl,
    s3_result_t result = S3_RESULT_FAIL;
    static result_handling_t result_handling[] = {
        { 200, 0, 0, S3_RESULT_OK },
        { 204, 0, 0, S3_RESULT_OK },
        RESULT_HANDLING_ALWAYS_RETRY,
        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }

    result = perform_request(hdl, "GET", bucket, NULL, NULL,
                             hdl->openstack_swift_api?"limit=1":"max-keys=1",
                             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                             NULL, NULL, result_handling);

    return result == S3_RESULT_OK;
/* Delete BUCKET itself: a bucket delete is a key delete with key=NULL,
 * so this inherits s3_delete()'s idempotent 404/409 handling. */
s3_delete_bucket(S3Handle *hdl,
    return s3_delete(hdl, bucket, NULL);