2 * Copyright (c) 2008-2012 Zmanda, Inc. All Rights Reserved.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published
6 * by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
10 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 * Contact information: Zmanda Inc., 465 S. Mathilda Ave., Suite 300
18 * Sunnyvale, CA 94085, USA, or: http://www.zmanda.com
22 * - collect speed statistics
27 /* use a relative path here to avoid conflicting with Perl's config.h. */
28 #include "../config/config.h"
36 #ifdef HAVE_SYS_TYPES_H
37 #include <sys/types.h>
39 #ifdef HAVE_SYS_STAT_H
58 #include <curl/curl.h>
60 /* Constant renamed after version 7.10.7 */
61 #ifndef CURLINFO_RESPONSE_CODE
62 #define CURLINFO_RESPONSE_CODE CURLINFO_HTTP_CODE
65 /* We don't need OpenSSL's kerberos support, and it's broken in
67 #define OPENSSL_NO_KRB5
69 #ifdef HAVE_OPENSSL_HMAC_H
70 # include <openssl/hmac.h>
72 # ifdef HAVE_CRYPTO_HMAC_H
73 # include <crypto/hmac.h>
81 #include <openssl/err.h>
82 #include <openssl/ssl.h>
83 #include <openssl/md5.h>
85 /* Maximum key length as specified in the S3 documentation
86 * (*excluding* null terminator) */
87 #define S3_MAX_KEY_LENGTH 1024
89 #define AMAZON_SECURITY_HEADER "x-amz-security-token"
90 #define AMAZON_BUCKET_CONF_TEMPLATE "\
91 <CreateBucketConfiguration%s>\n\
92 <LocationConstraint>%s</LocationConstraint>\n\
93 </CreateBucketConfiguration>"
95 #define AMAZON_STORAGE_CLASS_HEADER "x-amz-storage-class"
97 #define AMAZON_SERVER_SIDE_ENCRYPTION_HEADER "x-amz-server-side-encryption"
99 #define AMAZON_WILDCARD_LOCATION "*"
101 /* parameters for exponential backoff in the face of retriable errors */
104 #define EXPONENTIAL_BACKOFF_START_USEC G_USEC_PER_SEC/100
105 /* double at each retry */
106 #define EXPONENTIAL_BACKOFF_BASE 2
107 /* retry 14 times (for a total of about 3 minutes spent waiting) */
108 #define EXPONENTIAL_BACKOFF_MAX_RETRIES 14
110 /* general "reasonable size" parameters */
111 #define MAX_ERROR_RESPONSE_LEN (100*1024)
113 /* Results which should always be retried */
/* Table fragment of { response_code, s3_error_code, curl_code, result }
 * rows consumed by lookup_result(); a zero in any field acts as a wildcard.
 * NOTE(review): CURLE_OPERATION_TIMEOUTED is the historical libcurl spelling
 * (modern curl aliases it to CURLE_OPERATION_TIMEDOUT) -- confirm the minimum
 * supported libcurl version before modernizing the name. */
114 #define RESULT_HANDLING_ALWAYS_RETRY \
115 { 400, S3_ERROR_RequestTimeout, 0, S3_RESULT_RETRY }, \
116 { 403, S3_ERROR_RequestTimeTooSkewed,0, S3_RESULT_RETRY }, \
117 { 409, S3_ERROR_OperationAborted, 0, S3_RESULT_RETRY }, \
118 { 412, S3_ERROR_PreconditionFailed, 0, S3_RESULT_RETRY }, \
119 { 500, S3_ERROR_InternalError, 0, S3_RESULT_RETRY }, \
120 { 501, S3_ERROR_NotImplemented, 0, S3_RESULT_RETRY }, \
121 { 0, 0, CURLE_COULDNT_CONNECT, S3_RESULT_RETRY }, \
122 { 0, 0, CURLE_COULDNT_RESOLVE_HOST, S3_RESULT_RETRY }, \
123 { 0, 0, CURLE_PARTIAL_FILE, S3_RESULT_RETRY }, \
124 { 0, 0, CURLE_OPERATION_TIMEOUTED, S3_RESULT_RETRY }, \
125 { 0, 0, CURLE_SSL_CONNECT_ERROR, S3_RESULT_RETRY }, \
126 { 0, 0, CURLE_SEND_ERROR, S3_RESULT_RETRY }, \
127 { 0, 0, CURLE_RECV_ERROR, S3_RESULT_RETRY }, \
128 { 0, 0, CURLE_GOT_NOTHING, S3_RESULT_RETRY }
131 * Data structures and associated functions
135 /* (all strings in this struct are freed by s3_free()) */
140 char *swift_account_id;
141 char *swift_access_key;
151 gboolean getting_oauth2_access_token;
152 gboolean getting_swift_2_token;
154 /* attributes for new objects */
155 char *bucket_location;
157 char *server_side_encryption;
161 gboolean use_subdomain;
172 guint64 max_send_speed;
173 guint64 max_recv_speed;
175 /* information from the last request */
177 guint last_response_code;
178 s3_error_code_t last_s3_error_code;
179 CURLcode last_curl_code;
180 guint last_num_retries;
181 void *last_response_body;
182 guint last_response_body_size;
185 time_t time_offset_with_s3;
188 gboolean reuse_connection;
193 s3_write_func write_func;
194 s3_reset_func reset_func;
197 gboolean headers_done;
198 gboolean int_write_done;
200 /* Points to current handle: Added to get hold of s3 offset */
201 struct S3Handle *hdl;
204 /* Callback function to examine headers one-at-a-time
206 * @note this is the same as CURLOPT_HEADERFUNCTION
208 * @param data: The pointer to read data from
209 * @param size: The size of each "element" of the data buffer in bytes
210 * @param nmemb: The number of elements in the data buffer.
211 * So, the buffer's size is size*nmemb bytes.
212 * @param stream: the header_data (an opaque pointer)
214 * @return The number of bytes written to the buffer or
215 * CURL_WRITEFUNC_PAUSE to pause.
216 * If it's the number of bytes written, it should match the buffer size
218 typedef size_t (*s3_header_func)(void *data, size_t size, size_t nmemb, void *stream);
224 /* (see preprocessor magic in s3.h) */
226 static char * s3_error_code_names[] = {
227 #define S3_ERROR(NAME) #NAME
232 /* Convert an s3 error name to an error code. This function
233 * matches strings case-insensitively, and is appropriate for use
234 * on data from the network.
236 * @param s3_error_code: the error name
237 * @returns: the error code (see constants in s3.h)
239 static s3_error_code_t
240 s3_error_code_from_name(char *s3_error_name);
242 /* Convert an s3 error code to a string
244 * @param s3_error_code: the error code to convert
245 * @returns: statically allocated string
248 s3_error_name_from_code(s3_error_code_t s3_error_code);
254 /* result handling is specified by a static array of result_handling structs,
255 * which match based on response_code (from HTTP) and S3 error code. The result
256 * given for the first match is used. 0 acts as a wildcard for both response_code
257 * and s3_error_code. The list is terminated with a struct containing 0 for both
258 * response_code and s3_error_code; the result for that struct is the default
261 * See RESULT_HANDLING_ALWAYS_RETRY for an example.
264 S3_RESULT_RETRY = -1,
267 S3_RESULT_NOTIMPL = 2
270 typedef struct result_handling {
272 s3_error_code_t s3_error_code;
278 * get the access token for OAUTH2
280 static gboolean oauth2_get_access_token(S3Handle *hdl);
282 /* Lookup a result in C{result_handling}.
284 * @param result_handling: array of handling specifications
285 * @param response_code: response code from operation
286 * @param s3_error_code: s3 error code from operation, if any
287 * @param curl_code: the CURL error, if any
288 * @returns: the matching result
291 lookup_result(const result_handling_t *result_handling,
293 s3_error_code_t s3_error_code,
297 * Precompiled regular expressions */
298 static regex_t etag_regex, error_name_regex, message_regex, subdomain_regex,
299 location_con_regex, date_sync_regex, x_auth_token_regex,
300 x_storage_url_regex, access_token_regex, expires_in_regex,
301 content_type_regex, details_regex, code_regex;
308 /* Check if a string is non-empty
310 * @param str: string to check
311 * @returns: true iff str is non-NULL and not "\0"
313 static gboolean is_non_empty_string(const char *str);
315 /* Construct the URL for an Amazon S3 REST request.
317 * A new string is allocated and returned; it is the responsiblity of the caller.
319 * @param hdl: the S3Handle object
320 * @param service_path: A path to add in the URL, or NULL for none.
321 * @param bucket: the bucket being accessed, or NULL for none
322 * @param key: the key being accessed, or NULL for none
323 * @param subresource: the sub-resource being accessed (e.g. "acl"), or NULL for none
324 * @param query: the query being accessed (e.g. "acl"), or NULL for none
326 * !use_subdomain: http://host/service_path/bucket/key
327 * use_subdomain : http://bucket.host/service_path/key
335 const char *subresource,
338 /* Create proper authorization headers for an Amazon S3 REST
339 * request to C{headers}.
341 * @note: C{X-Amz} headers (in C{headers}) must
343 * - be in alphabetical order
344 * - have no spaces around the colon
345 * (don't yell at me -- see the Amazon Developer Guide)
347 * @param hdl: the S3Handle object
348 * @param verb: capitalized verb for this request ('PUT', 'GET', etc.)
349 * @param bucket: the bucket being accessed, or NULL for none
350 * @param key: the key being accessed, or NULL for none
351 * @param subresource: the sub-resource being accessed (e.g. "acl"), or NULL for none
352 * @param md5_hash: the MD5 hash of the request body, or NULL for none
354 static struct curl_slist *
355 authenticate_request(S3Handle *hdl,
359 const char *subresource,
360 const char *md5_hash,
361 const char *content_type,
362 const size_t content_length,
363 const char *project_id);
367 /* Interpret the response to an S3 operation, assuming CURL completed its request
368 * successfully. This function fills in the relevant C{hdl->last*} members.
370 * @param hdl: The S3Handle object
371 * @param body: the response body
372 * @param body_len: the length of the response body
373 * @param etag: The response's ETag header
374 * @param content_md5: The hex-encoded MD5 hash of the request body,
375 * which will be checked against the response's ETag header.
376 * If NULL, the header is not checked.
377 * If non-NULL, then the body should have the response headers at its beginnning.
378 * @returns: TRUE if the response should be retried (e.g., network error)
381 interpret_response(S3Handle *hdl,
383 char *curl_error_buffer,
387 const char *content_md5);
389 /* Perform an S3 operation. This function handles all of the details
390 * of retryig requests and so on.
392 * The concepts of bucket and keys are defined by the Amazon S3 API.
393 * See: "Components of Amazon S3" - API Version 2006-03-01 pg. 8
395 * Individual sub-resources are defined in several places. In the REST API,
396 * they they are represented by a "flag" in the "query string".
397 * See: "Constructing the CanonicalizedResource Element" - API Version 2006-03-01 pg. 60
399 * @param hdl: the S3Handle object
400 * @param verb: the HTTP request method
401 * @param bucket: the bucket to access, or NULL for none
402 * @param key: the key to access, or NULL for none
403 * @param subresource: the "sub-resource" to request (e.g. "acl") or NULL for none
404 * @param query: the query string to send (not including th initial '?'),
406 * @param read_func: the callback for reading data
407 * Will use s3_empty_read_func if NULL is passed in.
408 * @param read_reset_func: the callback for to reset reading data
409 * @param size_func: the callback to get the number of bytes to upload
410 * @param md5_func: the callback to get the MD5 hash of the data to upload
411 * @param read_data: pointer to pass to the above functions
412 * @param write_func: the callback for writing data.
413 * Will use s3_counter_write_func if NULL is passed in.
414 * @param write_reset_func: the callback for to reset writing data
415 * @param write_data: pointer to pass to C{write_func}
416 * @param progress_func: the callback for progress information
417 * @param progress_data: pointer to pass to C{progress_func}
418 * @param result_handling: instructions for handling the results; see above.
419 * @returns: the result specified by result_handling; details of the response
420 * are then available in C{hdl->last*}
423 perform_request(S3Handle *hdl,
427 const char *subresource,
429 const char *content_type,
430 const char *project_id,
431 s3_read_func read_func,
432 s3_reset_func read_reset_func,
433 s3_size_func size_func,
434 s3_md5_func md5_func,
436 s3_write_func write_func,
437 s3_reset_func write_reset_func,
439 s3_progress_func progress_func,
440 gpointer progress_data,
441 const result_handling_t *result_handling);
444 * a CURLOPT_WRITEFUNCTION to save part of the response in memory and
445 * call an external function if one was provided.
448 s3_internal_write_func(void *ptr, size_t size, size_t nmemb, void * stream);
451 * a function to reset to our internal buffer
454 s3_internal_reset_func(void * stream);
457 * a CURLOPT_HEADERFUNCTION to save the ETag header only.
460 s3_internal_header_func(void *ptr, size_t size, size_t nmemb, void * stream);
463 compile_regexes(void);
465 static gboolean get_openstack_swift_api_v1_setting(S3Handle *hdl);
466 static gboolean get_openstack_swift_api_v2_setting(S3Handle *hdl);
469 * Static function implementations
471 static s3_error_code_t
472 s3_error_code_from_name(char *s3_error_name)
476 if (!s3_error_name) return S3_ERROR_Unknown;
478 /* do a brute-force search through the list, since it's not sorted */
479 for (i = 0; i < S3_ERROR_END; i++) {
480 if (g_ascii_strcasecmp(s3_error_name, s3_error_code_names[i]) == 0)
484 return S3_ERROR_Unknown;
488 s3_error_name_from_code(s3_error_code_t s3_error_code)
490 if (s3_error_code >= S3_ERROR_END)
491 s3_error_code = S3_ERROR_Unknown;
493 return s3_error_code_names[s3_error_code];
497 s3_curl_supports_ssl(void)
499 static int supported = -1;
500 if (supported == -1) {
501 #if defined(CURL_VERSION_SSL)
502 curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);
503 if (info->features & CURL_VERSION_SSL)
516 s3_curl_throttling_compat(void)
518 /* CURLOPT_MAX_SEND_SPEED_LARGE added in 7.15.5 */
519 #if LIBCURL_VERSION_NUM >= 0x070f05
520 curl_version_info_data *info;
522 /* check the runtime version too */
523 info = curl_version_info(CURLVERSION_NOW);
524 return info->version_num >= 0x070f05;
531 lookup_result(const result_handling_t *result_handling,
533 s3_error_code_t s3_error_code,
536 while (result_handling->response_code
537 || result_handling->s3_error_code
538 || result_handling->curl_code) {
539 if ((result_handling->response_code && result_handling->response_code != response_code)
540 || (result_handling->s3_error_code && result_handling->s3_error_code != s3_error_code)
541 || (result_handling->curl_code && result_handling->curl_code != curl_code)) {
546 return result_handling->result;
549 /* return the result for the terminator, as the default */
550 return result_handling->result;
557 gint year, month, day, hour, minute, seconds;
560 if (strlen(date) < 19)
564 month = atoi(date+5);
566 hour = atoi(date+11);
567 minute = atoi(date+14);
568 seconds = atoi(date+17);
570 if (*atz == '.') { /* skip decimal seconds */
572 while (*atz >= '0' && *atz <= '9') {
577 #if GLIB_CHECK_VERSION(2,26,0)
578 if (!glib_check_version(2,26,0)) {
583 tz = g_time_zone_new(atz);
584 dt = g_date_time_new(tz, year, month, day, hour, minute, seconds);
585 a = g_date_time_to_unix(dt);
586 g_time_zone_unref(tz);
587 g_date_time_unref(dt);
595 tm.tm_year = year - 1900;
596 tm.tm_mon = month - 1;
606 if (*atz == '-' || *atz == '+') { /* numeric timezone */
610 gint Hour = atoi(atz);
611 gint Min = atoi(atz+4);
618 localtime_r(&t, <t);
622 tm.tm_sec += lt - gt;
625 } else if (*atz == 'Z' && *(atz+1) == '\0') { /* Z timezone */
631 localtime_r(&t, <t);
635 tm.tm_sec += lt - gt;
638 } else { /* named timezone */
656 setenv("TZ", atz, 1);
659 g_snprintf(buf, 100, "%d", (int)a);
660 size = write(fd[1], buf, strlen(buf));
665 size = read(fd[0], buf, 100);
668 waitpid(pid, NULL, 0);
678 is_non_empty_string(const char *str)
680 return str && str[0] != '\0';
688 const char *subresource,
692 char *esc_bucket = NULL, *esc_key = NULL;
694 if ((hdl->s3_api == S3_API_SWIFT_1 || hdl->s3_api == S3_API_SWIFT_2 ||
695 hdl->s3_api == S3_API_OAUTH2) &&
696 hdl->x_storage_url) {
697 url = g_string_new(hdl->x_storage_url);
698 g_string_append(url, "/");
701 url = g_string_new("http");
703 g_string_append(url, "s");
705 g_string_append(url, "://");
708 if (hdl->use_subdomain && bucket)
709 g_string_append_printf(url, "%s.%s", bucket, hdl->host);
711 g_string_append_printf(url, "%s", hdl->host);
713 if (hdl->service_path) {
714 g_string_append_printf(url, "%s/", hdl->service_path);
716 g_string_append(url, "/");
721 if (!hdl->use_subdomain && bucket) {
722 /* curl_easy_escape addeded in 7.15.4 */
723 #if LIBCURL_VERSION_NUM >= 0x070f04
724 curl_version_info_data *info;
725 /* check the runtime version too */
726 info = curl_version_info(CURLVERSION_NOW);
727 if (info->version_num >= 0x070f04)
728 esc_bucket = curl_easy_escape(hdl->curl, bucket, 0);
730 esc_bucket = curl_escape(bucket, 0);
732 esc_bucket = curl_escape(bucket, 0);
734 if (!esc_bucket) goto cleanup;
735 g_string_append_printf(url, "%s", esc_bucket);
737 g_string_append(url, "/");
738 curl_free(esc_bucket);
742 /* curl_easy_escape addeded in 7.15.4 */
743 #if LIBCURL_VERSION_NUM >= 0x070f04
744 curl_version_info_data *info;
745 /* check the runtime version too */
746 info = curl_version_info(CURLVERSION_NOW);
747 if (info->version_num >= 0x070f04)
748 esc_key = curl_easy_escape(hdl->curl, key, 0);
750 esc_key = curl_escape(key, 0);
752 esc_key = curl_escape(key, 0);
754 if (!esc_key) goto cleanup;
755 g_string_append_printf(url, "%s", esc_key);
759 if (url->str[strlen(url->str)-1] == '/') {
760 g_string_truncate(url, strlen(url->str)-1);
764 if (subresource || query)
765 g_string_append(url, "?");
768 g_string_append(url, subresource);
770 if (subresource && query)
771 g_string_append(url, "&");
774 g_string_append(url, query);
778 return g_string_free(url, FALSE);
/* Build the authentication/metadata header list for one request.
 * For Swift v1 it sends X-Auth-User/X-Auth-Key (or a cached X-Auth-Token),
 * for Swift v2 the cached token, for OAuth2 a Bearer token, and for plain
 * S3 it signs the canonical string with HMAC-SHA1 and emits an
 * "Authorization: AWS key:signature" header per the 2006-03-01 API.
 * NOTE(review): this listing is sampled -- some lines (locals such as
 * buf/t/tmp/date, several closing braces and else-branches) fall on
 * elided lines; read against the full source before editing. */
781 static struct curl_slist *
782 authenticate_request(S3Handle *hdl,
786 const char *subresource,
787 const char *md5_hash,
788 const char *content_type,
789 const size_t content_length,
790 const char *project_id)
791 {
797 GByteArray *md = NULL;
798 char *auth_base64 = NULL;
799 struct curl_slist *headers = NULL;
800 char *esc_bucket = NULL, *esc_key = NULL;
801 GString *auth_string = NULL;
804 static const char *wkday[] = {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"};
805 static const char *month[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
806 "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
808 /* calculate the date */
811 /* sync clock with amazon s3 */
812 t = t + hdl->time_offset_with_s3;
/* NOTE(review): gmtime_s branch is the Windows path; message says
 * "localtime" but the call is gmtime -- pre-existing wording mismatch. */
815 if (!gmtime_s(&tmp, &t)) g_debug("localtime error");
817 if (!gmtime_r(&t, &tmp)) perror("localtime");
/* RFC-1123 date, hand-rolled to avoid locale-dependent strftime output */
821 date = g_strdup_printf("%s, %02d %s %04d %02d:%02d:%02d GMT",
822 wkday[tmp.tm_wday], tmp.tm_mday, month[tmp.tm_mon], 1900+tmp.tm_year,
823 tmp.tm_hour, tmp.tm_min, tmp.tm_sec);
825 if (hdl->s3_api == S3_API_SWIFT_1) {
827 buf = g_strdup_printf("X-Auth-User: %s", hdl->swift_account_id);
828 headers = curl_slist_append(headers, buf);
830 buf = g_strdup_printf("X-Auth-Key: %s", hdl->swift_access_key);
831 headers = curl_slist_append(headers, buf);
834 buf = g_strdup_printf("X-Auth-Token: %s", hdl->x_auth_token);
835 headers = curl_slist_append(headers, buf);
838 } else if (hdl->s3_api == S3_API_SWIFT_2) {
840 buf = g_strdup_printf("X-Auth-Token: %s", hdl->x_auth_token);
841 headers = curl_slist_append(headers, buf);
844 buf = g_strdup_printf("Accept: %s", "application/xml");
845 headers = curl_slist_append(headers, buf);
847 } else if (hdl->s3_api == S3_API_OAUTH2) {
849 buf = g_strdup_printf("Authorization: Bearer %s", hdl->access_token);
850 headers = curl_slist_append(headers, buf);
854 /* Build the string to sign, per the S3 spec.
855 * See: "Authenticating REST Requests" - API Version 2006-03-01 pg 58
859 auth_string = g_string_new(verb);
860 g_string_append(auth_string, "\n");
862 /* Content-MD5 header */
864 g_string_append(auth_string, md5_hash);
865 g_string_append(auth_string, "\n");
868 g_string_append(auth_string, content_type);
870 g_string_append(auth_string, "\n");
/* Date line of the canonical string */
873 g_string_append(auth_string, date);
874 g_string_append(auth_string, "\n");
876 /* CanonicalizedAmzHeaders, sorted lexicographically */
877 if (is_non_empty_string(hdl->user_token)) {
878 g_string_append(auth_string, AMAZON_SECURITY_HEADER);
879 g_string_append(auth_string, ":");
880 g_string_append(auth_string, hdl->user_token);
881 g_string_append(auth_string, ",");
882 g_string_append(auth_string, STS_PRODUCT_TOKEN);
883 g_string_append(auth_string, "\n");
/* server-side encryption header is only signed on PUT requests */
886 if (g_str_equal(verb,"PUT") &&
887 is_non_empty_string(hdl->server_side_encryption)) {
888 g_string_append(auth_string, AMAZON_SERVER_SIDE_ENCRYPTION_HEADER);
889 g_string_append(auth_string, ":");
890 g_string_append(auth_string, hdl->server_side_encryption);
891 g_string_append(auth_string, "\n");
894 if (is_non_empty_string(hdl->storage_class)) {
895 g_string_append(auth_string, AMAZON_STORAGE_CLASS_HEADER);
896 g_string_append(auth_string, ":");
897 g_string_append(auth_string, hdl->storage_class);
898 g_string_append(auth_string, "\n");
901 /* CanonicalizedResource */
902 if (hdl->service_path) {
903 g_string_append(auth_string, hdl->service_path);
905 g_string_append(auth_string, "/");
/* subdomain buckets go into the resource un-escaped; path buckets escaped */
907 if (hdl->use_subdomain)
908 g_string_append(auth_string, bucket);
910 esc_bucket = curl_escape(bucket, 0);
911 if (!esc_bucket) goto cleanup;
912 g_string_append(auth_string, esc_bucket);
916 if (bucket && (hdl->use_subdomain || key))
917 g_string_append(auth_string, "/");
920 esc_key = curl_escape(key, 0);
921 if (!esc_key) goto cleanup;
922 g_string_append(auth_string, esc_key);
926 g_string_append(auth_string, "?");
927 g_string_append(auth_string, subresource);
930 /* run HMAC-SHA1 on the canonicalized string */
/* NOTE(review): the direct HMAC_CTX / HMAC_Init_ex / HMAC_CTX_cleanup API
 * is removed in OpenSSL 3.x -- porting will need HMAC() or EVP_MAC. */
931 md = g_byte_array_sized_new(EVP_MAX_MD_SIZE+1);
933 HMAC_Init_ex(&ctx, hdl->secret_key, (int) strlen(hdl->secret_key),
935 HMAC_Update(&ctx, (unsigned char*) auth_string->str, auth_string->len);
936 HMAC_Final(&ctx, md->data, &md->len);
937 HMAC_CTX_cleanup(&ctx);
938 auth_base64 = s3_base64_encode(md);
939 /* append the new headers */
940 if (is_non_empty_string(hdl->user_token)) {
941 /* Devpay headers are included in hash. */
942 buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s",
944 headers = curl_slist_append(headers, buf);
947 buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s",
949 headers = curl_slist_append(headers, buf);
953 if (g_str_equal(verb,"PUT") &&
954 is_non_empty_string(hdl->server_side_encryption)) {
955 buf = g_strdup_printf(AMAZON_SERVER_SIDE_ENCRYPTION_HEADER ": %s",
956 hdl->server_side_encryption);
957 headers = curl_slist_append(headers, buf);
961 if (is_non_empty_string(hdl->storage_class)) {
962 buf = g_strdup_printf(AMAZON_STORAGE_CLASS_HEADER ": %s",
964 headers = curl_slist_append(headers, buf);
/* the signature computed above becomes the AWS Authorization header */
968 buf = g_strdup_printf("Authorization: AWS %s:%s",
969 hdl->access_key, auth_base64);
970 headers = curl_slist_append(headers, buf);
974 if (md5_hash && '\0' != md5_hash[0]) {
975 buf = g_strdup_printf("Content-MD5: %s", md5_hash);
976 headers = curl_slist_append(headers, buf);
979 if (content_length > 0) {
980 buf = g_strdup_printf("Content-Length: %zu", content_length);
981 headers = curl_slist_append(headers, buf);
986 buf = g_strdup_printf("Content-Type: %s", content_type);
987 headers = curl_slist_append(headers, buf);
/* Google Cloud Storage (via OAUTH2) extra headers */
991 if (hdl->s3_api == S3_API_OAUTH2) {
992 buf = g_strdup_printf("x-goog-api-version: 2");
993 headers = curl_slist_append(headers, buf);
997 if (project_id && hdl->s3_api == S3_API_OAUTH2) {
998 buf = g_strdup_printf("x-goog-project-id: %s", project_id);
999 headers = curl_slist_append(headers, buf);
1003 buf = g_strdup_printf("Date: %s", date);
1004 headers = curl_slist_append(headers, buf);
/* cleanup: free scratch allocations; headers list is returned to caller */
1011 if (md) g_byte_array_free(md, TRUE);
1012 g_free(auth_base64);
1013 if (auth_string) g_string_free(auth_string, TRUE);
1018 /* Functions for a SAX parser to parse the XML failure from Amazon */
/* NOTE(review): sampled listing -- additional members (in_title, in_body,
 * in_code, in_access, in_token, in_others, want_text, text, message,
 * details, error_name, token_id, expires) fall on elided lines; the SAX
 * callbacks below reference them. */
1020 /* Private structure for our "thunk", which tracks where the user is in the list
1022 struct failure_thunk {
/* element-position flags: which XML element the parser is currently inside */
1028 gboolean in_message;
1029 gboolean in_details;
1032 gboolean in_serviceCatalog;
1033 gboolean in_service;
1034 gboolean in_endpoint;
/* Keystone service-catalog data captured from element attributes */
1044 gchar *service_type;
1045 gchar *service_public_url;
/* GMarkup start-element callback: set the thunk's position flags for the
 * element just opened, and capture interesting attributes (token id/expiry,
 * service type, object-store public URL, inline error message).
 * NOTE(review): sampled listing -- some closing braces, loop conditions and
 * a few flag assignments (e.g. in_body/in_code) fall on elided lines. */
1050 failure_start_element(GMarkupParseContext *context G_GNUC_UNUSED,
1051 const gchar *element_name,
1052 const gchar **attribute_names,
1053 const gchar **attribute_values,
1055 GError **error G_GNUC_UNUSED)
1057 struct failure_thunk *thunk = (struct failure_thunk *)user_data;
1058 const gchar **att_name, **att_value;
/* want_text = 1 marks elements whose character data we accumulate */
1060 if (g_ascii_strcasecmp(element_name, "title") == 0) {
1061 thunk->in_title = 1;
1062 thunk->in_others = 0;
1063 thunk->want_text = 1;
1064 } else if (g_ascii_strcasecmp(element_name, "body") == 0) {
1066 thunk->in_others = 0;
1067 thunk->want_text = 1;
1068 } else if (g_ascii_strcasecmp(element_name, "code") == 0) {
1070 thunk->in_others = 0;
1071 thunk->want_text = 1;
1072 } else if (g_ascii_strcasecmp(element_name, "message") == 0) {
1073 thunk->in_message = 1;
1074 thunk->in_others = 0;
1075 thunk->want_text = 1;
1076 } else if (g_ascii_strcasecmp(element_name, "details") == 0) {
1077 thunk->in_details = 1;
1078 thunk->in_others = 0;
1079 thunk->want_text = 1;
1080 } else if (g_ascii_strcasecmp(element_name, "access") == 0) {
1081 thunk->in_access = 1;
1082 thunk->in_others = 0;
1083 } else if (g_ascii_strcasecmp(element_name, "token") == 0) {
1084 thunk->in_token = 1;
1085 thunk->in_others = 0;
1086 for (att_name=attribute_names, att_value=attribute_values;
1088 att_name++, att_value++) {
1089 if (g_str_equal(*att_name, "id")) {
1090 thunk->token_id = g_strdup(*att_value);
/* subtract 600s so we refresh the token well before it expires */
1092 if (g_str_equal(*att_name, "expires") && strlen(*att_value) >= 19) {
1093 thunk->expires = rfc3339_date(*att_value) - 600;
1096 } else if (g_ascii_strcasecmp(element_name, "serviceCatalog") == 0) {
1097 thunk->in_serviceCatalog = 1;
1098 thunk->in_others = 0;
1099 } else if (g_ascii_strcasecmp(element_name, "service") == 0) {
1100 thunk->in_service = 1;
1101 thunk->in_others = 0;
1102 for (att_name=attribute_names, att_value=attribute_values;
1104 att_name++, att_value++) {
1105 if (g_str_equal(*att_name, "type")) {
1106 thunk->service_type = g_strdup(*att_value);
1109 } else if (g_ascii_strcasecmp(element_name, "endpoint") == 0) {
1110 thunk->in_endpoint = 1;
1111 thunk->in_others = 0;
/* only the object-store service's endpoint URL is of interest */
1112 if (thunk->service_type &&
1113 g_str_equal(thunk->service_type, "object-store")) {
1114 for (att_name=attribute_names, att_value=attribute_values;
1116 att_name++, att_value++) {
1117 if (g_str_equal(*att_name, "publicURL")) {
1118 thunk->service_public_url = g_strdup(*att_value);
1122 } else if (g_ascii_strcasecmp(element_name, "error") == 0) {
1123 for (att_name=attribute_names, att_value=attribute_values;
1125 att_name++, att_value++) {
1126 if (g_str_equal(*att_name, "message")) {
1127 thunk->message = g_strdup(*att_value);
/* GMarkup end-element callback: move the accumulated thunk->text into the
 * field appropriate for the element being closed (error name, message,
 * details, ...) and clear the corresponding position flag.
 * NOTE(review): sampled listing -- several thunk->text = NULL resets and
 * closing braces fall on elided lines. */
1136 failure_end_element(GMarkupParseContext *context G_GNUC_UNUSED,
1137 const gchar *element_name,
1139 GError **error G_GNUC_UNUSED)
1141 struct failure_thunk *thunk = (struct failure_thunk *)user_data;
1143 if (g_ascii_strcasecmp(element_name, "title") == 0) {
/* <title> looks like "404 Not Found": the error name follows the space */
1144 char *p = strchr(thunk->text, ' ');
1148 thunk->error_name = g_strdup(p);
1151 g_free(thunk->text);
1153 thunk->in_title = 0;
1154 } else if (g_ascii_strcasecmp(element_name, "body") == 0) {
/* transfer ownership of the accumulated text to the thunk field */
1155 thunk->message = thunk->text;
1156 g_strstrip(thunk->message);
1159 } else if (g_ascii_strcasecmp(element_name, "code") == 0) {
1160 thunk->error_name = thunk->text;
1163 } else if (g_ascii_strcasecmp(element_name, "message") == 0) {
1164 thunk->message = thunk->text;
1166 thunk->in_message = 0;
1167 } else if (g_ascii_strcasecmp(element_name, "details") == 0) {
1168 thunk->details = thunk->text;
1170 thunk->in_details = 0;
1171 } else if (g_ascii_strcasecmp(element_name, "access") == 0) {
1172 thunk->message = thunk->text;
1174 thunk->in_access = 0;
1175 } else if (g_ascii_strcasecmp(element_name, "token") == 0) {
1176 thunk->message = thunk->text;
1178 thunk->in_token = 0;
1179 } else if (g_ascii_strcasecmp(element_name, "serviceCatalog") == 0) {
1180 thunk->message = thunk->text;
1182 thunk->in_serviceCatalog = 0;
1183 } else if (g_ascii_strcasecmp(element_name, "service") == 0) {
1184 thunk->message = thunk->text;
/* service_type is only needed while inside this <service> element */
1186 g_free(thunk->service_type);
1187 thunk->service_type = NULL;
1188 thunk->in_service = 0;
1189 } else if (g_ascii_strcasecmp(element_name, "endpoint") == 0) {
1190 thunk->message = thunk->text;
1192 thunk->in_endpoint = 0;
/* GMarkup text callback: accumulate character data into thunk->text, but
 * only when the current element asked for it (want_text) and we are not
 * inside an uninteresting nested element (in_others == 0). */
1199 failure_text(GMarkupParseContext *context G_GNUC_UNUSED,
1203 GError **error G_GNUC_UNUSED)
1205 struct failure_thunk *thunk = (struct failure_thunk *)user_data;
1207 if (thunk->want_text && thunk->in_others == 0) {
/* text is not NUL-terminated; copy exactly text_len bytes */
1210 new_text = g_strndup(text, text_len);
/* append to any text already collected for this element */
1212 strappend(thunk->text, new_text);
1215 thunk->text = new_text;
/* Interpret the response to an S3/Swift operation and fill in the
 * hdl->last_* fields (response code, s3 error code, curl code, message).
 * Dispatches on content-type: plain-text/HTML (Swift), JSON (Swift),
 * or XML (S3/Keystone, via the GMarkup SAX parser above).
 * Returns TRUE when the request looks retryable.
 * NOTE(review): sampled listing -- various returns, else-branches and
 * closing braces fall on elided lines; read against the full source. */
1221 interpret_response(S3Handle *hdl,
1223 char *curl_error_buffer,
1227 const char *content_md5)
1229 long response_code = 0;
1230 gboolean ret = TRUE;
1231 struct failure_thunk thunk;
1232 GMarkupParseContext *ctxt = NULL;
1233 static GMarkupParser parser = { failure_start_element, failure_end_element, failure_text, NULL, NULL };
1236 if (!hdl) return FALSE;
/* discard any message left over from the previous request */
1238 if (hdl->last_message) g_free(hdl->last_message);
1239 hdl->last_message = NULL;
1241 /* bail out from a CURL error */
1242 if (curl_code != CURLE_OK) {
1243 hdl->last_curl_code = curl_code;
1244 hdl->last_message = g_strdup_printf("CURL error: %s", curl_error_buffer);
1248 /* CURL seems to think things were OK, so get its response code */
1249 curl_easy_getinfo(hdl->curl, CURLINFO_RESPONSE_CODE, &response_code);
1250 hdl->last_response_code = response_code;
1252 /* check ETag, if present */
1253 if (etag && content_md5 && 200 == response_code) {
/* NOTE(review): the inner "etag &&" re-test is redundant given the
 * enclosing condition -- harmless, but pre-existing. */
1254 if (etag && g_ascii_strcasecmp(etag, content_md5))
1255 hdl->last_message = g_strdup("S3 Error: Possible data corruption (ETag returned by Amazon did not match the MD5 hash of the data sent)");
1261 /* Now look at the body to try to get the actual Amazon error message. */
1263 /* impose a reasonable limit on body size */
1264 if (body_len > MAX_ERROR_RESPONSE_LEN) {
1265 hdl->last_message = g_strdup("S3 Error: Unknown (response body too large to parse)");
1267 } else if (!body || body_len == 0) {
1268 if (response_code < 100 || response_code >= 400) {
1270 g_strdup("S3 Error: Unknown (empty response body)");
1271 return TRUE; /* perhaps a network error; retry the request */
1273 /* 2xx and 3xx codes without body are good result */
1274 hdl->last_s3_error_code = S3_ERROR_None;
/* zero-initialize every field of the parser thunk before use */
1279 thunk.in_title = FALSE;
1280 thunk.in_body = FALSE;
1281 thunk.in_code = FALSE;
1282 thunk.in_message = FALSE;
1283 thunk.in_details = FALSE;
1284 thunk.in_access = FALSE;
1285 thunk.in_token = FALSE;
1286 thunk.in_serviceCatalog = FALSE;
1287 thunk.in_service = FALSE;
1288 thunk.in_endpoint = FALSE;
1289 thunk.in_others = 0;
1291 thunk.want_text = FALSE;
1293 thunk.message = NULL;
1294 thunk.details = NULL;
1295 thunk.error_name = NULL;
1296 thunk.token_id = NULL;
1297 thunk.service_type = NULL;
1298 thunk.service_public_url = NULL;
/* Swift returns plain-text/HTML errors: first line is the code,
 * second line the message */
1301 if ((hdl->s3_api == S3_API_SWIFT_1 ||
1302 hdl->s3_api == S3_API_SWIFT_2) &&
1303 hdl->content_type &&
1304 (g_str_equal(hdl->content_type, "text/html") ||
1305 g_str_equal(hdl->content_type, "text/plain"))) {
1307 char *body_copy = g_strndup(body, body_len);
1308 char *b = body_copy;
1309 char *p = strchr(b, '\n');
1311 if (p) { /* first line: error code */
1314 p1 = strchr(b, ' ');
1318 thunk.error_name = g_strdup(p1);
1323 p = strchr(b, '\n');
1324 if (p) { /* second line: error message */
1327 thunk.message = g_strdup(p);
1328 g_strstrip(thunk.message);
/* Swift JSON errors: pull "code" and "details" out with precompiled
 * regexes rather than a JSON parser */
1332 } else if ((hdl->s3_api == S3_API_SWIFT_1 ||
1333 hdl->s3_api == S3_API_SWIFT_2) &&
1334 hdl->content_type &&
1335 g_str_equal(hdl->content_type, "application/json")) {
1336 char *body_copy = g_strndup(body, body_len);
1338 char *details = NULL;
1339 regmatch_t pmatch[2];
1341 if (!s3_regexec_wrap(&code_regex, body_copy, 2, pmatch, 0)) {
1342 code = find_regex_substring(body_copy, pmatch[1]);
1344 if (!s3_regexec_wrap(&details_regex, body_copy, 2, pmatch, 0)) {
1345 details = find_regex_substring(body_copy, pmatch[1]);
1347 if (code && details) {
1348 hdl->last_message = g_strdup_printf("%s (%s)", details, code);
1350 hdl->last_message = g_strdup_printf("(%s)", code);
1351 } else if (details) {
1352 hdl->last_message = g_strdup_printf("%s", details);
1354 hdl->last_message = NULL;
/* anything that is not XML at this point cannot be parsed further */
1360 } else if (!hdl->content_type ||
1361 !g_str_equal(hdl->content_type, "application/xml")) {
1365 /* run the parser over it */
1366 ctxt = g_markup_parse_context_new(&parser, 0, (gpointer)&thunk, NULL);
1367 if (!g_markup_parse_context_parse(ctxt, body, body_len, &err)) {
1368 if (hdl->last_message) g_free(hdl->last_message);
1369 hdl->last_message = g_strdup(err->message);
1373 if (!g_markup_parse_context_end_parse(ctxt, &err)) {
1374 if (hdl->last_message) g_free(hdl->last_message);
1375 hdl->last_message = g_strdup(err->message);
1379 g_markup_parse_context_free(ctxt);
/* Keystone auth responses: steal token and storage URL into the handle */
1382 if (hdl->s3_api == S3_API_SWIFT_2) {
1383 if (!hdl->x_auth_token && thunk.token_id) {
1384 hdl->x_auth_token = thunk.token_id;
1385 thunk.token_id = NULL;
1387 if (!hdl->x_storage_url && thunk.service_public_url) {
1388 hdl->x_storage_url = thunk.service_public_url;
1389 thunk.service_public_url = NULL;
1393 if (thunk.expires > 0) {
1394 hdl->expires = thunk.expires;
1397 if (thunk.error_name) {
1398 hdl->last_s3_error_code = s3_error_code_from_name(thunk.error_name);
1399 g_free(thunk.error_name);
1400 thunk.error_name = NULL;
1403 if (thunk.message) {
1404 g_free(hdl->last_message);
1405 if (thunk.details) {
1406 hdl->last_message = g_strdup_printf("%s: %s", thunk.message,
1408 amfree(thunk.message);
1409 amfree(thunk.details);
1411 hdl->last_message = thunk.message;
1412 thunk.message = NULL; /* steal the reference to the string */
/* cleanup: free whatever thunk fields were not handed to the handle */
1418 g_free(thunk.message);
1419 g_free(thunk.error_name);
1420 g_free(thunk.token_id);
1421 g_free(thunk.service_public_url);
1422 g_free(thunk.service_type);
1426 /* a CURLOPT_READFUNCTION to read data from a buffer. */
1428 s3_buffer_read_func(void *ptr, size_t size, size_t nmemb, void * stream)
1430 CurlBuffer *data = stream;
1431 guint bytes_desired = (guint) size * nmemb;
1433 /* check the number of bytes remaining, just to be safe */
1434 if (bytes_desired > data->buffer_len - data->buffer_pos)
1435 bytes_desired = data->buffer_len - data->buffer_pos;
1437 memcpy((char *)ptr, data->buffer + data->buffer_pos, bytes_desired);
1438 data->buffer_pos += bytes_desired;
1440 return bytes_desired;
1444 s3_buffer_size_func(void *stream)
1446 CurlBuffer *data = stream;
1447 return data->buffer_len;
/* s3_md5_func for a buffer: MD5 of the whole buffer contents.
 * Wraps the buffer in a stack-allocated GByteArray (no copy is made;
 * the array does not own the memory) and delegates the hashing. */
s3_buffer_md5_func(void *stream)
CurlBuffer *data = stream;
/* alias the buffer bytes as a GByteArray for s3_compute_md5_hash */
GByteArray req_body_gba = {(guint8 *)data->buffer, data->buffer_len};
return s3_compute_md5_hash(&req_body_gba);
1460 s3_buffer_reset_func(void *stream)
1462 CurlBuffer *data = stream;
1463 data->buffer_pos = 0;
1466 /* a CURLOPT_WRITEFUNCTION to write data to a buffer. */
1468 s3_buffer_write_func(void *ptr, size_t size, size_t nmemb, void *stream)
1470 CurlBuffer * data = stream;
1471 guint new_bytes = (guint) size * nmemb;
1472 guint bytes_needed = data->buffer_pos + new_bytes;
1474 /* error out if the new size is greater than the maximum allowed */
1475 if (data->max_buffer_size && bytes_needed > data->max_buffer_size)
1478 /* reallocate if necessary. We use exponential sizing to make this
1479 * happen less often. */
1480 if (bytes_needed > data->buffer_len) {
1481 guint new_size = MAX(bytes_needed, data->buffer_len * 2);
1482 if (data->max_buffer_size) {
1483 new_size = MIN(new_size, data->max_buffer_size);
1485 data->buffer = g_realloc(data->buffer, new_size);
1486 data->buffer_len = new_size;
1489 return 0; /* returning zero signals an error to libcurl */
1491 /* actually copy the data to the buffer */
1492 memcpy(data->buffer + data->buffer_pos, ptr, new_bytes);
1493 data->buffer_pos += new_bytes;
1495 /* signal success to curl */
1499 /* a CURLOPT_READFUNCTION that writes nothing. */
1501 s3_empty_read_func(G_GNUC_UNUSED void *ptr, G_GNUC_UNUSED size_t size, G_GNUC_UNUSED size_t nmemb, G_GNUC_UNUSED void * stream)
/* s3_size_func for an empty request body: the size is always zero. */
s3_empty_size_func(G_GNUC_UNUSED void *stream)
/* s3_md5_func for an empty request body: MD5 of zero bytes. */
s3_empty_md5_func(G_GNUC_UNUSED void *stream)
/* zero-length array backed by a string literal; static, never freed */
static const GByteArray empty = {(guint8 *) "", 0};
return s3_compute_md5_hash(&empty);
1520 /* a CURLOPT_WRITEFUNCTION to write data that just counts data.
1521 * s3_write_data should be NULL or a pointer to an gint64.
1524 s3_counter_write_func(G_GNUC_UNUSED void *ptr, size_t size, size_t nmemb, void *stream)
1526 gint64 *count = (gint64*) stream, inc = nmemb*size;
1528 if (count) *count += inc;
1533 s3_counter_reset_func(void *stream)
1535 gint64 *count = (gint64*) stream;
1537 if (count) *count = 0;
1541 /* a CURLOPT_READFUNCTION to read data from a file. */
1543 s3_file_read_func(void *ptr, size_t size, size_t nmemb, void * stream)
1545 HANDLE *hFile = (HANDLE *) stream;
1548 ReadFile(hFile, ptr, (DWORD) size*nmemb, &bytes_read, NULL);
/* s3_size_func for a file: the total file size in bytes.
 * NOTE(review): the HANDLE* itself is passed where GetFileSize expects a
 * HANDLE -- verify against the Windows build. */
s3_file_size_func(void *stream)
HANDLE *hFile = (HANDLE *) stream;
DWORD size = GetFileSize(hFile, NULL);
/* INVALID_FILE_SIZE signals failure from GetFileSize */
if (INVALID_FILE_SIZE == size) {
1566 s3_file_md5_func(void *stream)
1568 #define S3_MD5_BUF_SIZE (10*1024)
1569 HANDLE *hFile = (HANDLE *) stream;
1570 guint8 buf[S3_MD5_BUF_SIZE];
1573 GByteArray *ret = NULL;
1575 g_assert(INVALID_SET_FILE_POINTER != SetFilePointer(hFile, 0, NULL, FILE_BEGIN));
1577 ret = g_byte_array_sized_new(S3_MD5_HASH_BYTE_LEN);
1578 g_byte_array_set_size(ret, S3_MD5_HASH_BYTE_LEN);
1581 while (ReadFile(hFile, buf, S3_MD5_BUF_SIZE, &bytes_read, NULL)) {
1582 MD5_Update(&md5_ctx, buf, bytes_read);
1584 MD5_Final(ret->data, &md5_ctx);
1586 g_assert(INVALID_SET_FILE_POINTER != SetFilePointer(hFile, 0, NULL, FILE_BEGIN));
1588 #undef S3_MD5_BUF_SIZE
1592 s3_file_reset_func(void *stream)
1594 g_assert(INVALID_SET_FILE_POINTER != SetFilePointer(hFile, 0, NULL, FILE_BEGIN));
1597 /* a CURLOPT_WRITEFUNCTION to write data to a file. */
1599 s3_file_write_func(void *ptr, size_t size, size_t nmemb, void *stream)
1601 HANDLE *hFile = (HANDLE *) stream;
1602 DWORD bytes_written;
1604 WriteFile(hFile, ptr, (DWORD) size*nmemb, &bytes_written, NULL);
1605 return bytes_written;
/* CURLOPT_DEBUGFUNCTION: forward curl's verbose output to g_debug, one
 * line at a time, each tagged with the direction of the traffic.
 * Large (>3000 byte) or non-printable data payloads are suppressed. */
curl_debug_message(CURL *curl G_GNUC_UNUSED,
void *unused G_GNUC_UNUSED)
char **lines, **line;
/* choose a prefix per message type; unprintable/huge data is skipped */
case CURLINFO_HEADER_IN:
lineprefix="Hdr In: ";
case CURLINFO_HEADER_OUT:
lineprefix="Hdr Out: ";
case CURLINFO_DATA_IN:
if (len > 3000) return 0;
for (i=0;i<len;i++) {
if (!g_ascii_isprint(s[i])) {
lineprefix="Data In: ";
case CURLINFO_DATA_OUT:
if (len > 3000) return 0;
for (i=0;i<len;i++) {
if (!g_ascii_isprint(s[i])) {
lineprefix="Data Out: ";
/* ignore other message classes -- nobody wants to see that in the
 * debug logs */
/* split the input into lines */
message = g_strndup(s, (gsize) len);
lines = g_strsplit(message, "\n", -1);
for (line = lines; *line; line++) {
if (**line == '\0') continue; /* skip blank lines */
g_debug("%s%s", lineprefix, *line);
/* Issue one HTTP request against the service and interpret the response
 * into hdl->last_* (code, message, body). Authentication headers are
 * rebuilt and the request re-issued with exponential backoff while the
 * result_handling table maps the outcome to S3_RESULT_RETRY, up to
 * EXPONENTIAL_BACKOFF_MAX_RETRIES attempts.
 * (NOTE(review): several lines of this routine are elided in this
 * excerpt; comments describe only what is visible.) */
perform_request(S3Handle *hdl,
const char *subresource,
const char *content_type,
const char *project_id,
s3_read_func read_func,
s3_reset_func read_reset_func,
s3_size_func size_func,
s3_md5_func md5_func,
s3_write_func write_func,
s3_reset_func write_reset_func,
gpointer write_data,
s3_progress_func progress_func,
gpointer progress_data,
const result_handling_t *result_handling)
s3_result_t result = S3_RESULT_FAIL; /* assume the worst.. */
CURLcode curl_code = CURLE_OK;
char curl_error_buffer[CURL_ERROR_SIZE] = "";
struct curl_slist *headers = NULL;
/* Set S3Internal Data: response buffer capped at MAX_ERROR_RESPONSE_LEN */
S3InternalData int_writedata = {{NULL, 0, 0, MAX_ERROR_RESPONSE_LEN}, NULL, NULL, NULL, FALSE, FALSE, NULL, hdl};
gboolean should_retry;
gulong backoff = EXPONENTIAL_BACKOFF_START_USEC;
/* corresponds to PUT, HEAD, GET, and POST */
int curlopt_upload = 0, curlopt_nobody = 0, curlopt_httpget = 0, curlopt_post = 0;
/* any other verb is sent via CURLOPT_CUSTOMREQUEST */
const char *curlopt_customrequest = NULL;
/* for MD5 calculation */
GByteArray *md5_hash = NULL;
gchar *md5_hash_hex = NULL, *md5_hash_b64 = NULL;
size_t request_body_size = 0;
g_assert(hdl != NULL && hdl->curl != NULL);
/* refresh expired OAuth2 / Swift v2 credentials before the request,
 * guarding against recursion while the token itself is being fetched */
if (hdl->s3_api == S3_API_OAUTH2 && !hdl->getting_oauth2_access_token &&
(!hdl->access_token || hdl->expires < time(NULL))) {
result = oauth2_get_access_token(hdl);
g_debug("oauth2_get_access_token returned %d", result);
} else if (hdl->s3_api == S3_API_SWIFT_2 && !hdl->getting_swift_2_token &&
(!hdl->x_auth_token || hdl->expires < time(NULL))) {
result = get_openstack_swift_api_v2_setting(hdl);
g_debug("get_openstack_swift_api_v2_setting returned %d", result);
url = build_url(hdl, bucket, key, subresource, query);
if (!url) goto cleanup;
/* libcurl may behave strangely if these are not set correctly */
if (!strncmp(verb, "PUT", 4)) {
} else if (!strncmp(verb, "GET", 4)) {
curlopt_httpget = 1;
} else if (!strncmp(verb, "POST", 5)) {
} else if (!strncmp(verb, "HEAD", 5)) {
curlopt_customrequest = verb;
request_body_size = size_func(read_data);
md5_hash = md5_func(read_data);
/* both encodings are needed: b64 for the request header, hex for
 * verifying the response */
md5_hash_b64 = s3_base64_encode(md5_hash);
md5_hash_hex = s3_hex_encode(md5_hash);
g_byte_array_free(md5_hash, TRUE);
/* Curl will use fread() otherwise */
read_func = s3_empty_read_func;
int_writedata.write_func = write_func;
int_writedata.reset_func = write_reset_func;
int_writedata.write_data = write_data;
/* Curl will use fwrite() otherwise */
int_writedata.write_func = s3_counter_write_func;
int_writedata.reset_func = s3_counter_reset_func;
int_writedata.write_data = NULL;
/* retry loop: rebuild auth headers and rewind the body each attempt */
curl_slist_free_all(headers);
curl_error_buffer[0] = '\0';
if (read_reset_func) {
read_reset_func(read_data);
/* calls write_reset_func */
s3_internal_reset_func(&int_writedata);
/* set up the request */
headers = authenticate_request(hdl, verb, bucket, key, subresource,
md5_hash_b64, content_type, request_body_size, project_id);
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_CAINFO, hdl->ca_info)))
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_VERBOSE, hdl->verbose)))
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_DEBUGFUNCTION,
curl_debug_message)))
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_ERRORBUFFER,
curl_error_buffer)))
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_NOPROGRESS, 1)))
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_FOLLOWLOCATION, 1)))
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_URL, url)))
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HTTPHEADER,
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_WRITEFUNCTION, s3_internal_write_func)))
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_WRITEDATA, &int_writedata)))
/* Note: we always have to set this apparently, for consistent "end of header" detection */
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HEADERFUNCTION, s3_internal_header_func)))
/* Note: if set, CURLOPT_HEADERDATA seems to also be used for CURLOPT_WRITEDATA ? */
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HEADERDATA, &int_writedata)))
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSFUNCTION, progress_func)))
if (progress_func) {
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_NOPROGRESS,0)))
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSDATA, progress_data)))
/* CURLOPT_INFILESIZE_LARGE added in 7.11.0 */
#if LIBCURL_VERSION_NUM >= 0x070b00
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_INFILESIZE_LARGE, (curl_off_t)request_body_size)))
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_INFILESIZE, (long)request_body_size)))
/* CURLOPT_POSTFIELDSIZE_LARGE added in 7.11.1 */
#if LIBCURL_VERSION_NUM >= 0x070b01
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t)request_body_size)))
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_POSTFIELDSIZE, (long)request_body_size)))
/* CURLOPT_MAX_{RECV,SEND}_SPEED_LARGE added in 7.15.5 */
#if LIBCURL_VERSION_NUM >= 0x070f05
if (s3_curl_throttling_compat()) {
if (hdl->max_send_speed)
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_MAX_SEND_SPEED_LARGE, (curl_off_t)hdl->max_send_speed)))
if (hdl->max_recv_speed)
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t)hdl->max_recv_speed)))
/* all four verb flags are always (re)set for a clean handle state */
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HTTPGET, curlopt_httpget)))
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_UPLOAD, curlopt_upload)))
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_POST, curlopt_post)))
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_NOBODY, curlopt_nobody)))
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_CUSTOMREQUEST,
curlopt_customrequest)))
if (curlopt_upload || curlopt_post) {
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READFUNCTION, read_func)))
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA, read_data)))
/* Clear request_body options. */
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READFUNCTION,
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA,
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROXY,
/* fresh connections are forced unless connection reuse is enabled */
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_FRESH_CONNECT,
(long)(hdl->reuse_connection? 0 : 1)))) {
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_FORBID_REUSE,
(long)(hdl->reuse_connection? 0 : 1)))) {
/* Perform the request */
curl_code = curl_easy_perform(hdl->curl);
/* interpret the response into hdl->last* */
curl_error: /* (label for short-circuiting the curl_easy_perform call) */
should_retry = interpret_response(hdl, curl_code, curl_error_buffer,
int_writedata.resp_buf.buffer, int_writedata.resp_buf.buffer_pos, int_writedata.etag, md5_hash_hex);
/* a 401 under OAuth2 means the token expired mid-flight: refresh it and
 * retry */
if (hdl->s3_api == S3_API_OAUTH2 &&
hdl->last_response_code == 401 &&
hdl->last_s3_error_code == S3_ERROR_AuthenticationRequired) {
should_retry = oauth2_get_access_token(hdl);
/* and, unless we know we need to retry, see what we're to do now */
if (!should_retry) {
result = lookup_result(result_handling, hdl->last_response_code,
hdl->last_s3_error_code, hdl->last_curl_code);
/* break out of the while(1) unless we're retrying */
if (result != S3_RESULT_RETRY)
if (retries >= EXPONENTIAL_BACKOFF_MAX_RETRIES) {
/* we're out of retries, so annotate hdl->last_message appropriately
 * and bail out */
char *m = g_strdup_printf("Too many retries; last message was '%s'", hdl->last_message);
if (hdl->last_message) g_free(hdl->last_message);
hdl->last_message = m;
result = S3_RESULT_FAIL;
backoff *= EXPONENTIAL_BACKOFF_BASE;
if (result != S3_RESULT_OK) {
g_debug(_("%s %s failed with %d/%s"), verb, url,
hdl->last_response_code,
s3_error_name_from_code(hdl->last_s3_error_code));
if (headers) curl_slist_free_all(headers);
g_free(md5_hash_b64);
g_free(md5_hash_hex);
/* we don't deallocate the response body -- we keep it for later */
hdl->last_response_body = int_writedata.resp_buf.buffer;
hdl->last_response_body_size = int_writedata.resp_buf.buffer_pos;
hdl->last_num_retries = retries;
/* Dispatching CURLOPT_WRITEFUNCTION: tees the response body into the
 * bounded internal buffer (for error reporting) and, when a user
 * write_func was supplied, into that as well. Body bytes arriving
 * before the headers are complete are ignored. */
s3_internal_write_func(void *ptr, size_t size, size_t nmemb, void * stream)
S3InternalData *data = (S3InternalData *) stream;
if (!data->headers_done)
/* call write on internal buffer (if not full) */
if (data->int_write_done) {
bytes_saved = s3_buffer_write_func(ptr, size, nmemb, &data->resp_buf);
/* internal buffer refused the data (hit its cap): stop feeding it */
data->int_write_done = TRUE;
/* call write on user buffer */
if (data->write_func) {
return data->write_func(ptr, size, nmemb, data->write_data);
/* Reset the internal tee state before a retried request: rewind the
 * internal response buffer, clear the header/body progress flags, and
 * propagate the reset to the user's write stream if one was given. */
s3_internal_reset_func(void * stream)
S3InternalData *data = (S3InternalData *) stream;
s3_buffer_reset_func(&data->resp_buf);
data->headers_done = FALSE;
data->int_write_done = FALSE;
if (data->reset_func) {
data->reset_func(data->write_data);
/* CURLOPT_HEADERFUNCTION: scan each response header line for values the
 * handle cares about (ETag, X-Auth-Token, X-Storage-Url, Content-Type,
 * Date) and flag when the blank line ending the header block arrives. */
s3_internal_header_func(void *ptr, size_t size, size_t nmemb, void * stream)
static const char *final_header = "\r\n";
time_t remote_time_in_sec,local_time;
regmatch_t pmatch[2];
S3InternalData *data = (S3InternalData *) stream;
header = g_strndup((gchar *) ptr, (gsize) size*nmemb);
/* strip a trailing CRLF, one character at a time.
 * NOTE(review): if curl ever delivered a zero-length header line,
 * strlen(header)-1 would underflow -- confirm curl always passes at
 * least "\r\n" here. */
if (header[strlen(header)-1] == '\n')
header[strlen(header)-1] = '\0';
if (header[strlen(header)-1] == '\r')
header[strlen(header)-1] = '\0';
if (!s3_regexec_wrap(&etag_regex, header, 2, pmatch, 0))
data->etag = find_regex_substring(header, pmatch[1]);
if (!s3_regexec_wrap(&x_auth_token_regex, header, 2, pmatch, 0))
data->hdl->x_auth_token = find_regex_substring(header, pmatch[1]);
if (!s3_regexec_wrap(&x_storage_url_regex, header, 2, pmatch, 0))
data->hdl->x_storage_url = find_regex_substring(header, pmatch[1]);
if (!s3_regexec_wrap(&content_type_regex, header, 2, pmatch, 0))
data->hdl->content_type = find_regex_substring(header, pmatch[1]);
/* the header block ends at an empty line (in any of its spellings) */
if (strlen(header) == 0)
data->headers_done = TRUE;
if (g_str_equal(final_header, header))
data->headers_done = TRUE;
if (g_str_equal("\n", header))
data->headers_done = TRUE;
/* If date header is found */
if (!s3_regexec_wrap(&date_sync_regex, header, 2, pmatch, 0)){
char *date = find_regex_substring(header, pmatch[1]);
/* Remote time is always in GMT: RFC 2616 */
/* both curl_getdate and time operate in UTC, so no timezone math is necessary */
if ( (remote_time_in_sec = curl_getdate(date, NULL)) < 0 ){
g_debug("Error: Conversion of remote time to seconds failed.");
data->hdl->time_offset_with_s3 = 0;
local_time = time(NULL);
/* record the clock skew so request signatures can compensate */
data->hdl->time_offset_with_s3 = remote_time_in_sec - local_time;
if (data->hdl->verbose)
g_debug("Time Offset (remote - local) :%ld",(long)data->hdl->time_offset_with_s3);
/* Compile every regular expression the module uses, in one of two
 * flavors: POSIX regex(3) when available, otherwise GLib's PCRE
 * wrapper. Returns FALSE via g_error on a compilation failure. */
compile_regexes(void)
/* using POSIX regular expressions */
struct {const char * str; int flags; regex_t *regex;} regexes[] = {
{"<Code>[[:space:]]*([^<]*)[[:space:]]*</Code>", REG_EXTENDED | REG_ICASE, &error_name_regex},
{"^ETag:[[:space:]]*\"([^\"]+)\"[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &etag_regex},
{"^X-Auth-Token:[[:space:]]*([^ ]+)[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &x_auth_token_regex},
{"^X-Storage-Url:[[:space:]]*([^ ]+)[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &x_storage_url_regex},
{"^Content-Type:[[:space:]]*([^ ;]+).*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &content_type_regex},
{"<Message>[[:space:]]*([^<]*)[[:space:]]*</Message>", REG_EXTENDED | REG_ICASE, &message_regex},
{"^[a-z0-9](-*[a-z0-9]){2,62}$", REG_EXTENDED | REG_NOSUB, &subdomain_regex},
{"(/>)|(>([^<]*)</LocationConstraint>)", REG_EXTENDED | REG_ICASE, &location_con_regex},
{"^Date:(.*)\r",REG_EXTENDED | REG_ICASE | REG_NEWLINE, &date_sync_regex},
{"\"access_token\" : \"([^\"]*)\",", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &access_token_regex},
{"\"expires_in\" : (.*)", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &expires_in_regex},
{"\"details\": \"([^\"]*)\",", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &details_regex},
{"\"code\": (.*),", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &code_regex},
char regmessage[1024];
for (i = 0; regexes[i].str; i++) {
reg_result = regcomp(regexes[i].regex, regexes[i].str, regexes[i].flags);
if (reg_result != 0) {
regerror(reg_result, regexes[i].regex, regmessage, sizeof(regmessage));
g_error(_("Regex error: %s"), regmessage);
#else /* ! HAVE_REGEX_H */
/* using PCRE via GLib */
struct {const char * str; int flags; regex_t *regex;} regexes[] = {
{"<Code>\\s*([^<]*)\\s*</Code>",
G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
{"^ETag:\\s*\"([^\"]+)\"\\s*$",
G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
{"^X-Auth-Token:\\s*([^ ]+)\\s*$",
G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
&x_auth_token_regex},
{"^X-Storage-Url:\\s*([^ ]+)\\s*$",
G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
&x_storage_url_regex},
/* NOTE(review): this PCRE pattern uses [^ ]+ where the POSIX branch
 * uses [^ ;]+, so a "; charset=..." suffix is NOT stripped here --
 * verify the two branches are meant to behave identically. */
{"^Content-Type:\\s*([^ ]+)\\s*$",
G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
&content_type_regex},
{"<Message>\\s*([^<]*)\\s*</Message>",
G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
{"^[a-z0-9]((-*[a-z0-9])|(\\.[a-z0-9])){2,62}$",
G_REGEX_OPTIMIZE | G_REGEX_NO_AUTO_CAPTURE,
{"(/>)|(>([^<]*)</LocationConstraint>)",
&location_con_regex},
G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
{"\"access_token\" : \"([^\"]*)\"",
G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
&access_token_regex},
/* NOTE(review): "expires_n" looks like a typo for "expires_in" (the
 * POSIX branch above matches "expires_in") -- token-expiry parsing
 * would silently fail under the PCRE build. Verify and fix. */
{"\"expires_n\" : (.*)",
G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
{"\"details\" : \"([^\"]*)\"",
G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
for (i = 0; regexes[i].str; i++) {
*(regexes[i].regex) = g_regex_new(regexes[i].str, regexes[i].flags, 0, &err);
g_error(_("Regex error: %s"), err->message);
2161 * Public function implementations
/* One-time, thread-safe module initialization: compiles the regex
 * tables under a mutex. curl_global_init is done elsewhere (see note
 * below). Returns the cached result on subsequent calls. */
#if (GLIB_MAJOR_VERSION > 2 || (GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION >= 31))
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wmissing-field-initializers"
gboolean s3_init(void)
static GStaticMutex mutex = G_STATIC_MUTEX_INIT;
static gboolean init = FALSE, ret;
/* n.b. curl_global_init is called in common-src/glib-util.c:glib_init() */
g_static_mutex_lock (&mutex);
ret = compile_regexes();
g_static_mutex_unlock(&mutex);
#if (GLIB_MAJOR_VERSION > 2 || (GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION >= 31))
# pragma GCC diagnostic pop
2188 s3_curl_location_compat(void)
2190 curl_version_info_data *info;
2192 info = curl_version_info(CURLVERSION_NOW);
2193 return info->version_num > 0x070a02;
/* TRUE if BUCKET matches subdomain_regex, i.e. is a valid DNS label and
 * therefore usable with location/subdomain-style addressing. */
s3_bucket_location_compat(const char *bucket)
return !s3_regexec_wrap(&subdomain_regex, bucket, 0, NULL, 0);
/* Swift v1 authentication: issue a bare GET (credentials go in headers
 * added by authenticate_request); the X-Storage-Url and X-Auth-Token
 * response headers are captured by s3_internal_header_func.
 * Returns TRUE on success. */
get_openstack_swift_api_v1_setting(
s3_result_t result = S3_RESULT_FAIL;
static result_handling_t result_handling[] = {
{ 200, 0, 0, S3_RESULT_OK },
RESULT_HANDLING_ALWAYS_RETRY,
{ 0, 0, 0, /* default: */ S3_RESULT_FAIL }
result = perform_request(hdl, "GET", NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, result_handling);
return result == S3_RESULT_OK;
/* Swift (Keystone) v2 authentication: POST an XML <auth> document with
 * either username/password or accessKey/secretKey credentials plus the
 * tenant, and let the response parser store the token and storage URL.
 * Returns TRUE on success. */
get_openstack_swift_api_v2_setting(
s3_result_t result = S3_RESULT_FAIL;
static result_handling_t result_handling[] = {
{ 200, 0, 0, S3_RESULT_OK },
RESULT_HANDLING_ALWAYS_RETRY,
{ 0, 0, 0, /* default: */ S3_RESULT_FAIL }
CurlBuffer buf = {NULL, 0, 0, 0};
GString *body = g_string_new("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
/* choose the identity namespace by credential type */
if (hdl->username && hdl->password) {
g_string_append_printf(body, "<auth xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://docs.openstack.org/identity/api/v2.0\"");
g_string_append_printf(body, "<auth xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://www.hp.com/identity/api/ext/HP-IDM/v1.0\"");
/* NOTE(review): tenant/credential strings are interpolated into the XML
 * without escaping; characters like " or < in them would corrupt the
 * request. Verify inputs are constrained or add escaping. */
if (hdl->tenant_id) {
g_string_append_printf(body, " tenantId=\"%s\"", hdl->tenant_id);
if (hdl->tenant_name) {
g_string_append_printf(body, " tenantName=\"%s\"", hdl->tenant_name);
g_string_append(body, ">");
if (hdl->username && hdl->password) {
g_string_append_printf(body, "<passwordCredentials username=\"%s\" password=\"%s\"/>", hdl->username, hdl->password);
g_string_append_printf(body, "<apiAccessKeyCredentials accessKey=\"%s\" secretKey=\"%s\"/>", hdl->access_key, hdl->secret_key);
g_string_append(body, "</auth>");
/* hand the assembled body to the buffer read callbacks */
buf.buffer = g_string_free(body, FALSE);
buf.buffer_len = strlen(buf.buffer);
/* guard against perform_request recursing into this function */
hdl->getting_swift_2_token = 1;
g_free(hdl->x_storage_url);
hdl->x_storage_url = NULL;
result = perform_request(hdl, "POST", NULL, NULL, NULL, NULL,
"application/xml", NULL,
S3_BUFFER_READ_FUNCS, &buf,
NULL, NULL, result_handling);
hdl->getting_swift_2_token = 0;
/* NOTE(review): buf.buffer does not appear to be freed after the
 * request -- check for a memory leak. */
return result == S3_RESULT_OK;
/* Construct a new S3Handle for the given API flavor. Credentials are
 * validated per-API (asserts), all strings are copied into the handle,
 * and a curl easy handle is created. Returns NULL on error; the caller
 * owns the handle and must release it with s3_free(). */
s3_open(const char *access_key,
const char *secret_key,
const char *swift_account_id,
const char *swift_access_key,
const char *service_path,
const gboolean use_subdomain,
const char *user_token,
const char *bucket_location,
const char *storage_class,
const char *ca_info,
const char *server_side_encryption,
const S3_api s3_api,
const char *username,
const char *password,
const char *tenant_id,
const char *tenant_name,
const char *client_id,
const char *client_secret,
const char *refresh_token,
const gboolean reuse_connection)
hdl = g_new0(S3Handle, 1);
/* NOTE(review): g_new0 aborts on allocation failure, so this check is
 * effectively dead -- harmless, but kept for symmetry. */
if (!hdl) goto error;
hdl->verbose = TRUE;
hdl->use_ssl = s3_curl_supports_ssl();
hdl->reuse_connection = reuse_connection;
/* each API flavor requires (and copies) its own credential set */
if (s3_api == S3_API_S3) {
g_assert(access_key);
hdl->access_key = g_strdup(access_key);
g_assert(secret_key);
hdl->secret_key = g_strdup(secret_key);
} else if (s3_api == S3_API_SWIFT_1) {
g_assert(swift_account_id);
hdl->swift_account_id = g_strdup(swift_account_id);
g_assert(swift_access_key);
hdl->swift_access_key = g_strdup(swift_access_key);
} else if (s3_api == S3_API_SWIFT_2) {
g_assert((username && password) || (access_key && secret_key));
hdl->username = g_strdup(username);
hdl->password = g_strdup(password);
hdl->access_key = g_strdup(access_key);
hdl->secret_key = g_strdup(secret_key);
g_assert(tenant_id || tenant_name);
hdl->tenant_id = g_strdup(tenant_id);
hdl->tenant_name = g_strdup(tenant_name);
} else if (s3_api == S3_API_OAUTH2) {
hdl->client_id = g_strdup(client_id);
hdl->client_secret = g_strdup(client_secret);
hdl->refresh_token = g_strdup(refresh_token);
hdl->user_token = g_strdup(user_token);
hdl->bucket_location = g_strdup(bucket_location);
hdl->storage_class = g_strdup(storage_class);
hdl->server_side_encryption = g_strdup(server_side_encryption);
hdl->proxy = g_strdup(proxy);
hdl->ca_info = g_strdup(ca_info);
/* default host; hostnames are case-insensitive so normalize to lower */
if (!is_non_empty_string(host))
host = "s3.amazonaws.com";
hdl->host = g_ascii_strdown(host, -1);
/* a non-default bucket location on the AWS endpoint forces
 * subdomain-style addressing */
hdl->use_subdomain = use_subdomain ||
(strcmp(hdl->host, "s3.amazonaws.com") == 0 &&
is_non_empty_string(hdl->bucket_location));
hdl->s3_api = s3_api;
/* normalize service_path: NULL for empty or "/", ensure exactly one
 * leading slash otherwise */
if (strlen(service_path) == 0 ||
(strlen(service_path) == 1 && service_path[0] == '/')) {
hdl->service_path = NULL;
} else if (service_path[0] != '/') {
hdl->service_path = g_strdup_printf("/%s", service_path);
hdl->service_path = g_strdup(service_path);
if (hdl->service_path) {
/* remove trailing / */
size_t len = strlen(hdl->service_path) - 1;
if (hdl->service_path[len] == '/')
hdl->service_path[len] = '\0';
hdl->service_path = NULL;
hdl->curl = curl_easy_init();
if (!hdl->curl) goto error;
/* (continuation of an s3_open follow-up routine whose opening lines are
 * not visible in this excerpt)
 * For the Swift back-ends, fetch X-Storage-Url / X-Auth-Token now so
 * the handle is ready for use; other APIs need no extra step. */
gboolean ret = TRUE;
/* get the X-Storage-Url and X-Auth-Token */
if (hdl->s3_api == S3_API_SWIFT_1) {
ret = get_openstack_swift_api_v1_setting(hdl);
} else if (hdl->s3_api == S3_API_SWIFT_2) {
ret = get_openstack_swift_api_v2_setting(hdl);
2399 s3_free(S3Handle *hdl)
2404 g_free(hdl->access_key);
2405 g_free(hdl->secret_key);
2406 g_free(hdl->swift_account_id);
2407 g_free(hdl->swift_access_key);
2408 g_free(hdl->content_type);
2409 g_free(hdl->user_token);
2410 g_free(hdl->ca_info);
2412 g_free(hdl->username);
2413 g_free(hdl->password);
2414 g_free(hdl->tenant_id);
2415 g_free(hdl->tenant_name);
2416 g_free(hdl->client_id);
2417 g_free(hdl->client_secret);
2418 g_free(hdl->refresh_token);
2419 g_free(hdl->access_token);
2420 if (hdl->user_token) g_free(hdl->user_token);
2421 if (hdl->bucket_location) g_free(hdl->bucket_location);
2422 if (hdl->storage_class) g_free(hdl->storage_class);
2423 if (hdl->server_side_encryption) g_free(hdl->server_side_encryption);
2424 if (hdl->host) g_free(hdl->host);
2425 if (hdl->service_path) g_free(hdl->service_path);
2426 if (hdl->curl) curl_easy_cleanup(hdl->curl);
/* Clear the per-request "last result" state on the handle (message,
 * codes, response body, content type) ahead of a new request. */
s3_reset(S3Handle *hdl)
/* We don't call curl_easy_reset here, because doing that in curl
 * < 7.16 blanks the default CA certificate path, and there's no way
 * to get it back. */
if (hdl->last_message) {
g_free(hdl->last_message);
hdl->last_message = NULL;
hdl->last_response_code = 0;
hdl->last_curl_code = 0;
hdl->last_s3_error_code = 0;
hdl->last_num_retries = 0;
if (hdl->last_response_body) {
g_free(hdl->last_response_body);
hdl->last_response_body = NULL;
if (hdl->content_type) {
g_free(hdl->content_type);
hdl->content_type = NULL;
hdl->last_response_body_size = 0;
/* Report the outcome of the handle's most recent request through the
 * supplied out-parameters; any of them may be NULL to skip that field.
 * The returned message/name strings remain owned by the handle. */
s3_error(S3Handle *hdl,
const char **message,
guint *response_code,
s3_error_code_t *s3_error_code,
const char **s3_error_name,
CURLcode *curl_code,
if (message) *message = hdl->last_message;
if (response_code) *response_code = hdl->last_response_code;
if (s3_error_code) *s3_error_code = hdl->last_s3_error_code;
if (s3_error_name) *s3_error_name = s3_error_name_from_code(hdl->last_s3_error_code);
if (curl_code) *curl_code = hdl->last_curl_code;
if (num_retries) *num_retries = hdl->last_num_retries;
/* no hdl? return something coherent, anyway */
if (message) *message = "NULL S3Handle";
if (response_code) *response_code = 0;
if (s3_error_code) *s3_error_code = 0;
if (s3_error_name) *s3_error_name = NULL;
if (curl_code) *curl_code = 0;
if (num_retries) *num_retries = 0;
/* Enable or disable verbose (debug) logging for this handle. */
s3_verbose(S3Handle *hdl, gboolean verbose)
hdl->verbose = verbose;
/* Cap the upload rate (bytes/sec) for this handle. Fails (returns
 * early) if the linked libcurl lacks throttling support. */
s3_set_max_send_speed(S3Handle *hdl, guint64 max_send_speed)
if (!s3_curl_throttling_compat())
hdl->max_send_speed = max_send_speed;
/* Cap the download rate (bytes/sec) for this handle. Fails (returns
 * early) if the linked libcurl lacks throttling support. */
s3_set_max_recv_speed(S3Handle *hdl, guint64 max_recv_speed)
if (!s3_curl_throttling_compat())
hdl->max_recv_speed = max_recv_speed;
2518 s3_use_ssl(S3Handle *hdl, gboolean use_ssl)
2520 gboolean ret = TRUE;
2521 if (use_ssl & !s3_curl_supports_ssl()) {
2524 hdl->use_ssl = use_ssl;
/* Build a human-readable, newly allocated description of the handle's
 * last error: message plus optional S3 error name, CURLcode, HTTP
 * status, and retry count. Caller frees the result with g_free(). */
s3_strerror(S3Handle *hdl)
const char *message;
guint response_code;
const char *s3_error_name;
/* each optional fragment is formatted into its own small buffer and
 * left empty when not applicable */
char s3_info[256] = "";
char response_info[16] = "";
char curl_info[32] = "";
char retries_info[32] = "";
s3_error(hdl, &message, &response_code, NULL, &s3_error_name, &curl_code, &num_retries);
message = "Unknown S3 error";
g_snprintf(s3_info, sizeof(s3_info), " (%s)", s3_error_name);
g_snprintf(response_info, sizeof(response_info), " (HTTP %d)", response_code);
g_snprintf(curl_info, sizeof(curl_info), " (CURLcode %d)", curl_code);
g_snprintf(retries_info, sizeof(retries_info), " (after %d retries)", num_retries);
return g_strdup_printf("%s%s%s%s%s", message, s3_info, curl_info, response_info, retries_info);
/* Perform an upload. When this function returns, KEY and
 * BUFFER remain the responsibility of the caller.
 * @param hdl: the S3 handle
 * @param bucket: the bucket to which the upload should be made
 * @param key: the key to which the upload should be made
 * @param buffer: the data to be uploaded
 * @param buffer_len: the length of the data to upload
 * @returns: false if an error occurred
/* Upload an object with PUT; the body is supplied through the
 * read/reset/size/md5 callback quartet. 200 and 201 both count as
 * success; transient failures are retried. Returns TRUE on success. */
s3_upload(S3Handle *hdl,
s3_read_func read_func,
s3_reset_func reset_func,
s3_size_func size_func,
s3_md5_func md5_func,
s3_progress_func progress_func,
gpointer progress_data)
s3_result_t result = S3_RESULT_FAIL;
static result_handling_t result_handling[] = {
{ 200, 0, 0, S3_RESULT_OK },
{ 201, 0, 0, S3_RESULT_OK },
RESULT_HANDLING_ALWAYS_RETRY,
{ 0, 0, 0, /* default: */ S3_RESULT_FAIL }
g_assert(hdl != NULL);
result = perform_request(hdl, "PUT", bucket, key, NULL, NULL, NULL, NULL,
read_func, reset_func, size_func, md5_func, read_data,
NULL, NULL, NULL, progress_func, progress_data,
return result == S3_RESULT_OK;
2600 /* Private structure for our "thunk", which tracks where the user is in the list
/* Shared state for the GMarkup SAX callbacks below; one instance lives on
 * the stack of s3_list_keys for the duration of a listing.
 * NOTE(review): additional members (text, next_marker, size, want_text —
 * all referenced by the parser callbacks) are elided in this extract. */
2602 struct list_keys_thunk {
2603 GSList *filename_list; /* all pending filenames */
2605 gboolean in_contents; /* look for "key" entities in here */
2606 gboolean in_common_prefixes; /* look for "prefix" entities in here */
/* set when the response says the listing was cut short and another
 * fetch (using next_marker) is required */
2608 gboolean is_truncated;
2618 /* Functions for a SAX parser to parse the XML from Amazon */
2621 list_start_element(GMarkupParseContext *context G_GNUC_UNUSED,
2622 const gchar *element_name,
2623 const gchar **attribute_names G_GNUC_UNUSED,
2624 const gchar **attribute_values G_GNUC_UNUSED,
2626 GError **error G_GNUC_UNUSED)
2628 struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;
2630 thunk->want_text = 0;
2631 if (g_ascii_strcasecmp(element_name, "contents") == 0 ||
2632 g_ascii_strcasecmp(element_name, "object") == 0) {
2633 thunk->in_contents = 1;
2634 } else if (g_ascii_strcasecmp(element_name, "commonprefixes") == 0) {
2635 thunk->in_common_prefixes = 1;
2636 } else if (g_ascii_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
2637 thunk->want_text = 1;
2638 } else if ((g_ascii_strcasecmp(element_name, "key") == 0 ||
2639 g_ascii_strcasecmp(element_name, "name") == 0) &&
2640 thunk->in_contents) {
2641 thunk->want_text = 1;
2642 } else if ((g_ascii_strcasecmp(element_name, "size") == 0 ||
2643 g_ascii_strcasecmp(element_name, "bytes") == 0) &&
2644 thunk->in_contents) {
2645 thunk->want_text = 1;
2646 } else if (g_ascii_strcasecmp(element_name, "istruncated")) {
2647 thunk->want_text = 1;
2648 } else if (g_ascii_strcasecmp(element_name, "nextmarker")) {
2649 thunk->want_text = 1;
/* SAX end-element callback: consume the character data gathered by
 * list_text (thunk->text) according to which element just closed.
 * NOTE(review): the lines that null out thunk->text after ownership is
 * transferred to filename_list/next_marker are elided in this extract;
 * closing braces are likewise elided. */
2654 list_end_element(GMarkupParseContext *context G_GNUC_UNUSED,
2655 const gchar *element_name,
2657 GError **error G_GNUC_UNUSED)
2659 struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;
/* NOTE(review): start handler also opens on "object", but only "contents"
 * is closed here — presumably harmless for Swift listings; verify. */
2661 if (g_ascii_strcasecmp(element_name, "contents") == 0) {
2662 thunk->in_contents = 0;
2663 } else if (g_ascii_strcasecmp(element_name, "commonprefixes") == 0) {
2664 thunk->in_common_prefixes = 0;
2665 } else if ((g_ascii_strcasecmp(element_name, "key") == 0 ||
2666 g_ascii_strcasecmp(element_name, "name") == 0) &&
2667 thunk->in_contents) {
/* Ownership of thunk->text moves into the filename list here. */
2668 thunk->filename_list = g_slist_prepend(thunk->filename_list, thunk->text);
/* Remember the last key seen so a truncated listing can resume from it. */
2669 if (thunk->is_truncated) {
2670 if (thunk->next_marker) g_free(thunk->next_marker);
2671 thunk->next_marker = g_strdup(thunk->text);
/* Accumulate per-object sizes into the running total. */
2674 } else if ((g_ascii_strcasecmp(element_name, "size") == 0 ||
2675 g_ascii_strcasecmp(element_name, "bytes") == 0) &&
2676 thunk->in_contents) {
2677 thunk->size += g_ascii_strtoull (thunk->text, NULL, 10);
2679 } else if (g_ascii_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
2680 thunk->filename_list = g_slist_prepend(thunk->filename_list, thunk->text);
/* Anything other than the literal "false" counts as truncated. */
2682 } else if (g_ascii_strcasecmp(element_name, "istruncated") == 0) {
2683 if (thunk->text && g_ascii_strncasecmp(thunk->text, "false", 5) != 0)
2684 thunk->is_truncated = TRUE;
2685 } else if (g_ascii_strcasecmp(element_name, "nextmarker") == 0) {
2686 if (thunk->next_marker) g_free(thunk->next_marker);
/* Ownership of thunk->text moves into next_marker (no copy). */
2687 thunk->next_marker = thunk->text;
2693 list_text(GMarkupParseContext *context G_GNUC_UNUSED,
2697 GError **error G_GNUC_UNUSED)
2699 struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;
2701 if (thunk->want_text) {
2702 if (thunk->text) g_free(thunk->text);
2703 thunk->text = g_strndup(text, text_len);
2707 /* Perform a fetch from S3; several fetches may be involved in a
2708 * single listing operation */
/* Builds the query string (prefix/delimiter/marker/max-keys as provided)
 * and issues one GET, writing the response body into *buf.
 * NOTE(review): the signature lines for bucket/prefix/marker and the buf
 * parameter, plus several braces and the Swift "marker" keyword rewrite,
 * are elided in this extract. */
2710 list_fetch(S3Handle *hdl,
2713 const char *delimiter,
2715 const char *max_keys,
2718 s3_result_t result = S3_RESULT_FAIL;
2719 static result_handling_t result_handling[] = {
2720 { 200, 0, 0, S3_RESULT_OK },
2721 { 204, 0, 0, S3_RESULT_OK },
2722 RESULT_HANDLING_ALWAYS_RETRY,
2723 { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
/* Table of candidate query parameters; a NULL value means "omit". */
2725 const char* pos_parts[][2] = {
2727 {"delimiter", delimiter},
2729 {"max-keys", max_keys},
2735 gboolean have_prev_part = FALSE;
2737 /* loop over possible parts to build query string */
2738 query = g_string_new("");
2739 for (i = 0; pos_parts[i][0]; i++) {
2740 if (pos_parts[i][1]) {
2741 const char *keyword;
/* Separate parameters with '&' once at least one has been emitted. */
2743 g_string_append(query, "&");
2745 have_prev_part = TRUE;
/* URL-encode the value; esc_value must be released with curl_free. */
2746 esc_value = curl_escape(pos_parts[i][1], 0);
2747 keyword = pos_parts[i][0];
/* Swift uses a different keyword for the page-size parameter. */
2748 if ((hdl->s3_api == S3_API_SWIFT_1 ||
2749 hdl->s3_api == S3_API_SWIFT_2) &&
2750 strcmp(keyword, "max-keys") == 0) {
2753 g_string_append_printf(query, "%s=%s", keyword, esc_value);
2754 curl_free(esc_value);
/* Swift returns plain text by default; ask for XML explicitly. */
2757 if (hdl->s3_api == S3_API_SWIFT_1 || hdl->s3_api == S3_API_SWIFT_2) {
2759 g_string_append(query, "&");
2760 g_string_append(query, "format=xml");
2763 /* and perform the request on that URI */
2764 result = perform_request(hdl, "GET", bucket, NULL, NULL, query->str, NULL,
2766 NULL, NULL, NULL, NULL, NULL,
2767 S3_BUFFER_WRITE_FUNCS, buf, NULL, NULL,
2770 if (query) g_string_free(query, TRUE);
/* List every key in a bucket (optionally below prefix / split at
 * delimiter), following NextMarker continuation until the listing is
 * complete. On success *list receives the filenames (caller frees) and
 * *total_size the summed object sizes.
 * NOTE(review): the do { ... } opener, err/thunk.text/size initializers,
 * "goto cleanup" targets and several braces are elided in this extract. */
2776 s3_list_keys(S3Handle *hdl,
2779 const char *delimiter,
2781 guint64 *total_size)
/* Sizing rationale for MAX_RESPONSE_LEN, from the S3 API spec: */
2784 * max len of XML variables:
2785 * bucket: 255 bytes (p12 API Version 2006-03-01)
2786 * key: 1024 bytes (p15 API Version 2006-03-01)
2787 * size per key: 5GB bytes (p6 API Version 2006-03-01)
2788 * size of size 10 bytes (i.e. 10 decimal digits)
2789 * etag: 44 (observed+assumed)
2790 * owner ID: 64 (observed+assumed)
2791 * owner DisplayName: 255 (assumed)
2792 * StorageClass: const (p18 API Version 2006-03-01)
2794 static const guint MAX_RESPONSE_LEN = 1000*2000;
/* Page size requested per fetch (S3 maximum). */
2795 static const char *MAX_KEYS = "1000";
2796 struct list_keys_thunk thunk;
2797 GMarkupParseContext *ctxt = NULL;
2798 static GMarkupParser parser = { list_start_element, list_end_element, list_text, NULL, NULL };
2800 s3_result_t result = S3_RESULT_FAIL;
2801 CurlBuffer buf = {NULL, 0, 0, MAX_RESPONSE_LEN};
2805 thunk.filename_list = NULL;
2807 thunk.next_marker = NULL;
2810 /* Loop until S3 has given us the entire picture */
/* Rewind the shared response buffer before each page. */
2812 s3_buffer_reset_func(&buf);
2813 /* get some data from S3 */
2814 result = list_fetch(hdl, bucket, prefix, delimiter, thunk.next_marker, MAX_KEYS, &buf);
2815 if (result != S3_RESULT_OK) goto cleanup;
2816 if (buf.buffer_pos == 0) goto cleanup; /* no body */
2818 /* run the parser over it */
/* Reset per-page parser state; next_marker was consumed by list_fetch
 * above and is re-discovered while parsing this page. */
2819 thunk.in_contents = FALSE;
2820 thunk.in_common_prefixes = FALSE;
2821 thunk.is_truncated = FALSE;
2822 if (thunk.next_marker) g_free(thunk.next_marker);
2823 thunk.next_marker = NULL;
2824 thunk.want_text = FALSE;
2826 ctxt = g_markup_parse_context_new(&parser, 0, (gpointer)&thunk, NULL);
2828 if (!g_markup_parse_context_parse(ctxt, buf.buffer, buf.buffer_pos, &err)) {
2829 if (hdl->last_message) g_free(hdl->last_message);
2830 hdl->last_message = g_strdup(err->message);
2831 result = S3_RESULT_FAIL;
2835 if (!g_markup_parse_context_end_parse(ctxt, &err)) {
2836 if (hdl->last_message) g_free(hdl->last_message);
2837 hdl->last_message = g_strdup(err->message);
2838 result = S3_RESULT_FAIL;
/* NOTE(review): cleanup also frees ctxt when non-NULL; presumably an
 * elided "ctxt = NULL;" follows this free — verify to rule out a
 * double-free on the error path. */
2842 g_markup_parse_context_free(ctxt);
/* A surviving next_marker means the previous page was truncated. */
2844 } while (thunk.next_marker);
2847 if (err) g_error_free(err);
2848 if (thunk.text) g_free(thunk.text);
2849 if (thunk.next_marker) g_free(thunk.next_marker);
2850 if (ctxt) g_markup_parse_context_free(ctxt);
2851 if (buf.buffer) g_free(buf.buffer);
/* On failure the partial list is discarded rather than returned.
 * NOTE(review): g_slist_free frees only the links; the strdup'ed
 * filenames presumably leak here — confirm against upstream. */
2853 if (result != S3_RESULT_OK) {
2854 g_slist_free(thunk.filename_list);
2857 *list = thunk.filename_list;
2859 *total_size = thunk.size;
2866 s3_read(S3Handle *hdl,
2869 s3_write_func write_func,
2870 s3_reset_func reset_func,
2871 gpointer write_data,
2872 s3_progress_func progress_func,
2873 gpointer progress_data)
2875 s3_result_t result = S3_RESULT_FAIL;
2876 static result_handling_t result_handling[] = {
2877 { 200, 0, 0, S3_RESULT_OK },
2878 RESULT_HANDLING_ALWAYS_RETRY,
2879 { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
2882 g_assert(hdl != NULL);
2883 g_assert(write_func != NULL);
2885 result = perform_request(hdl, "GET", bucket, key, NULL, NULL, NULL, NULL,
2886 NULL, NULL, NULL, NULL, NULL, write_func, reset_func, write_data,
2887 progress_func, progress_data, result_handling);
2889 return result == S3_RESULT_OK;
2893 s3_delete(S3Handle *hdl,
2897 s3_result_t result = S3_RESULT_FAIL;
2898 static result_handling_t result_handling[] = {
2899 { 204, 0, 0, S3_RESULT_OK },
2900 { 404, 0, 0, S3_RESULT_OK },
2901 { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_OK },
2902 RESULT_HANDLING_ALWAYS_RETRY,
2903 { 409, 0, 0, S3_RESULT_OK },
2904 { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
2907 g_assert(hdl != NULL);
2909 result = perform_request(hdl, "DELETE", bucket, key, NULL, NULL, NULL, NULL,
2910 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2913 return result == S3_RESULT_OK;
/* Delete many objects with one Multi-Object Delete POST. A 400 maps to
 * NOTIMPL so callers can fall back to one-by-one deletion.
 * NOTE(review): the bucket/key-array parameter lines, the key++ advance
 * inside the loop and the final return mapping are elided here. */
2917 s3_multi_delete(S3Handle *hdl,
2923 s3_result_t result = S3_RESULT_FAIL;
2924 static result_handling_t result_handling[] = {
2925 { 200, 0, 0, S3_RESULT_OK },
2926 { 204, 0, 0, S3_RESULT_OK },
/* Servers without multi-delete support answer 400 -> "not implemented". */
2927 { 400, 0, 0, S3_RESULT_NOTIMPL },
2928 { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_OK },
2929 RESULT_HANDLING_ALWAYS_RETRY,
2930 { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
2933 g_assert(hdl != NULL);
/* Assemble the <Delete> request document in memory. */
2935 query = g_string_new(NULL);
2936 g_string_append(query, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
2937 g_string_append(query, "<Delete>\n");
/* Quiet mode suppresses per-key results unless verbose logging is on. */
2938 if (!hdl->verbose) {
2939 g_string_append(query, " <Quiet>true</Quiet>\n");
2941 while (*key != NULL) {
2942 g_string_append(query, " <Object>\n");
2943 g_string_append(query, " <Key>");
/* NOTE(review): *key is inserted without XML escaping — a key containing
 * '&' or '<' would corrupt the document; verify keys are restricted. */
2944 g_string_append(query, *key);
2945 g_string_append(query, "</Key>\n");
2946 g_string_append(query, " </Object>\n");
2949 g_string_append(query, "</Delete>\n");
/* Wrap the document in a CurlBuffer so it can be streamed (and rewound
 * on retry) by the buffer callbacks below. */
2951 data.buffer_len = query->len;
2952 data.buffer = query->str;
2953 data.buffer_pos = 0;
2954 data.max_buffer_size = data.buffer_len;
/* POST ?delete with an MD5 header, as the S3 API requires. */
2956 result = perform_request(hdl, "POST", bucket, NULL, "delete", NULL,
2957 "application/xml", NULL,
2958 s3_buffer_read_func, s3_buffer_reset_func,
2959 s3_buffer_size_func, s3_buffer_md5_func,
2960 &data, NULL, NULL, NULL, NULL, NULL,
2963 g_string_free(query, TRUE);
2964 if (result == S3_RESULT_OK)
2966 else if (result == S3_RESULT_NOTIMPL)
/* Create a bucket (honoring any configured location constraint), then
 * verify that an existing bucket's location constraint matches the
 * configuration. Returns TRUE on success.
 * NOTE(review): the bucket parameter line, body/err declarations, several
 * braces and the cleanup label are elided in this extract. */
2973 s3_make_bucket(S3Handle *hdl,
2975 const char *project_id)
2978 s3_result_t result = S3_RESULT_FAIL;
2979 static result_handling_t result_handling[] = {
2980 { 200, 0, 0, S3_RESULT_OK },
2981 { 201, 0, 0, S3_RESULT_OK },
2982 { 202, 0, 0, S3_RESULT_OK },
2983 { 204, 0, 0, S3_RESULT_OK },
/* Freshly created buckets can 404 briefly; retry until visible. */
2984 { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_RETRY },
2985 RESULT_HANDLING_ALWAYS_RETRY,
2986 { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
2988 regmatch_t pmatch[4];
2989 char *loc_end_open, *loc_content;
/* ptr stays NULL (no request body) unless a location constraint is sent. */
2990 CurlBuffer buf = {NULL, 0, 0, 0}, *ptr = NULL;
2991 s3_read_func read_func = NULL;
2992 s3_reset_func reset_func = NULL;
2993 s3_md5_func md5_func = NULL;
2994 s3_size_func size_func = NULL;
2996 g_assert(hdl != NULL);
/* A concrete (non-wildcard) location requires a constraint document in
 * the PUT body — only possible for subdomain-compatible bucket names. */
2998 if (is_non_empty_string(hdl->bucket_location) &&
2999 0 != strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location)) {
3000 if (s3_bucket_location_compat(bucket)) {
3002 buf.buffer = g_strdup_printf(AMAZON_BUCKET_CONF_TEMPLATE,
/* IIJ GIO's S3 clone wants an explicit xmlns on the document. */
3003 g_str_equal(hdl->host, "gss.iijgio.com")?
3004 " xmlns=\"http://acs.iijgio.com/doc/2006-03-01/\"":
3006 hdl->bucket_location);
3007 buf.buffer_len = (guint) strlen(buf.buffer);
3009 buf.max_buffer_size = buf.buffer_len;
3010 read_func = s3_buffer_read_func;
3011 reset_func = s3_buffer_reset_func;
3012 size_func = s3_buffer_size_func;
3013 md5_func = s3_buffer_md5_func;
3015 hdl->last_message = g_strdup_printf(_(
3016 "Location constraint given for Amazon S3 bucket, "
3017 "but the bucket name (%s) is not usable as a subdomain."), bucket);
3022 result = perform_request(hdl, "PUT", bucket, NULL, NULL, NULL, NULL,
3024 read_func, reset_func, size_func, md5_func, ptr,
3025 NULL, NULL, NULL, NULL, NULL, result_handling);
/* "Already own it" is fine — but then cross-check the constraint. */
3027 if (result == S3_RESULT_OK ||
3028 (result != S3_RESULT_OK &&
3029 hdl->last_s3_error_code == S3_ERROR_BucketAlreadyOwnedByYou)) {
3030 /* verify the that the location constraint on the existing bucket matches
3031 * the one that's configured.
3033 if (is_non_empty_string(hdl->bucket_location)) {
3034 result = perform_request(hdl, "GET", bucket, NULL, "location", NULL, NULL, NULL,
3035 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3036 NULL, NULL, result_handling);
3038 result = perform_request(hdl, "GET", bucket, NULL, NULL, NULL, NULL, NULL,
3039 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3040 NULL, NULL, result_handling);
3043 if (result == S3_RESULT_OK && is_non_empty_string(hdl->bucket_location)) {
3044 /* return to the default state of failure */
3045 result = S3_RESULT_FAIL;
3047 if (body) g_free(body);
3048 /* use strndup to get a null-terminated string */
3049 body = g_strndup(hdl->last_response_body, hdl->last_response_body_size);
3051 hdl->last_message = g_strdup(_("No body received for location request"));
3053 } else if ('\0' == body[0]) {
3054 hdl->last_message = g_strdup(_("Empty body received for location request"));
/* Parse <LocationConstraint ...> out of the response with a regex. */
3058 if (!s3_regexec_wrap(&location_con_regex, body, 4, pmatch, 0)) {
3059 loc_end_open = find_regex_substring(body, pmatch[1]);
3060 loc_content = find_regex_substring(body, pmatch[3]);
3062 /* The case of an empty string is special because XML allows
3063 * "self-closing" tags
/* Wildcard config: any non-empty constraint on the bucket is an error. */
3065 if (0 == strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location) &&
3066 '/' != loc_end_open[0])
3067 hdl->last_message = g_strdup(_("A wildcard location constraint is "
3068 "configured, but the bucket has a non-empty location constraint"));
/* Concrete config: the bucket's constraint must match what we expect. */
3069 else if (strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location)?
3070 strncmp(loc_content, hdl->bucket_location, strlen(hdl->bucket_location)) :
3071 ('\0' != loc_content[0]))
3072 hdl->last_message = g_strdup(_("The location constraint configured "
3073 "does not match the constraint currently on the bucket"));
3075 result = S3_RESULT_OK;
3077 hdl->last_message = g_strdup(_("Unexpected location response from Amazon S3"));
3083 if (body) g_free(body);
3085 return result == S3_RESULT_OK;
/* Exchange the stored OAuth2 refresh token for a fresh access token
 * (Google Cloud Storage). On success hdl->access_token / x_auth_token /
 * expires are updated. Returns TRUE on success.
 * NOTE(review): the parameter list, body/err declarations, braces and
 * some cleanup lines are elided in this extract. */
3090 oauth2_get_access_token(
3095 s3_result_t result = S3_RESULT_FAIL;
3096 static result_handling_t result_handling[] = {
3097 { 200, 0, 0, S3_RESULT_OK },
3098 { 204, 0, 0, S3_RESULT_OK },
3099 RESULT_HANDLING_ALWAYS_RETRY,
3100 { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
3103 regmatch_t pmatch[2];
3105 g_assert(hdl != NULL);
/* Build the x-www-form-urlencoded refresh-token request body.
 * NOTE(review): client_secret/refresh_token are appended without URL
 * encoding — presumably safe for Google-issued values; verify. */
3107 query = g_string_new(NULL);
3108 g_string_append(query, "client_id=");
3109 g_string_append(query, hdl->client_id);
3110 g_string_append(query, "&client_secret=");
3111 g_string_append(query, hdl->client_secret);
3112 g_string_append(query, "&refresh_token=");
3113 g_string_append(query, hdl->refresh_token);
3114 g_string_append(query, "&grant_type=refresh_token");
3116 data.buffer_len = query->len;
3117 data.buffer = query->str;
3118 data.buffer_pos = 0;
3119 data.max_buffer_size = data.buffer_len;
/* Temporarily point the handle at Google's token endpoint; the flag
 * tells perform_request not to recurse into token acquisition.
 * NOTE(review): a string literal is assigned here and NULLed below —
 * confirm x_storage_url is never g_free'd while set to this literal. */
3121 hdl->x_storage_url = "https://accounts.google.com/o/oauth2/token";
3122 hdl->getting_oauth2_access_token = 1;
3123 result = perform_request(hdl, "POST", NULL, NULL, NULL, NULL,
3124 "application/x-www-form-urlencoded", NULL,
3125 s3_buffer_read_func, s3_buffer_reset_func,
3126 s3_buffer_size_func, s3_buffer_md5_func,
3127 &data, NULL, NULL, NULL, NULL, NULL,
3129 hdl->x_storage_url = NULL;
3130 hdl->getting_oauth2_access_token = 0;
3132 /* use strndup to get a null-terminated string */
3133 body = g_strndup(hdl->last_response_body, hdl->last_response_body_size);
3135 hdl->last_message = g_strdup(_("No body received for location request"));
3137 } else if ('\0' == body[0]) {
3138 hdl->last_message = g_strdup(_("Empty body received for location request"));
/* Scrape access_token / expires_in out of the JSON reply with regexes. */
3142 if (!s3_regexec_wrap(&access_token_regex, body, 2, pmatch, 0)) {
3143 hdl->access_token = find_regex_substring(body, pmatch[1]);
3144 hdl->x_auth_token = g_strdup(hdl->access_token);
3146 if (!s3_regexec_wrap(&expires_in_regex, body, 2, pmatch, 0)) {
3147 char *expires_in = find_regex_substring(body, pmatch[1]);
/* Refresh 10 minutes early to avoid using a token at the edge of
 * its lifetime. NOTE(review): the g_free(expires_in) is not visible
 * in this extract — confirm it is not leaked. */
3148 hdl->expires = time(NULL) + atoi(expires_in) - 600;
3154 return result == S3_RESULT_OK;
/* Probe whether a bucket/container exists by issuing a cheap GET
 * (limited to at most one key). Returns TRUE if the bucket responds.
 * NOTE(review): the bucket parameter line, the query declaration and the
 * Swift branch's query value are elided in this extract. */
3158 s3_is_bucket_exists(S3Handle *hdl,
3160 const char *project_id)
3162 s3_result_t result = S3_RESULT_FAIL;
3164 static result_handling_t result_handling[] = {
3165 { 200, 0, 0, S3_RESULT_OK },
3166 { 204, 0, 0, S3_RESULT_OK },
3167 RESULT_HANDLING_ALWAYS_RETRY,
3168 { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
/* Swift uses a different page-size keyword than S3 ("limit" vs
 * "max-keys"); the Swift value is set in elided lines. */
3171 if (hdl->s3_api == S3_API_SWIFT_1 ||
3172 hdl->s3_api == S3_API_SWIFT_2) {
3175 query = "max-keys=1";
3178 result = perform_request(hdl, "GET", bucket, NULL, NULL, query,
3180 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
3181 NULL, NULL, result_handling);
3183 return result == S3_RESULT_OK;
3187 s3_delete_bucket(S3Handle *hdl,
3190 return s3_delete(hdl, bucket, NULL);