2 * Copyright (c) 2008-2012 Zmanda, Inc. All Rights Reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 * Contact information: Zmanda Inc., 465 S. Mathilda Ave., Suite 300
19 * Sunnyvale, CA 94085, USA, or: http://www.zmanda.com
23 * - collect speed statistics
28 /* use a relative path here to avoid conflicting with Perl's config.h. */
29 #include "../config/config.h"
37 #ifdef HAVE_SYS_TYPES_H
38 #include <sys/types.h>
40 #ifdef HAVE_SYS_STAT_H
59 #include <curl/curl.h>
61 /* Constant renamed after version 7.10.7 */
62 #ifndef CURLINFO_RESPONSE_CODE
63 #define CURLINFO_RESPONSE_CODE CURLINFO_HTTP_CODE
66 /* We don't need OpenSSL's kerberos support, and it's broken in
68 #define OPENSSL_NO_KRB5
70 #ifdef HAVE_OPENSSL_HMAC_H
71 # include <openssl/hmac.h>
73 # ifdef HAVE_CRYPTO_HMAC_H
74 # include <crypto/hmac.h>
82 #include <openssl/err.h>
83 #include <openssl/ssl.h>
84 #include <openssl/md5.h>
86 /* Maximum key length as specified in the S3 documentation
87 * (*excluding* null terminator) */
88 #define S3_MAX_KEY_LENGTH 1024
90 #define AMAZON_SECURITY_HEADER "x-amz-security-token"
91 #define AMAZON_BUCKET_CONF_TEMPLATE "\
92 <CreateBucketConfiguration%s>\n\
93 <LocationConstraint>%s</LocationConstraint>\n\
94 </CreateBucketConfiguration>"
96 #define AMAZON_STORAGE_CLASS_HEADER "x-amz-storage-class"
98 #define AMAZON_SERVER_SIDE_ENCRYPTION_HEADER "x-amz-server-side-encryption"
100 #define AMAZON_WILDCARD_LOCATION "*"
102 /* parameters for exponential backoff in the face of retriable errors */
105 #define EXPONENTIAL_BACKOFF_START_USEC G_USEC_PER_SEC/100
106 /* double at each retry */
107 #define EXPONENTIAL_BACKOFF_BASE 2
108 /* retry 14 times (for a total of about 3 minutes spent waiting) */
109 #define EXPONENTIAL_BACKOFF_MAX_RETRIES 14
111 /* general "reasonable size" parameters */
112 #define MAX_ERROR_RESPONSE_LEN (100*1024)
114 /* Results which should always be retried */
115 #define RESULT_HANDLING_ALWAYS_RETRY \
116 { 400, S3_ERROR_RequestTimeout, 0, S3_RESULT_RETRY }, \
117 { 403, S3_ERROR_RequestTimeTooSkewed,0, S3_RESULT_RETRY }, \
118 { 409, S3_ERROR_OperationAborted, 0, S3_RESULT_RETRY }, \
119 { 412, S3_ERROR_PreconditionFailed, 0, S3_RESULT_RETRY }, \
120 { 500, S3_ERROR_InternalError, 0, S3_RESULT_RETRY }, \
121 { 501, S3_ERROR_NotImplemented, 0, S3_RESULT_RETRY }, \
122 { 0, 0, CURLE_COULDNT_CONNECT, S3_RESULT_RETRY }, \
123 { 0, 0, CURLE_COULDNT_RESOLVE_HOST, S3_RESULT_RETRY }, \
124 { 0, 0, CURLE_PARTIAL_FILE, S3_RESULT_RETRY }, \
125 { 0, 0, CURLE_OPERATION_TIMEOUTED, S3_RESULT_RETRY }, \
126 { 0, 0, CURLE_SSL_CONNECT_ERROR, S3_RESULT_RETRY }, \
127 { 0, 0, CURLE_SEND_ERROR, S3_RESULT_RETRY }, \
128 { 0, 0, CURLE_RECV_ERROR, S3_RESULT_RETRY }, \
129 { 0, 0, CURLE_GOT_NOTHING, S3_RESULT_RETRY }
132 * Data structures and associated functions
136 /* (all strings in this struct are freed by s3_free()) */
141 char *swift_account_id;
142 char *swift_access_key;
152 gboolean getting_oauth2_access_token;
153 gboolean getting_swift_2_token;
155 /* attributes for new objects */
156 char *bucket_location;
158 char *server_side_encryption;
162 gboolean use_subdomain;
173 guint64 max_send_speed;
174 guint64 max_recv_speed;
176 /* information from the last request */
178 guint last_response_code;
179 s3_error_code_t last_s3_error_code;
180 CURLcode last_curl_code;
181 guint last_num_retries;
182 void *last_response_body;
183 guint last_response_body_size;
186 time_t time_offset_with_s3;
189 gboolean reuse_connection;
198 s3_write_func write_func;
199 s3_reset_func reset_func;
202 gboolean headers_done;
203 gboolean int_write_done;
205 /* Points to current handle: Added to get hold of s3 offset */
206 struct S3Handle *hdl;
209 /* Callback function to examine headers one-at-a-time
211 * @note this is the same as CURLOPT_HEADERFUNCTION
213 * @param data: The pointer to read data from
214 * @param size: The size of each "element" of the data buffer in bytes
215 * @param nmemb: The number of elements in the data buffer.
216 * So, the buffer's size is size*nmemb bytes.
217 * @param stream: the header_data (an opaque pointer)
219 * @return The number of bytes written to the buffer or
220 * CURL_WRITEFUNC_PAUSE to pause.
221 * If it's the number of bytes written, it should match the buffer size
223 typedef size_t (*s3_header_func)(void *data, size_t size, size_t nmemb, void *stream);
229 /* (see preprocessor magic in s3.h) */
231 static char * s3_error_code_names[] = {
232 #define S3_ERROR(NAME) #NAME
237 /* Convert an s3 error name to an error code. This function
238 * matches strings case-insensitively, and is appropriate for use
239 * on data from the network.
241 * @param s3_error_code: the error name
242 * @returns: the error code (see constants in s3.h)
244 static s3_error_code_t
245 s3_error_code_from_name(char *s3_error_name);
247 /* Convert an s3 error code to a string
249 * @param s3_error_code: the error code to convert
250 * @returns: statically allocated string
253 s3_error_name_from_code(s3_error_code_t s3_error_code);
259 /* result handling is specified by a static array of result_handling structs,
260 * which match based on response_code (from HTTP) and S3 error code. The result
261 * given for the first match is used. 0 acts as a wildcard for both response_code
262 * and s3_error_code. The list is terminated with a struct containing 0 for both
263 * response_code and s3_error_code; the result for that struct is the default
266 * See RESULT_HANDLING_ALWAYS_RETRY for an example.
269 S3_RESULT_RETRY = -1,
272 S3_RESULT_NOTIMPL = 2
275 typedef struct result_handling {
277 s3_error_code_t s3_error_code;
283 * get the access token for OAUTH2
285 static gboolean oauth2_get_access_token(S3Handle *hdl);
287 /* Lookup a result in C{result_handling}.
289 * @param result_handling: array of handling specifications
290 * @param response_code: response code from operation
291 * @param s3_error_code: s3 error code from operation, if any
292 * @param curl_code: the CURL error, if any
293 * @returns: the matching result
296 lookup_result(const result_handling_t *result_handling,
298 s3_error_code_t s3_error_code,
302 * Precompiled regular expressions */
303 static regex_t etag_regex, error_name_regex, message_regex, subdomain_regex,
304 location_con_regex, date_sync_regex, x_auth_token_regex,
305 x_storage_url_regex, access_token_regex, expires_in_regex,
306 content_type_regex, details_regex, code_regex;
313 /* Check if a string is non-empty
315 * @param str: string to check
316 * @returns: true iff str is non-NULL and not "\0"
318 static gboolean is_non_empty_string(const char *str);
320 /* Construct the URL for an Amazon S3 REST request.
322 * A new string is allocated and returned; it is the responsibility of the caller to free it.
324 * @param hdl: the S3Handle object
325 * @param service_path: A path to add in the URL, or NULL for none.
326 * @param bucket: the bucket being accessed, or NULL for none
327 * @param key: the key being accessed, or NULL for none
328 * @param subresource: the sub-resource being accessed (e.g. "acl"), or NULL for none
329 * @param query: the query being accessed (e.g. "acl"), or NULL for none
331 * !use_subdomain: http://host/service_path/bucket/key
332 * use_subdomain : http://bucket.host/service_path/key
340 const char *subresource,
343 /* Create proper authorization headers for an Amazon S3 REST
344 * request to C{headers}.
346 * @note: C{X-Amz} headers (in C{headers}) must
348 * - be in alphabetical order
349 * - have no spaces around the colon
350 * (don't yell at me -- see the Amazon Developer Guide)
352 * @param hdl: the S3Handle object
353 * @param verb: capitalized verb for this request ('PUT', 'GET', etc.)
354 * @param bucket: the bucket being accessed, or NULL for none
355 * @param key: the key being accessed, or NULL for none
356 * @param subresource: the sub-resource being accessed (e.g. "acl"), or NULL for none
357 * @param md5_hash: the MD5 hash of the request body, or NULL for none
359 static struct curl_slist *
360 authenticate_request(S3Handle *hdl,
364 const char *subresource,
365 const char *md5_hash,
366 const char *content_type,
367 const size_t content_length,
368 const char *project_id);
372 /* Interpret the response to an S3 operation, assuming CURL completed its request
373 * successfully. This function fills in the relevant C{hdl->last*} members.
375 * @param hdl: The S3Handle object
376 * @param body: the response body
377 * @param body_len: the length of the response body
378 * @param etag: The response's ETag header
379 * @param content_md5: The hex-encoded MD5 hash of the request body,
380 * which will be checked against the response's ETag header.
381 * If NULL, the header is not checked.
382 * If non-NULL, then the body should have the response headers at its beginning.
383 * @returns: TRUE if the response should be retried (e.g., network error)
386 interpret_response(S3Handle *hdl,
388 char *curl_error_buffer,
392 const char *content_md5);
394 /* Perform an S3 operation. This function handles all of the details
395 * of retrying requests and so on.
397 * The concepts of bucket and keys are defined by the Amazon S3 API.
398 * See: "Components of Amazon S3" - API Version 2006-03-01 pg. 8
400 * Individual sub-resources are defined in several places. In the REST API,
401 * they are represented by a "flag" in the "query string".
402 * See: "Constructing the CanonicalizedResource Element" - API Version 2006-03-01 pg. 60
404 * @param hdl: the S3Handle object
405 * @param verb: the HTTP request method
406 * @param bucket: the bucket to access, or NULL for none
407 * @param key: the key to access, or NULL for none
408 * @param subresource: the "sub-resource" to request (e.g. "acl") or NULL for none
409 * @param query: the query string to send (not including the initial '?'),
411 * @param read_func: the callback for reading data
412 * Will use s3_empty_read_func if NULL is passed in.
413 * @param read_reset_func: the callback for to reset reading data
414 * @param size_func: the callback to get the number of bytes to upload
415 * @param md5_func: the callback to get the MD5 hash of the data to upload
416 * @param read_data: pointer to pass to the above functions
417 * @param write_func: the callback for writing data.
418 * Will use s3_counter_write_func if NULL is passed in.
419 * @param write_reset_func: the callback for to reset writing data
420 * @param write_data: pointer to pass to C{write_func}
421 * @param progress_func: the callback for progress information
422 * @param progress_data: pointer to pass to C{progress_func}
423 * @param result_handling: instructions for handling the results; see above.
424 * @returns: the result specified by result_handling; details of the response
425 * are then available in C{hdl->last*}
428 perform_request(S3Handle *hdl,
432 const char *subresource,
434 const char *content_type,
435 const char *project_id,
436 s3_read_func read_func,
437 s3_reset_func read_reset_func,
438 s3_size_func size_func,
439 s3_md5_func md5_func,
441 s3_write_func write_func,
442 s3_reset_func write_reset_func,
444 s3_progress_func progress_func,
445 gpointer progress_data,
446 const result_handling_t *result_handling);
449 * a CURLOPT_WRITEFUNCTION to save part of the response in memory and
450 * call an external function if one was provided.
453 s3_internal_write_func(void *ptr, size_t size, size_t nmemb, void * stream);
456 * a function to reset to our internal buffer
459 s3_internal_reset_func(void * stream);
462 * a CURLOPT_HEADERFUNCTION to save the ETag header only.
465 s3_internal_header_func(void *ptr, size_t size, size_t nmemb, void * stream);
468 compile_regexes(void);
470 static gboolean get_openstack_swift_api_v1_setting(S3Handle *hdl);
471 static gboolean get_openstack_swift_api_v2_setting(S3Handle *hdl);
474 * Static function implementations
476 static s3_error_code_t
477 s3_error_code_from_name(char *s3_error_name)
481 if (!s3_error_name) return S3_ERROR_Unknown;
483 /* do a brute-force search through the list, since it's not sorted */
484 for (i = 0; i < S3_ERROR_END; i++) {
485 if (g_ascii_strcasecmp(s3_error_name, s3_error_code_names[i]) == 0)
489 return S3_ERROR_Unknown;
493 s3_error_name_from_code(s3_error_code_t s3_error_code)
495 if (s3_error_code >= S3_ERROR_END)
496 s3_error_code = S3_ERROR_Unknown;
498 return s3_error_code_names[s3_error_code];
/* Report whether the libcurl in use was built with SSL support.
 * The probe is performed once and cached in the static `supported`
 * (-1 = not yet probed).  CURL_VERSION_SSL is checked both at compile
 * time (the #if) and at run time (the features bitmask). */
502 s3_curl_supports_ssl(void)
504 static int supported = -1;
505 if (supported == -1) {
506 #if defined(CURL_VERSION_SSL)
507 curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);
508 if (info->features & CURL_VERSION_SSL)
/* Report whether libcurl supports bandwidth throttling
 * (CURLOPT_MAX_SEND_SPEED_LARGE / CURLOPT_MAX_RECV_SPEED_LARGE).
 * Both the headers we compiled against and the library we are
 * running against must be at least 7.15.5. */
521 s3_curl_throttling_compat(void)
523 /* CURLOPT_MAX_SEND_SPEED_LARGE added in 7.15.5 */
524 #if LIBCURL_VERSION_NUM >= 0x070f05
525 curl_version_info_data *info;
527 /* check the runtime version too */
528 info = curl_version_info(CURLVERSION_NOW);
529 return info->version_num >= 0x070f05;
536 lookup_result(const result_handling_t *result_handling,
538 s3_error_code_t s3_error_code,
541 while (result_handling->response_code
542 || result_handling->s3_error_code
543 || result_handling->curl_code) {
544 if ((result_handling->response_code && result_handling->response_code != response_code)
545 || (result_handling->s3_error_code && result_handling->s3_error_code != s3_error_code)
546 || (result_handling->curl_code && result_handling->curl_code != curl_code)) {
551 return result_handling->result;
554 /* return the result for the terminator, as the default */
555 return result_handling->result;
/* Body of the RFC 3339 date parser: picks apart "YYYY-MM-DDTHH:MM:SS"
 * with atoi() at fixed offsets, then resolves the timezone suffix.
 * NOTE(review): this listing is decimated; several declarations and
 * closing braces are not visible here. */
562 gint year, month, day, hour, minute, seconds;
/* minimum parseable form is 19 chars: "YYYY-MM-DDTHH:MM:SS" */
565 if (strlen(date) < 19)
569 month = atoi(date+5);
571 hour = atoi(date+11);
572 minute = atoi(date+14);
573 seconds = atoi(date+17);
575 if (*atz == '.') { /* skip decimal seconds */
577 while (*atz >= '0' && *atz <= '9') {
/* Preferred path: GLib >= 2.26 has GTimeZone/GDateTime and can parse
 * the timezone string directly (runtime-checked as well). */
582 #if GLIB_CHECK_VERSION(2,26,0)
583 if (!glib_check_version(2,26,0)) {
588 tz = g_time_zone_new(atz);
589 dt = g_date_time_new(tz, year, month, day, hour, minute, seconds);
590 a = g_date_time_to_unix(dt);
591 g_time_zone_unref(tz);
592 g_date_time_unref(dt);
/* Fallback path: build a struct tm by hand */
600 tm.tm_year = year - 1900;
601 tm.tm_mon = month - 1;
611 if (*atz == '-' || *atz == '+') { /* numeric timezone */
615 gint Hour = atoi(atz);
616 gint Min = atoi(atz+4);
/* NOTE(review): "<t" below looks like an entity-decoding garble of
 * "&lt" i.e. the address of a local struct tm named `lt` — confirm
 * against upstream Amanda s3.c before trusting this listing. */
623 localtime_r(&t, <t);
/* correct for the local/UTC offset computed from the two struct tms */
627 tm.tm_sec += lt - gt;
630 } else if (*atz == 'Z' && *(atz+1) == '\0') { /* Z timezone */
636 localtime_r(&t, <t);
640 tm.tm_sec += lt - gt;
643 } else { /* named timezone */
/* named zones are resolved in a forked child: set TZ there so the
 * parent's environment is never mutated, and pipe the result back */
661 setenv("TZ", atz, 1);
664 g_snprintf(buf, 100, "%d", (int)a);
665 size = write(fd[1], buf, strlen(buf));
670 size = read(fd[0], buf, 100);
673 waitpid(pid, NULL, 0);
683 is_non_empty_string(const char *str)
685 return str && str[0] != '\0';
/* Body of build_url(): assembles the request URL (see the comment block
 * above the prototype for the subdomain vs. path-style layouts).
 * Ownership: the returned string is newly allocated; caller frees. */
693 const char *subresource,
697 char *esc_bucket = NULL, *esc_key = NULL;
/* Swift/OAuth2 APIs supply a complete base URL (x_storage_url);
 * otherwise build scheme://host from the handle's settings */
699 if ((hdl->s3_api == S3_API_SWIFT_1 || hdl->s3_api == S3_API_SWIFT_2 ||
700 hdl->s3_api == S3_API_OAUTH2) &&
701 hdl->x_storage_url) {
702 url = g_string_new(hdl->x_storage_url);
703 g_string_append(url, "/");
706 url = g_string_new("http");
708 g_string_append(url, "s");
710 g_string_append(url, "://");
/* subdomain style puts the bucket in the hostname */
713 if (hdl->use_subdomain && bucket)
714 g_string_append_printf(url, "%s.%s", bucket, hdl->host);
716 g_string_append_printf(url, "%s", hdl->host);
718 if (hdl->service_path) {
719 g_string_append_printf(url, "%s/", hdl->service_path);
721 g_string_append(url, "/");
/* path style: the bucket is a URL-escaped path component */
726 if (!hdl->use_subdomain && bucket) {
727 /* curl_easy_escape added in 7.15.4 */
728 #if LIBCURL_VERSION_NUM >= 0x070f04
729 curl_version_info_data *info;
730 /* check the runtime version too */
731 info = curl_version_info(CURLVERSION_NOW);
732 if (info->version_num >= 0x070f04)
733 esc_bucket = curl_easy_escape(hdl->curl, bucket, 0);
735 esc_bucket = curl_escape(bucket, 0);
737 esc_bucket = curl_escape(bucket, 0);
739 if (!esc_bucket) goto cleanup;
740 g_string_append_printf(url, "%s", esc_bucket);
742 g_string_append(url, "/");
743 curl_free(esc_bucket);
/* same escaping dance for the object key */
747 /* curl_easy_escape added in 7.15.4 */
748 #if LIBCURL_VERSION_NUM >= 0x070f04
749 curl_version_info_data *info;
750 /* check the runtime version too */
751 info = curl_version_info(CURLVERSION_NOW);
752 if (info->version_num >= 0x070f04)
753 esc_key = curl_easy_escape(hdl->curl, key, 0);
755 esc_key = curl_escape(key, 0);
757 esc_key = curl_escape(key, 0);
759 if (!esc_key) goto cleanup;
760 g_string_append_printf(url, "%s", esc_key);
/* drop a trailing slash before appending any query string */
764 if (url->str[strlen(url->str)-1] == '/') {
765 g_string_truncate(url, strlen(url->str)-1);
769 if (subresource || query || (hdl->s3_api == S3_API_CASTOR && hdl->tenant_name))
770 g_string_append(url, "?");
773 g_string_append(url, subresource);
775 if (subresource && query)
776 g_string_append(url, "&");
779 g_string_append(url, query);
781 /* add CAStor tenant domain override query arg */
782 if (hdl->s3_api == S3_API_CASTOR && hdl->tenant_name) {
783 if (subresource || query) {
784 g_string_append(url, "&");
786 g_string_append_printf(url, "domain=%s", hdl->tenant_name);
/* g_string_free(.., FALSE) hands the char* buffer to the caller */
791 return g_string_free(url, FALSE);
/* Build the authentication/authorization header list for one request.
 * The per-API branches below (Swift v1/v2, OAuth2, CAStor) emit simple
 * token headers; the AWS path builds the Signature-Version-2
 * string-to-sign, HMAC-SHA1s it with the secret key, and emits an
 * "Authorization: AWS access_key:signature" header.
 * The returned curl_slist is owned by the caller (curl_slist_free_all).
 * NOTE(review): this listing is decimated; some declarations, #else
 * branches and the cleanup label are not visible here. */
794 static struct curl_slist *
795 authenticate_request(S3Handle *hdl,
799 const char *subresource,
800 const char *md5_hash,
801 const char *content_type,
802 const size_t content_length,
803 const char *project_id)
810 GByteArray *md = NULL;
811 char *auth_base64 = NULL;
812 struct curl_slist *headers = NULL;
813 char *esc_bucket = NULL, *esc_key = NULL;
814 GString *auth_string = NULL;
818 static const char *wkday[] = {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"};
819 static const char *month[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
820 "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
822 /* calculate the date */
825 /* sync clock with amazon s3 */
826 t = t + hdl->time_offset_with_s3;
829 if (!gmtime_s(&tmp, &t)) g_debug("localtime error");
831 if (!gmtime_r(&t, &tmp)) perror("localtime");
/* RFC 1123 date, built by hand to stay locale-independent */
835 date = g_strdup_printf("%s, %02d %s %04d %02d:%02d:%02d GMT",
836 wkday[tmp.tm_wday], tmp.tm_mday, month[tmp.tm_mon], 1900+tmp.tm_year,
837 tmp.tm_hour, tmp.tm_min, tmp.tm_sec);
/* non-AWS APIs: simple token/identity headers, no request signing */
839 if (hdl->s3_api == S3_API_SWIFT_1) {
841 buf = g_strdup_printf("X-Auth-User: %s", hdl->swift_account_id);
842 headers = curl_slist_append(headers, buf);
844 buf = g_strdup_printf("X-Auth-Key: %s", hdl->swift_access_key);
845 headers = curl_slist_append(headers, buf);
848 buf = g_strdup_printf("X-Auth-Token: %s", hdl->x_auth_token);
849 headers = curl_slist_append(headers, buf);
852 } else if (hdl->s3_api == S3_API_SWIFT_2) {
854 buf = g_strdup_printf("X-Auth-Token: %s", hdl->x_auth_token);
855 headers = curl_slist_append(headers, buf);
858 buf = g_strdup_printf("Accept: %s", "application/xml");
859 headers = curl_slist_append(headers, buf);
861 } else if (hdl->s3_api == S3_API_OAUTH2) {
863 buf = g_strdup_printf("Authorization: Bearer %s", hdl->access_token);
864 headers = curl_slist_append(headers, buf);
867 } else if (hdl->s3_api == S3_API_CASTOR) {
868 if (g_str_equal(verb, "PUT") || g_str_equal(verb, "POST")) {
870 buf = g_strdup("CAStor-Application: Amanda");
871 headers = curl_slist_append(headers, buf);
873 reps = g_strdup(hdl->reps); /* object replication level */
875 reps = g_strdup(hdl->reps_bucket); /* bucket replication level */
878 /* set object replicas in lifepoint */
879 buf = g_strdup_printf("lifepoint: [] reps=%s", reps);
880 headers = curl_slist_append(headers, buf);
885 /* Build the string to sign, per the S3 spec.
886 * See: "Authenticating REST Requests" - API Version 2006-03-01 pg 58
890 auth_string = g_string_new(verb);
891 g_string_append(auth_string, "\n");
893 /* Content-MD5 header */
895 g_string_append(auth_string, md5_hash);
896 g_string_append(auth_string, "\n");
899 g_string_append(auth_string, content_type);
901 g_string_append(auth_string, "\n");
904 g_string_append(auth_string, date);
905 g_string_append(auth_string, "\n");
907 /* CanonicalizedAmzHeaders, sorted lexicographically */
908 if (is_non_empty_string(hdl->user_token)) {
909 g_string_append(auth_string, AMAZON_SECURITY_HEADER);
910 g_string_append(auth_string, ":");
911 g_string_append(auth_string, hdl->user_token);
912 g_string_append(auth_string, ",");
913 g_string_append(auth_string, STS_PRODUCT_TOKEN);
914 g_string_append(auth_string, "\n");
917 if (g_str_equal(verb,"PUT") &&
918 is_non_empty_string(hdl->server_side_encryption)) {
919 g_string_append(auth_string, AMAZON_SERVER_SIDE_ENCRYPTION_HEADER);
920 g_string_append(auth_string, ":");
921 g_string_append(auth_string, hdl->server_side_encryption);
922 g_string_append(auth_string, "\n");
925 if (is_non_empty_string(hdl->storage_class)) {
926 g_string_append(auth_string, AMAZON_STORAGE_CLASS_HEADER);
927 g_string_append(auth_string, ":");
928 g_string_append(auth_string, hdl->storage_class);
929 g_string_append(auth_string, "\n");
932 /* CanonicalizedResource */
933 if (hdl->service_path) {
934 g_string_append(auth_string, hdl->service_path);
936 g_string_append(auth_string, "/");
/* subdomain style signs the raw bucket; path style signs the
 * URL-escaped bucket, matching what build_url() sends */
938 if (hdl->use_subdomain)
939 g_string_append(auth_string, bucket);
941 esc_bucket = curl_escape(bucket, 0);
942 if (!esc_bucket) goto cleanup;
943 g_string_append(auth_string, esc_bucket);
947 if (bucket && (hdl->use_subdomain || key))
948 g_string_append(auth_string, "/");
951 esc_key = curl_escape(key, 0);
952 if (!esc_key) goto cleanup;
953 g_string_append(auth_string, esc_key);
957 g_string_append(auth_string, "?");
958 g_string_append(auth_string, subresource);
961 /* run HMAC-SHA1 on the canonicalized string */
962 md = g_byte_array_sized_new(EVP_MAX_MD_SIZE+1);
964 HMAC_Init_ex(&ctx, hdl->secret_key, (int) strlen(hdl->secret_key),
966 HMAC_Update(&ctx, (unsigned char*) auth_string->str, auth_string->len);
967 HMAC_Final(&ctx, md->data, &md->len);
968 HMAC_CTX_cleanup(&ctx);
969 auth_base64 = s3_base64_encode(md);
970 /* append the new headers */
971 if (is_non_empty_string(hdl->user_token)) {
972 /* Devpay headers are included in hash. */
973 buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s",
975 headers = curl_slist_append(headers, buf);
978 buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s",
980 headers = curl_slist_append(headers, buf);
984 if (g_str_equal(verb,"PUT") &&
985 is_non_empty_string(hdl->server_side_encryption)) {
986 buf = g_strdup_printf(AMAZON_SERVER_SIDE_ENCRYPTION_HEADER ": %s",
987 hdl->server_side_encryption);
988 headers = curl_slist_append(headers, buf);
992 if (is_non_empty_string(hdl->storage_class)) {
993 buf = g_strdup_printf(AMAZON_STORAGE_CLASS_HEADER ": %s",
995 headers = curl_slist_append(headers, buf);
999 buf = g_strdup_printf("Authorization: AWS %s:%s",
1000 hdl->access_key, auth_base64);
1001 headers = curl_slist_append(headers, buf);
/* common headers for every API */
1005 if (md5_hash && '\0' != md5_hash[0]) {
1006 buf = g_strdup_printf("Content-MD5: %s", md5_hash);
1007 headers = curl_slist_append(headers, buf);
1010 if (content_length > 0) {
1011 buf = g_strdup_printf("Content-Length: %zu", content_length);
1012 headers = curl_slist_append(headers, buf);
1017 buf = g_strdup_printf("Content-Type: %s", content_type);
1018 headers = curl_slist_append(headers, buf);
1022 if (hdl->s3_api == S3_API_OAUTH2) {
1023 buf = g_strdup_printf("x-goog-api-version: 2");
1024 headers = curl_slist_append(headers, buf);
1028 if (project_id && hdl->s3_api == S3_API_OAUTH2) {
1029 buf = g_strdup_printf("x-goog-project-id: %s", project_id);
1030 headers = curl_slist_append(headers, buf);
1034 buf = g_strdup_printf("Date: %s", date);
1035 headers = curl_slist_append(headers, buf);
/* cleanup: free the signing temporaries; `headers` is returned */
1042 if (md) g_byte_array_free(md, TRUE);
1043 g_free(auth_base64);
1044 if (auth_string) g_string_free(auth_string, TRUE);
1049 /* Functions for a SAX parser to parse the XML failure from Amazon */
1051 /* Private structure for our "thunk", which tracks where the user is in the list
/* The in_* flags record which XML element the SAX callbacks are
 * currently inside; the gchar* fields collect values harvested from
 * element attributes (owned by the thunk until handed to the handle).
 * NOTE(review): several fields are elided in this listing. */
1053 struct failure_thunk {
1059 gboolean in_message;
1060 gboolean in_details;
1063 gboolean in_serviceCatalog;
1064 gboolean in_service;
1065 gboolean in_endpoint;
1075 gchar *service_type;
1076 gchar *service_public_url;
/* GMarkupParser start_element callback: flag which element we entered,
 * arm want_text for elements whose character data we collect, and pull
 * interesting values (token id/expiry, service type, publicURL, error
 * message) straight out of element attributes.
 * NOTE(review): the attribute-loop termination conditions are elided
 * in this listing. */
1081 failure_start_element(GMarkupParseContext *context G_GNUC_UNUSED,
1082 const gchar *element_name,
1083 const gchar **attribute_names,
1084 const gchar **attribute_values,
1086 GError **error G_GNUC_UNUSED)
1088 struct failure_thunk *thunk = (struct failure_thunk *)user_data;
1089 const gchar **att_name, **att_value;
1091 if (g_ascii_strcasecmp(element_name, "title") == 0) {
1092 thunk->in_title = 1;
1093 thunk->in_others = 0;
1094 thunk->want_text = 1;
1095 } else if (g_ascii_strcasecmp(element_name, "body") == 0) {
1097 thunk->in_others = 0;
1098 thunk->want_text = 1;
1099 } else if (g_ascii_strcasecmp(element_name, "code") == 0) {
1101 thunk->in_others = 0;
1102 thunk->want_text = 1;
1103 } else if (g_ascii_strcasecmp(element_name, "message") == 0) {
1104 thunk->in_message = 1;
1105 thunk->in_others = 0;
1106 thunk->want_text = 1;
1107 } else if (g_ascii_strcasecmp(element_name, "details") == 0) {
1108 thunk->in_details = 1;
1109 thunk->in_others = 0;
1110 thunk->want_text = 1;
1111 } else if (g_ascii_strcasecmp(element_name, "access") == 0) {
1112 thunk->in_access = 1;
1113 thunk->in_others = 0;
1114 } else if (g_ascii_strcasecmp(element_name, "token") == 0) {
1115 thunk->in_token = 1;
1116 thunk->in_others = 0;
1117 for (att_name=attribute_names, att_value=attribute_values;
1119 att_name++, att_value++) {
1120 if (g_str_equal(*att_name, "id")) {
1121 thunk->token_id = g_strdup(*att_value);
/* expire the cached token 10 minutes (600s) early, as a safety margin */
1123 if (g_str_equal(*att_name, "expires") && strlen(*att_value) >= 19) {
1124 thunk->expires = rfc3339_date(*att_value) - 600;
1127 } else if (g_ascii_strcasecmp(element_name, "serviceCatalog") == 0) {
1128 thunk->in_serviceCatalog = 1;
1129 thunk->in_others = 0;
1130 } else if (g_ascii_strcasecmp(element_name, "service") == 0) {
1131 thunk->in_service = 1;
1132 thunk->in_others = 0;
1133 for (att_name=attribute_names, att_value=attribute_values;
1135 att_name++, att_value++) {
1136 if (g_str_equal(*att_name, "type")) {
1137 thunk->service_type = g_strdup(*att_value);
1140 } else if (g_ascii_strcasecmp(element_name, "endpoint") == 0) {
1141 thunk->in_endpoint = 1;
1142 thunk->in_others = 0;
/* only the object-store service's endpoint URL is of interest */
1143 if (thunk->service_type &&
1144 g_str_equal(thunk->service_type, "object-store")) {
1145 for (att_name=attribute_names, att_value=attribute_values;
1147 att_name++, att_value++) {
1148 if (g_str_equal(*att_name, "publicURL")) {
1149 thunk->service_public_url = g_strdup(*att_value);
1153 } else if (g_ascii_strcasecmp(element_name, "error") == 0) {
1154 for (att_name=attribute_names, att_value=attribute_values;
1156 att_name++, att_value++) {
1157 if (g_str_equal(*att_name, "message")) {
1158 thunk->message = g_strdup(*att_value);
/* GMarkupParser end_element callback: move the accumulated character
 * data (thunk->text) into the field it belongs to — error_name,
 * message or details — and clear the corresponding in_* flag. */
1167 failure_end_element(GMarkupParseContext *context G_GNUC_UNUSED,
1168 const gchar *element_name,
1170 GError **error G_GNUC_UNUSED)
1172 struct failure_thunk *thunk = (struct failure_thunk *)user_data;
1174 if (g_ascii_strcasecmp(element_name, "title") == 0) {
/* "<title>NNN Some Error</title>": skip past the status number */
1175 char *p = strchr(thunk->text, ' ');
1179 thunk->error_name = g_strdup(p);
1182 g_free(thunk->text);
1184 thunk->in_title = 0;
1185 } else if (g_ascii_strcasecmp(element_name, "body") == 0) {
1186 thunk->message = thunk->text;
1187 g_strstrip(thunk->message);
1190 } else if (g_ascii_strcasecmp(element_name, "code") == 0) {
1191 thunk->error_name = thunk->text;
1194 } else if (g_ascii_strcasecmp(element_name, "message") == 0) {
1195 thunk->message = thunk->text;
1197 thunk->in_message = 0;
1198 } else if (g_ascii_strcasecmp(element_name, "details") == 0) {
1199 thunk->details = thunk->text;
1201 thunk->in_details = 0;
1202 } else if (g_ascii_strcasecmp(element_name, "access") == 0) {
1203 thunk->message = thunk->text;
1205 thunk->in_access = 0;
1206 } else if (g_ascii_strcasecmp(element_name, "token") == 0) {
1207 thunk->message = thunk->text;
1209 thunk->in_token = 0;
1210 } else if (g_ascii_strcasecmp(element_name, "serviceCatalog") == 0) {
1211 thunk->message = thunk->text;
1213 thunk->in_serviceCatalog = 0;
1214 } else if (g_ascii_strcasecmp(element_name, "service") == 0) {
1215 thunk->message = thunk->text;
/* the remembered service type only applies within this <service> */
1217 g_free(thunk->service_type);
1218 thunk->service_type = NULL;
1219 thunk->in_service = 0;
1220 } else if (g_ascii_strcasecmp(element_name, "endpoint") == 0) {
1221 thunk->message = thunk->text;
1223 thunk->in_endpoint = 0;
/* GMarkupParser text callback: accumulate character data into
 * thunk->text, but only while inside an element we care about
 * (want_text set, and not nested inside an ignored element). */
1230 failure_text(GMarkupParseContext *context G_GNUC_UNUSED,
1234 GError **error G_GNUC_UNUSED)
1236 struct failure_thunk *thunk = (struct failure_thunk *)user_data;
1238 if (thunk->want_text && thunk->in_others == 0) {
/* text is not NUL-terminated; copy exactly text_len bytes */
1241 new_text = g_strndup(text, text_len);
1243 strappend(thunk->text, new_text);
1246 thunk->text = new_text;
/* Interpret a completed request: fill in hdl->last_response_code,
 * last_s3_error_code, last_message etc., choosing a body parser by
 * Content-Type / API flavor (plain-text Swift errors, JSON Swift
 * errors, raw CAStor bodies, or the XML SAX parser above).
 * Returns TRUE when the request looks retryable.
 * NOTE(review): this listing is decimated and the function continues
 * past the end of the visible source. */
1252 interpret_response(S3Handle *hdl,
1254 char *curl_error_buffer,
1258 const char *content_md5)
1260 long response_code = 0;
1261 gboolean ret = TRUE;
1262 struct failure_thunk thunk;
1263 GMarkupParseContext *ctxt = NULL;
1264 static GMarkupParser parser = { failure_start_element, failure_end_element, failure_text, NULL, NULL };
1267 if (!hdl) return FALSE;
/* reset state from any previous request */
1269 if (hdl->last_message) g_free(hdl->last_message);
1270 hdl->last_message = NULL;
1272 /* bail out from a CURL error */
1273 if (curl_code != CURLE_OK) {
1274 hdl->last_curl_code = curl_code;
1275 hdl->last_message = g_strdup_printf("CURL error: %s", curl_error_buffer);
1279 /* CURL seems to think things were OK, so get its response code */
1280 curl_easy_getinfo(hdl->curl, CURLINFO_RESPONSE_CODE, &response_code);
1281 hdl->last_response_code = response_code;
1283 /* check ETag, if present and not CAStor */
1284 if (etag && content_md5 && 200 == response_code &&
1285 hdl->s3_api != S3_API_CASTOR) {
1286 if (etag && g_ascii_strcasecmp(etag, content_md5))
1287 hdl->last_message = g_strdup("S3 Error: Possible data corruption (ETag returned by Amazon did not match the MD5 hash of the data sent)");
1293 /* Now look at the body to try to get the actual Amazon error message. */
1295 /* impose a reasonable limit on body size */
1296 if (body_len > MAX_ERROR_RESPONSE_LEN) {
1297 hdl->last_message = g_strdup("S3 Error: Unknown (response body too large to parse)");
1299 } else if (!body || body_len == 0) {
1300 if (response_code < 100 || response_code >= 400) {
1302 g_strdup("S3 Error: Unknown (empty response body)");
1303 return TRUE; /* perhaps a network error; retry the request */
1305 /* 2xx and 3xx codes without body are good result */
1306 hdl->last_s3_error_code = S3_ERROR_None;
/* initialize the SAX-parser thunk field by field (no memset) */
1311 thunk.in_title = FALSE;
1312 thunk.in_body = FALSE;
1313 thunk.in_code = FALSE;
1314 thunk.in_message = FALSE;
1315 thunk.in_details = FALSE;
1316 thunk.in_access = FALSE;
1317 thunk.in_token = FALSE;
1318 thunk.in_serviceCatalog = FALSE;
1319 thunk.in_service = FALSE;
1320 thunk.in_endpoint = FALSE;
1321 thunk.in_others = 0;
1323 thunk.want_text = FALSE;
1325 thunk.message = NULL;
1326 thunk.details = NULL;
1327 thunk.error_name = NULL;
1328 thunk.token_id = NULL;
1329 thunk.service_type = NULL;
1330 thunk.service_public_url = NULL;
/* Swift text errors: first line "NNN ErrorName", second line message */
1333 if ((hdl->s3_api == S3_API_SWIFT_1 ||
1334 hdl->s3_api == S3_API_SWIFT_2) &&
1335 hdl->content_type &&
1336 (g_str_equal(hdl->content_type, "text/html") ||
1337 g_str_equal(hdl->content_type, "text/plain"))) {
1339 char *body_copy = g_strndup(body, body_len);
1340 char *b = body_copy;
1341 char *p = strchr(b, '\n');
1343 if (p) { /* first line: error code */
1346 p1 = strchr(b, ' ');
1350 thunk.error_name = g_strdup(p1);
1355 p = strchr(b, '\n');
1356 if (p) { /* second line: error message */
1359 thunk.message = g_strdup(p);
1360 g_strstrip(thunk.message);
/* Swift JSON errors: pull "code" and "details" out with the
 * precompiled regexes */
1364 } else if ((hdl->s3_api == S3_API_SWIFT_1 ||
1365 hdl->s3_api == S3_API_SWIFT_2) &&
1366 hdl->content_type &&
1367 g_str_equal(hdl->content_type, "application/json")) {
1368 char *body_copy = g_strndup(body, body_len);
1370 char *details = NULL;
1371 regmatch_t pmatch[2];
1373 if (!s3_regexec_wrap(&code_regex, body_copy, 2, pmatch, 0)) {
1374 code = find_regex_substring(body_copy, pmatch[1]);
1376 if (!s3_regexec_wrap(&details_regex, body_copy, 2, pmatch, 0)) {
1377 details = find_regex_substring(body_copy, pmatch[1]);
1379 if (code && details) {
1380 hdl->last_message = g_strdup_printf("%s (%s)", details, code);
1382 hdl->last_message = g_strdup_printf("(%s)", code);
1383 } else if (details) {
1384 hdl->last_message = g_strdup_printf("%s", details);
1386 hdl->last_message = NULL;
1392 } else if (hdl->s3_api == S3_API_CASTOR) {
1393 /* The error message is the body */
1394 hdl->last_message = g_strndup(body, body_len);
1396 } else if (!hdl->content_type ||
1397 !g_str_equal(hdl->content_type, "application/xml")) {
1401 /* run the parser over it */
1402 ctxt = g_markup_parse_context_new(&parser, 0, (gpointer)&thunk, NULL);
1403 if (!g_markup_parse_context_parse(ctxt, body, body_len, &err)) {
1404 if (hdl->last_message) g_free(hdl->last_message);
1405 hdl->last_message = g_strdup(err->message);
1409 if (!g_markup_parse_context_end_parse(ctxt, &err)) {
1410 if (hdl->last_message) g_free(hdl->last_message);
1411 hdl->last_message = g_strdup(err->message);
1415 g_markup_parse_context_free(ctxt);
/* Swift v2 keystone responses: adopt token and storage URL harvested
 * by the SAX callbacks (ownership transfers to the handle) */
1418 if (hdl->s3_api == S3_API_SWIFT_2) {
1419 if (!hdl->x_auth_token && thunk.token_id) {
1420 hdl->x_auth_token = thunk.token_id;
1421 thunk.token_id = NULL;
1423 if (!hdl->x_storage_url && thunk.service_public_url) {
1424 hdl->x_storage_url = thunk.service_public_url;
1425 thunk.service_public_url = NULL;
1429 if (thunk.expires > 0) {
1430 hdl->expires = thunk.expires;
1433 if (thunk.error_name) {
1434 hdl->last_s3_error_code = s3_error_code_from_name(thunk.error_name);
1435 g_free(thunk.error_name);
1436 thunk.error_name = NULL;
1439 if (thunk.message) {
1440 g_free(hdl->last_message);
1441 if (thunk.details) {
1442 hdl->last_message = g_strdup_printf("%s: %s", thunk.message,
1444 amfree(thunk.message);
1445 amfree(thunk.details);
1447 hdl->last_message = thunk.message;
1448 thunk.message = NULL; /* steal the reference to the string */
1454 g_free(thunk.message);
1455 g_free(thunk.error_name);
1456 g_free(thunk.token_id);
1457 g_free(thunk.service_public_url);
1458 g_free(thunk.service_type);
1462 /* a CURLOPT_READFUNCTION to read data from a buffer. */
1464 s3_buffer_read_func(void *ptr, size_t size, size_t nmemb, void * stream)
1466 CurlBuffer *data = stream;
1467 guint bytes_desired = (guint) size * nmemb;
1469 /* check the number of bytes remaining, just to be safe */
1470 if (bytes_desired > data->buffer_len - data->buffer_pos)
1471 bytes_desired = data->buffer_len - data->buffer_pos;
1473 memcpy((char *)ptr, data->buffer + data->buffer_pos, bytes_desired);
1474 data->buffer_pos += bytes_desired;
1476 return bytes_desired;
1480 s3_buffer_size_func(void *stream)
1482 CurlBuffer *data = stream;
1483 return data->buffer_len;
1487 s3_buffer_md5_func(void *stream)
1489 CurlBuffer *data = stream;
1490 GByteArray req_body_gba = {(guint8 *)data->buffer, data->buffer_len};
1492 return s3_compute_md5_hash(&req_body_gba);
1496 s3_buffer_reset_func(void *stream)
1498 CurlBuffer *data = stream;
1499 data->buffer_pos = 0;
1502 /* a CURLOPT_WRITEFUNCTION to write data to a buffer. */
1504 s3_buffer_write_func(void *ptr, size_t size, size_t nmemb, void *stream)
1506 CurlBuffer * data = stream;
1507 guint new_bytes = (guint) size * nmemb;
1508 guint bytes_needed = data->buffer_pos + new_bytes;
1510 /* error out if the new size is greater than the maximum allowed */
1511 if (data->max_buffer_size && bytes_needed > data->max_buffer_size)
1514 /* reallocate if necessary. We use exponential sizing to make this
1515 * happen less often. */
1516 if (bytes_needed > data->buffer_len) {
1517 guint new_size = MAX(bytes_needed, data->buffer_len * 2);
1518 if (data->max_buffer_size) {
1519 new_size = MIN(new_size, data->max_buffer_size);
1521 data->buffer = g_realloc(data->buffer, new_size);
1522 data->buffer_len = new_size;
1525 return 0; /* returning zero signals an error to libcurl */
1527 /* actually copy the data to the buffer */
1528 memcpy(data->buffer + data->buffer_pos, ptr, new_bytes);
1529 data->buffer_pos += new_bytes;
1531 /* signal success to curl */
1535 /* a CURLOPT_READFUNCTION that writes nothing. */
1537 s3_empty_read_func(G_GNUC_UNUSED void *ptr, G_GNUC_UNUSED size_t size, G_GNUC_UNUSED size_t nmemb, G_GNUC_UNUSED void * stream)
1543 s3_empty_size_func(G_GNUC_UNUSED void *stream)
1549 s3_empty_md5_func(G_GNUC_UNUSED void *stream)
1551 static const GByteArray empty = {(guint8 *) "", 0};
1553 return s3_compute_md5_hash(&empty);
1556 /* a CURLOPT_WRITEFUNCTION to write data that just counts data.
1557 * s3_write_data should be NULL or a pointer to an gint64.
1560 s3_counter_write_func(G_GNUC_UNUSED void *ptr, size_t size, size_t nmemb, void *stream)
1562 gint64 *count = (gint64*) stream, inc = nmemb*size;
1564 if (count) *count += inc;
1569 s3_counter_reset_func(void *stream)
1571 gint64 *count = (gint64*) stream;
1573 if (count) *count = 0;
1577 /* a CURLOPT_READFUNCTION to read data from a file. */
1579 s3_file_read_func(void *ptr, size_t size, size_t nmemb, void * stream)
1581 HANDLE *hFile = (HANDLE *) stream;
1584 ReadFile(hFile, ptr, (DWORD) size*nmemb, &bytes_read, NULL);
1589 s3_file_size_func(void *stream)
1591 HANDLE *hFile = (HANDLE *) stream;
1592 DWORD size = GetFileSize(hFile, NULL);
1594 if (INVALID_FILE_SIZE == size) {
1602 s3_file_md5_func(void *stream)
1604 #define S3_MD5_BUF_SIZE (10*1024)
1605 HANDLE *hFile = (HANDLE *) stream;
1606 guint8 buf[S3_MD5_BUF_SIZE];
1609 GByteArray *ret = NULL;
1611 g_assert(INVALID_SET_FILE_POINTER != SetFilePointer(hFile, 0, NULL, FILE_BEGIN));
1613 ret = g_byte_array_sized_new(S3_MD5_HASH_BYTE_LEN);
1614 g_byte_array_set_size(ret, S3_MD5_HASH_BYTE_LEN);
1617 while (ReadFile(hFile, buf, S3_MD5_BUF_SIZE, &bytes_read, NULL)) {
1618 MD5_Update(&md5_ctx, buf, bytes_read);
1620 MD5_Final(ret->data, &md5_ctx);
1622 g_assert(INVALID_SET_FILE_POINTER != SetFilePointer(hFile, 0, NULL, FILE_BEGIN));
1624 #undef S3_MD5_BUF_SIZE
1628 s3_file_reset_func(void *stream)
1630 g_assert(INVALID_SET_FILE_POINTER != SetFilePointer(hFile, 0, NULL, FILE_BEGIN));
1633 /* a CURLOPT_WRITEFUNCTION to write data to a file. */
1635 s3_file_write_func(void *ptr, size_t size, size_t nmemb, void *stream)
1637 HANDLE *hFile = (HANDLE *) stream;
1638 DWORD bytes_written;
1640 WriteFile(hFile, ptr, (DWORD) size*nmemb, &bytes_written, NULL);
1641 return bytes_written;
1646 curl_debug_message(CURL *curl G_GNUC_UNUSED,
1650 void *unused G_GNUC_UNUSED)
1654 char **lines, **line;
1662 case CURLINFO_HEADER_IN:
1663 lineprefix="Hdr In: ";
1666 case CURLINFO_HEADER_OUT:
1667 lineprefix="Hdr Out: ";
1670 case CURLINFO_DATA_IN:
1671 if (len > 3000) return 0;
1672 for (i=0;i<len;i++) {
1673 if (!g_ascii_isprint(s[i])) {
1677 lineprefix="Data In: ";
1680 case CURLINFO_DATA_OUT:
1681 if (len > 3000) return 0;
1682 for (i=0;i<len;i++) {
1683 if (!g_ascii_isprint(s[i])) {
1687 lineprefix="Data Out: ";
1691 /* ignore data in/out -- nobody wants to see that in the
1696 /* split the input into lines */
1697 message = g_strndup(s, (gsize) len);
1698 lines = g_strsplit(message, "\n", -1);
1701 for (line = lines; *line; line++) {
1702 if (**line == '\0') continue; /* skip blank lines */
1703 g_debug("%s%s", lineprefix, *line);
1711 perform_request(S3Handle *hdl,
1715 const char *subresource,
1717 const char *content_type,
1718 const char *project_id,
1719 s3_read_func read_func,
1720 s3_reset_func read_reset_func,
1721 s3_size_func size_func,
1722 s3_md5_func md5_func,
1724 s3_write_func write_func,
1725 s3_reset_func write_reset_func,
1726 gpointer write_data,
1727 s3_progress_func progress_func,
1728 gpointer progress_data,
1729 const result_handling_t *result_handling)
1732 s3_result_t result = S3_RESULT_FAIL; /* assume the worst.. */
1733 CURLcode curl_code = CURLE_OK;
1734 char curl_error_buffer[CURL_ERROR_SIZE] = "";
1735 struct curl_slist *headers = NULL;
1736 /* Set S3Internal Data */
1737 S3InternalData int_writedata = {{NULL, 0, 0, MAX_ERROR_RESPONSE_LEN}, NULL, NULL, NULL, FALSE, FALSE, NULL, hdl};
1738 gboolean should_retry;
1740 gint retry_after_close = 0;
1741 gulong backoff = EXPONENTIAL_BACKOFF_START_USEC;
1742 /* corresponds to PUT, HEAD, GET, and POST */
1743 int curlopt_upload = 0, curlopt_nobody = 0, curlopt_httpget = 0, curlopt_post = 0;
1744 /* do we want to examine the headers */
1745 const char *curlopt_customrequest = NULL;
1746 /* for MD5 calculation */
1747 GByteArray *md5_hash = NULL;
1748 gchar *md5_hash_hex = NULL, *md5_hash_b64 = NULL;
1749 size_t request_body_size = 0;
1751 g_assert(hdl != NULL && hdl->curl != NULL);
1753 if (hdl->s3_api == S3_API_OAUTH2 && !hdl->getting_oauth2_access_token &&
1754 (!hdl->access_token || hdl->expires < time(NULL))) {
1755 result = oauth2_get_access_token(hdl);
1757 g_debug("oauth2_get_access_token returned %d", result);
1760 } else if (hdl->s3_api == S3_API_SWIFT_2 && !hdl->getting_swift_2_token &&
1761 (!hdl->x_auth_token || hdl->expires < time(NULL))) {
1762 result = get_openstack_swift_api_v2_setting(hdl);
1764 g_debug("get_openstack_swift_api_v2_setting returned %d", result);
1771 url = build_url(hdl, bucket, key, subresource, query);
1772 if (!url) goto cleanup;
1774 /* libcurl may behave strangely if these are not set correctly */
1775 if (!strncmp(verb, "PUT", 4)) {
1777 } else if (!strncmp(verb, "GET", 4)) {
1778 curlopt_httpget = 1;
1779 } else if (!strncmp(verb, "POST", 5)) {
1781 } else if (!strncmp(verb, "HEAD", 5)) {
1784 curlopt_customrequest = verb;
1788 request_body_size = size_func(read_data);
1792 md5_hash = md5_func(read_data);
1794 md5_hash_b64 = s3_base64_encode(md5_hash);
1795 md5_hash_hex = s3_hex_encode(md5_hash);
1796 g_byte_array_free(md5_hash, TRUE);
1800 /* Curl will use fread() otherwise */
1801 read_func = s3_empty_read_func;
1805 int_writedata.write_func = write_func;
1806 int_writedata.reset_func = write_reset_func;
1807 int_writedata.write_data = write_data;
1809 /* Curl will use fwrite() otherwise */
1810 int_writedata.write_func = s3_counter_write_func;
1811 int_writedata.reset_func = s3_counter_reset_func;
1812 int_writedata.write_data = NULL;
1818 curl_slist_free_all(headers);
1820 curl_error_buffer[0] = '\0';
1821 if (read_reset_func) {
1822 read_reset_func(read_data);
1824 /* calls write_reset_func */
1825 s3_internal_reset_func(&int_writedata);
1827 /* set up the request */
1828 headers = authenticate_request(hdl, verb, bucket, key, subresource,
1829 md5_hash_b64, content_type, request_body_size, project_id);
1832 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_CAINFO, hdl->ca_info)))
1836 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_VERBOSE, hdl->verbose)))
1839 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_DEBUGFUNCTION,
1840 curl_debug_message)))
1843 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_ERRORBUFFER,
1844 curl_error_buffer)))
1846 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_NOPROGRESS, 1)))
1848 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_FOLLOWLOCATION, 1)))
1850 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_URL, url)))
1852 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HTTPHEADER,
1855 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_WRITEFUNCTION, s3_internal_write_func)))
1857 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_WRITEDATA, &int_writedata)))
1859 /* Note: we always have to set this apparently, for consistent "end of header" detection */
1860 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HEADERFUNCTION, s3_internal_header_func)))
1862 /* Note: if set, CURLOPT_HEADERDATA seems to also be used for CURLOPT_WRITEDATA ? */
1863 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HEADERDATA, &int_writedata)))
1865 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSFUNCTION, progress_func)))
1867 if (progress_func) {
1868 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_NOPROGRESS,0)))
1871 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSDATA, progress_data)))
1874 /* CURLOPT_INFILESIZE_LARGE added in 7.11.0 */
1875 #if LIBCURL_VERSION_NUM >= 0x070b00
1876 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_INFILESIZE_LARGE, (curl_off_t)request_body_size)))
1879 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_INFILESIZE, (long)request_body_size)))
1882 /* CURLOPT_POSTFIELDSIZE_LARGE added in 7.11.1 */
1883 #if LIBCURL_VERSION_NUM >= 0x070b01
1884 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t)request_body_size)))
1887 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_POSTFIELDSIZE, (long)request_body_size)))
1891 /* CURLOPT_MAX_{RECV,SEND}_SPEED_LARGE added in 7.15.5 */
1892 #if LIBCURL_VERSION_NUM >= 0x070f05
1893 if (s3_curl_throttling_compat()) {
1894 if (hdl->max_send_speed)
1895 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_MAX_SEND_SPEED_LARGE, (curl_off_t)hdl->max_send_speed)))
1898 if (hdl->max_recv_speed)
1899 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t)hdl->max_recv_speed)))
1904 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HTTPGET, curlopt_httpget)))
1906 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_UPLOAD, curlopt_upload)))
1908 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_POST, curlopt_post)))
1910 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_NOBODY, curlopt_nobody)))
1912 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_CUSTOMREQUEST,
1913 curlopt_customrequest)))
1917 if (curlopt_upload || curlopt_post) {
1918 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READFUNCTION, read_func)))
1920 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA, read_data)))
1923 /* Clear request_body options. */
1924 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READFUNCTION,
1927 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA,
1932 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROXY,
1937 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_FRESH_CONNECT,
1938 (long)(hdl->reuse_connection && retry_after_close == 0 ? 0 : 1)))) {
1941 if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_FORBID_REUSE,
1942 (long)(hdl->reuse_connection? 0 : 1)))) {
1946 /* Perform the request */
1947 curl_code = curl_easy_perform(hdl->curl);
1950 /* interpret the response into hdl->last* */
1951 curl_error: /* (label for short-circuiting the curl_easy_perform call) */
1952 should_retry = interpret_response(hdl, curl_code, curl_error_buffer,
1953 int_writedata.resp_buf.buffer, int_writedata.resp_buf.buffer_pos, int_writedata.etag, md5_hash_hex);
1955 if (hdl->s3_api == S3_API_OAUTH2 &&
1956 hdl->last_response_code == 401 &&
1957 hdl->last_s3_error_code == S3_ERROR_AuthenticationRequired) {
1958 should_retry = oauth2_get_access_token(hdl);
1960 /* and, unless we know we need to retry, see what we're to do now */
1961 if (!should_retry) {
1962 result = lookup_result(result_handling, hdl->last_response_code,
1963 hdl->last_s3_error_code, hdl->last_curl_code);
1965 /* break out of the while(1) unless we're retrying */
1966 if (result != S3_RESULT_RETRY)
1970 if (retries >= EXPONENTIAL_BACKOFF_MAX_RETRIES &&
1971 retry_after_close < 3 &&
1972 hdl->last_s3_error_code == S3_ERROR_RequestTimeout) {
1974 retry_after_close++;
1975 g_debug("Retry on a new connection");
1977 if (retries >= EXPONENTIAL_BACKOFF_MAX_RETRIES) {
1978 /* we're out of retries, so annotate hdl->last_message appropriately and bail
1980 char *m = g_strdup_printf("Too many retries; last message was '%s'", hdl->last_message);
1981 if (hdl->last_message) g_free(hdl->last_message);
1982 hdl->last_message = m;
1983 result = S3_RESULT_FAIL;
1989 backoff *= EXPONENTIAL_BACKOFF_BASE;
1992 if (result != S3_RESULT_OK) {
1993 g_debug(_("%s %s failed with %d/%s"), verb, url,
1994 hdl->last_response_code,
1995 s3_error_name_from_code(hdl->last_s3_error_code));
2000 if (headers) curl_slist_free_all(headers);
2001 g_free(md5_hash_b64);
2002 g_free(md5_hash_hex);
2004 /* we don't deallocate the response body -- we keep it for later */
2005 hdl->last_response_body = int_writedata.resp_buf.buffer;
2006 hdl->last_response_body_size = int_writedata.resp_buf.buffer_pos;
2007 hdl->last_num_retries = retries;
2014 s3_internal_write_func(void *ptr, size_t size, size_t nmemb, void * stream)
2016 S3InternalData *data = (S3InternalData *) stream;
2019 if (!data->headers_done)
2022 /* call write on internal buffer (if not full) */
2023 if (data->int_write_done) {
2026 bytes_saved = s3_buffer_write_func(ptr, size, nmemb, &data->resp_buf);
2028 data->int_write_done = TRUE;
2031 /* call write on user buffer */
2032 if (data->write_func) {
2033 return data->write_func(ptr, size, nmemb, data->write_data);
2040 s3_internal_reset_func(void * stream)
2042 S3InternalData *data = (S3InternalData *) stream;
2044 s3_buffer_reset_func(&data->resp_buf);
2045 data->headers_done = FALSE;
2046 data->int_write_done = FALSE;
2048 if (data->reset_func) {
2049 data->reset_func(data->write_data);
2054 s3_internal_header_func(void *ptr, size_t size, size_t nmemb, void * stream)
2056 static const char *final_header = "\r\n";
2057 time_t remote_time_in_sec,local_time;
2059 regmatch_t pmatch[2];
2060 S3InternalData *data = (S3InternalData *) stream;
2062 header = g_strndup((gchar *) ptr, (gsize) size*nmemb);
2064 if (header[strlen(header)-1] == '\n')
2065 header[strlen(header)-1] = '\0';
2066 if (header[strlen(header)-1] == '\r')
2067 header[strlen(header)-1] = '\0';
2068 if (!s3_regexec_wrap(&etag_regex, header, 2, pmatch, 0))
2069 data->etag = find_regex_substring(header, pmatch[1]);
2070 if (!s3_regexec_wrap(&x_auth_token_regex, header, 2, pmatch, 0))
2071 data->hdl->x_auth_token = find_regex_substring(header, pmatch[1]);
2073 if (!s3_regexec_wrap(&x_storage_url_regex, header, 2, pmatch, 0))
2074 data->hdl->x_storage_url = find_regex_substring(header, pmatch[1]);
2076 if (!s3_regexec_wrap(&content_type_regex, header, 2, pmatch, 0))
2077 data->hdl->content_type = find_regex_substring(header, pmatch[1]);
2079 if (strlen(header) == 0)
2080 data->headers_done = TRUE;
2081 if (g_str_equal(final_header, header))
2082 data->headers_done = TRUE;
2083 if (g_str_equal("\n", header))
2084 data->headers_done = TRUE;
2086 /* If date header is found */
2087 if (!s3_regexec_wrap(&date_sync_regex, header, 2, pmatch, 0)){
2088 char *date = find_regex_substring(header, pmatch[1]);
2090 /* Remote time is always in GMT: RFC 2616 */
2091 /* both curl_getdate and time operate in UTC, so no timezone math is necessary */
2092 if ( (remote_time_in_sec = curl_getdate(date, NULL)) < 0 ){
2093 g_debug("Error: Conversion of remote time to seconds failed.");
2094 data->hdl->time_offset_with_s3 = 0;
2096 local_time = time(NULL);
2098 data->hdl->time_offset_with_s3 = remote_time_in_sec - local_time;
2100 if (data->hdl->verbose)
2101 g_debug("Time Offset (remote - local) :%ld",(long)data->hdl->time_offset_with_s3);
2112 compile_regexes(void)
2116 /* using POSIX regular expressions */
2117 struct {const char * str; int flags; regex_t *regex;} regexes[] = {
2118 {"<Code>[[:space:]]*([^<]*)[[:space:]]*</Code>", REG_EXTENDED | REG_ICASE, &error_name_regex},
2119 {"^ETag:[[:space:]]*\"([^\"]+)\"[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &etag_regex},
2120 {"^X-Auth-Token:[[:space:]]*([^ ]+)[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &x_auth_token_regex},
2121 {"^X-Storage-Url:[[:space:]]*([^ ]+)[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &x_storage_url_regex},
2122 {"^Content-Type:[[:space:]]*([^ ;]+).*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &content_type_regex},
2123 {"<Message>[[:space:]]*([^<]*)[[:space:]]*</Message>", REG_EXTENDED | REG_ICASE, &message_regex},
2124 {"^[a-z0-9](-*[a-z0-9]){2,62}$", REG_EXTENDED | REG_NOSUB, &subdomain_regex},
2125 {"(/>)|(>([^<]*)</LocationConstraint>)", REG_EXTENDED | REG_ICASE, &location_con_regex},
2126 {"^Date:(.*)$",REG_EXTENDED | REG_ICASE | REG_NEWLINE, &date_sync_regex},
2127 {"\"access_token\" : \"([^\"]*)\",", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &access_token_regex},
2128 {"\"expires_in\" : (.*)", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &expires_in_regex},
2129 {"\"details\": \"([^\"]*)\",", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &details_regex},
2130 {"\"code\": (.*),", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &code_regex},
2133 char regmessage[1024];
2137 for (i = 0; regexes[i].str; i++) {
2138 reg_result = regcomp(regexes[i].regex, regexes[i].str, regexes[i].flags);
2139 if (reg_result != 0) {
2140 regerror(reg_result, regexes[i].regex, regmessage, sizeof(regmessage));
2141 g_error(_("Regex error: %s"), regmessage);
2145 #else /* ! HAVE_REGEX_H */
2146 /* using PCRE via GLib */
2147 struct {const char * str; int flags; regex_t *regex;} regexes[] = {
2148 {"<Code>\\s*([^<]*)\\s*</Code>",
2149 G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
2151 {"^ETag:\\s*\"([^\"]+)\"\\s*$",
2152 G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
2154 {"^X-Auth-Token:\\s*([^ ]+)\\s*$",
2155 G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
2156 &x_auth_token_regex},
2157 {"^X-Storage-Url:\\s*([^ ]+)\\s*$",
2158 G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
2159 &x_storage_url_regex},
2160 {"^Content-Type:\\s*([^ ]+)\\s*$",
2161 G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
2162 &content_type_regex},
2163 {"<Message>\\s*([^<]*)\\s*</Message>",
2164 G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
2166 {"^[a-z0-9]((-*[a-z0-9])|(\\.[a-z0-9])){2,62}$",
2167 G_REGEX_OPTIMIZE | G_REGEX_NO_AUTO_CAPTURE,
2169 {"(/>)|(>([^<]*)</LocationConstraint>)",
2171 &location_con_regex},
2173 G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
2175 {"\"access_token\" : \"([^\"]*)\"",
2176 G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
2177 &access_token_regex},
2178 {"\"expires_n\" : (.*)",
2179 G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
2181 {"\"details\" : \"([^\"]*)\"",
2182 G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
2185 G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
2192 for (i = 0; regexes[i].str; i++) {
2193 *(regexes[i].regex) = g_regex_new(regexes[i].str, regexes[i].flags, 0, &err);
2195 g_error(_("Regex error: %s"), err->message);
2205 * Public function implementations
2208 #if (GLIB_MAJOR_VERSION > 2 || (GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION >= 31))
2209 # pragma GCC diagnostic push
2210 # pragma GCC diagnostic ignored "-Wmissing-field-initializers"
2212 gboolean s3_init(void)
2214 static GStaticMutex mutex = G_STATIC_MUTEX_INIT;
2215 static gboolean init = FALSE, ret;
2217 /* n.b. curl_global_init is called in common-src/glib-util.c:glib_init() */
2219 g_static_mutex_lock (&mutex);
2221 ret = compile_regexes();
2224 g_static_mutex_unlock(&mutex);
2227 #if (GLIB_MAJOR_VERSION > 2 || (GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION >= 31))
2228 # pragma GCC diagnostic pop
/* TRUE when the linked libcurl is new enough (> 7.10.2) to support
 * the bucket-location workarounds. */
gboolean
s3_curl_location_compat(void)
{
    curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);

    return info->version_num > 0x070a02;
}
2241 s3_bucket_location_compat(const char *bucket)
2243 return !s3_regexec_wrap(&subdomain_regex, bucket, 0, NULL, 0);
2247 get_openstack_swift_api_v1_setting(
2250 s3_result_t result = S3_RESULT_FAIL;
2251 static result_handling_t result_handling[] = {
2252 { 200, 0, 0, S3_RESULT_OK },
2253 RESULT_HANDLING_ALWAYS_RETRY,
2254 { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
2258 result = perform_request(hdl, "GET", NULL, NULL, NULL, NULL, NULL, NULL,
2259 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2260 NULL, NULL, result_handling);
2262 return result == S3_RESULT_OK;
2266 get_openstack_swift_api_v2_setting(
2269 s3_result_t result = S3_RESULT_FAIL;
2270 static result_handling_t result_handling[] = {
2271 { 200, 0, 0, S3_RESULT_OK },
2272 RESULT_HANDLING_ALWAYS_RETRY,
2273 { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
2276 CurlBuffer buf = {NULL, 0, 0, 0};
2277 GString *body = g_string_new("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
2278 if (hdl->username && hdl->password) {
2279 g_string_append_printf(body, "<auth xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://docs.openstack.org/identity/api/v2.0\"");
2281 g_string_append_printf(body, "<auth xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://www.hp.com/identity/api/ext/HP-IDM/v1.0\"");
2284 if (hdl->tenant_id) {
2285 g_string_append_printf(body, " tenantId=\"%s\"", hdl->tenant_id);
2287 if (hdl->tenant_name) {
2288 g_string_append_printf(body, " tenantName=\"%s\"", hdl->tenant_name);
2290 g_string_append(body, ">");
2291 if (hdl->username && hdl->password) {
2292 g_string_append_printf(body, "<passwordCredentials username=\"%s\" password=\"%s\"/>", hdl->username, hdl->password);
2294 g_string_append_printf(body, "<apiAccessKeyCredentials accessKey=\"%s\" secretKey=\"%s\"/>", hdl->access_key, hdl->secret_key);
2296 g_string_append(body, "</auth>");
2298 buf.buffer = g_string_free(body, FALSE);
2299 buf.buffer_len = strlen(buf.buffer);
2301 hdl->getting_swift_2_token = 1;
2302 g_free(hdl->x_storage_url);
2303 hdl->x_storage_url = NULL;
2304 result = perform_request(hdl, "POST", NULL, NULL, NULL, NULL,
2305 "application/xml", NULL,
2306 S3_BUFFER_READ_FUNCS, &buf,
2308 NULL, NULL, result_handling);
2309 hdl->getting_swift_2_token = 0;
2311 return result == S3_RESULT_OK;
2315 s3_open(const char *access_key,
2316 const char *secret_key,
2317 const char *swift_account_id,
2318 const char *swift_access_key,
2320 const char *service_path,
2321 const gboolean use_subdomain,
2322 const char *user_token,
2323 const char *bucket_location,
2324 const char *storage_class,
2325 const char *ca_info,
2326 const char *server_side_encryption,
2328 const S3_api s3_api,
2329 const char *username,
2330 const char *password,
2331 const char *tenant_id,
2332 const char *tenant_name,
2333 const char *client_id,
2334 const char *client_secret,
2335 const char *refresh_token,
2336 const gboolean reuse_connection,
2338 const char *reps_bucket)
2342 hdl = g_new0(S3Handle, 1);
2343 if (!hdl) goto error;
2345 hdl->verbose = TRUE;
2346 hdl->use_ssl = s3_curl_supports_ssl();
2347 hdl->reuse_connection = reuse_connection;
2349 if (s3_api == S3_API_S3) {
2350 g_assert(access_key);
2351 hdl->access_key = g_strdup(access_key);
2352 g_assert(secret_key);
2353 hdl->secret_key = g_strdup(secret_key);
2354 } else if (s3_api == S3_API_SWIFT_1) {
2355 g_assert(swift_account_id);
2356 hdl->swift_account_id = g_strdup(swift_account_id);
2357 g_assert(swift_access_key);
2358 hdl->swift_access_key = g_strdup(swift_access_key);
2359 } else if (s3_api == S3_API_SWIFT_2) {
2360 g_assert((username && password) || (access_key && secret_key));
2361 hdl->username = g_strdup(username);
2362 hdl->password = g_strdup(password);
2363 hdl->access_key = g_strdup(access_key);
2364 hdl->secret_key = g_strdup(secret_key);
2365 g_assert(tenant_id || tenant_name);
2366 hdl->tenant_id = g_strdup(tenant_id);
2367 hdl->tenant_name = g_strdup(tenant_name);
2368 } else if (s3_api == S3_API_OAUTH2) {
2369 hdl->client_id = g_strdup(client_id);
2370 hdl->client_secret = g_strdup(client_secret);
2371 hdl->refresh_token = g_strdup(refresh_token);
2372 } else if (s3_api == S3_API_CASTOR) {
2373 hdl->username = g_strdup(username);
2374 hdl->password = g_strdup(password);
2375 hdl->tenant_name = g_strdup(tenant_name);
2376 hdl->reps = g_strdup(reps);
2377 hdl->reps_bucket = g_strdup(reps_bucket);
2381 hdl->user_token = g_strdup(user_token);
2384 hdl->bucket_location = g_strdup(bucket_location);
2387 hdl->storage_class = g_strdup(storage_class);
2390 hdl->server_side_encryption = g_strdup(server_side_encryption);
2393 hdl->proxy = g_strdup(proxy);
2396 hdl->ca_info = g_strdup(ca_info);
2398 if (!is_non_empty_string(host))
2399 host = "s3.amazonaws.com";
2400 hdl->host = g_ascii_strdown(host, -1);
2401 hdl->use_subdomain = use_subdomain ||
2402 (strcmp(hdl->host, "s3.amazonaws.com") == 0 &&
2403 is_non_empty_string(hdl->bucket_location));
2404 hdl->s3_api = s3_api;
2406 if (strlen(service_path) == 0 ||
2407 (strlen(service_path) == 1 && service_path[0] == '/')) {
2408 hdl->service_path = NULL;
2409 } else if (service_path[0] != '/') {
2410 hdl->service_path = g_strdup_printf("/%s", service_path);
2412 hdl->service_path = g_strdup(service_path);
2414 if (hdl->service_path) {
2415 /* remove trailling / */
2416 size_t len = strlen(hdl->service_path) - 1;
2417 if (hdl->service_path[len] == '/')
2418 hdl->service_path[len] = '\0';
2421 hdl->service_path = NULL;
2424 hdl->curl = curl_easy_init();
2425 if (!hdl->curl) goto error;
2427 /* Set HTTP handling options for CAStor */
2428 if (s3_api == S3_API_CASTOR) {
2429 #if LIBCURL_VERSION_NUM >= 0x071301
2430 curl_version_info_data *info;
2431 /* check the runtime version too */
2432 info = curl_version_info(CURLVERSION_NOW);
2433 if (info->version_num >= 0x071301) {
2434 curl_easy_setopt(hdl->curl, CURLOPT_FOLLOWLOCATION, 1);
2435 curl_easy_setopt(hdl->curl, CURLOPT_UNRESTRICTED_AUTH, 1);
2436 curl_easy_setopt(hdl->curl, CURLOPT_MAXREDIRS, 5);
2437 curl_easy_setopt(hdl->curl, CURLOPT_POSTREDIR, CURL_REDIR_POST_ALL);
2438 curl_easy_setopt(hdl->curl, CURLOPT_HTTP_VERSION,
2439 CURL_HTTP_VERSION_1_1);
2441 curl_easy_setopt(hdl->curl, CURLOPT_USERNAME, hdl->username);
2443 curl_easy_setopt(hdl->curl, CURLOPT_PASSWORD, hdl->password);
2444 curl_easy_setopt(hdl->curl, CURLOPT_HTTPAUTH,
2445 (CURLAUTH_BASIC | CURLAUTH_DIGEST));
2461 gboolean ret = TRUE;
2463 /* get the X-Storage-Url and X-Auth-Token */
2464 if (hdl->s3_api == S3_API_SWIFT_1) {
2465 ret = get_openstack_swift_api_v1_setting(hdl);
2466 } else if (hdl->s3_api == S3_API_SWIFT_2) {
2467 ret = get_openstack_swift_api_v2_setting(hdl);
/* Release an S3Handle and everything it owns.  NULL is a no-op. */
void
s3_free(S3Handle *hdl)
{
    s3_reset(hdl);    /* frees last_message, last_response_body, content_type */

    if (hdl) {
        g_free(hdl->access_key);
        g_free(hdl->secret_key);
        g_free(hdl->swift_account_id);
        g_free(hdl->swift_access_key);
        g_free(hdl->content_type);
        /* user_token is freed exactly once here; the original code also
         * freed it a second time further down -- a double free */
        g_free(hdl->user_token);
        g_free(hdl->ca_info);
        g_free(hdl->proxy);
        g_free(hdl->username);
        g_free(hdl->password);
        g_free(hdl->tenant_id);
        g_free(hdl->tenant_name);
        g_free(hdl->client_id);
        g_free(hdl->client_secret);
        g_free(hdl->refresh_token);
        g_free(hdl->access_token);
        /* reps/reps_bucket are duplicated by s3_open for CAStor and were
         * previously leaked */
        g_free(hdl->reps);
        g_free(hdl->reps_bucket);
        g_free(hdl->bucket_location);
        g_free(hdl->storage_class);
        g_free(hdl->server_side_encryption);
        g_free(hdl->host);
        g_free(hdl->service_path);
        if (hdl->curl) curl_easy_cleanup(hdl->curl);

        g_free(hdl);
    }
}
2508 s3_reset(S3Handle *hdl)
2511 /* We don't call curl_easy_reset here, because doing that in curl
2512 * < 7.16 blanks the default CA certificate path, and there's no way
2513 * to get it back. */
2514 if (hdl->last_message) {
2515 g_free(hdl->last_message);
2516 hdl->last_message = NULL;
2519 hdl->last_response_code = 0;
2520 hdl->last_curl_code = 0;
2521 hdl->last_s3_error_code = 0;
2522 hdl->last_num_retries = 0;
2524 if (hdl->last_response_body) {
2525 g_free(hdl->last_response_body);
2526 hdl->last_response_body = NULL;
2528 if (hdl->content_type) {
2529 g_free(hdl->content_type);
2530 hdl->content_type = NULL;
2533 hdl->last_response_body_size = 0;
/* Report the results of the last request through any of the optional
 * out-parameters; each may be NULL if the caller is not interested. */
void
s3_error(S3Handle *hdl,
         const char **message,
         guint *response_code,
         s3_error_code_t *s3_error_code,
         const char **s3_error_name,
         CURLcode *curl_code,
         guint *num_retries)
{
    if (!hdl) {
        /* no hdl? return something coherent, anyway */
        if (message) *message = "NULL S3Handle";
        if (response_code) *response_code = 0;
        if (s3_error_code) *s3_error_code = 0;
        if (s3_error_name) *s3_error_name = NULL;
        if (curl_code) *curl_code = 0;
        if (num_retries) *num_retries = 0;
        return;
    }

    if (message) *message = hdl->last_message;
    if (response_code) *response_code = hdl->last_response_code;
    if (s3_error_code) *s3_error_code = hdl->last_s3_error_code;
    if (s3_error_name) *s3_error_name = s3_error_name_from_code(hdl->last_s3_error_code);
    if (curl_code) *curl_code = hdl->last_curl_code;
    if (num_retries) *num_retries = hdl->last_num_retries;
}
/* Enable or disable verbose (debug) logging of HTTP traffic on HDL. */
s3_verbose(S3Handle *hdl, gboolean verbose)
    hdl->verbose = verbose;
/* Set an upper bound, in bytes per second, on upload bandwidth for
 * subsequent requests on HDL.  Requires a libcurl new enough to
 * support send-speed throttling (see s3_curl_throttling_compat). */
s3_set_max_send_speed(S3Handle *hdl, guint64 max_send_speed)
    /* bail out when the linked libcurl cannot throttle at all */
    if (!s3_curl_throttling_compat())

    hdl->max_send_speed = max_send_speed;
/* Set an upper bound, in bytes per second, on download bandwidth for
 * subsequent requests on HDL; mirror image of s3_set_max_send_speed. */
s3_set_max_recv_speed(S3Handle *hdl, guint64 max_recv_speed)
    /* bail out when the linked libcurl cannot throttle at all */
    if (!s3_curl_throttling_compat())

    hdl->max_recv_speed = max_recv_speed;
2593 s3_use_ssl(S3Handle *hdl, gboolean use_ssl)
2595 gboolean ret = TRUE;
2596 if (use_ssl & !s3_curl_supports_ssl()) {
2599 hdl->use_ssl = use_ssl;
/* Build a human-readable description of the most recent error on HDL.
 * The returned string is newly allocated; the caller is responsible
 * for freeing it (g_free). */
s3_strerror(S3Handle *hdl)
    const char *message;
    guint response_code;
    const char *s3_error_name;

    /* fixed-size scratch buffers for the optional detail clauses */
    char s3_info[256] = "";
    char response_info[16] = "";
    char curl_info[32] = "";
    char retries_info[32] = "";

    s3_error(hdl, &message, &response_code, NULL, &s3_error_name, &curl_code, &num_retries);

    message = "Unknown S3 error";
    g_snprintf(s3_info, sizeof(s3_info), " (%s)", s3_error_name);
    g_snprintf(response_info, sizeof(response_info), " (HTTP %d)", response_code);
    g_snprintf(curl_info, sizeof(curl_info), " (CURLcode %d)", curl_code);
    g_snprintf(retries_info, sizeof(retries_info), " (after %d retries)", num_retries);

    /* concatenate whichever detail clauses were filled in above */
    return g_strdup_printf("%s%s%s%s%s", message, s3_info, curl_info, response_info, retries_info);
/* Perform an upload. When this function returns, KEY and the data
 * source supplied through the read callbacks remain the responsibility
 * of the caller.
 *
 * @param hdl: the S3 handle to upload through
 * @param bucket: the bucket to which the upload should be made
 * @param key: the key to which the upload should be made
 * @param read_func: callback supplying the data to be uploaded
 * @param reset_func: callback that rewinds the data source for a retry
 * @param size_func: callback reporting the total size of the upload
 * @param md5_func: callback reporting the MD5 hash of the upload
 * @param progress_func: optional periodic progress callback
 * @param progress_data: opaque pointer passed to progress_func
 * @returns: false if an error occurred
 */
s3_upload(S3Handle *hdl,
          s3_read_func read_func,
          s3_reset_func reset_func,
          s3_size_func size_func,
          s3_md5_func md5_func,
          s3_progress_func progress_func,
          gpointer progress_data)
    s3_result_t result = S3_RESULT_FAIL;
    /* response-code -> result table; scanned in order, first match wins */
    static result_handling_t result_handling[] = {
        { 200, 0, 0, S3_RESULT_OK },
        { 201, 0, 0, S3_RESULT_OK },
        RESULT_HANDLING_ALWAYS_RETRY,
        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }

    char *content_type = NULL;

    g_assert(hdl != NULL);

    /* CAStor uploads are tagged with their own content type */
    if (hdl->s3_api == S3_API_CASTOR) {
        content_type = "application/x-amanda-backup-data";

    result = perform_request(hdl, verb, bucket, key, NULL, NULL, content_type, NULL,
                             read_func, reset_func, size_func, md5_func, read_data,
                             NULL, NULL, NULL, progress_func, progress_data,

    return result == S3_RESULT_OK;
/* Private structure for our "thunk", which tracks where the user is in the
 * list of keys returned from the server so far. */
struct list_keys_thunk {
    GSList *filename_list; /* all pending filenames */

    gboolean in_contents; /* look for "key" entities in here */
    gboolean in_common_prefixes; /* look for "prefix" entities in here */

    gboolean is_truncated; /* TRUE when the listing was cut short and more pages follow */
2700 /* Functions for a SAX parser to parse the XML from Amazon */
2703 list_start_element(GMarkupParseContext *context G_GNUC_UNUSED,
2704 const gchar *element_name,
2705 const gchar **attribute_names G_GNUC_UNUSED,
2706 const gchar **attribute_values G_GNUC_UNUSED,
2708 GError **error G_GNUC_UNUSED)
2710 struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;
2712 thunk->want_text = 0;
2713 if (g_ascii_strcasecmp(element_name, "contents") == 0 ||
2714 g_ascii_strcasecmp(element_name, "object") == 0) {
2715 thunk->in_contents = 1;
2716 } else if (g_ascii_strcasecmp(element_name, "commonprefixes") == 0) {
2717 thunk->in_common_prefixes = 1;
2718 } else if (g_ascii_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
2719 thunk->want_text = 1;
2720 } else if ((g_ascii_strcasecmp(element_name, "key") == 0 ||
2721 g_ascii_strcasecmp(element_name, "name") == 0) &&
2722 thunk->in_contents) {
2723 thunk->want_text = 1;
2724 } else if ((g_ascii_strcasecmp(element_name, "size") == 0 ||
2725 g_ascii_strcasecmp(element_name, "bytes") == 0) &&
2726 thunk->in_contents) {
2727 thunk->want_text = 1;
2728 } else if (g_ascii_strcasecmp(element_name, "istruncated")) {
2729 thunk->want_text = 1;
2730 } else if (g_ascii_strcasecmp(element_name, "nextmarker")) {
2731 thunk->want_text = 1;
/* SAX end-element callback: fold the text captured since the matching
 * start tag (thunk->text) into the listing state. */
list_end_element(GMarkupParseContext *context G_GNUC_UNUSED,
                 const gchar *element_name,
                 GError **error G_GNUC_UNUSED)
    struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;

    if (g_ascii_strcasecmp(element_name, "contents") == 0) {
        thunk->in_contents = 0;
    } else if (g_ascii_strcasecmp(element_name, "commonprefixes") == 0) {
        thunk->in_common_prefixes = 0;
    } else if ((g_ascii_strcasecmp(element_name, "key") == 0 ||
                g_ascii_strcasecmp(element_name, "name") == 0) &&
               thunk->in_contents) {
        /* ownership of thunk->text transfers to filename_list here */
        thunk->filename_list = g_slist_prepend(thunk->filename_list, thunk->text);
        if (thunk->is_truncated) {
            if (thunk->next_marker) g_free(thunk->next_marker);
            /* remember the last key seen so a truncated listing can resume */
            thunk->next_marker = g_strdup(thunk->text);
    } else if ((g_ascii_strcasecmp(element_name, "size") == 0 ||
                g_ascii_strcasecmp(element_name, "bytes") == 0) &&
               thunk->in_contents) {
        /* accumulate total object size across the listing */
        thunk->size += g_ascii_strtoull (thunk->text, NULL, 10);
    } else if (g_ascii_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
        thunk->filename_list = g_slist_prepend(thunk->filename_list, thunk->text);
    } else if (g_ascii_strcasecmp(element_name, "istruncated") == 0) {
        /* anything other than "false" counts as truncated */
        if (thunk->text && g_ascii_strncasecmp(thunk->text, "false", 5) != 0)
            thunk->is_truncated = TRUE;
    } else if (g_ascii_strcasecmp(element_name, "nextmarker") == 0) {
        if (thunk->next_marker) g_free(thunk->next_marker);
        thunk->next_marker = thunk->text; /* takes ownership of the captured text */
/* SAX character-data callback: stash the element text in the thunk,
 * but only when list_start_element() flagged this element as wanted. */
list_text(GMarkupParseContext *context G_GNUC_UNUSED,
          GError **error G_GNUC_UNUSED)
    struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;

    if (thunk->want_text) {
        /* replace any previously captured (and unconsumed) text */
        if (thunk->text) g_free(thunk->text);
        thunk->text = g_strndup(text, text_len);
2789 /* Perform a fetch from S3; several fetches may be involved in a
2790 * single listing operation */
/* Issue one GET for a (possibly partial) bucket listing, writing the
 * raw XML response into BUF.  The query string is assembled from the
 * non-NULL entries of pos_parts (prefix/delimiter/marker/max-keys). */
list_fetch(S3Handle *hdl,
           const char *delimiter,
           const char *max_keys,
    s3_result_t result = S3_RESULT_FAIL;
    static result_handling_t result_handling[] = {
        { 200, 0, 0, S3_RESULT_OK },
        { 204, 0, 0, S3_RESULT_OK },
        RESULT_HANDLING_ALWAYS_RETRY,
        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }

    /* keyword/value pairs; a NULL value means "omit this parameter" */
    const char* pos_parts[][2] = {
        {"delimiter", delimiter},
        {"max-keys", max_keys},

    gboolean have_prev_part = FALSE;

    /* loop over possible parts to build query string */
    query = g_string_new("");
    for (i = 0; pos_parts[i][0]; i++) {
        if (pos_parts[i][1]) {
            const char *keyword;

            g_string_append(query, "&");

            have_prev_part = TRUE;
            /* URL-encode the value; curl_free() releases it below */
            esc_value = curl_escape(pos_parts[i][1], 0);
            keyword = pos_parts[i][0];
            /* NOTE(review): Swift and CAStor substitute their own
             * page-size keyword for "max-keys" here -- confirm the
             * replacement strings against the full source. */
            if ((hdl->s3_api == S3_API_SWIFT_1 ||
                 hdl->s3_api == S3_API_SWIFT_2) &&
                strcmp(keyword, "max-keys") == 0) {
            } else if ((hdl->s3_api == S3_API_CASTOR) &&
                       strcmp(keyword, "max-keys") == 0) {
            g_string_append_printf(query, "%s=%s", keyword, esc_value);
            curl_free(esc_value);

    /* non-S3 APIs return listings as XML only when asked explicitly */
    if (hdl->s3_api == S3_API_SWIFT_1 ||
        hdl->s3_api == S3_API_SWIFT_2 ||
        hdl->s3_api == S3_API_CASTOR) {
        g_string_append(query, "&");
        g_string_append(query, "format=xml");

    /* and perform the request on that URI */
    result = perform_request(hdl, "GET", bucket, NULL, NULL, query->str, NULL,
                             NULL, NULL, NULL, NULL, NULL,
                             S3_BUFFER_WRITE_FUNCS, buf, NULL, NULL,

    if (query) g_string_free(query, TRUE);
/* List all keys in BUCKET matching PREFIX/DELIMITER, following
 * NextMarker pagination until the server reports a complete listing.
 * On success *list receives the (newly allocated) filename list and
 * *total_size the summed object sizes. */
s3_list_keys(S3Handle *hdl,
             const char *delimiter,
             guint64 *total_size)
    /*
     * max len of XML variables:
     * bucket: 255 bytes (p12 API Version 2006-03-01)
     * key: 1024 bytes (p15 API Version 2006-03-01)
     * size per key: 5GB bytes (p6 API Version 2006-03-01)
     * size of size 10 bytes (i.e. 10 decimal digits)
     * etag: 44 (observed+assumed)
     * owner ID: 64 (observed+assumed)
     * owner DisplayName: 255 (assumed)
     * StorageClass: const (p18 API Version 2006-03-01)
     */
    static const guint MAX_RESPONSE_LEN = 1000*2000;
    static const char *MAX_KEYS = "1000"; /* page size per fetch */
    struct list_keys_thunk thunk;
    GMarkupParseContext *ctxt = NULL;
    static GMarkupParser parser = { list_start_element, list_end_element, list_text, NULL, NULL };

    s3_result_t result = S3_RESULT_FAIL;
    CurlBuffer buf = {NULL, 0, 0, MAX_RESPONSE_LEN};

    thunk.filename_list = NULL;
    thunk.next_marker = NULL;

    /* Loop until S3 has given us the entire picture */
        s3_buffer_reset_func(&buf);
        /* get some data from S3 */
        result = list_fetch(hdl, bucket, prefix, delimiter, thunk.next_marker, MAX_KEYS, &buf);
        if (result != S3_RESULT_OK) goto cleanup;
        if (buf.buffer_pos == 0) goto cleanup; /* no body */

        /* run the parser over it */
        thunk.in_contents = FALSE;
        thunk.in_common_prefixes = FALSE;
        thunk.is_truncated = FALSE;
        if (thunk.next_marker) g_free(thunk.next_marker);
        thunk.next_marker = NULL;
        thunk.want_text = FALSE;

        ctxt = g_markup_parse_context_new(&parser, 0, (gpointer)&thunk, NULL);

        if (!g_markup_parse_context_parse(ctxt, buf.buffer, buf.buffer_pos, &err)) {
            /* surface the parse error through the handle's last_message */
            if (hdl->last_message) g_free(hdl->last_message);
            hdl->last_message = g_strdup(err->message);
            result = S3_RESULT_FAIL;

        if (!g_markup_parse_context_end_parse(ctxt, &err)) {
            if (hdl->last_message) g_free(hdl->last_message);
            hdl->last_message = g_strdup(err->message);
            result = S3_RESULT_FAIL;

        g_markup_parse_context_free(ctxt);
    /* a surviving next_marker means the last page was truncated */
    } while (thunk.next_marker);

    if (err) g_error_free(err);
    if (thunk.text) g_free(thunk.text);
    if (thunk.next_marker) g_free(thunk.next_marker);
    if (ctxt) g_markup_parse_context_free(ctxt);
    if (buf.buffer) g_free(buf.buffer);

    if (result != S3_RESULT_OK) {
        /* on failure the partial list is discarded, not returned */
        g_slist_free(thunk.filename_list);

    *list = thunk.filename_list;

    *total_size = thunk.size;
/* Download an object, streaming its body to WRITE_FUNC.  The write
 * callbacks remain the caller's responsibility after return. */
s3_read(S3Handle *hdl,
        s3_write_func write_func,
        s3_reset_func reset_func,
        gpointer write_data,
        s3_progress_func progress_func,
        gpointer progress_data)
    s3_result_t result = S3_RESULT_FAIL;
    static result_handling_t result_handling[] = {
        { 200, 0, 0, S3_RESULT_OK },
        RESULT_HANDLING_ALWAYS_RETRY,
        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }

    g_assert(hdl != NULL);
    g_assert(write_func != NULL);

    result = perform_request(hdl, "GET", bucket, key, NULL, NULL, NULL, NULL,
                             NULL, NULL, NULL, NULL, NULL, write_func, reset_func, write_data,
                             progress_func, progress_data, result_handling);

    return result == S3_RESULT_OK;
/* Delete a single object.  A missing object or bucket (404) is
 * treated as success, since the desired end state already holds. */
s3_delete(S3Handle *hdl,
    s3_result_t result = S3_RESULT_FAIL;
    static result_handling_t result_handling[] = {
        { 200, 0, 0, S3_RESULT_OK },
        { 204, 0, 0, S3_RESULT_OK },
        { 404, 0, 0, S3_RESULT_OK },
        { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_OK },
        RESULT_HANDLING_ALWAYS_RETRY,
        /* NOTE(review): this 409 entry sits after the catch-all retry
         * macro; confirm it is still reachable given first-match-wins
         * table scanning. */
        { 409, 0, 0, S3_RESULT_OK },
        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }

    g_assert(hdl != NULL);

    result = perform_request(hdl, "DELETE", bucket, key, NULL, NULL, NULL, NULL,
                             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

    return result == S3_RESULT_OK;
/* Delete several objects with a single "POST ?delete" request,
 * building the Multi-Object Delete XML body from the KEY array.
 * A 400 response maps to S3_RESULT_NOTIMPL for servers that do not
 * implement multi-delete. */
s3_multi_delete(S3Handle *hdl,
    s3_result_t result = S3_RESULT_FAIL;
    static result_handling_t result_handling[] = {
        { 200, 0, 0, S3_RESULT_OK },
        { 204, 0, 0, S3_RESULT_OK },
        { 400, 0, 0, S3_RESULT_NOTIMPL },
        { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_OK },
        RESULT_HANDLING_ALWAYS_RETRY,
        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }

    g_assert(hdl != NULL);

    query = g_string_new(NULL);
    g_string_append(query, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
    g_string_append(query, "<Delete>\n");
    /* quiet mode suppresses per-key results in the response */
    if (!hdl->verbose) {
        g_string_append(query, " <Quiet>true</Quiet>\n");

    while (*key != NULL) {
        g_string_append(query, " <Object>\n");
        g_string_append(query, " <Key>");
        /* NOTE(review): *key is inserted without XML escaping; keys
         * containing '<' or '&' would corrupt the request body --
         * confirm callers restrict the key character set. */
        g_string_append(query, *key);
        g_string_append(query, "</Key>\n");
        g_string_append(query, " </Object>\n");

    g_string_append(query, "</Delete>\n");

    /* wrap the XML document in a CurlBuffer for the read callbacks */
    data.buffer_len = query->len;
    data.buffer = query->str;
    data.buffer_pos = 0;
    data.max_buffer_size = data.buffer_len;

    result = perform_request(hdl, "POST", bucket, NULL, "delete", NULL,
                             "application/xml", NULL,
                             s3_buffer_read_func, s3_buffer_reset_func,
                             s3_buffer_size_func, s3_buffer_md5_func,
                             &data, NULL, NULL, NULL, NULL, NULL,

    g_string_free(query, TRUE);
    if (result == S3_RESULT_OK)
    else if (result == S3_RESULT_NOTIMPL)
/* Create BUCKET (PUT), honoring any configured location constraint.
 * If the bucket already exists and is owned by us, verify that its
 * location constraint matches the configured one. */
s3_make_bucket(S3Handle *hdl,
               const char *project_id)
    char *content_type = NULL;
    s3_result_t result = S3_RESULT_FAIL;
    static result_handling_t result_handling[] = {
        { 200, 0, 0, S3_RESULT_OK },
        { 201, 0, 0, S3_RESULT_OK },
        { 202, 0, 0, S3_RESULT_OK },
        { 204, 0, 0, S3_RESULT_OK },
        /* freshly deleted buckets can 404 briefly; retry */
        { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_RETRY },
        RESULT_HANDLING_ALWAYS_RETRY,
        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }

    regmatch_t pmatch[4];
    char *loc_end_open, *loc_content;
    CurlBuffer buf = {NULL, 0, 0, 0}, *ptr = NULL;
    s3_read_func read_func = NULL;
    s3_reset_func reset_func = NULL;
    s3_md5_func md5_func = NULL;
    s3_size_func size_func = NULL;

    g_assert(hdl != NULL);

    /* a concrete (non-wildcard) location constraint needs a request body */
    if (is_non_empty_string(hdl->bucket_location) &&
        0 != strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location)) {
        if (s3_bucket_location_compat(bucket)) {
            /* build the CreateBucketConfiguration XML; IIJ GIO needs
             * an explicit xmlns attribute */
            buf.buffer = g_strdup_printf(AMAZON_BUCKET_CONF_TEMPLATE,
                g_str_equal(hdl->host, "gss.iijgio.com")?
                    " xmlns=\"http://acs.iijgio.com/doc/2006-03-01/\"":
                hdl->bucket_location);
            buf.buffer_len = (guint) strlen(buf.buffer);

            buf.max_buffer_size = buf.buffer_len;
            read_func = s3_buffer_read_func;
            reset_func = s3_buffer_reset_func;
            size_func = s3_buffer_size_func;
            md5_func = s3_buffer_md5_func;

            /* location constraints require subdomain-style bucket names */
            hdl->last_message = g_strdup_printf(_(
                "Location constraint given for Amazon S3 bucket, "
                "but the bucket name (%s) is not usable as a subdomain."), bucket);

    if (hdl->s3_api == S3_API_CASTOR) {
        content_type = "application/castorcontext";

    result = perform_request(hdl, verb, bucket, NULL, NULL, NULL, content_type,
                             read_func, reset_func, size_func, md5_func, ptr,
                             NULL, NULL, NULL, NULL, NULL, result_handling);

    if (result == S3_RESULT_OK ||
        (result != S3_RESULT_OK &&
         hdl->last_s3_error_code == S3_ERROR_BucketAlreadyOwnedByYou)) {
        /* verify that the location constraint on the existing bucket matches
         * the one that's configured.
         */
        if (is_non_empty_string(hdl->bucket_location)) {
            result = perform_request(hdl, "GET", bucket, NULL, "location", NULL, NULL, NULL,
                                     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                                     NULL, NULL, result_handling);

            result = perform_request(hdl, "GET", bucket, NULL, NULL, NULL, NULL, NULL,
                                     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                                     NULL, NULL, result_handling);

        if (result == S3_RESULT_OK && is_non_empty_string(hdl->bucket_location)) {
            /* return to the default state of failure */
            result = S3_RESULT_FAIL;

            /* use strndup to get a null-terminated string */
            body = g_strndup(hdl->last_response_body, hdl->last_response_body_size);

                hdl->last_message = g_strdup(_("No body received for location request"));
            } else if ('\0' == body[0]) {
                hdl->last_message = g_strdup(_("Empty body received for location request"));

            if (!s3_regexec_wrap(&location_con_regex, body, 4, pmatch, 0)) {
                loc_end_open = find_regex_substring(body, pmatch[1]);
                loc_content = find_regex_substring(body, pmatch[3]);

                /* The case of an empty string is special because XML allows
                 * "self-closing" tags
                 */
                if (0 == strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location) &&
                    '/' != loc_end_open[0])
                    hdl->last_message = g_strdup(_("A wildcard location constraint is "
                        "configured, but the bucket has a non-empty location constraint"));
                else if (strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location)?
                         strncmp(loc_content, hdl->bucket_location, strlen(hdl->bucket_location)) :
                         ('\0' != loc_content[0]))
                    hdl->last_message = g_strdup(_("The location constraint configured "
                        "does not match the constraint currently on the bucket"));

                    result = S3_RESULT_OK;

                hdl->last_message = g_strdup(_("Unexpected location response from Amazon S3"));

    if (body) g_free(body);

    return result == S3_RESULT_OK;
/* Exchange the stored OAuth2 refresh token for a fresh access token
 * via Google's token endpoint, storing the token and its expiry time
 * in the handle. */
oauth2_get_access_token(
    s3_result_t result = S3_RESULT_FAIL;
    static result_handling_t result_handling[] = {
        { 200, 0, 0, S3_RESULT_OK },
        { 204, 0, 0, S3_RESULT_OK },
        RESULT_HANDLING_ALWAYS_RETRY,
        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }

    regmatch_t pmatch[2];

    g_assert(hdl != NULL);

    /* build the form-encoded refresh_token grant request body */
    query = g_string_new(NULL);
    g_string_append(query, "client_id=");
    g_string_append(query, hdl->client_id);
    g_string_append(query, "&client_secret=");
    g_string_append(query, hdl->client_secret);
    g_string_append(query, "&refresh_token=");
    g_string_append(query, hdl->refresh_token);
    g_string_append(query, "&grant_type=refresh_token");

    data.buffer_len = query->len;
    data.buffer = query->str;
    data.buffer_pos = 0;
    data.max_buffer_size = data.buffer_len;

    /* temporarily point the handle at the token endpoint; the flag
     * prevents perform_request from recursing into token refresh.
     * NOTE(review): a string literal is assigned here and NULLed after
     * the request -- confirm x_storage_url is never g_free()d with a
     * literal in it. */
    hdl->x_storage_url = "https://accounts.google.com/o/oauth2/token";
    hdl->getting_oauth2_access_token = 1;
    result = perform_request(hdl, "POST", NULL, NULL, NULL, NULL,
                             "application/x-www-form-urlencoded", NULL,
                             s3_buffer_read_func, s3_buffer_reset_func,
                             s3_buffer_size_func, s3_buffer_md5_func,
                             &data, NULL, NULL, NULL, NULL, NULL,
    hdl->x_storage_url = NULL;
    hdl->getting_oauth2_access_token = 0;

    /* use strndup to get a null-terminated string */
    body = g_strndup(hdl->last_response_body, hdl->last_response_body_size);

        hdl->last_message = g_strdup(_("No body received for location request"));
    } else if ('\0' == body[0]) {
        hdl->last_message = g_strdup(_("Empty body received for location request"));

    /* pull access_token (and x_auth_token copy) out of the JSON reply */
    if (!s3_regexec_wrap(&access_token_regex, body, 2, pmatch, 0)) {
        hdl->access_token = find_regex_substring(body, pmatch[1]);
        hdl->x_auth_token = g_strdup(hdl->access_token);

    if (!s3_regexec_wrap(&expires_in_regex, body, 2, pmatch, 0)) {
        char *expires_in = find_regex_substring(body, pmatch[1]);
        /* renew 10 minutes early to stay clear of the real expiry */
        hdl->expires = time(NULL) + atoi(expires_in) - 600;

    return result == S3_RESULT_OK;
/* Probe whether BUCKET exists (and is accessible) by issuing a
 * minimal listing request; the query string differs per API flavor. */
s3_is_bucket_exists(S3Handle *hdl,
                    const char *project_id)
    s3_result_t result = S3_RESULT_FAIL;

    static result_handling_t result_handling[] = {
        { 200, 0, 0, S3_RESULT_OK },
        { 204, 0, 0, S3_RESULT_OK },
        RESULT_HANDLING_ALWAYS_RETRY,
        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }

    /* choose the cheapest possible listing query per API flavor */
    if (hdl->s3_api == S3_API_SWIFT_1 ||
        hdl->s3_api == S3_API_SWIFT_2) {

    } else if (hdl->s3_api == S3_API_CASTOR) {
        query = "format=xml&size=0";

        query = "max-keys=1";

    result = perform_request(hdl, "GET", bucket, NULL, NULL, query,
                             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                             NULL, NULL, result_handling);

    return result == S3_RESULT_OK;
3283 s3_delete_bucket(S3Handle *hdl,
3286 return s3_delete(hdl, bucket, NULL);