X-Git-Url: https://git.gag.com/?a=blobdiff_plain;f=device-src%2Fs3.c;h=453fabc946dc2dde4612d7fec91c3921316e1377;hb=HEAD;hp=584a47d54ad1feacf0a8eae1e13549c104266dad;hpb=42ff24f2a525d5965e1841b2ebe3ee0f4b918ac6;p=debian%2Famanda diff --git a/device-src/s3.c b/device-src/s3.c index 584a47d..453fabc 100644 --- a/device-src/s3.c +++ b/device-src/s3.c @@ -1,9 +1,10 @@ /* - * Copyright (c) 2008, 2009, 2010 Zmanda, Inc. All Rights Reserved. + * Copyright (c) 2008-2012 Zmanda, Inc. All Rights Reserved. * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY @@ -88,12 +89,14 @@ #define AMAZON_SECURITY_HEADER "x-amz-security-token" #define AMAZON_BUCKET_CONF_TEMPLATE "\ - \n\ + \n\ %s\n\ " #define AMAZON_STORAGE_CLASS_HEADER "x-amz-storage-class" +#define AMAZON_SERVER_SIDE_ENCRYPTION_HEADER "x-amz-server-side-encryption" + #define AMAZON_WILDCARD_LOCATION "*" /* parameters for exponential backoff in the face of retriable errors */ @@ -120,6 +123,7 @@ { 0, 0, CURLE_COULDNT_RESOLVE_HOST, S3_RESULT_RETRY }, \ { 0, 0, CURLE_PARTIAL_FILE, S3_RESULT_RETRY }, \ { 0, 0, CURLE_OPERATION_TIMEOUTED, S3_RESULT_RETRY }, \ + { 0, 0, CURLE_SSL_CONNECT_ERROR, S3_RESULT_RETRY }, \ { 0, 0, CURLE_SEND_ERROR, S3_RESULT_RETRY }, \ { 0, 0, CURLE_RECV_ERROR, S3_RESULT_RETRY }, \ { 0, 0, CURLE_GOT_NOTHING, S3_RESULT_RETRY } @@ -134,12 +138,32 @@ struct S3Handle { char *access_key; char *secret_key; char *user_token; + char *swift_account_id; + char *swift_access_key; 
+ char *username; + char *password; + char *tenant_id; + char *tenant_name; + char *client_id; + char *client_secret; + char *refresh_token; + char *access_token; + time_t expires; + gboolean getting_oauth2_access_token; + gboolean getting_swift_2_token; /* attributes for new objects */ char *bucket_location; char *storage_class; - + char *server_side_encryption; + char *proxy; + char *host; + char *service_path; + gboolean use_subdomain; + S3_api s3_api; char *ca_info; + char *x_auth_token; + char *x_storage_url; CURL *curl; @@ -160,6 +184,13 @@ struct S3Handle { /* offset with s3 */ time_t time_offset_with_s3; + char *content_type; + + gboolean reuse_connection; + + /* CAStor */ + char *reps; + char *reps_bucket; }; typedef struct { @@ -237,7 +268,8 @@ s3_error_name_from_code(s3_error_code_t s3_error_code); typedef enum { S3_RESULT_RETRY = -1, S3_RESULT_FAIL = 0, - S3_RESULT_OK = 1 + S3_RESULT_OK = 1, + S3_RESULT_NOTIMPL = 2 } s3_result_t; typedef struct result_handling { @@ -247,6 +279,11 @@ typedef struct result_handling { s3_result_t result; } result_handling_t; +/* + * get the access token for OAUTH2 + */ +static gboolean oauth2_get_access_token(S3Handle *hdl); + /* Lookup a result in C{result_handling}. * * @param result_handling: array of handling specifications @@ -264,7 +301,9 @@ lookup_result(const result_handling_t *result_handling, /* * Precompiled regular expressions */ static regex_t etag_regex, error_name_regex, message_regex, subdomain_regex, - location_con_regex, date_sync_regex; + location_con_regex, date_sync_regex, x_auth_token_regex, + x_storage_url_regex, access_token_regex, expires_in_regex, + content_type_regex, details_regex, code_regex; /* @@ -283,19 +322,23 @@ static gboolean is_non_empty_string(const char *str); * A new string is allocated and returned; it is the responsiblity of the caller. * * @param hdl: the S3Handle object - * @param verb: capitalized verb for this request ('PUT', 'GET', etc.) 
+ * @param service_path: A path to add in the URL, or NULL for none. * @param bucket: the bucket being accessed, or NULL for none * @param key: the key being accessed, or NULL for none * @param subresource: the sub-resource being accessed (e.g. "acl"), or NULL for none - * @param use_subdomain: if TRUE, a subdomain of s3.amazonaws.com will be used + * @param query: the query being accessed (e.g. "acl"), or NULL for none + * + * !use_subdomain: http://host/service_path/bucket/key + * use_subdomain : http://bucket.host/service_path/key + * */ static char * -build_url(const char *bucket, +build_url( + S3Handle *hdl, + const char *bucket, const char *key, const char *subresource, - const char *query, - gboolean use_subdomain, - gboolean use_ssl); + const char *query); /* Create proper authorization headers for an Amazon S3 REST * request to C{headers}. @@ -312,7 +355,6 @@ build_url(const char *bucket, * @param key: the key being accessed, or NULL for none * @param subresource: the sub-resource being accessed (e.g. 
"acl"), or NULL for none * @param md5_hash: the MD5 hash of the request body, or NULL for none - * @param use_subdomain: if TRUE, a subdomain of s3.amazonaws.com will be used */ static struct curl_slist * authenticate_request(S3Handle *hdl, @@ -321,7 +363,9 @@ authenticate_request(S3Handle *hdl, const char *key, const char *subresource, const char *md5_hash, - gboolean use_subdomain); + const char *content_type, + const size_t content_length, + const char *project_id); @@ -387,6 +431,8 @@ perform_request(S3Handle *hdl, const char *key, const char *subresource, const char *query, + const char *content_type, + const char *project_id, s3_read_func read_func, s3_reset_func read_reset_func, s3_size_func size_func, @@ -421,6 +467,9 @@ s3_internal_header_func(void *ptr, size_t size, size_t nmemb, void * stream); static gboolean compile_regexes(void); +static gboolean get_openstack_swift_api_v1_setting(S3Handle *hdl); +static gboolean get_openstack_swift_api_v2_setting(S3Handle *hdl); + /* * Static function implementations */ @@ -506,6 +555,130 @@ lookup_result(const result_handling_t *result_handling, return result_handling->result; } +static time_t +rfc3339_date( + const char *date) +{ + gint year, month, day, hour, minute, seconds; + const char *atz; + + if (strlen(date) < 19) + return 1073741824; + + year = atoi(date); + month = atoi(date+5); + day = atoi(date+8); + hour = atoi(date+11); + minute = atoi(date+14); + seconds = atoi(date+17); + atz = date+19; + if (*atz == '.') { /* skip decimal seconds */ + atz++; + while (*atz >= '0' && *atz <= '9') { + atz++; + } + } + +#if GLIB_CHECK_VERSION(2,26,0) + if (!glib_check_version(2,26,0)) { + GTimeZone *tz; + GDateTime *dt; + time_t a; + + tz = g_time_zone_new(atz); + dt = g_date_time_new(tz, year, month, day, hour, minute, seconds); + a = g_date_time_to_unix(dt); + g_time_zone_unref(tz); + g_date_time_unref(dt); + return a; + } else +#endif + { + struct tm tm; + time_t t; + + tm.tm_year = year - 1900; + tm.tm_mon = month 
- 1; + tm.tm_mday = day; + tm.tm_hour = hour; + tm.tm_min = minute; + tm.tm_sec = seconds; + tm.tm_wday = 0; + tm.tm_yday = 0; + tm.tm_isdst = -1; + t = time(NULL); + + if (*atz == '-' || *atz == '+') { /* numeric timezone */ + time_t lt, gt; + time_t a; + struct tm ltt, gtt; + gint Hour = atoi(atz); + gint Min = atoi(atz+4); + + if (Hour < 0) + Min = -Min; + tm.tm_hour -= Hour; + tm.tm_min -= Min; + tm.tm_isdst = 0; + localtime_r(&t, <t); + lt = mktime(<t); + gmtime_r(&t, >t); + gt = mktime(>t); + tm.tm_sec += lt - gt; + a = mktime(&tm); + return a; + } else if (*atz == 'Z' && *(atz+1) == '\0') { /* Z timezone */ + time_t lt, gt; + time_t a; + struct tm ltt, gtt; + + tm.tm_isdst = 0; + localtime_r(&t, <t); + lt = mktime(<t); + gmtime_r(&t, >t); + gt = mktime(>t); + tm.tm_sec += lt - gt; + a = mktime(&tm); + return a; + } else { /* named timezone */ + int pid; + int fd[2]; + char buf[101]; + time_t a; + size_t size; + + if (pipe(fd) == -1) + return 1073741824; + pid = fork(); + switch (pid) { + case -1: + close(fd[0]); + close(fd[1]); + return 1073741824; + break; + case 0: + close(fd[0]); + setenv("TZ", atz, 1); + tzset(); + a = mktime(&tm); + g_snprintf(buf, 100, "%d", (int)a); + size = write(fd[1], buf, strlen(buf)); + close(fd[1]); + exit(0); + default: + close(fd[1]); + size = read(fd[0], buf, 100); + close(fd[0]); + buf[size] = '\0'; + waitpid(pid, NULL, 0); + break; + } + return atoi(buf); + } + } +} + + static gboolean is_non_empty_string(const char *str) { @@ -513,46 +686,87 @@ is_non_empty_string(const char *str) } static char * -build_url(const char *bucket, +build_url( + S3Handle *hdl, + const char *bucket, const char *key, const char *subresource, - const char *query, - gboolean use_subdomain, - gboolean use_ssl) + const char *query) { GString *url = NULL; char *esc_bucket = NULL, *esc_key = NULL; - /* scheme */ - url = g_string_new("http"); - if (use_ssl) - g_string_append(url, "s"); - - g_string_append(url, "://"); - - /* domain */ - if 
(use_subdomain && bucket) - g_string_append_printf(url, "%s.s3.amazonaws.com/", bucket); - else - g_string_append(url, "s3.amazonaws.com/"); + if ((hdl->s3_api == S3_API_SWIFT_1 || hdl->s3_api == S3_API_SWIFT_2 || + hdl->s3_api == S3_API_OAUTH2) && + hdl->x_storage_url) { + url = g_string_new(hdl->x_storage_url); + g_string_append(url, "/"); + } else { + /* scheme */ + url = g_string_new("http"); + if (hdl->use_ssl) + g_string_append(url, "s"); + + g_string_append(url, "://"); + + /* domain */ + if (hdl->use_subdomain && bucket) + g_string_append_printf(url, "%s.%s", bucket, hdl->host); + else + g_string_append_printf(url, "%s", hdl->host); + + if (hdl->service_path) { + g_string_append_printf(url, "%s/", hdl->service_path); + } else { + g_string_append(url, "/"); + } + } /* path */ - if (!use_subdomain && bucket) { - esc_bucket = curl_escape(bucket, 0); - if (!esc_bucket) goto cleanup; + if (!hdl->use_subdomain && bucket) { + /* curl_easy_escape addeded in 7.15.4 */ + #if LIBCURL_VERSION_NUM >= 0x070f04 + curl_version_info_data *info; + /* check the runtime version too */ + info = curl_version_info(CURLVERSION_NOW); + if (info->version_num >= 0x070f04) + esc_bucket = curl_easy_escape(hdl->curl, bucket, 0); + else + esc_bucket = curl_escape(bucket, 0); + #else + esc_bucket = curl_escape(bucket, 0); + #endif + if (!esc_bucket) goto cleanup; g_string_append_printf(url, "%s", esc_bucket); if (key) g_string_append(url, "/"); + curl_free(esc_bucket); } if (key) { - esc_key = curl_escape(key, 0); - if (!esc_key) goto cleanup; + /* curl_easy_escape addeded in 7.15.4 */ + #if LIBCURL_VERSION_NUM >= 0x070f04 + curl_version_info_data *info; + /* check the runtime version too */ + info = curl_version_info(CURLVERSION_NOW); + if (info->version_num >= 0x070f04) + esc_key = curl_easy_escape(hdl->curl, key, 0); + else + esc_key = curl_escape(key, 0); + #else + esc_key = curl_escape(key, 0); + #endif + if (!esc_key) goto cleanup; g_string_append_printf(url, "%s", esc_key); + 
curl_free(esc_key); + } + + if (url->str[strlen(url->str)-1] == '/') { + g_string_truncate(url, strlen(url->str)-1); } /* query string */ - if (subresource || query) + if (subresource || query || (hdl->s3_api == S3_API_CASTOR && hdl->tenant_name)) g_string_append(url, "?"); if (subresource) @@ -564,9 +778,15 @@ build_url(const char *bucket, if (query) g_string_append(url, query); + /* add CAStor tenant domain override query arg */ + if (hdl->s3_api == S3_API_CASTOR && hdl->tenant_name) { + if (subresource || query) { + g_string_append(url, "&"); + } + g_string_append_printf(url, "domain=%s", hdl->tenant_name); + } + cleanup: - if (esc_bucket) curl_free(esc_bucket); - if (esc_key) curl_free(esc_key); return g_string_free(url, FALSE); } @@ -578,7 +798,9 @@ authenticate_request(S3Handle *hdl, const char *key, const char *subresource, const char *md5_hash, - gboolean use_subdomain) + const char *content_type, + const size_t content_length, + const char *project_id) { time_t t; struct tm tmp; @@ -590,31 +812,13 @@ authenticate_request(S3Handle *hdl, struct curl_slist *headers = NULL; char *esc_bucket = NULL, *esc_key = NULL; GString *auth_string = NULL; + char *reps = NULL; /* From RFC 2616 */ static const char *wkday[] = {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}; static const char *month[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"}; - - - /* Build the string to sign, per the S3 spec. 
- * See: "Authenticating REST Requests" - API Version 2006-03-01 pg 58 - */ - - /* verb */ - auth_string = g_string_new(verb); - g_string_append(auth_string, "\n"); - - /* Content-MD5 header */ - if (md5_hash) - g_string_append(auth_string, md5_hash); - g_string_append(auth_string, "\n"); - - /* Content-Type is empty*/ - g_string_append(auth_string, "\n"); - - /* calculate the date */ t = time(NULL); @@ -632,87 +836,197 @@ authenticate_request(S3Handle *hdl, wkday[tmp.tm_wday], tmp.tm_mday, month[tmp.tm_mon], 1900+tmp.tm_year, tmp.tm_hour, tmp.tm_min, tmp.tm_sec); - g_string_append(auth_string, date); - g_string_append(auth_string, "\n"); - - /* CanonicalizedAmzHeaders, sorted lexicographically */ - if (is_non_empty_string(hdl->user_token)) { - g_string_append(auth_string, AMAZON_SECURITY_HEADER); - g_string_append(auth_string, ":"); - g_string_append(auth_string, hdl->user_token); - g_string_append(auth_string, ","); - g_string_append(auth_string, STS_PRODUCT_TOKEN); - g_string_append(auth_string, "\n"); - } - - if (is_non_empty_string(hdl->storage_class)) { - g_string_append(auth_string, AMAZON_STORAGE_CLASS_HEADER); - g_string_append(auth_string, ":"); - g_string_append(auth_string, hdl->storage_class); - g_string_append(auth_string, "\n"); - } - - /* CanonicalizedResource */ - g_string_append(auth_string, "/"); - if (bucket) { - if (use_subdomain) - g_string_append(auth_string, bucket); - else { - esc_bucket = curl_escape(bucket, 0); - if (!esc_bucket) goto cleanup; - g_string_append(auth_string, esc_bucket); + if (hdl->s3_api == S3_API_SWIFT_1) { + if (!bucket) { + buf = g_strdup_printf("X-Auth-User: %s", hdl->swift_account_id); + headers = curl_slist_append(headers, buf); + g_free(buf); + buf = g_strdup_printf("X-Auth-Key: %s", hdl->swift_access_key); + headers = curl_slist_append(headers, buf); + g_free(buf); + } else { + buf = g_strdup_printf("X-Auth-Token: %s", hdl->x_auth_token); + headers = curl_slist_append(headers, buf); + g_free(buf); + } + } else if 
(hdl->s3_api == S3_API_SWIFT_2) { + if (bucket) { + buf = g_strdup_printf("X-Auth-Token: %s", hdl->x_auth_token); + headers = curl_slist_append(headers, buf); + g_free(buf); + } + buf = g_strdup_printf("Accept: %s", "application/xml"); + headers = curl_slist_append(headers, buf); + g_free(buf); + } else if (hdl->s3_api == S3_API_OAUTH2) { + if (bucket) { + buf = g_strdup_printf("Authorization: Bearer %s", hdl->access_token); + headers = curl_slist_append(headers, buf); + g_free(buf); + } + } else if (hdl->s3_api == S3_API_CASTOR) { + if (g_str_equal(verb, "PUT") || g_str_equal(verb, "POST")) { + if (key) { + buf = g_strdup("CAStor-Application: Amanda"); + headers = curl_slist_append(headers, buf); + g_free(buf); + reps = g_strdup(hdl->reps); /* object replication level */ + } else { + reps = g_strdup(hdl->reps_bucket); /* bucket replication level */ + } + + /* set object replicas in lifepoint */ + buf = g_strdup_printf("lifepoint: [] reps=%s", reps); + headers = curl_slist_append(headers, buf); + g_free(buf); + g_free(reps); } - } + } else { + /* Build the string to sign, per the S3 spec. 
+ * See: "Authenticating REST Requests" - API Version 2006-03-01 pg 58 + */ - if (bucket && (use_subdomain || key)) - g_string_append(auth_string, "/"); + /* verb */ + auth_string = g_string_new(verb); + g_string_append(auth_string, "\n"); - if (key) { - esc_key = curl_escape(key, 0); - if (!esc_key) goto cleanup; - g_string_append(auth_string, esc_key); - } + /* Content-MD5 header */ + if (md5_hash) + g_string_append(auth_string, md5_hash); + g_string_append(auth_string, "\n"); - if (subresource) { - g_string_append(auth_string, "?"); - g_string_append(auth_string, subresource); - } + if (content_type) { + g_string_append(auth_string, content_type); + } + g_string_append(auth_string, "\n"); + + /* Date */ + g_string_append(auth_string, date); + g_string_append(auth_string, "\n"); + + /* CanonicalizedAmzHeaders, sorted lexicographically */ + if (is_non_empty_string(hdl->user_token)) { + g_string_append(auth_string, AMAZON_SECURITY_HEADER); + g_string_append(auth_string, ":"); + g_string_append(auth_string, hdl->user_token); + g_string_append(auth_string, ","); + g_string_append(auth_string, STS_PRODUCT_TOKEN); + g_string_append(auth_string, "\n"); + } + + if (g_str_equal(verb,"PUT") && + is_non_empty_string(hdl->server_side_encryption)) { + g_string_append(auth_string, AMAZON_SERVER_SIDE_ENCRYPTION_HEADER); + g_string_append(auth_string, ":"); + g_string_append(auth_string, hdl->server_side_encryption); + g_string_append(auth_string, "\n"); + } + + if (is_non_empty_string(hdl->storage_class)) { + g_string_append(auth_string, AMAZON_STORAGE_CLASS_HEADER); + g_string_append(auth_string, ":"); + g_string_append(auth_string, hdl->storage_class); + g_string_append(auth_string, "\n"); + } + + /* CanonicalizedResource */ + if (hdl->service_path) { + g_string_append(auth_string, hdl->service_path); + } + g_string_append(auth_string, "/"); + if (bucket) { + if (hdl->use_subdomain) + g_string_append(auth_string, bucket); + else { + esc_bucket = curl_escape(bucket, 0); + if 
(!esc_bucket) goto cleanup; + g_string_append(auth_string, esc_bucket); + } + } - /* run HMAC-SHA1 on the canonicalized string */ - md = g_byte_array_sized_new(EVP_MAX_MD_SIZE+1); - HMAC_CTX_init(&ctx); - HMAC_Init_ex(&ctx, hdl->secret_key, (int) strlen(hdl->secret_key), EVP_sha1(), NULL); - HMAC_Update(&ctx, (unsigned char*) auth_string->str, auth_string->len); - HMAC_Final(&ctx, md->data, &md->len); - HMAC_CTX_cleanup(&ctx); - auth_base64 = s3_base64_encode(md); + if (bucket && (hdl->use_subdomain || key)) + g_string_append(auth_string, "/"); - /* append the new headers */ - if (is_non_empty_string(hdl->user_token)) { - /* Devpay headers are included in hash. */ - buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s", hdl->user_token); + if (key) { + esc_key = curl_escape(key, 0); + if (!esc_key) goto cleanup; + g_string_append(auth_string, esc_key); + } + + if (subresource) { + g_string_append(auth_string, "?"); + g_string_append(auth_string, subresource); + } + + /* run HMAC-SHA1 on the canonicalized string */ + md = g_byte_array_sized_new(EVP_MAX_MD_SIZE+1); + HMAC_CTX_init(&ctx); + HMAC_Init_ex(&ctx, hdl->secret_key, (int) strlen(hdl->secret_key), + EVP_sha1(), NULL); + HMAC_Update(&ctx, (unsigned char*) auth_string->str, auth_string->len); + HMAC_Final(&ctx, md->data, &md->len); + HMAC_CTX_cleanup(&ctx); + auth_base64 = s3_base64_encode(md); + /* append the new headers */ + if (is_non_empty_string(hdl->user_token)) { + /* Devpay headers are included in hash. 
*/ + buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s", + hdl->user_token); + headers = curl_slist_append(headers, buf); + g_free(buf); + + buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s", + STS_PRODUCT_TOKEN); + headers = curl_slist_append(headers, buf); + g_free(buf); + } + + if (g_str_equal(verb,"PUT") && + is_non_empty_string(hdl->server_side_encryption)) { + buf = g_strdup_printf(AMAZON_SERVER_SIDE_ENCRYPTION_HEADER ": %s", + hdl->server_side_encryption); + headers = curl_slist_append(headers, buf); + g_free(buf); + } + + if (is_non_empty_string(hdl->storage_class)) { + buf = g_strdup_printf(AMAZON_STORAGE_CLASS_HEADER ": %s", + hdl->storage_class); + headers = curl_slist_append(headers, buf); + g_free(buf); + } + + buf = g_strdup_printf("Authorization: AWS %s:%s", + hdl->access_key, auth_base64); + headers = curl_slist_append(headers, buf); + g_free(buf); + } + + if (md5_hash && '\0' != md5_hash[0]) { + buf = g_strdup_printf("Content-MD5: %s", md5_hash); headers = curl_slist_append(headers, buf); g_free(buf); - - buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s", STS_PRODUCT_TOKEN); + } + if (content_length > 0) { + buf = g_strdup_printf("Content-Length: %zu", content_length); headers = curl_slist_append(headers, buf); g_free(buf); } - if (is_non_empty_string(hdl->storage_class)) { - buf = g_strdup_printf(AMAZON_STORAGE_CLASS_HEADER ": %s", hdl->storage_class); - headers = curl_slist_append(headers, buf); - g_free(buf); + if (content_type) { + buf = g_strdup_printf("Content-Type: %s", content_type); + headers = curl_slist_append(headers, buf); + g_free(buf); } + if (hdl->s3_api == S3_API_OAUTH2) { + buf = g_strdup_printf("x-goog-api-version: 2"); + headers = curl_slist_append(headers, buf); + g_free(buf); + } - buf = g_strdup_printf("Authorization: AWS %s:%s", - hdl->access_key, auth_base64); - headers = curl_slist_append(headers, buf); - g_free(buf); - - if (md5_hash && '\0' != md5_hash[0]) { - buf = g_strdup_printf("Content-MD5: %s", md5_hash); + if 
(project_id && hdl->s3_api == S3_API_OAUTH2) { + buf = g_strdup_printf("x-goog-project-id: %s", project_id); headers = curl_slist_append(headers, buf); g_free(buf); } @@ -720,17 +1034,220 @@ authenticate_request(S3Handle *hdl, buf = g_strdup_printf("Date: %s", date); headers = curl_slist_append(headers, buf); g_free(buf); + cleanup: g_free(date); g_free(esc_bucket); g_free(esc_key); - g_byte_array_free(md, TRUE); + if (md) g_byte_array_free(md, TRUE); g_free(auth_base64); - g_string_free(auth_string, TRUE); + if (auth_string) g_string_free(auth_string, TRUE); return headers; } +/* Functions for a SAX parser to parse the XML failure from Amazon */ + +/* Private structure for our "thunk", which tracks where the user is in the list + * * of keys. */ +struct failure_thunk { + gboolean want_text; + + gboolean in_title; + gboolean in_body; + gboolean in_code; + gboolean in_message; + gboolean in_details; + gboolean in_access; + gboolean in_token; + gboolean in_serviceCatalog; + gboolean in_service; + gboolean in_endpoint; + gint in_others; + + gchar *text; + gsize text_len; + + gchar *message; + gchar *details; + gchar *error_name; + gchar *token_id; + gchar *service_type; + gchar *service_public_url; + gint64 expires; +}; + +static void +failure_start_element(GMarkupParseContext *context G_GNUC_UNUSED, + const gchar *element_name, + const gchar **attribute_names, + const gchar **attribute_values, + gpointer user_data, + GError **error G_GNUC_UNUSED) +{ + struct failure_thunk *thunk = (struct failure_thunk *)user_data; + const gchar **att_name, **att_value; + + if (g_ascii_strcasecmp(element_name, "title") == 0) { + thunk->in_title = 1; + thunk->in_others = 0; + thunk->want_text = 1; + } else if (g_ascii_strcasecmp(element_name, "body") == 0) { + thunk->in_body = 1; + thunk->in_others = 0; + thunk->want_text = 1; + } else if (g_ascii_strcasecmp(element_name, "code") == 0) { + thunk->in_code = 1; + thunk->in_others = 0; + thunk->want_text = 1; + } else if 
(g_ascii_strcasecmp(element_name, "message") == 0) { + thunk->in_message = 1; + thunk->in_others = 0; + thunk->want_text = 1; + } else if (g_ascii_strcasecmp(element_name, "details") == 0) { + thunk->in_details = 1; + thunk->in_others = 0; + thunk->want_text = 1; + } else if (g_ascii_strcasecmp(element_name, "access") == 0) { + thunk->in_access = 1; + thunk->in_others = 0; + } else if (g_ascii_strcasecmp(element_name, "token") == 0) { + thunk->in_token = 1; + thunk->in_others = 0; + for (att_name=attribute_names, att_value=attribute_values; + *att_name != NULL; + att_name++, att_value++) { + if (g_str_equal(*att_name, "id")) { + thunk->token_id = g_strdup(*att_value); + } + if (g_str_equal(*att_name, "expires") && strlen(*att_value) >= 19) { + thunk->expires = rfc3339_date(*att_value) - 600; + } + } + } else if (g_ascii_strcasecmp(element_name, "serviceCatalog") == 0) { + thunk->in_serviceCatalog = 1; + thunk->in_others = 0; + } else if (g_ascii_strcasecmp(element_name, "service") == 0) { + thunk->in_service = 1; + thunk->in_others = 0; + for (att_name=attribute_names, att_value=attribute_values; + *att_name != NULL; + att_name++, att_value++) { + if (g_str_equal(*att_name, "type")) { + thunk->service_type = g_strdup(*att_value); + } + } + } else if (g_ascii_strcasecmp(element_name, "endpoint") == 0) { + thunk->in_endpoint = 1; + thunk->in_others = 0; + if (thunk->service_type && + g_str_equal(thunk->service_type, "object-store")) { + for (att_name=attribute_names, att_value=attribute_values; + *att_name != NULL; + att_name++, att_value++) { + if (g_str_equal(*att_name, "publicURL")) { + thunk->service_public_url = g_strdup(*att_value); + } + } + } + } else if (g_ascii_strcasecmp(element_name, "error") == 0) { + for (att_name=attribute_names, att_value=attribute_values; + *att_name != NULL; + att_name++, att_value++) { + if (g_str_equal(*att_name, "message")) { + thunk->message = g_strdup(*att_value); + } + } + } else { + thunk->in_others++; + } +} + +static void 
+failure_end_element(GMarkupParseContext *context G_GNUC_UNUSED, + const gchar *element_name, + gpointer user_data, + GError **error G_GNUC_UNUSED) +{ + struct failure_thunk *thunk = (struct failure_thunk *)user_data; + + if (g_ascii_strcasecmp(element_name, "title") == 0) { + char *p = strchr(thunk->text, ' '); + if (p) { + p++; + if (*p) { + thunk->error_name = g_strdup(p); + } + } + g_free(thunk->text); + thunk->text = NULL; + thunk->in_title = 0; + } else if (g_ascii_strcasecmp(element_name, "body") == 0) { + thunk->message = thunk->text; + g_strstrip(thunk->message); + thunk->text = NULL; + thunk->in_body = 0; + } else if (g_ascii_strcasecmp(element_name, "code") == 0) { + thunk->error_name = thunk->text; + thunk->text = NULL; + thunk->in_code = 0; + } else if (g_ascii_strcasecmp(element_name, "message") == 0) { + thunk->message = thunk->text; + thunk->text = NULL; + thunk->in_message = 0; + } else if (g_ascii_strcasecmp(element_name, "details") == 0) { + thunk->details = thunk->text; + thunk->text = NULL; + thunk->in_details = 0; + } else if (g_ascii_strcasecmp(element_name, "access") == 0) { + thunk->message = thunk->text; + thunk->text = NULL; + thunk->in_access = 0; + } else if (g_ascii_strcasecmp(element_name, "token") == 0) { + thunk->message = thunk->text; + thunk->text = NULL; + thunk->in_token = 0; + } else if (g_ascii_strcasecmp(element_name, "serviceCatalog") == 0) { + thunk->message = thunk->text; + thunk->text = NULL; + thunk->in_serviceCatalog = 0; + } else if (g_ascii_strcasecmp(element_name, "service") == 0) { + thunk->message = thunk->text; + thunk->text = NULL; + g_free(thunk->service_type); + thunk->service_type = NULL; + thunk->in_service = 0; + } else if (g_ascii_strcasecmp(element_name, "endpoint") == 0) { + thunk->message = thunk->text; + thunk->text = NULL; + thunk->in_endpoint = 0; + } else { + thunk->in_others--; + } +} + +static void +failure_text(GMarkupParseContext *context G_GNUC_UNUSED, + const gchar *text, + gsize text_len, + 
gpointer user_data, + GError **error G_GNUC_UNUSED) +{ + struct failure_thunk *thunk = (struct failure_thunk *)user_data; + + if (thunk->want_text && thunk->in_others == 0) { + char *new_text; + + new_text = g_strndup(text, text_len); + if (thunk->text) { + strappend(thunk->text, new_text); + g_free(new_text); + } else { + thunk->text = new_text; + } + } +} + static gboolean interpret_response(S3Handle *hdl, CURLcode curl_code, @@ -741,10 +1258,11 @@ interpret_response(S3Handle *hdl, const char *content_md5) { long response_code = 0; - regmatch_t pmatch[2]; - char *error_name = NULL, *message = NULL; - char *body_copy = NULL; gboolean ret = TRUE; + struct failure_thunk thunk; + GMarkupParseContext *ctxt = NULL; + static GMarkupParser parser = { failure_start_element, failure_end_element, failure_text, NULL, NULL }; + GError *err = NULL; if (!hdl) return FALSE; @@ -762,8 +1280,9 @@ interpret_response(S3Handle *hdl, curl_easy_getinfo(hdl->curl, CURLINFO_RESPONSE_CODE, &response_code); hdl->last_response_code = response_code; - /* check ETag, if present */ - if (etag && content_md5 && 200 == response_code) { + /* check ETag, if present and not CAStor */ + if (etag && content_md5 && 200 == response_code && + hdl->s3_api != S3_API_CASTOR) { if (etag && g_ascii_strcasecmp(etag, content_md5)) hdl->last_message = g_strdup("S3 Error: Possible data corruption (ETag returned by Amazon did not match the MD5 hash of the data sent)"); else @@ -771,48 +1290,172 @@ interpret_response(S3Handle *hdl, return ret; } - if (200 <= response_code && response_code < 400) { - /* 2xx and 3xx codes won't have a response body we care about */ - hdl->last_s3_error_code = S3_ERROR_None; - return FALSE; - } - - /* Now look at the body to try to get the actual Amazon error message. Rather - * than parse out the XML, just use some regexes. */ + /* Now look at the body to try to get the actual Amazon error message. 
*/ /* impose a reasonable limit on body size */ if (body_len > MAX_ERROR_RESPONSE_LEN) { hdl->last_message = g_strdup("S3 Error: Unknown (response body too large to parse)"); return FALSE; } else if (!body || body_len == 0) { - hdl->last_message = g_strdup("S3 Error: Unknown (empty response body)"); - return TRUE; /* perhaps a network error; retry the request */ + if (response_code < 100 || response_code >= 400) { + hdl->last_message = + g_strdup("S3 Error: Unknown (empty response body)"); + return TRUE; /* perhaps a network error; retry the request */ + } else { + /* 2xx and 3xx codes without body are good result */ + hdl->last_s3_error_code = S3_ERROR_None; + return FALSE; + } } - /* use strndup to get a zero-terminated string */ - body_copy = g_strndup(body, body_len); - if (!body_copy) goto cleanup; + thunk.in_title = FALSE; + thunk.in_body = FALSE; + thunk.in_code = FALSE; + thunk.in_message = FALSE; + thunk.in_details = FALSE; + thunk.in_access = FALSE; + thunk.in_token = FALSE; + thunk.in_serviceCatalog = FALSE; + thunk.in_service = FALSE; + thunk.in_endpoint = FALSE; + thunk.in_others = 0; + thunk.text = NULL; + thunk.want_text = FALSE; + thunk.text_len = 0; + thunk.message = NULL; + thunk.details = NULL; + thunk.error_name = NULL; + thunk.token_id = NULL; + thunk.service_type = NULL; + thunk.service_public_url = NULL; + thunk.expires = 0; + + if ((hdl->s3_api == S3_API_SWIFT_1 || + hdl->s3_api == S3_API_SWIFT_2) && + hdl->content_type && + (g_str_equal(hdl->content_type, "text/html") || + g_str_equal(hdl->content_type, "text/plain"))) { + + char *body_copy = g_strndup(body, body_len); + char *b = body_copy; + char *p = strchr(b, '\n'); + char *p1; + if (p) { /* first line: error code */ + *p = '\0'; + p++; + p1 = strchr(b, ' '); + if (p1) { + p1++; + if (*p1) { + thunk.error_name = g_strdup(p1); + } + } + b = p; + } + p = strchr(b, '\n'); + if (p) { /* second line: error message */ + *p = '\0'; + p++; + thunk.message = g_strdup(p); + 
g_strstrip(thunk.message); + b = p; + } + goto parsing_done; + } else if ((hdl->s3_api == S3_API_SWIFT_1 || + hdl->s3_api == S3_API_SWIFT_2) && + hdl->content_type && + g_str_equal(hdl->content_type, "application/json")) { + char *body_copy = g_strndup(body, body_len); + char *code = NULL; + char *details = NULL; + regmatch_t pmatch[2]; + + if (!s3_regexec_wrap(&code_regex, body_copy, 2, pmatch, 0)) { + code = find_regex_substring(body_copy, pmatch[1]); + } + if (!s3_regexec_wrap(&details_regex, body_copy, 2, pmatch, 0)) { + details = find_regex_substring(body_copy, pmatch[1]); + } + if (code && details) { + hdl->last_message = g_strdup_printf("%s (%s)", details, code); + } else if (code) { + hdl->last_message = g_strdup_printf("(%s)", code); + } else if (details) { + hdl->last_message = g_strdup_printf("%s", details); + } else { + hdl->last_message = NULL; + } + g_free(code); + g_free(details); + g_free(body_copy); + return FALSE; + } else if (hdl->s3_api == S3_API_CASTOR) { + /* The error mesage is the body */ + hdl->last_message = g_strndup(body, body_len); + return FALSE; + } else if (!hdl->content_type || + !g_str_equal(hdl->content_type, "application/xml")) { + return FALSE; + } - if (!s3_regexec_wrap(&error_name_regex, body_copy, 2, pmatch, 0)) - error_name = find_regex_substring(body_copy, pmatch[1]); + /* run the parser over it */ + ctxt = g_markup_parse_context_new(&parser, 0, (gpointer)&thunk, NULL); + if (!g_markup_parse_context_parse(ctxt, body, body_len, &err)) { + if (hdl->last_message) g_free(hdl->last_message); + hdl->last_message = g_strdup(err->message); + goto cleanup; + } - if (!s3_regexec_wrap(&message_regex, body_copy, 2, pmatch, 0)) - message = find_regex_substring(body_copy, pmatch[1]); + if (!g_markup_parse_context_end_parse(ctxt, &err)) { + if (hdl->last_message) g_free(hdl->last_message); + hdl->last_message = g_strdup(err->message); + goto cleanup; + } - if (error_name) { - hdl->last_s3_error_code = s3_error_code_from_name(error_name); 
+ g_markup_parse_context_free(ctxt); + ctxt = NULL; + + if (hdl->s3_api == S3_API_SWIFT_2) { + if (!hdl->x_auth_token && thunk.token_id) { + hdl->x_auth_token = thunk.token_id; + thunk.token_id = NULL; + } + if (!hdl->x_storage_url && thunk.service_public_url) { + hdl->x_storage_url = thunk.service_public_url; + thunk.service_public_url = NULL; + } } - if (message) { - hdl->last_message = message; - message = NULL; /* steal the reference to the string */ + if (thunk.expires > 0) { + hdl->expires = thunk.expires; + } +parsing_done: + if (thunk.error_name) { + hdl->last_s3_error_code = s3_error_code_from_name(thunk.error_name); + g_free(thunk.error_name); + thunk.error_name = NULL; } -cleanup: - g_free(body_copy); - g_free(message); - g_free(error_name); + if (thunk.message) { + g_free(hdl->last_message); + if (thunk.details) { + hdl->last_message = g_strdup_printf("%s: %s", thunk.message, + thunk.details); + amfree(thunk.message); + amfree(thunk.details); + } else { + hdl->last_message = thunk.message; + thunk.message = NULL; /* steal the reference to the string */ + } + } +cleanup: + g_free(thunk.text); + g_free(thunk.message); + g_free(thunk.error_name); + g_free(thunk.token_id); + g_free(thunk.service_public_url); + g_free(thunk.service_type); return FALSE; } @@ -1009,6 +1652,7 @@ curl_debug_message(CURL *curl G_GNUC_UNUSED, char *lineprefix; char *message; char **lines, **line; + size_t i; switch (type) { case CURLINFO_TEXT: @@ -1023,6 +1667,26 @@ curl_debug_message(CURL *curl G_GNUC_UNUSED, lineprefix="Hdr Out: "; break; + case CURLINFO_DATA_IN: + if (len > 3000) return 0; + for (i=0;i 3000) return 0; + for (i=0;icurl != NULL); + if (hdl->s3_api == S3_API_OAUTH2 && !hdl->getting_oauth2_access_token && + (!hdl->access_token || hdl->expires < time(NULL))) { + result = oauth2_get_access_token(hdl); + if (!result) { + g_debug("oauth2_get_access_token returned %d", result); + return result; + } + } else if (hdl->s3_api == S3_API_SWIFT_2 && 
!hdl->getting_swift_2_token && + (!hdl->x_auth_token || hdl->expires < time(NULL))) { + result = get_openstack_swift_api_v2_setting(hdl); + if (!result) { + g_debug("get_openstack_swift_api_v2_setting returned %d", result); + return result; + } + } + s3_reset(hdl); - use_subdomain = is_non_empty_string(hdl->bucket_location); - url = build_url(bucket, key, subresource, query, use_subdomain, hdl->use_ssl); + url = build_url(hdl, bucket, key, subresource, query); if (!url) goto cleanup; /* libcurl may behave strangely if these are not set correctly */ @@ -1145,9 +1826,9 @@ perform_request(S3Handle *hdl, /* set up the request */ headers = authenticate_request(hdl, verb, bucket, key, subresource, - md5_hash_b64, is_non_empty_string(hdl->bucket_location)); + md5_hash_b64, content_type, request_body_size, project_id); - if (hdl->use_ssl && hdl->ca_info) { + if (hdl->ca_info) { if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_CAINFO, hdl->ca_info))) goto curl_error; } @@ -1183,6 +1864,10 @@ perform_request(S3Handle *hdl, goto curl_error; if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSFUNCTION, progress_func))) goto curl_error; + if (progress_func) { + if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_NOPROGRESS,0))) + goto curl_error; + } if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSDATA, progress_data))) goto curl_error; @@ -1194,6 +1879,15 @@ perform_request(S3Handle *hdl, if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_INFILESIZE, (long)request_body_size))) goto curl_error; #endif +/* CURLOPT_POSTFIELDSIZE_LARGE added in 7.11.1 */ +#if LIBCURL_VERSION_NUM >= 0x070b01 + if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t)request_body_size))) + goto curl_error; +#else + if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_POSTFIELDSIZE, (long)request_body_size))) + goto curl_error; +#endif + /* CURLOPT_MAX_{RECV,SEND}_SPEED_LARGE added in 7.15.5 */ #if LIBCURL_VERSION_NUM >= 0x070f05 if 
(s3_curl_throttling_compat()) { @@ -1202,7 +1896,7 @@ perform_request(S3Handle *hdl, goto curl_error; if (hdl->max_recv_speed) - if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_MAX_SEND_SPEED_LARGE, (curl_off_t)hdl->max_recv_speed))) + if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t)hdl->max_recv_speed))) goto curl_error; } #endif @@ -1220,7 +1914,7 @@ perform_request(S3Handle *hdl, goto curl_error; - if (curlopt_upload) { + if (curlopt_upload || curlopt_post) { if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READFUNCTION, read_func))) goto curl_error; if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA, read_data))) @@ -1234,6 +1928,20 @@ perform_request(S3Handle *hdl, NULL))) goto curl_error; } + if (hdl->proxy) { + if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROXY, + hdl->proxy))) + goto curl_error; + } + + if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_FRESH_CONNECT, + (long)(hdl->reuse_connection && retry_after_close == 0 ? 0 : 1)))) { + goto curl_error; + } + if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_FORBID_REUSE, + (long)(hdl->reuse_connection? 
0 : 1)))) { + goto curl_error; + } /* Perform the request */ curl_code = curl_easy_perform(hdl->curl); @@ -1244,6 +1952,11 @@ perform_request(S3Handle *hdl, should_retry = interpret_response(hdl, curl_code, curl_error_buffer, int_writedata.resp_buf.buffer, int_writedata.resp_buf.buffer_pos, int_writedata.etag, md5_hash_hex); + if (hdl->s3_api == S3_API_OAUTH2 && + hdl->last_response_code == 401 && + hdl->last_s3_error_code == S3_ERROR_AuthenticationRequired) { + should_retry = oauth2_get_access_token(hdl); + } /* and, unless we know we need to retry, see what we're to do now */ if (!should_retry) { result = lookup_result(result_handling, hdl->last_response_code, @@ -1254,6 +1967,13 @@ perform_request(S3Handle *hdl, break; } + if (retries >= EXPONENTIAL_BACKOFF_MAX_RETRIES && + retry_after_close < 3 && + hdl->last_s3_error_code == S3_ERROR_RequestTimeout) { + retries = -1; + retry_after_close++; + g_debug("Retry on a new connection"); + } if (retries >= EXPONENTIAL_BACKOFF_MAX_RETRIES) { /* we're out of retries, so annotate hdl->last_message appropriately and bail * out. 
*/ @@ -1341,10 +2061,27 @@ s3_internal_header_func(void *ptr, size_t size, size_t nmemb, void * stream) header = g_strndup((gchar *) ptr, (gsize) size*nmemb); + if (header[strlen(header)-1] == '\n') + header[strlen(header)-1] = '\0'; + if (header[strlen(header)-1] == '\r') + header[strlen(header)-1] = '\0'; if (!s3_regexec_wrap(&etag_regex, header, 2, pmatch, 0)) data->etag = find_regex_substring(header, pmatch[1]); - if (!strcmp(final_header, header)) + if (!s3_regexec_wrap(&x_auth_token_regex, header, 2, pmatch, 0)) + data->hdl->x_auth_token = find_regex_substring(header, pmatch[1]); + + if (!s3_regexec_wrap(&x_storage_url_regex, header, 2, pmatch, 0)) + data->hdl->x_storage_url = find_regex_substring(header, pmatch[1]); + + if (!s3_regexec_wrap(&content_type_regex, header, 2, pmatch, 0)) + data->hdl->content_type = find_regex_substring(header, pmatch[1]); + + if (strlen(header) == 0) + data->headers_done = TRUE; + if (g_str_equal(final_header, header)) data->headers_done = TRUE; + if (g_str_equal("\n", header)) + data->headers_done = TRUE; /* If date header is found */ if (!s3_regexec_wrap(&date_sync_regex, header, 2, pmatch, 0)){ @@ -1380,20 +2117,27 @@ compile_regexes(void) struct {const char * str; int flags; regex_t *regex;} regexes[] = { {"[[:space:]]*([^<]*)[[:space:]]*", REG_EXTENDED | REG_ICASE, &error_name_regex}, {"^ETag:[[:space:]]*\"([^\"]+)\"[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &etag_regex}, + {"^X-Auth-Token:[[:space:]]*([^ ]+)[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &x_auth_token_regex}, + {"^X-Storage-Url:[[:space:]]*([^ ]+)[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &x_storage_url_regex}, + {"^Content-Type:[[:space:]]*([^ ;]+).*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &content_type_regex}, {"[[:space:]]*([^<]*)[[:space:]]*", REG_EXTENDED | REG_ICASE, &message_regex}, {"^[a-z0-9](-*[a-z0-9]){2,62}$", REG_EXTENDED | REG_NOSUB, &subdomain_regex}, {"(/>)|(>([^<]*))", REG_EXTENDED | REG_ICASE, 
&location_con_regex}, - {"^Date:(.*)\r",REG_EXTENDED | REG_ICASE | REG_NEWLINE, &date_sync_regex}, + {"^Date:(.*)$",REG_EXTENDED | REG_ICASE | REG_NEWLINE, &date_sync_regex}, + {"\"access_token\" : \"([^\"]*)\",", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &access_token_regex}, + {"\"expires_in\" : (.*)", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &expires_in_regex}, + {"\"details\": \"([^\"]*)\",", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &details_regex}, + {"\"code\": (.*),", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &code_regex}, {NULL, 0, NULL} }; char regmessage[1024]; - int size, i; + int i; int reg_result; for (i = 0; regexes[i].str; i++) { reg_result = regcomp(regexes[i].regex, regexes[i].str, regexes[i].flags); if (reg_result != 0) { - size = regerror(reg_result, regexes[i].regex, regmessage, sizeof(regmessage)); + regerror(reg_result, regexes[i].regex, regmessage, sizeof(regmessage)); g_error(_("Regex error: %s"), regmessage); return FALSE; } @@ -1407,6 +2151,15 @@ compile_regexes(void) {"^ETag:\\s*\"([^\"]+)\"\\s*$", G_REGEX_OPTIMIZE | G_REGEX_CASELESS, &etag_regex}, + {"^X-Auth-Token:\\s*([^ ]+)\\s*$", + G_REGEX_OPTIMIZE | G_REGEX_CASELESS, + &x_auth_token_regex}, + {"^X-Storage-Url:\\s*([^ ]+)\\s*$", + G_REGEX_OPTIMIZE | G_REGEX_CASELESS, + &x_storage_url_regex}, + {"^Content-Type:\\s*([^ ]+)\\s*$", + G_REGEX_OPTIMIZE | G_REGEX_CASELESS, + &content_type_regex}, {"\\s*([^<]*)\\s*", G_REGEX_OPTIMIZE | G_REGEX_CASELESS, &message_regex}, @@ -1416,9 +2169,21 @@ compile_regexes(void) {"(/>)|(>([^<]*))", G_REGEX_CASELESS, &location_con_regex}, - {"^Date:(.*)\\r", + {"^Date:(.*)$", G_REGEX_OPTIMIZE | G_REGEX_CASELESS, &date_sync_regex}, + {"\"access_token\" : \"([^\"]*)\"", + G_REGEX_OPTIMIZE | G_REGEX_CASELESS, + &access_token_regex}, + {"\"expires_in\" : (.*)", + G_REGEX_OPTIMIZE | G_REGEX_CASELESS, + &expires_in_regex}, + {"\"details\" : \"([^\"]*)\"", + G_REGEX_OPTIMIZE | G_REGEX_CASELESS, + &details_regex}, + {"\"code\" : (.*)", + G_REGEX_OPTIMIZE | 
G_REGEX_CASELESS, + &code_regex}, {NULL, 0, NULL} }; int i; @@ -1440,6 +2205,10 @@ compile_regexes(void) * Public function implementations */ +#if (GLIB_MAJOR_VERSION > 2 || (GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION >= 31)) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wmissing-field-initializers" +#endif gboolean s3_init(void) { static GStaticMutex mutex = G_STATIC_MUTEX_INIT; @@ -1455,6 +2224,9 @@ gboolean s3_init(void) g_static_mutex_unlock(&mutex); return ret; } +#if (GLIB_MAJOR_VERSION > 2 || (GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION >= 31)) +# pragma GCC diagnostic pop +#endif gboolean s3_curl_location_compat(void) @@ -1471,26 +2243,140 @@ s3_bucket_location_compat(const char *bucket) return !s3_regexec_wrap(&subdomain_regex, bucket, 0, NULL, 0); } +static gboolean +get_openstack_swift_api_v1_setting( + S3Handle *hdl) +{ + s3_result_t result = S3_RESULT_FAIL; + static result_handling_t result_handling[] = { + { 200, 0, 0, S3_RESULT_OK }, + RESULT_HANDLING_ALWAYS_RETRY, + { 0, 0, 0, /* default: */ S3_RESULT_FAIL } + }; + + s3_verbose(hdl, 1); + result = perform_request(hdl, "GET", NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, result_handling); + + return result == S3_RESULT_OK; +} + +static gboolean +get_openstack_swift_api_v2_setting( + S3Handle *hdl) +{ + s3_result_t result = S3_RESULT_FAIL; + static result_handling_t result_handling[] = { + { 200, 0, 0, S3_RESULT_OK }, + RESULT_HANDLING_ALWAYS_RETRY, + { 0, 0, 0, /* default: */ S3_RESULT_FAIL } + }; + + CurlBuffer buf = {NULL, 0, 0, 0}; + GString *body = g_string_new(""); + if (hdl->username && hdl->password) { + g_string_append_printf(body, "tenant_id) { + g_string_append_printf(body, " tenantId=\"%s\"", hdl->tenant_id); + } + if (hdl->tenant_name) { + g_string_append_printf(body, " tenantName=\"%s\"", hdl->tenant_name); + } + g_string_append(body, ">"); + if (hdl->username && hdl->password) { + g_string_append_printf(body, 
"", hdl->username, hdl->password); + } else { + g_string_append_printf(body, "", hdl->access_key, hdl->secret_key); + } + g_string_append(body, ""); + + buf.buffer = g_string_free(body, FALSE); + buf.buffer_len = strlen(buf.buffer); + s3_verbose(hdl, 1); + hdl->getting_swift_2_token = 1; + g_free(hdl->x_storage_url); + hdl->x_storage_url = NULL; + result = perform_request(hdl, "POST", NULL, NULL, NULL, NULL, + "application/xml", NULL, + S3_BUFFER_READ_FUNCS, &buf, + NULL, NULL, NULL, + NULL, NULL, result_handling); + hdl->getting_swift_2_token = 0; + + return result == S3_RESULT_OK; +} + S3Handle * s3_open(const char *access_key, const char *secret_key, + const char *swift_account_id, + const char *swift_access_key, + const char *host, + const char *service_path, + const gboolean use_subdomain, const char *user_token, const char *bucket_location, const char *storage_class, - const char *ca_info - ) { + const char *ca_info, + const char *server_side_encryption, + const char *proxy, + const S3_api s3_api, + const char *username, + const char *password, + const char *tenant_id, + const char *tenant_name, + const char *client_id, + const char *client_secret, + const char *refresh_token, + const gboolean reuse_connection, + const char *reps, + const char *reps_bucket) +{ S3Handle *hdl; hdl = g_new0(S3Handle, 1); if (!hdl) goto error; - hdl->verbose = FALSE; + hdl->verbose = TRUE; hdl->use_ssl = s3_curl_supports_ssl(); + hdl->reuse_connection = reuse_connection; + + if (s3_api == S3_API_S3) { + g_assert(access_key); + hdl->access_key = g_strdup(access_key); + g_assert(secret_key); + hdl->secret_key = g_strdup(secret_key); + } else if (s3_api == S3_API_SWIFT_1) { + g_assert(swift_account_id); + hdl->swift_account_id = g_strdup(swift_account_id); + g_assert(swift_access_key); + hdl->swift_access_key = g_strdup(swift_access_key); + } else if (s3_api == S3_API_SWIFT_2) { + g_assert((username && password) || (access_key && secret_key)); + hdl->username = g_strdup(username); + 
hdl->password = g_strdup(password); + hdl->access_key = g_strdup(access_key); + hdl->secret_key = g_strdup(secret_key); + g_assert(tenant_id || tenant_name); + hdl->tenant_id = g_strdup(tenant_id); + hdl->tenant_name = g_strdup(tenant_name); + } else if (s3_api == S3_API_OAUTH2) { + hdl->client_id = g_strdup(client_id); + hdl->client_secret = g_strdup(client_secret); + hdl->refresh_token = g_strdup(refresh_token); + } else if (s3_api == S3_API_CASTOR) { + hdl->username = g_strdup(username); + hdl->password = g_strdup(password); + hdl->tenant_name = g_strdup(tenant_name); + hdl->reps = g_strdup(reps); + hdl->reps_bucket = g_strdup(reps_bucket); + } - g_assert(access_key); - hdl->access_key = g_strdup(access_key); - g_assert(secret_key); - hdl->secret_key = g_strdup(secret_key); /* NULL is okay */ hdl->user_token = g_strdup(user_token); @@ -1500,12 +2386,67 @@ s3_open(const char *access_key, /* NULL is ok */ hdl->storage_class = g_strdup(storage_class); + /* NULL is ok */ + hdl->server_side_encryption = g_strdup(server_side_encryption); + + /* NULL is ok */ + hdl->proxy = g_strdup(proxy); + /* NULL is okay */ hdl->ca_info = g_strdup(ca_info); + if (!is_non_empty_string(host)) + host = "s3.amazonaws.com"; + hdl->host = g_ascii_strdown(host, -1); + hdl->use_subdomain = use_subdomain || + (strcmp(hdl->host, "s3.amazonaws.com") == 0 && + is_non_empty_string(hdl->bucket_location)); + hdl->s3_api = s3_api; + if (service_path) { + if (strlen(service_path) == 0 || + (strlen(service_path) == 1 && service_path[0] == '/')) { + hdl->service_path = NULL; + } else if (service_path[0] != '/') { + hdl->service_path = g_strdup_printf("/%s", service_path); + } else { + hdl->service_path = g_strdup(service_path); + } + if (hdl->service_path) { + /* remove trailling / */ + size_t len = strlen(hdl->service_path) - 1; + if (hdl->service_path[len] == '/') + hdl->service_path[len] = '\0'; + } + } else { + hdl->service_path = NULL; + } + hdl->curl = curl_easy_init(); if (!hdl->curl) goto 
error; + /* Set HTTP handling options for CAStor */ + if (s3_api == S3_API_CASTOR) { +#if LIBCURL_VERSION_NUM >= 0x071301 + curl_version_info_data *info; + /* check the runtime version too */ + info = curl_version_info(CURLVERSION_NOW); + if (info->version_num >= 0x071301) { + curl_easy_setopt(hdl->curl, CURLOPT_FOLLOWLOCATION, 1); + curl_easy_setopt(hdl->curl, CURLOPT_UNRESTRICTED_AUTH, 1); + curl_easy_setopt(hdl->curl, CURLOPT_MAXREDIRS, 5); + curl_easy_setopt(hdl->curl, CURLOPT_POSTREDIR, CURL_REDIR_POST_ALL); + curl_easy_setopt(hdl->curl, CURLOPT_HTTP_VERSION, + CURL_HTTP_VERSION_1_1); + if (hdl->username) + curl_easy_setopt(hdl->curl, CURLOPT_USERNAME, hdl->username); + if (hdl->password) + curl_easy_setopt(hdl->curl, CURLOPT_PASSWORD, hdl->password); + curl_easy_setopt(hdl->curl, CURLOPT_HTTPAUTH, + (CURLAUTH_BASIC | CURLAUTH_DIGEST)); + } +#endif + } + return hdl; error: @@ -1513,6 +2454,22 @@ error: return NULL; } +gboolean +s3_open2( + S3Handle *hdl) +{ + gboolean ret = TRUE; + + /* get the X-Storage-Url and X-Auth-Token */ + if (hdl->s3_api == S3_API_SWIFT_1) { + ret = get_openstack_swift_api_v1_setting(hdl); + } else if (hdl->s3_api == S3_API_SWIFT_2) { + ret = get_openstack_swift_api_v2_setting(hdl); + } + + return ret; +} + void s3_free(S3Handle *hdl) { @@ -1521,9 +2478,26 @@ s3_free(S3Handle *hdl) if (hdl) { g_free(hdl->access_key); g_free(hdl->secret_key); + g_free(hdl->swift_account_id); + g_free(hdl->swift_access_key); + g_free(hdl->content_type); + g_free(hdl->user_token); + g_free(hdl->ca_info); + g_free(hdl->proxy); + g_free(hdl->username); + g_free(hdl->password); + g_free(hdl->tenant_id); + g_free(hdl->tenant_name); + g_free(hdl->client_id); + g_free(hdl->client_secret); + g_free(hdl->refresh_token); + g_free(hdl->access_token); if (hdl->user_token) g_free(hdl->user_token); if (hdl->bucket_location) g_free(hdl->bucket_location); if (hdl->storage_class) g_free(hdl->storage_class); + if (hdl->server_side_encryption) 
g_free(hdl->server_side_encryption); + if (hdl->host) g_free(hdl->host); + if (hdl->service_path) g_free(hdl->service_path); if (hdl->curl) curl_easy_cleanup(hdl->curl); g_free(hdl); @@ -1551,6 +2525,10 @@ s3_reset(S3Handle *hdl) g_free(hdl->last_response_body); hdl->last_response_body = NULL; } + if (hdl->content_type) { + g_free(hdl->content_type); + hdl->content_type = NULL; + } hdl->last_response_body_size = 0; } @@ -1678,13 +2656,21 @@ s3_upload(S3Handle *hdl, s3_result_t result = S3_RESULT_FAIL; static result_handling_t result_handling[] = { { 200, 0, 0, S3_RESULT_OK }, + { 201, 0, 0, S3_RESULT_OK }, RESULT_HANDLING_ALWAYS_RETRY, { 0, 0, 0, /* default: */ S3_RESULT_FAIL } }; + char *verb = "PUT"; + char *content_type = NULL; g_assert(hdl != NULL); - result = perform_request(hdl, "PUT", bucket, key, NULL, NULL, + if (hdl->s3_api == S3_API_CASTOR) { + verb = "POST"; + content_type = "application/x-amanda-backup-data"; + } + + result = perform_request(hdl, verb, bucket, key, NULL, NULL, content_type, NULL, read_func, reset_func, size_func, md5_func, read_data, NULL, NULL, NULL, progress_func, progress_data, result_handling); @@ -1703,6 +2689,7 @@ struct list_keys_thunk { gboolean is_truncated; gchar *next_marker; + guint64 size; gboolean want_text; @@ -1723,13 +2710,20 @@ list_start_element(GMarkupParseContext *context G_GNUC_UNUSED, struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data; thunk->want_text = 0; - if (g_ascii_strcasecmp(element_name, "contents") == 0) { + if (g_ascii_strcasecmp(element_name, "contents") == 0 || + g_ascii_strcasecmp(element_name, "object") == 0) { thunk->in_contents = 1; } else if (g_ascii_strcasecmp(element_name, "commonprefixes") == 0) { thunk->in_common_prefixes = 1; } else if (g_ascii_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) { thunk->want_text = 1; - } else if (g_ascii_strcasecmp(element_name, "key") == 0 && thunk->in_contents) { + } else if ((g_ascii_strcasecmp(element_name, "key") == 
0 || + g_ascii_strcasecmp(element_name, "name") == 0) && + thunk->in_contents) { + thunk->want_text = 1; + } else if ((g_ascii_strcasecmp(element_name, "size") == 0 || + g_ascii_strcasecmp(element_name, "bytes") == 0) && + thunk->in_contents) { thunk->want_text = 1; } else if (g_ascii_strcasecmp(element_name, "istruncated")) { thunk->want_text = 1; @@ -1750,8 +2744,19 @@ list_end_element(GMarkupParseContext *context G_GNUC_UNUSED, thunk->in_contents = 0; } else if (g_ascii_strcasecmp(element_name, "commonprefixes") == 0) { thunk->in_common_prefixes = 0; - } else if (g_ascii_strcasecmp(element_name, "key") == 0 && thunk->in_contents) { + } else if ((g_ascii_strcasecmp(element_name, "key") == 0 || + g_ascii_strcasecmp(element_name, "name") == 0) && + thunk->in_contents) { thunk->filename_list = g_slist_prepend(thunk->filename_list, thunk->text); + if (thunk->is_truncated) { + if (thunk->next_marker) g_free(thunk->next_marker); + thunk->next_marker = g_strdup(thunk->text); + } + thunk->text = NULL; + } else if ((g_ascii_strcasecmp(element_name, "size") == 0 || + g_ascii_strcasecmp(element_name, "bytes") == 0) && + thunk->in_contents) { + thunk->size += g_ascii_strtoull (thunk->text, NULL, 10); thunk->text = NULL; } else if (g_ascii_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) { thunk->filename_list = g_slist_prepend(thunk->filename_list, thunk->text); @@ -1795,6 +2800,7 @@ list_fetch(S3Handle *hdl, s3_result_t result = S3_RESULT_FAIL; static result_handling_t result_handling[] = { { 200, 0, 0, S3_RESULT_OK }, + { 204, 0, 0, S3_RESULT_OK }, RESULT_HANDLING_ALWAYS_RETRY, { 0, 0, 0, /* default: */ S3_RESULT_FAIL } }; @@ -1813,19 +2819,37 @@ list_fetch(S3Handle *hdl, /* loop over possible parts to build query string */ query = g_string_new(""); for (i = 0; pos_parts[i][0]; i++) { - if (pos_parts[i][1]) { - if (have_prev_part) - g_string_append(query, "&"); - else - have_prev_part = TRUE; - esc_value = curl_escape(pos_parts[i][1], 0); - 
g_string_append_printf(query, "%s=%s", pos_parts[i][0], esc_value); - curl_free(esc_value); - } + if (pos_parts[i][1]) { + const char *keyword; + if (have_prev_part) + g_string_append(query, "&"); + else + have_prev_part = TRUE; + esc_value = curl_escape(pos_parts[i][1], 0); + keyword = pos_parts[i][0]; + if ((hdl->s3_api == S3_API_SWIFT_1 || + hdl->s3_api == S3_API_SWIFT_2) && + strcmp(keyword, "max-keys") == 0) { + keyword = "limit"; + } else if ((hdl->s3_api == S3_API_CASTOR) && + strcmp(keyword, "max-keys") == 0) { + keyword = "size"; + } + g_string_append_printf(query, "%s=%s", keyword, esc_value); + curl_free(esc_value); + } + } + if (hdl->s3_api == S3_API_SWIFT_1 || + hdl->s3_api == S3_API_SWIFT_2 || + hdl->s3_api == S3_API_CASTOR) { + if (have_prev_part) + g_string_append(query, "&"); + g_string_append(query, "format=xml"); } /* and perform the request on that URI */ - result = perform_request(hdl, "GET", bucket, NULL, NULL, query->str, + result = perform_request(hdl, "GET", bucket, NULL, NULL, query->str, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, S3_BUFFER_WRITE_FUNCS, buf, NULL, NULL, result_handling); @@ -1840,7 +2864,8 @@ s3_list_keys(S3Handle *hdl, const char *bucket, const char *prefix, const char *delimiter, - GSList **list) + GSList **list, + guint64 *total_size) { /* * max len of XML variables: @@ -1867,6 +2892,7 @@ s3_list_keys(S3Handle *hdl, thunk.filename_list = NULL; thunk.text = NULL; thunk.next_marker = NULL; + thunk.size = 0; /* Loop until S3 has given us the entire picture */ do { @@ -1874,11 +2900,14 @@ s3_list_keys(S3Handle *hdl, /* get some data from S3 */ result = list_fetch(hdl, bucket, prefix, delimiter, thunk.next_marker, MAX_KEYS, &buf); if (result != S3_RESULT_OK) goto cleanup; + if (buf.buffer_pos == 0) goto cleanup; /* no body */ /* run the parser over it */ thunk.in_contents = FALSE; thunk.in_common_prefixes = FALSE; thunk.is_truncated = FALSE; + if (thunk.next_marker) g_free(thunk.next_marker); + thunk.next_marker = NULL; 
thunk.want_text = FALSE; ctxt = g_markup_parse_context_new(&parser, 0, (gpointer)&thunk, NULL); @@ -1913,6 +2942,9 @@ cleanup: return FALSE; } else { *list = thunk.filename_list; + if(total_size) { + *total_size = thunk.size; + } return TRUE; } } @@ -1937,7 +2969,7 @@ s3_read(S3Handle *hdl, g_assert(hdl != NULL); g_assert(write_func != NULL); - result = perform_request(hdl, "GET", bucket, key, NULL, NULL, + result = perform_request(hdl, "GET", bucket, key, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, write_func, reset_func, write_data, progress_func, progress_data, result_handling); @@ -1951,29 +2983,94 @@ s3_delete(S3Handle *hdl, { s3_result_t result = S3_RESULT_FAIL; static result_handling_t result_handling[] = { + { 200, 0, 0, S3_RESULT_OK }, { 204, 0, 0, S3_RESULT_OK }, + { 404, 0, 0, S3_RESULT_OK }, { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_OK }, RESULT_HANDLING_ALWAYS_RETRY, + { 409, 0, 0, S3_RESULT_OK }, { 0, 0, 0, /* default: */ S3_RESULT_FAIL } }; g_assert(hdl != NULL); - result = perform_request(hdl, "DELETE", bucket, key, NULL, NULL, + result = perform_request(hdl, "DELETE", bucket, key, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, result_handling); return result == S3_RESULT_OK; } +int +s3_multi_delete(S3Handle *hdl, + const char *bucket, + const char **key) +{ + GString *query; + CurlBuffer data; + s3_result_t result = S3_RESULT_FAIL; + static result_handling_t result_handling[] = { + { 200, 0, 0, S3_RESULT_OK }, + { 204, 0, 0, S3_RESULT_OK }, + { 400, 0, 0, S3_RESULT_NOTIMPL }, + { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_OK }, + RESULT_HANDLING_ALWAYS_RETRY, + { 0, 0, 0, /* default: */ S3_RESULT_FAIL } + }; + + g_assert(hdl != NULL); + + query = g_string_new(NULL); + g_string_append(query, "\n"); + g_string_append(query, "\n"); + if (!hdl->verbose) { + g_string_append(query, " true\n"); + } + while (*key != NULL) { + g_string_append(query, " \n"); + g_string_append(query, " "); + g_string_append(query, 
*key); + g_string_append(query, "\n"); + g_string_append(query, " \n"); + key++; + } + g_string_append(query, "\n"); + + data.buffer_len = query->len; + data.buffer = query->str; + data.buffer_pos = 0; + data.max_buffer_size = data.buffer_len; + + result = perform_request(hdl, "POST", bucket, NULL, "delete", NULL, + "application/xml", NULL, + s3_buffer_read_func, s3_buffer_reset_func, + s3_buffer_size_func, s3_buffer_md5_func, + &data, NULL, NULL, NULL, NULL, NULL, + result_handling); + + g_string_free(query, TRUE); + if (result == S3_RESULT_OK) + return 1; + else if (result == S3_RESULT_NOTIMPL) + return 2; + else + return 0; +} + gboolean s3_make_bucket(S3Handle *hdl, - const char *bucket) + const char *bucket, + const char *project_id) { char *body = NULL; + char *verb = "PUT"; + char *content_type = NULL; s3_result_t result = S3_RESULT_FAIL; static result_handling_t result_handling[] = { { 200, 0, 0, S3_RESULT_OK }, + { 201, 0, 0, S3_RESULT_OK }, + { 202, 0, 0, S3_RESULT_OK }, + { 204, 0, 0, S3_RESULT_OK }, { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_RETRY }, RESULT_HANDLING_ALWAYS_RETRY, { 0, 0, 0, /* default: */ S3_RESULT_FAIL } @@ -1992,7 +3089,11 @@ s3_make_bucket(S3Handle *hdl, 0 != strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location)) { if (s3_bucket_location_compat(bucket)) { ptr = &buf; - buf.buffer = g_strdup_printf(AMAZON_BUCKET_CONF_TEMPLATE, hdl->bucket_location); + buf.buffer = g_strdup_printf(AMAZON_BUCKET_CONF_TEMPLATE, + g_str_equal(hdl->host, "gss.iijgio.com")? 
+ " xmlns=\"http://acs.iijgio.com/doc/2006-03-01/\"": + "", + hdl->bucket_location); buf.buffer_len = (guint) strlen(buf.buffer); buf.buffer_pos = 0; buf.max_buffer_size = buf.buffer_len; @@ -2008,28 +3109,36 @@ s3_make_bucket(S3Handle *hdl, } } - result = perform_request(hdl, "PUT", bucket, NULL, NULL, NULL, + if (hdl->s3_api == S3_API_CASTOR) { + verb = "POST"; + content_type = "application/castorcontext"; + } + + result = perform_request(hdl, verb, bucket, NULL, NULL, NULL, content_type, + project_id, read_func, reset_func, size_func, md5_func, ptr, NULL, NULL, NULL, NULL, NULL, result_handling); if (result == S3_RESULT_OK || - (is_non_empty_string(hdl->bucket_location) && result != S3_RESULT_OK - && hdl->last_s3_error_code == S3_ERROR_BucketAlreadyOwnedByYou)) { + (result != S3_RESULT_OK && + hdl->last_s3_error_code == S3_ERROR_BucketAlreadyOwnedByYou)) { /* verify the that the location constraint on the existing bucket matches * the one that's configured. */ - result = perform_request(hdl, "GET", bucket, NULL, "location", NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, result_handling); + if (is_non_empty_string(hdl->bucket_location)) { + result = perform_request(hdl, "GET", bucket, NULL, "location", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, result_handling); + } else { + result = perform_request(hdl, "GET", bucket, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, result_handling); + } - /* note that we can check only one of the three AND conditions above - * and infer that the others are true - */ if (result == S3_RESULT_OK && is_non_empty_string(hdl->bucket_location)) { /* return to the default state of failure */ result = S3_RESULT_FAIL; - if (body) g_free(body); /* use strndup to get a null-terminated string */ body = g_strndup(hdl->last_response_body, hdl->last_response_body_size); if (!body) { @@ -2071,6 +3180,105 @@ cleanup: } +static s3_result_t 
+oauth2_get_access_token( + S3Handle *hdl) +{ + GString *query; + CurlBuffer data; + s3_result_t result = S3_RESULT_FAIL; + static result_handling_t result_handling[] = { + { 200, 0, 0, S3_RESULT_OK }, + { 204, 0, 0, S3_RESULT_OK }, + RESULT_HANDLING_ALWAYS_RETRY, + { 0, 0, 0, /* default: */ S3_RESULT_FAIL } + }; + char *body; + regmatch_t pmatch[2]; + + g_assert(hdl != NULL); + + query = g_string_new(NULL); + g_string_append(query, "client_id="); + g_string_append(query, hdl->client_id); + g_string_append(query, "&client_secret="); + g_string_append(query, hdl->client_secret); + g_string_append(query, "&refresh_token="); + g_string_append(query, hdl->refresh_token); + g_string_append(query, "&grant_type=refresh_token"); + + data.buffer_len = query->len; + data.buffer = query->str; + data.buffer_pos = 0; + data.max_buffer_size = data.buffer_len; + + hdl->x_storage_url = "https://accounts.google.com/o/oauth2/token"; + hdl->getting_oauth2_access_token = 1; + result = perform_request(hdl, "POST", NULL, NULL, NULL, NULL, + "application/x-www-form-urlencoded", NULL, + s3_buffer_read_func, s3_buffer_reset_func, + s3_buffer_size_func, s3_buffer_md5_func, + &data, NULL, NULL, NULL, NULL, NULL, + result_handling); + hdl->x_storage_url = NULL; + hdl->getting_oauth2_access_token = 0; + + /* use strndup to get a null-terminated string */ + body = g_strndup(hdl->last_response_body, hdl->last_response_body_size); + if (!body) { + hdl->last_message = g_strdup(_("No body received for location request")); + goto cleanup; + } else if ('\0' == body[0]) { + hdl->last_message = g_strdup(_("Empty body received for location request")); + goto cleanup; + } + + if (!s3_regexec_wrap(&access_token_regex, body, 2, pmatch, 0)) { + hdl->access_token = find_regex_substring(body, pmatch[1]); + hdl->x_auth_token = g_strdup(hdl->access_token); + } + if (!s3_regexec_wrap(&expires_in_regex, body, 2, pmatch, 0)) { + char *expires_in = find_regex_substring(body, pmatch[1]); + hdl->expires = time(NULL) 
+ atoi(expires_in) - 600; + g_free(expires_in); + } + +cleanup: + g_free(body); + return result == S3_RESULT_OK; +} + +gboolean +s3_is_bucket_exists(S3Handle *hdl, + const char *bucket, + const char *project_id) +{ + s3_result_t result = S3_RESULT_FAIL; + char *query; + static result_handling_t result_handling[] = { + { 200, 0, 0, S3_RESULT_OK }, + { 204, 0, 0, S3_RESULT_OK }, + RESULT_HANDLING_ALWAYS_RETRY, + { 0, 0, 0, /* default: */ S3_RESULT_FAIL } + }; + + if (hdl->s3_api == S3_API_SWIFT_1 || + hdl->s3_api == S3_API_SWIFT_2) { + query = "limit=1"; + } else if (hdl->s3_api == S3_API_CASTOR) { + query = "format=xml&size=0"; + } else { + query = "max-keys=1"; + } + + result = perform_request(hdl, "GET", bucket, NULL, NULL, query, + NULL, project_id, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, result_handling); + + return result == S3_RESULT_OK; +} + gboolean s3_delete_bucket(S3Handle *hdl, const char *bucket)