/*
- * Copyright (c) 2005-2008 Zmanda Inc. All Rights Reserved.
- *
- * This library is free software; you can redistribute it and/or modify it
- * under the terms of the GNU Lesser General Public License version 2.1 as
- * published by the Free Software Foundation.
- *
- * This library is distributed in the hope that it will be useful, but
+ * Copyright (c) 2008-2012 Zmanda, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
- * License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * Contact information: Zmanda Inc., 465 S Mathlida Ave, Suite 300
- * Sunnyvale, CA 94086, USA, or: http://www.zmanda.com
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Contact information: Zmanda Inc., 465 S. Mathilda Ave., Suite 300
+ * Sunnyvale, CA 94085, USA, or: http://www.zmanda.com
*/
/* TODO
#define AMAZON_SECURITY_HEADER "x-amz-security-token"
#define AMAZON_BUCKET_CONF_TEMPLATE "\
- <CreateBucketConfiguration>\n\
+ <CreateBucketConfiguration%s>\n\
<LocationConstraint>%s</LocationConstraint>\n\
</CreateBucketConfiguration>"
+#define AMAZON_STORAGE_CLASS_HEADER "x-amz-storage-class"
+
+#define AMAZON_SERVER_SIDE_ENCRYPTION_HEADER "x-amz-server-side-encryption"
+
+#define AMAZON_WILDCARD_LOCATION "*"
+
/* parameters for exponential backoff in the face of retriable errors */
/* start at 0.01s */
/* Results which should always be retried */
#define RESULT_HANDLING_ALWAYS_RETRY \
- { 400, S3_ERROR_RequestTimeout, 0, S3_RESULT_RETRY }, \
- { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_RETRY }, \
- { 409, S3_ERROR_OperationAborted, 0, S3_RESULT_RETRY }, \
- { 412, S3_ERROR_PreconditionFailed, 0, S3_RESULT_RETRY }, \
- { 500, S3_ERROR_InternalError, 0, S3_RESULT_RETRY }, \
- { 501, S3_ERROR_NotImplemented, 0, S3_RESULT_RETRY }, \
- { 0, 0, CURLE_COULDNT_CONNECT, S3_RESULT_RETRY }, \
- { 0, 0, CURLE_PARTIAL_FILE, S3_RESULT_RETRY }, \
- { 0, 0, CURLE_OPERATION_TIMEOUTED, S3_RESULT_RETRY }, \
- { 0, 0, CURLE_SEND_ERROR, S3_RESULT_RETRY }, \
- { 0, 0, CURLE_RECV_ERROR, S3_RESULT_RETRY }, \
- { 0, 0, CURLE_GOT_NOTHING, S3_RESULT_RETRY }
+ { 400, S3_ERROR_RequestTimeout, 0, S3_RESULT_RETRY }, \
+ { 403, S3_ERROR_RequestTimeTooSkewed,0, S3_RESULT_RETRY }, \
+ { 409, S3_ERROR_OperationAborted, 0, S3_RESULT_RETRY }, \
+ { 412, S3_ERROR_PreconditionFailed, 0, S3_RESULT_RETRY }, \
+ { 500, S3_ERROR_InternalError, 0, S3_RESULT_RETRY }, \
+ { 501, S3_ERROR_NotImplemented, 0, S3_RESULT_RETRY }, \
+ { 0, 0, CURLE_COULDNT_CONNECT, S3_RESULT_RETRY }, \
+ { 0, 0, CURLE_COULDNT_RESOLVE_HOST, S3_RESULT_RETRY }, \
+ { 0, 0, CURLE_PARTIAL_FILE, S3_RESULT_RETRY }, \
+ { 0, 0, CURLE_OPERATION_TIMEOUTED, S3_RESULT_RETRY }, \
+ { 0, 0, CURLE_SSL_CONNECT_ERROR, S3_RESULT_RETRY }, \
+ { 0, 0, CURLE_SEND_ERROR, S3_RESULT_RETRY }, \
+ { 0, 0, CURLE_RECV_ERROR, S3_RESULT_RETRY }, \
+ { 0, 0, CURLE_GOT_NOTHING, S3_RESULT_RETRY }
/*
* Data structures and associated functions
char *access_key;
char *secret_key;
char *user_token;
-
+ char *swift_account_id;
+ char *swift_access_key;
+ char *username;
+ char *password;
+ char *tenant_id;
+ char *tenant_name;
+ char *client_id;
+ char *client_secret;
+ char *refresh_token;
+ char *access_token;
+ time_t expires;
+ gboolean getting_oauth2_access_token;
+ gboolean getting_swift_2_token;
+
+ /* attributes for new objects */
char *bucket_location;
+ char *storage_class;
+ char *server_side_encryption;
+ char *proxy;
+ char *host;
+ char *service_path;
+ gboolean use_subdomain;
+ S3_api s3_api;
+ char *ca_info;
+ char *x_auth_token;
+ char *x_storage_url;
CURL *curl;
gboolean verbose;
gboolean use_ssl;
+ guint64 max_send_speed;
+ guint64 max_recv_speed;
+
/* information from the last request */
char *last_message;
guint last_response_code;
guint last_num_retries;
void *last_response_body;
guint last_response_body_size;
+
+ /* offset with s3 */
+ time_t time_offset_with_s3;
+ char *content_type;
+
+ gboolean reuse_connection;
+
+ /* CAStor */
+ char *reps;
+ char *reps_bucket;
};
typedef struct {
    gboolean headers_done;
    gboolean int_write_done;
    char *etag;
+    /* back-pointer to the owning handle; added so the curl callbacks can
+     * reach the handle's S3 clock offset */
+    struct S3Handle *hdl;
} S3InternalData;
/* Callback function to examine headers one-at-a-time
- *
+ *
* @note this is the same as CURLOPT_HEADERFUNCTION
*
* @param data: The pointer to read data from
typedef enum {
    S3_RESULT_RETRY = -1,
    S3_RESULT_FAIL = 0,
-    S3_RESULT_OK = 1
+    S3_RESULT_OK = 1,
+    /* the backend does not implement the requested operation */
+    S3_RESULT_NOTIMPL = 2
} s3_result_t;
typedef struct result_handling {
s3_result_t result;
} result_handling_t;
+/*
+ * get the access token for OAUTH2
+ */
+static gboolean oauth2_get_access_token(S3Handle *hdl);
+
/* Lookup a result in C{result_handling}.
*
* @param result_handling: array of handling specifications
/*
* Precompiled regular expressions */
static regex_t etag_regex, error_name_regex, message_regex, subdomain_regex,
- location_con_regex;
+ location_con_regex, date_sync_regex, x_auth_token_regex,
+ x_storage_url_regex, access_token_regex, expires_in_regex,
+ content_type_regex, details_regex, code_regex;
+
/*
* Utility functions
*/
+/* Check if a string is non-empty
+ *
+ * @param str: string to check
+ * @returns: true iff str is non-NULL and not "\0"
+ */
+static gboolean is_non_empty_string(const char *str);
+
/* Construct the URL for an Amazon S3 REST request.
*
* A new string is allocated and returned; it is the responsiblity of the caller.
*
* @param hdl: the S3Handle object
- * @param verb: capitalized verb for this request ('PUT', 'GET', etc.)
+ * @param service_path: A path to add in the URL, or NULL for none.
* @param bucket: the bucket being accessed, or NULL for none
* @param key: the key being accessed, or NULL for none
* @param subresource: the sub-resource being accessed (e.g. "acl"), or NULL for none
- * @param use_subdomain: if TRUE, a subdomain of s3.amazonaws.com will be used
+ * @param query: the query being accessed (e.g. "acl"), or NULL for none
+ *
+ * !use_subdomain: http://host/service_path/bucket/key
+ * use_subdomain : http://bucket.host/service_path/key
+ *
*/
static char *
-build_url(const char *bucket,
+build_url(
+ S3Handle *hdl,
+ const char *bucket,
const char *key,
const char *subresource,
- const char *query,
- gboolean use_subdomain,
- gboolean use_ssl);
+ const char *query);
/* Create proper authorization headers for an Amazon S3 REST
* request to C{headers}.
* @param key: the key being accessed, or NULL for none
* @param subresource: the sub-resource being accessed (e.g. "acl"), or NULL for none
* @param md5_hash: the MD5 hash of the request body, or NULL for none
- * @param use_subdomain: if TRUE, a subdomain of s3.amazonaws.com will be used
*/
static struct curl_slist *
authenticate_request(S3Handle *hdl,
const char *key,
const char *subresource,
const char *md5_hash,
- gboolean use_subdomain);
+ const char *content_type,
+ const size_t content_length,
+ const char *project_id);
* @param body: the response body
* @param body_len: the length of the response body
* @param etag: The response's ETag header
- * @param content_md5: The hex-encoded MD5 hash of the request body,
+ * @param content_md5: The hex-encoded MD5 hash of the request body,
* which will be checked against the response's ETag header.
* If NULL, the header is not checked.
* If non-NULL, then the body should have the response headers at its beginnning.
/* Perform an S3 operation. This function handles all of the details
* of retryig requests and so on.
- *
+ *
* The concepts of bucket and keys are defined by the Amazon S3 API.
* See: "Components of Amazon S3" - API Version 2006-03-01 pg. 8
*
- * Individual sub-resources are defined in several places. In the REST API,
+ * Individual sub-resources are defined in several places. In the REST API,
* they they are represented by a "flag" in the "query string".
* See: "Constructing the CanonicalizedResource Element" - API Version 2006-03-01 pg. 60
*
const char *key,
const char *subresource,
const char *query,
+ const char *content_type,
+ const char *project_id,
s3_read_func read_func,
s3_reset_func read_reset_func,
s3_size_func size_func,
const result_handling_t *result_handling);
/*
- * a CURLOPT_WRITEFUNCTION to save part of the response in memory and
+ * a CURLOPT_WRITEFUNCTION to save part of the response in memory and
* call an external function if one was provided.
*/
static size_t
static gboolean
compile_regexes(void);
+static gboolean get_openstack_swift_api_v1_setting(S3Handle *hdl);
+static gboolean get_openstack_swift_api_v2_setting(S3Handle *hdl);
+
/*
* Static function implementations
*/
/* do a brute-force search through the list, since it's not sorted */
for (i = 0; i < S3_ERROR_END; i++) {
- if (g_strcasecmp(s3_error_name, s3_error_code_names[i]) == 0)
+ if (g_ascii_strcasecmp(s3_error_name, s3_error_code_names[i]) == 0)
return i;
}
return supported;
}
+/* Check whether libcurl supports bandwidth throttling.
+ *
+ * The CURLOPT_MAX_SEND_SPEED_LARGE option (and its RECV counterpart) needs
+ * libcurl 7.15.5; both the headers compiled against and the library loaded
+ * at run time must be new enough.
+ *
+ * @returns: TRUE iff the throttling options can be used
+ */
+static gboolean
+s3_curl_throttling_compat(void)
+{
+/* CURLOPT_MAX_SEND_SPEED_LARGE added in 7.15.5 */
+#if LIBCURL_VERSION_NUM >= 0x070f05
+    curl_version_info_data *info;
+
+    /* check the runtime version too */
+    info = curl_version_info(CURLVERSION_NOW);
+    return info->version_num >= 0x070f05;
+#else
+    return FALSE;
+#endif
+}
+
static s3_result_t
lookup_result(const result_handling_t *result_handling,
guint response_code,
CURLcode curl_code)
{
while (result_handling->response_code
- || result_handling->s3_error_code
+ || result_handling->s3_error_code
|| result_handling->curl_code) {
if ((result_handling->response_code && result_handling->response_code != response_code)
|| (result_handling->s3_error_code && result_handling->s3_error_code != s3_error_code)
return result_handling->result;
}
+/* Parse an RFC 3339 timestamp ("YYYY-MM-DDTHH:MM:SS[.frac]<zone>") into a
+ * unix time_t.
+ *
+ * @param date: the timestamp; the first 19 characters are the fixed-position
+ * date/time fields, everything after them (past optional fractional seconds)
+ * is the timezone: a numeric offset, "Z", or a named zone
+ * @returns: seconds since the epoch, or the sentinel 1073741824 (2^30) if
+ * the string is too short or the child-process plumbing fails
+ */
+static time_t
+rfc3339_date(
+    const char *date)
+{
+    gint year, month, day, hour, minute, seconds;
+    const char *atz;
+
+    if (strlen(date) < 19)
+        return 1073741824;
+
+    /* fields sit at fixed offsets: "YYYY-MM-DDTHH:MM:SS..." */
+    year = atoi(date);
+    month = atoi(date+5);
+    day = atoi(date+8);
+    hour = atoi(date+11);
+    minute = atoi(date+14);
+    seconds = atoi(date+17);
+    atz = date+19;
+    if (*atz == '.') { /* skip decimal seconds */
+        atz++;
+        while (*atz >= '0' && *atz <= '9') {
+            atz++;
+        }
+    }
+
+/* with a new-enough glib (verified at run time as well), GTimeZone and
+ * GDateTime can interpret any timezone suffix directly */
+#if GLIB_CHECK_VERSION(2,26,0)
+    if (!glib_check_version(2,26,0)) {
+        GTimeZone *tz;
+        GDateTime *dt;
+        time_t a;
+
+        tz = g_time_zone_new(atz);
+        dt = g_date_time_new(tz, year, month, day, hour, minute, seconds);
+        a = g_date_time_to_unix(dt);
+        g_time_zone_unref(tz);
+        g_date_time_unref(dt);
+        return a;
+    } else
+#endif
+    {
+        struct tm tm;
+        time_t t;
+
+        tm.tm_year = year - 1900;
+        tm.tm_mon = month - 1;
+        tm.tm_mday = day;
+        tm.tm_hour = hour;
+        tm.tm_min = minute;
+        tm.tm_sec = seconds;
+        tm.tm_wday = 0;
+        tm.tm_yday = 0;
+        tm.tm_isdst = -1;
+        t = time(NULL);
+
+        if (*atz == '-' || *atz == '+') { /* numeric timezone */
+            time_t lt, gt;
+            time_t a;
+            struct tm ltt, gtt;
+            gint Hour = atoi(atz);
+            gint Min = atoi(atz+4);
+
+            if (Hour < 0)
+                Min = -Min;
+            tm.tm_hour -= Hour;
+            tm.tm_min -= Min;
+            tm.tm_isdst = 0;
+            /* lt - gt is the local-UTC offset; fold it in so mktime's
+             * local-time interpretation comes out as UTC */
+            localtime_r(&t, &ltt);
+            lt = mktime(&ltt);
+            gmtime_r(&t, &gtt);
+            gt = mktime(&gtt);
+            tm.tm_sec += lt - gt;
+            a = mktime(&tm);
+            return a;
+        } else if (*atz == 'Z' && *(atz+1) == '\0') { /* Z timezone */
+            time_t lt, gt;
+            time_t a;
+            struct tm ltt, gtt;
+
+            tm.tm_isdst = 0;
+            localtime_r(&t, &ltt);
+            lt = mktime(&ltt);
+            gmtime_r(&t, &gtt);
+            gt = mktime(&gtt);
+            tm.tm_sec += lt - gt;
+            a = mktime(&tm);
+            return a;
+        } else { /* named timezone */
+            int pid;
+            int fd[2];
+            char buf[101];
+            time_t a;
+            size_t size;
+
+            /* mktime() consults $TZ process-wide, so evaluate it in a
+             * forked child with TZ set to the named zone and read the
+             * result back over a pipe */
+            if (pipe(fd) == -1)
+                return 1073741824;
+            pid = fork();
+            switch (pid) {
+                case -1:
+                    close(fd[0]);
+                    close(fd[1]);
+                    return 1073741824;
+                    break;
+                case 0:
+                    close(fd[0]);
+                    setenv("TZ", atz, 1);
+                    tzset();
+                    a = mktime(&tm);
+                    g_snprintf(buf, 100, "%d", (int)a);
+                    size = write(fd[1], buf, strlen(buf));
+                    close(fd[1]);
+                    exit(0);
+                default:
+                    close(fd[1]);
+                    size = read(fd[0], buf, 100);
+                    close(fd[0]);
+                    buf[size] = '\0';
+                    waitpid(pid, NULL, 0);
+                    break;
+            }
+            return atoi(buf);
+        }
+    }
+}
+
+
+/* TRUE iff str is non-NULL and not the empty string (see the declaration
+ * comment above) */
+static gboolean
+is_non_empty_string(const char *str)
+{
+    return str && str[0] != '\0';
+}
+
static char *
-build_url(const char *bucket,
+build_url(
+      S3Handle *hdl,
+      const char *bucket,
          const char *key,
          const char *subresource,
-          const char *query,
-          gboolean use_subdomain,
-          gboolean use_ssl)
+      const char *query)
{
    GString *url = NULL;
    char *esc_bucket = NULL, *esc_key = NULL;
-    /* scheme */
-    url = g_string_new("http");
-    if (use_ssl)
-        g_string_append(url, "s");
-
-    g_string_append(url, "://");
+    /* Swift/OAuth2 sessions already carry a complete storage URL from the
+     * auth exchange; otherwise assemble scheme://host/service_path/ */
+    if ((hdl->s3_api == S3_API_SWIFT_1 || hdl->s3_api == S3_API_SWIFT_2 ||
+         hdl->s3_api == S3_API_OAUTH2) &&
+        hdl->x_storage_url) {
+        url = g_string_new(hdl->x_storage_url);
+        g_string_append(url, "/");
+    } else {
+        /* scheme */
+        url = g_string_new("http");
+        if (hdl->use_ssl)
+            g_string_append(url, "s");
+
+        g_string_append(url, "://");
+
+        /* domain */
+        if (hdl->use_subdomain && bucket)
+            g_string_append_printf(url, "%s.%s", bucket, hdl->host);
+        else
+            g_string_append_printf(url, "%s", hdl->host);
+
+        if (hdl->service_path) {
+            g_string_append_printf(url, "%s/", hdl->service_path);
+        } else {
+            g_string_append(url, "/");
+        }
+    }
-    /* domain */
-    if (use_subdomain && bucket)
-        g_string_append_printf(url, "%s.s3.amazonaws.com/", bucket);
-    else
-        g_string_append(url, "s3.amazonaws.com/");
-
    /* path */
-    if (!use_subdomain && bucket) {
-        esc_bucket = curl_escape(bucket, 0);
-        if (!esc_bucket) goto cleanup;
+    if (!hdl->use_subdomain && bucket) {
+        /* curl_easy_escape added in 7.15.4 */
+        #if LIBCURL_VERSION_NUM >= 0x070f04
+        curl_version_info_data *info;
+        /* check the runtime version too */
+        info = curl_version_info(CURLVERSION_NOW);
+        if (info->version_num >= 0x070f04)
+            esc_bucket = curl_easy_escape(hdl->curl, bucket, 0);
+        else
+            esc_bucket = curl_escape(bucket, 0);
+        #else
+        esc_bucket = curl_escape(bucket, 0);
+        #endif
+        if (!esc_bucket) goto cleanup;
        g_string_append_printf(url, "%s", esc_bucket);
        if (key)
            g_string_append(url, "/");
+        curl_free(esc_bucket);
    }
    if (key) {
-        esc_key = curl_escape(key, 0);
-        if (!esc_key) goto cleanup;
+        /* curl_easy_escape added in 7.15.4 */
+        #if LIBCURL_VERSION_NUM >= 0x070f04
+        curl_version_info_data *info;
+        /* check the runtime version too */
+        info = curl_version_info(CURLVERSION_NOW);
+        if (info->version_num >= 0x070f04)
+            esc_key = curl_easy_escape(hdl->curl, key, 0);
+        else
+            esc_key = curl_escape(key, 0);
+        #else
+        esc_key = curl_escape(key, 0);
+        #endif
+        if (!esc_key) goto cleanup;
        g_string_append_printf(url, "%s", esc_key);
+        curl_free(esc_key);
+    }
+
+    /* drop a trailing '/' left over from the prefix assembly above */
+    if (url->str[strlen(url->str)-1] == '/') {
+        g_string_truncate(url, strlen(url->str)-1);
    }
    /* query string */
-    if (subresource || query)
+    if (subresource || query || (hdl->s3_api == S3_API_CASTOR && hdl->tenant_name))
        g_string_append(url, "?");
    if (subresource)
    if (query)
        g_string_append(url, query);
+    /* add CAStor tenant domain override query arg */
+    if (hdl->s3_api == S3_API_CASTOR && hdl->tenant_name) {
+        if (subresource || query) {
+            g_string_append(url, "&");
+        }
+        g_string_append_printf(url, "domain=%s", hdl->tenant_name);
+    }
+
cleanup:
-    if (esc_bucket) curl_free(esc_bucket);
-    if (esc_key) curl_free(esc_key);
    return g_string_free(url, FALSE);
}
const char *key,
const char *subresource,
const char *md5_hash,
- gboolean use_subdomain)
+ const char *content_type,
+ const size_t content_length,
+ const char *project_id)
{
time_t t;
struct tm tmp;
- char date[100];
+ char *date = NULL;
char *buf = NULL;
HMAC_CTX ctx;
GByteArray *md = NULL;
struct curl_slist *headers = NULL;
char *esc_bucket = NULL, *esc_key = NULL;
GString *auth_string = NULL;
+ char *reps = NULL;
- /* Build the string to sign, per the S3 spec.
- * See: "Authenticating REST Requests" - API Version 2006-03-01 pg 58
- */
-
- /* verb */
- auth_string = g_string_new(verb);
- g_string_append(auth_string, "\n");
-
- /* Content-MD5 header */
- if (md5_hash)
- g_string_append(auth_string, md5_hash);
- g_string_append(auth_string, "\n");
-
- /* Content-Type is empty*/
- g_string_append(auth_string, "\n");
-
+ /* From RFC 2616 */
+ static const char *wkday[] = {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"};
+ static const char *month[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
+ "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
/* calculate the date */
t = time(NULL);
+
+ /* sync clock with amazon s3 */
+ t = t + hdl->time_offset_with_s3;
+
#ifdef _WIN32
- if (!localtime_s(&tmp, &t)) g_debug("localtime error");
+ if (!gmtime_s(&tmp, &t)) g_debug("localtime error");
#else
- if (!localtime_r(&t, &tmp)) perror("localtime");
+ if (!gmtime_r(&t, &tmp)) perror("localtime");
#endif
- if (!strftime(date, sizeof(date), "%a, %d %b %Y %H:%M:%S %Z", &tmp))
- perror("strftime");
-
- g_string_append(auth_string, date);
- g_string_append(auth_string, "\n");
-
- if (hdl->user_token) {
- g_string_append(auth_string, AMAZON_SECURITY_HEADER);
- g_string_append(auth_string, ":");
- g_string_append(auth_string, hdl->user_token);
- g_string_append(auth_string, ",");
- g_string_append(auth_string, STS_PRODUCT_TOKEN);
- g_string_append(auth_string, "\n");
- }
-
- /* CanonicalizedResource */
- g_string_append(auth_string, "/");
- if (bucket) {
- if (use_subdomain)
- g_string_append(auth_string, bucket);
- else {
- esc_bucket = curl_escape(bucket, 0);
- if (!esc_bucket) goto cleanup;
- g_string_append(auth_string, esc_bucket);
- }
- }
- if (bucket && (use_subdomain || key))
- g_string_append(auth_string, "/");
- if (key) {
- esc_key = curl_escape(key, 0);
- if (!esc_key) goto cleanup;
- g_string_append(auth_string, esc_key);
- }
+ date = g_strdup_printf("%s, %02d %s %04d %02d:%02d:%02d GMT",
+ wkday[tmp.tm_wday], tmp.tm_mday, month[tmp.tm_mon], 1900+tmp.tm_year,
+ tmp.tm_hour, tmp.tm_min, tmp.tm_sec);
+
+ if (hdl->s3_api == S3_API_SWIFT_1) {
+ if (!bucket) {
+ buf = g_strdup_printf("X-Auth-User: %s", hdl->swift_account_id);
+ headers = curl_slist_append(headers, buf);
+ g_free(buf);
+ buf = g_strdup_printf("X-Auth-Key: %s", hdl->swift_access_key);
+ headers = curl_slist_append(headers, buf);
+ g_free(buf);
+ } else {
+ buf = g_strdup_printf("X-Auth-Token: %s", hdl->x_auth_token);
+ headers = curl_slist_append(headers, buf);
+ g_free(buf);
+ }
+ } else if (hdl->s3_api == S3_API_SWIFT_2) {
+ if (bucket) {
+ buf = g_strdup_printf("X-Auth-Token: %s", hdl->x_auth_token);
+ headers = curl_slist_append(headers, buf);
+ g_free(buf);
+ }
+ buf = g_strdup_printf("Accept: %s", "application/xml");
+ headers = curl_slist_append(headers, buf);
+ g_free(buf);
+ } else if (hdl->s3_api == S3_API_OAUTH2) {
+ if (bucket) {
+ buf = g_strdup_printf("Authorization: Bearer %s", hdl->access_token);
+ headers = curl_slist_append(headers, buf);
+ g_free(buf);
+ }
+ } else if (hdl->s3_api == S3_API_CASTOR) {
+ if (g_str_equal(verb, "PUT") || g_str_equal(verb, "POST")) {
+ if (key) {
+ buf = g_strdup("CAStor-Application: Amanda");
+ headers = curl_slist_append(headers, buf);
+ g_free(buf);
+ reps = g_strdup(hdl->reps); /* object replication level */
+ } else {
+ reps = g_strdup(hdl->reps_bucket); /* bucket replication level */
+ }
- if (subresource) {
- g_string_append(auth_string, "?");
- g_string_append(auth_string, subresource);
+ /* set object replicas in lifepoint */
+ buf = g_strdup_printf("lifepoint: [] reps=%s", reps);
+ headers = curl_slist_append(headers, buf);
+ g_free(buf);
+ g_free(reps);
+ }
+ } else {
+ /* Build the string to sign, per the S3 spec.
+ * See: "Authenticating REST Requests" - API Version 2006-03-01 pg 58
+ */
+
+ /* verb */
+ auth_string = g_string_new(verb);
+ g_string_append(auth_string, "\n");
+
+ /* Content-MD5 header */
+ if (md5_hash)
+ g_string_append(auth_string, md5_hash);
+ g_string_append(auth_string, "\n");
+
+ if (content_type) {
+ g_string_append(auth_string, content_type);
+ }
+ g_string_append(auth_string, "\n");
+
+ /* Date */
+ g_string_append(auth_string, date);
+ g_string_append(auth_string, "\n");
+
+ /* CanonicalizedAmzHeaders, sorted lexicographically */
+ if (is_non_empty_string(hdl->user_token)) {
+ g_string_append(auth_string, AMAZON_SECURITY_HEADER);
+ g_string_append(auth_string, ":");
+ g_string_append(auth_string, hdl->user_token);
+ g_string_append(auth_string, ",");
+ g_string_append(auth_string, STS_PRODUCT_TOKEN);
+ g_string_append(auth_string, "\n");
+ }
+
+ if (g_str_equal(verb,"PUT") &&
+ is_non_empty_string(hdl->server_side_encryption)) {
+ g_string_append(auth_string, AMAZON_SERVER_SIDE_ENCRYPTION_HEADER);
+ g_string_append(auth_string, ":");
+ g_string_append(auth_string, hdl->server_side_encryption);
+ g_string_append(auth_string, "\n");
+ }
+
+ if (is_non_empty_string(hdl->storage_class)) {
+ g_string_append(auth_string, AMAZON_STORAGE_CLASS_HEADER);
+ g_string_append(auth_string, ":");
+ g_string_append(auth_string, hdl->storage_class);
+ g_string_append(auth_string, "\n");
+ }
+
+ /* CanonicalizedResource */
+ if (hdl->service_path) {
+ g_string_append(auth_string, hdl->service_path);
+ }
+ g_string_append(auth_string, "/");
+ if (bucket) {
+ if (hdl->use_subdomain)
+ g_string_append(auth_string, bucket);
+ else {
+ esc_bucket = curl_escape(bucket, 0);
+ if (!esc_bucket) goto cleanup;
+ g_string_append(auth_string, esc_bucket);
+ }
+ }
+
+ if (bucket && (hdl->use_subdomain || key))
+ g_string_append(auth_string, "/");
+
+ if (key) {
+ esc_key = curl_escape(key, 0);
+ if (!esc_key) goto cleanup;
+ g_string_append(auth_string, esc_key);
+ }
+
+ if (subresource) {
+ g_string_append(auth_string, "?");
+ g_string_append(auth_string, subresource);
+ }
+
+ /* run HMAC-SHA1 on the canonicalized string */
+ md = g_byte_array_sized_new(EVP_MAX_MD_SIZE+1);
+ HMAC_CTX_init(&ctx);
+ HMAC_Init_ex(&ctx, hdl->secret_key, (int) strlen(hdl->secret_key),
+ EVP_sha1(), NULL);
+ HMAC_Update(&ctx, (unsigned char*) auth_string->str, auth_string->len);
+ HMAC_Final(&ctx, md->data, &md->len);
+ HMAC_CTX_cleanup(&ctx);
+ auth_base64 = s3_base64_encode(md);
+ /* append the new headers */
+ if (is_non_empty_string(hdl->user_token)) {
+ /* Devpay headers are included in hash. */
+ buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s",
+ hdl->user_token);
+ headers = curl_slist_append(headers, buf);
+ g_free(buf);
+
+ buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s",
+ STS_PRODUCT_TOKEN);
+ headers = curl_slist_append(headers, buf);
+ g_free(buf);
+ }
+
+ if (g_str_equal(verb,"PUT") &&
+ is_non_empty_string(hdl->server_side_encryption)) {
+ buf = g_strdup_printf(AMAZON_SERVER_SIDE_ENCRYPTION_HEADER ": %s",
+ hdl->server_side_encryption);
+ headers = curl_slist_append(headers, buf);
+ g_free(buf);
+ }
+
+ if (is_non_empty_string(hdl->storage_class)) {
+ buf = g_strdup_printf(AMAZON_STORAGE_CLASS_HEADER ": %s",
+ hdl->storage_class);
+ headers = curl_slist_append(headers, buf);
+ g_free(buf);
+ }
+
+ buf = g_strdup_printf("Authorization: AWS %s:%s",
+ hdl->access_key, auth_base64);
+ headers = curl_slist_append(headers, buf);
+ g_free(buf);
}
- /* run HMAC-SHA1 on the canonicalized string */
- md = g_byte_array_sized_new(EVP_MAX_MD_SIZE+1);
- HMAC_CTX_init(&ctx);
- HMAC_Init_ex(&ctx, hdl->secret_key, (int) strlen(hdl->secret_key), EVP_sha1(), NULL);
- HMAC_Update(&ctx, (unsigned char*) auth_string->str, auth_string->len);
- HMAC_Final(&ctx, md->data, &md->len);
- HMAC_CTX_cleanup(&ctx);
- auth_base64 = s3_base64_encode(md);
+ if (md5_hash && '\0' != md5_hash[0]) {
+ buf = g_strdup_printf("Content-MD5: %s", md5_hash);
+ headers = curl_slist_append(headers, buf);
+ g_free(buf);
+ }
+ if (content_length > 0) {
+ buf = g_strdup_printf("Content-Length: %zu", content_length);
+ headers = curl_slist_append(headers, buf);
+ g_free(buf);
+ }
- /* append the new headers */
- if (hdl->user_token) {
- /* Devpay headers are included in hash. */
- buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s", hdl->user_token);
+ if (content_type) {
+ buf = g_strdup_printf("Content-Type: %s", content_type);
headers = curl_slist_append(headers, buf);
g_free(buf);
+ }
- buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s", STS_PRODUCT_TOKEN);
+ if (hdl->s3_api == S3_API_OAUTH2) {
+ buf = g_strdup_printf("x-goog-api-version: 2");
headers = curl_slist_append(headers, buf);
g_free(buf);
}
- buf = g_strdup_printf("Authorization: AWS %s:%s",
- hdl->access_key, auth_base64);
- headers = curl_slist_append(headers, buf);
- g_free(buf);
-
- if (md5_hash && '\0' != md5_hash[0]) {
- buf = g_strdup_printf("Content-MD5: %s", md5_hash);
+ if (project_id && hdl->s3_api == S3_API_OAUTH2) {
+ buf = g_strdup_printf("x-goog-project-id: %s", project_id);
headers = curl_slist_append(headers, buf);
g_free(buf);
}
buf = g_strdup_printf("Date: %s", date);
headers = curl_slist_append(headers, buf);
g_free(buf);
+
cleanup:
+ g_free(date);
g_free(esc_bucket);
g_free(esc_key);
- g_byte_array_free(md, TRUE);
+ if (md) g_byte_array_free(md, TRUE);
g_free(auth_base64);
- g_string_free(auth_string, TRUE);
+ if (auth_string) g_string_free(auth_string, TRUE);
return headers;
}
+/* Functions for a SAX parser to parse the XML failure from Amazon */
+
+/* Private structure for our "thunk": per-parse state shared by the SAX
+ * callbacks below, tracking which element the parser is inside and the
+ * strings collected so far. */
+struct failure_thunk {
+    gboolean want_text;        /* collect character data into 'text'? */
+
+    /* which recognized element we are currently inside */
+    gboolean in_title;
+    gboolean in_body;
+    gboolean in_code;
+    gboolean in_message;
+    gboolean in_details;
+    gboolean in_access;
+    gboolean in_token;
+    gboolean in_serviceCatalog;
+    gboolean in_service;
+    gboolean in_endpoint;
+    gint in_others;            /* nesting depth of unrecognized elements */
+
+    gchar *text;               /* character data accumulated so far */
+    gsize text_len;
+
+    /* harvested results */
+    gchar *message;
+    gchar *details;
+    gchar *error_name;
+    gchar *token_id;
+    gchar *service_type;
+    gchar *service_public_url;
+    gint64 expires;
+};
+
+/* GMarkupParser start_element callback for error/auth response bodies.
+ *
+ * Sets the matching in_* flag (and want_text where the element's character
+ * data matters), and captures interesting attributes on the way in: the
+ * token id and expiry, the service type, the object-store endpoint's
+ * publicURL, and the message attribute of an <error> element.
+ */
+static void
+failure_start_element(GMarkupParseContext *context G_GNUC_UNUSED,
+                      const gchar *element_name,
+                      const gchar **attribute_names,
+                      const gchar **attribute_values,
+                      gpointer user_data,
+                      GError **error G_GNUC_UNUSED)
+{
+    struct failure_thunk *thunk = (struct failure_thunk *)user_data;
+    const gchar **att_name, **att_value;
+
+    if (g_ascii_strcasecmp(element_name, "title") == 0) {
+        thunk->in_title = 1;
+        thunk->in_others = 0;
+        thunk->want_text = 1;
+    } else if (g_ascii_strcasecmp(element_name, "body") == 0) {
+        thunk->in_body = 1;
+        thunk->in_others = 0;
+        thunk->want_text = 1;
+    } else if (g_ascii_strcasecmp(element_name, "code") == 0) {
+        thunk->in_code = 1;
+        thunk->in_others = 0;
+        thunk->want_text = 1;
+    } else if (g_ascii_strcasecmp(element_name, "message") == 0) {
+        thunk->in_message = 1;
+        thunk->in_others = 0;
+        thunk->want_text = 1;
+    } else if (g_ascii_strcasecmp(element_name, "details") == 0) {
+        thunk->in_details = 1;
+        thunk->in_others = 0;
+        thunk->want_text = 1;
+    } else if (g_ascii_strcasecmp(element_name, "access") == 0) {
+        thunk->in_access = 1;
+        thunk->in_others = 0;
+    } else if (g_ascii_strcasecmp(element_name, "token") == 0) {
+        thunk->in_token = 1;
+        thunk->in_others = 0;
+        for (att_name=attribute_names, att_value=attribute_values;
+             *att_name != NULL;
+             att_name++, att_value++) {
+            if (g_str_equal(*att_name, "id")) {
+                thunk->token_id = g_strdup(*att_value);
+            }
+            if (g_str_equal(*att_name, "expires") && strlen(*att_value) >= 19) {
+                /* record the expiry 600s early — presumably so the token
+                 * is refreshed before the server invalidates it */
+                thunk->expires = rfc3339_date(*att_value) - 600;
+            }
+        }
+    } else if (g_ascii_strcasecmp(element_name, "serviceCatalog") == 0) {
+        thunk->in_serviceCatalog = 1;
+        thunk->in_others = 0;
+    } else if (g_ascii_strcasecmp(element_name, "service") == 0) {
+        thunk->in_service = 1;
+        thunk->in_others = 0;
+        for (att_name=attribute_names, att_value=attribute_values;
+             *att_name != NULL;
+             att_name++, att_value++) {
+            if (g_str_equal(*att_name, "type")) {
+                thunk->service_type = g_strdup(*att_value);
+            }
+        }
+    } else if (g_ascii_strcasecmp(element_name, "endpoint") == 0) {
+        thunk->in_endpoint = 1;
+        thunk->in_others = 0;
+        /* only the object-store service's endpoint is of interest */
+        if (thunk->service_type &&
+            g_str_equal(thunk->service_type, "object-store")) {
+            for (att_name=attribute_names, att_value=attribute_values;
+                 *att_name != NULL;
+                 att_name++, att_value++) {
+                if (g_str_equal(*att_name, "publicURL")) {
+                    thunk->service_public_url = g_strdup(*att_value);
+                }
+            }
+        }
+    } else if (g_ascii_strcasecmp(element_name, "error") == 0) {
+        for (att_name=attribute_names, att_value=attribute_values;
+             *att_name != NULL;
+             att_name++, att_value++) {
+            if (g_str_equal(*att_name, "message")) {
+                thunk->message = g_strdup(*att_value);
+            }
+        }
+    } else {
+        thunk->in_others++;
+    }
+}
+
+/* GMarkupParser end_element callback: move the character data collected
+ * since the matching start tag into the appropriate thunk field and clear
+ * the corresponding in-element flag.
+ */
+static void
+failure_end_element(GMarkupParseContext *context G_GNUC_UNUSED,
+                    const gchar *element_name,
+                    gpointer user_data,
+                    GError **error G_GNUC_UNUSED)
+{
+    struct failure_thunk *thunk = (struct failure_thunk *)user_data;
+
+    if (g_ascii_strcasecmp(element_name, "title") == 0) {
+        /* keep what follows the first space of the title (e.g.
+         * "404 Not Found" yields "Not Found") as the error name */
+        char *p = strchr(thunk->text, ' ');
+        if (p) {
+            p++;
+            if (*p) {
+                thunk->error_name = g_strdup(p);
+            }
+        }
+        g_free(thunk->text);
+        thunk->text = NULL;
+        thunk->in_title = 0;
+    } else if (g_ascii_strcasecmp(element_name, "body") == 0) {
+        thunk->message = thunk->text;
+        g_strstrip(thunk->message);
+        thunk->text = NULL;
+        thunk->in_body = 0;
+    } else if (g_ascii_strcasecmp(element_name, "code") == 0) {
+        thunk->error_name = thunk->text;
+        thunk->text = NULL;
+        thunk->in_code = 0;
+    } else if (g_ascii_strcasecmp(element_name, "message") == 0) {
+        thunk->message = thunk->text;
+        thunk->text = NULL;
+        thunk->in_message = 0;
+    } else if (g_ascii_strcasecmp(element_name, "details") == 0) {
+        thunk->details = thunk->text;
+        thunk->text = NULL;
+        thunk->in_details = 0;
+    } else if (g_ascii_strcasecmp(element_name, "access") == 0) {
+        thunk->message = thunk->text;
+        thunk->text = NULL;
+        thunk->in_access = 0;
+    } else if (g_ascii_strcasecmp(element_name, "token") == 0) {
+        thunk->message = thunk->text;
+        thunk->text = NULL;
+        thunk->in_token = 0;
+    } else if (g_ascii_strcasecmp(element_name, "serviceCatalog") == 0) {
+        thunk->message = thunk->text;
+        thunk->text = NULL;
+        thunk->in_serviceCatalog = 0;
+    } else if (g_ascii_strcasecmp(element_name, "service") == 0) {
+        thunk->message = thunk->text;
+        thunk->text = NULL;
+        /* leaving <service>: its type no longer applies to endpoints */
+        g_free(thunk->service_type);
+        thunk->service_type = NULL;
+        thunk->in_service = 0;
+    } else if (g_ascii_strcasecmp(element_name, "endpoint") == 0) {
+        thunk->message = thunk->text;
+        thunk->text = NULL;
+        thunk->in_endpoint = 0;
+    } else {
+        thunk->in_others--;
+    }
+}
+
+/* GMarkupParser text callback: accumulate character data into thunk->text,
+ * but only when a start handler asked for it (want_text) and we are not
+ * nested inside any unrecognized element.
+ */
+static void
+failure_text(GMarkupParseContext *context G_GNUC_UNUSED,
+             const gchar *text,
+             gsize text_len,
+             gpointer user_data,
+             GError **error G_GNUC_UNUSED)
+{
+    struct failure_thunk *thunk = (struct failure_thunk *)user_data;
+
+    if (thunk->want_text && thunk->in_others == 0) {
+        char *new_text;
+
+        new_text = g_strndup(text, text_len);
+        if (thunk->text) {
+            /* append to any text already collected for this element */
+            strappend(thunk->text, new_text);
+            g_free(new_text);
+        } else {
+            thunk->text = new_text;
+        }
+    }
+}
+
+/*
+ * Digest the just-completed request into hdl->last_* fields.
+ *
+ * First verifies the returned ETag against the MD5 of the uploaded
+ * data (skipped for CAStor).  Then tries to extract a server error
+ * message from the response body, dispatching on API and Content-Type:
+ * text/html / text/plain and application/json bodies (Swift), the raw
+ * body (CAStor), or application/xml parsed with the failure_*
+ * GMarkup callbacks (S3, Keystone v2).
+ *
+ * Returns TRUE if the request should be retried, FALSE otherwise.
+ */
static gboolean
interpret_response(S3Handle *hdl,
                   CURLcode curl_code,
                   const char *content_md5)
{
    long response_code = 0;
-    regmatch_t pmatch[2];
-    char *error_name = NULL, *message = NULL;
-    char *body_copy = NULL;
    gboolean ret = TRUE;
+    struct failure_thunk thunk;
+    GMarkupParseContext *ctxt = NULL;
+    static GMarkupParser parser = { failure_start_element, failure_end_element, failure_text, NULL, NULL };
+    GError *err = NULL;
    if (!hdl) return FALSE;
    curl_easy_getinfo(hdl->curl, CURLINFO_RESPONSE_CODE, &response_code);
    hdl->last_response_code = response_code;
-    /* check ETag, if present */
-    if (etag && content_md5 && 200 == response_code) {
-        if (etag && g_strcasecmp(etag, content_md5))
+    /* check ETag, if present and not CAStor */
+    if (etag && content_md5 && 200 == response_code &&
+        hdl->s3_api != S3_API_CASTOR) {
+        if (etag && g_ascii_strcasecmp(etag, content_md5))
            hdl->last_message = g_strdup("S3 Error: Possible data corruption (ETag returned by Amazon did not match the MD5 hash of the data sent)");
        else
            ret = FALSE;
        return ret;
    }
-    if (200 <= response_code && response_code < 400) {
-        /* 2xx and 3xx codes won't have a response body we care about */
-        hdl->last_s3_error_code = S3_ERROR_None;
-        return FALSE;
-    }
-
-    /* Now look at the body to try to get the actual Amazon error message. Rather
-     * than parse out the XML, just use some regexes. */
+    /* Now look at the body to try to get the actual Amazon error message. */
    /* impose a reasonable limit on body size */
    if (body_len > MAX_ERROR_RESPONSE_LEN) {
        hdl->last_message = g_strdup("S3 Error: Unknown (response body too large to parse)");
        return FALSE;
    } else if (!body || body_len == 0) {
-        hdl->last_message = g_strdup("S3 Error: Unknown (empty response body)");
-        return TRUE; /* perhaps a network error; retry the request */
+        if (response_code < 100 || response_code >= 400) {
+            hdl->last_message =
+                g_strdup("S3 Error: Unknown (empty response body)");
+            return TRUE; /* perhaps a network error; retry the request */
+        } else {
+            /* 2xx and 3xx codes without body are good result */
+            hdl->last_s3_error_code = S3_ERROR_None;
+            return FALSE;
+        }
    }
-    /* use strndup to get a zero-terminated string */
-    body_copy = g_strndup(body, body_len);
-    if (!body_copy) goto cleanup;
+    /* reset the thunk that collects parse results */
+    thunk.in_title = FALSE;
+    thunk.in_body = FALSE;
+    thunk.in_code = FALSE;
+    thunk.in_message = FALSE;
+    thunk.in_details = FALSE;
+    thunk.in_access = FALSE;
+    thunk.in_token = FALSE;
+    thunk.in_serviceCatalog = FALSE;
+    thunk.in_service = FALSE;
+    thunk.in_endpoint = FALSE;
+    thunk.in_others = 0;
+    thunk.text = NULL;
+    thunk.want_text = FALSE;
+    thunk.text_len = 0;
+    thunk.message = NULL;
+    thunk.details = NULL;
+    thunk.error_name = NULL;
+    thunk.token_id = NULL;
+    thunk.service_type = NULL;
+    thunk.service_public_url = NULL;
+    thunk.expires = 0;
+
+    /* Swift error pages delivered as text: first line is expected to
+     * look like "<status> <error name>", the rest is message text. */
+    if ((hdl->s3_api == S3_API_SWIFT_1 ||
+         hdl->s3_api == S3_API_SWIFT_2) &&
+        hdl->content_type &&
+        (g_str_equal(hdl->content_type, "text/html") ||
+         g_str_equal(hdl->content_type, "text/plain"))) {
+
+        char *body_copy = g_strndup(body, body_len);
+        char *b = body_copy;
+        char *p = strchr(b, '\n');
+        char *p1;
+        if (p) { /* first line: error code */
+            *p = '\0';
+            p++;
+            p1 = strchr(b, ' ');
+            if (p1) {
+                p1++;
+                if (*p1) {
+                    thunk.error_name = g_strdup(p1);
+                }
+            }
+            b = p;
+        }
+        p = strchr(b, '\n');
+        if (p) { /* second line: error message */
+            *p = '\0';
+            p++;
+            /* NOTE(review): this duplicates the text AFTER the second
+             * newline (p), not the second line itself (b) -- confirm
+             * which line actually carries the message */
+            thunk.message = g_strdup(p);
+            g_strstrip(thunk.message);
+            b = p;
+        }
+        /* NOTE(review): body_copy is never freed on this path (leak) */
+        goto parsing_done;
+    } else if ((hdl->s3_api == S3_API_SWIFT_1 ||
+                hdl->s3_api == S3_API_SWIFT_2) &&
+               hdl->content_type &&
+               g_str_equal(hdl->content_type, "application/json")) {
+        /* Swift JSON error bodies: pull "code" and "details" out with
+         * the precompiled regexes rather than a JSON parser */
+        char *body_copy = g_strndup(body, body_len);
+        char *code = NULL;
+        char *details = NULL;
+        regmatch_t pmatch[2];
+
+        if (!s3_regexec_wrap(&code_regex, body_copy, 2, pmatch, 0)) {
+            code = find_regex_substring(body_copy, pmatch[1]);
+        }
+        if (!s3_regexec_wrap(&details_regex, body_copy, 2, pmatch, 0)) {
+            details = find_regex_substring(body_copy, pmatch[1]);
+        }
+        /* NOTE(review): hdl->last_message is overwritten without being
+         * freed first -- confirm it is always NULL on entry here */
+        if (code && details) {
+            hdl->last_message = g_strdup_printf("%s (%s)", details, code);
+        } else if (code) {
+            hdl->last_message = g_strdup_printf("(%s)", code);
+        } else if (details) {
+            hdl->last_message = g_strdup_printf("%s", details);
+        } else {
+            hdl->last_message = NULL;
+        }
+        g_free(code);
+        g_free(details);
+        g_free(body_copy);
+        return FALSE;
+    } else if (hdl->s3_api == S3_API_CASTOR) {
+        /* The error message is the body */
+        hdl->last_message = g_strndup(body, body_len);
+        return FALSE;
+    } else if (!hdl->content_type ||
+               !g_str_equal(hdl->content_type, "application/xml")) {
+        /* not a body we know how to parse */
+        return FALSE;
+    }
-    if (!s3_regexec_wrap(&error_name_regex, body_copy, 2, pmatch, 0))
-        error_name = find_regex_substring(body_copy, pmatch[1]);
+    /* run the parser over it */
+    ctxt = g_markup_parse_context_new(&parser, 0, (gpointer)&thunk, NULL);
+    if (!g_markup_parse_context_parse(ctxt, body, body_len, &err)) {
+        if (hdl->last_message) g_free(hdl->last_message);
+        hdl->last_message = g_strdup(err->message);
+        goto cleanup;
+    }
-    if (!s3_regexec_wrap(&message_regex, body_copy, 2, pmatch, 0))
-        message = find_regex_substring(body_copy, pmatch[1]);
+    if (!g_markup_parse_context_end_parse(ctxt, &err)) {
+        if (hdl->last_message) g_free(hdl->last_message);
+        hdl->last_message = g_strdup(err->message);
+        goto cleanup;
+    }
-    if (error_name) {
-        hdl->last_s3_error_code = s3_error_code_from_name(error_name);
+    g_markup_parse_context_free(ctxt);
+    ctxt = NULL;
+
+    /* Keystone v2: adopt the freshly-parsed token and storage URL
+     * (ownership moves from the thunk to the handle) */
+    if (hdl->s3_api == S3_API_SWIFT_2) {
+        if (!hdl->x_auth_token && thunk.token_id) {
+            hdl->x_auth_token = thunk.token_id;
+            thunk.token_id = NULL;
+        }
+        if (!hdl->x_storage_url && thunk.service_public_url) {
+            hdl->x_storage_url = thunk.service_public_url;
+            thunk.service_public_url = NULL;
+        }
    }
-    if (message) {
-        hdl->last_message = message;
-        message = NULL; /* steal the reference to the string */
+    if (thunk.expires > 0) {
+        hdl->expires = thunk.expires;
+    }
+parsing_done:
+    if (thunk.error_name) {
+        hdl->last_s3_error_code = s3_error_code_from_name(thunk.error_name);
+        g_free(thunk.error_name);
+        thunk.error_name = NULL;
    }
-cleanup:
-    g_free(body_copy);
-    g_free(message);
-    g_free(error_name);
+    if (thunk.message) {
+        g_free(hdl->last_message);
+        if (thunk.details) {
+            hdl->last_message = g_strdup_printf("%s: %s", thunk.message,
+                                                thunk.details);
+            amfree(thunk.message);
+            amfree(thunk.details);
+        } else {
+            hdl->last_message = thunk.message;
+            thunk.message = NULL; /* steal the reference to the string */
+        }
+    }
+cleanup:
+    /* NOTE(review): ctxt is not freed when we arrive here via the
+     * goto-cleanup parse-failure paths -- confirm whether it leaks */
+    g_free(thunk.text);
+    g_free(thunk.message);
+    g_free(thunk.error_name);
+    g_free(thunk.token_id);
+    g_free(thunk.service_public_url);
+    g_free(thunk.service_type);
    return FALSE;
}
s3_counter_write_func(G_GNUC_UNUSED void *ptr, size_t size, size_t nmemb, void *stream)
{
gint64 *count = (gint64*) stream, inc = nmemb*size;
-
+
if (count) *count += inc;
return inc;
}
g_assert(INVALID_SET_FILE_POINTER != SetFilePointer(hFile, 0, NULL, FILE_BEGIN));
ret = g_byte_array_sized_new(S3_MD5_HASH_BYTE_LEN);
- g_byte_array_set_size(ret, S3_MD5_HASH_BYTE_LEN);
+ g_byte_array_set_size(ret, S3_MD5_HASH_BYTE_LEN);
MD5_Init(&md5_ctx);
while (ReadFile(hFile, buf, S3_MD5_BUF_SIZE, &bytes_read, NULL)) {
}
#endif
-static int
-curl_debug_message(CURL *curl G_GNUC_UNUSED,
- curl_infotype type,
- char *s,
- size_t len,
+static int
+curl_debug_message(CURL *curl G_GNUC_UNUSED,
+ curl_infotype type,
+ char *s,
+ size_t len,
void *unused G_GNUC_UNUSED)
{
char *lineprefix;
char *message;
char **lines, **line;
+ size_t i;
switch (type) {
case CURLINFO_TEXT:
lineprefix="Hdr Out: ";
break;
+ case CURLINFO_DATA_IN:
+ if (len > 3000) return 0;
+ for (i=0;i<len;i++) {
+ if (!g_ascii_isprint(s[i])) {
+ return 0;
+ }
+ }
+ lineprefix="Data In: ";
+ break;
+
+ case CURLINFO_DATA_OUT:
+ if (len > 3000) return 0;
+ for (i=0;i<len;i++) {
+ if (!g_ascii_isprint(s[i])) {
+ return 0;
+ }
+ }
+ lineprefix="Data Out: ";
+ break;
+
default:
/* ignore data in/out -- nobody wants to see that in the
* debug logs! */
g_free(message);
for (line = lines; *line; line++) {
- if (**line == '\0') continue; /* skip blank lines */
- g_debug("%s%s", lineprefix, *line);
+ if (**line == '\0') continue; /* skip blank lines */
+ g_debug("%s%s", lineprefix, *line);
}
g_strfreev(lines);
const char *key,
const char *subresource,
const char *query,
+ const char *content_type,
+ const char *project_id,
s3_read_func read_func,
s3_reset_func read_reset_func,
s3_size_func size_func,
gpointer progress_data,
const result_handling_t *result_handling)
{
- gboolean use_subdomain;
char *url = NULL;
s3_result_t result = S3_RESULT_FAIL; /* assume the worst.. */
CURLcode curl_code = CURLE_OK;
char curl_error_buffer[CURL_ERROR_SIZE] = "";
struct curl_slist *headers = NULL;
- S3InternalData int_writedata = {{NULL, 0, 0, MAX_ERROR_RESPONSE_LEN}, NULL, NULL, NULL, FALSE, FALSE, NULL};
+ /* Set S3Internal Data */
+ S3InternalData int_writedata = {{NULL, 0, 0, MAX_ERROR_RESPONSE_LEN}, NULL, NULL, NULL, FALSE, FALSE, NULL, hdl};
gboolean should_retry;
- guint retries = 0;
+ gint retries = 0;
+ gint retry_after_close = 0;
gulong backoff = EXPONENTIAL_BACKOFF_START_USEC;
/* corresponds to PUT, HEAD, GET, and POST */
int curlopt_upload = 0, curlopt_nobody = 0, curlopt_httpget = 0, curlopt_post = 0;
g_assert(hdl != NULL && hdl->curl != NULL);
+ if (hdl->s3_api == S3_API_OAUTH2 && !hdl->getting_oauth2_access_token &&
+ (!hdl->access_token || hdl->expires < time(NULL))) {
+ result = oauth2_get_access_token(hdl);
+ if (!result) {
+ g_debug("oauth2_get_access_token returned %d", result);
+ return result;
+ }
+ } else if (hdl->s3_api == S3_API_SWIFT_2 && !hdl->getting_swift_2_token &&
+ (!hdl->x_auth_token || hdl->expires < time(NULL))) {
+ result = get_openstack_swift_api_v2_setting(hdl);
+ if (!result) {
+ g_debug("get_openstack_swift_api_v2_setting returned %d", result);
+ return result;
+ }
+ }
+
s3_reset(hdl);
- use_subdomain = hdl->bucket_location? TRUE : FALSE;
- url = build_url(bucket, key, subresource, query, use_subdomain, hdl->use_ssl);
+ url = build_url(hdl, bucket, key, subresource, query);
if (!url) goto cleanup;
/* libcurl may behave strangely if these are not set correctly */
/* set up the request */
headers = authenticate_request(hdl, verb, bucket, key, subresource,
- md5_hash_b64, hdl->bucket_location? TRUE : FALSE);
+ md5_hash_b64, content_type, request_body_size, project_id);
+
+ if (hdl->ca_info) {
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_CAINFO, hdl->ca_info)))
+ goto curl_error;
+ }
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_VERBOSE, hdl->verbose)))
goto curl_error;
if (hdl->verbose) {
- if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_DEBUGFUNCTION,
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_DEBUGFUNCTION,
curl_debug_message)))
goto curl_error;
}
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HTTPHEADER,
headers)))
goto curl_error;
- if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_WRITEFUNCTION, s3_internal_write_func)))
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_WRITEFUNCTION, s3_internal_write_func)))
goto curl_error;
- if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_WRITEDATA, &int_writedata)))
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_WRITEDATA, &int_writedata)))
goto curl_error;
/* Note: we always have to set this apparently, for consistent "end of header" detection */
- if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HEADERFUNCTION, s3_internal_header_func)))
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HEADERFUNCTION, s3_internal_header_func)))
goto curl_error;
/* Note: if set, CURLOPT_HEADERDATA seems to also be used for CURLOPT_WRITEDATA ? */
- if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HEADERDATA, &int_writedata)))
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HEADERDATA, &int_writedata)))
goto curl_error;
- if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSFUNCTION, progress_func)))
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSFUNCTION, progress_func)))
goto curl_error;
- if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSDATA, progress_data)))
+ if (progress_func) {
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_NOPROGRESS,0)))
+ goto curl_error;
+ }
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROGRESSDATA, progress_data)))
goto curl_error;
-#ifdef CURLOPT_INFILESIZE_LARGE
+/* CURLOPT_INFILESIZE_LARGE added in 7.11.0 */
+#if LIBCURL_VERSION_NUM >= 0x070b00
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_INFILESIZE_LARGE, (curl_off_t)request_body_size)))
goto curl_error;
#else
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_INFILESIZE, (long)request_body_size)))
goto curl_error;
#endif
+/* CURLOPT_POSTFIELDSIZE_LARGE added in 7.11.1 */
+#if LIBCURL_VERSION_NUM >= 0x070b01
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t)request_body_size)))
+ goto curl_error;
+#else
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_POSTFIELDSIZE, (long)request_body_size)))
+ goto curl_error;
+#endif
+
+/* CURLOPT_MAX_{RECV,SEND}_SPEED_LARGE added in 7.15.5 */
+#if LIBCURL_VERSION_NUM >= 0x070f05
+ if (s3_curl_throttling_compat()) {
+ if (hdl->max_send_speed)
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_MAX_SEND_SPEED_LARGE, (curl_off_t)hdl->max_send_speed)))
+ goto curl_error;
+
+ if (hdl->max_recv_speed)
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t)hdl->max_recv_speed)))
+ goto curl_error;
+ }
+#endif
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_HTTPGET, curlopt_httpget)))
goto curl_error;
goto curl_error;
- if (curlopt_upload) {
- if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READFUNCTION, read_func)))
+ if (curlopt_upload || curlopt_post) {
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READFUNCTION, read_func)))
goto curl_error;
- if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA, read_data)))
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA, read_data)))
goto curl_error;
} else {
/* Clear request_body options. */
if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READFUNCTION,
NULL)))
goto curl_error;
- if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA,
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_READDATA,
NULL)))
goto curl_error;
}
+ if (hdl->proxy) {
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_PROXY,
+ hdl->proxy)))
+ goto curl_error;
+ }
+
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_FRESH_CONNECT,
+ (long)(hdl->reuse_connection && retry_after_close == 0 ? 0 : 1)))) {
+ goto curl_error;
+ }
+ if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_FORBID_REUSE,
+ (long)(hdl->reuse_connection? 0 : 1)))) {
+ goto curl_error;
+ }
/* Perform the request */
curl_code = curl_easy_perform(hdl->curl);
/* interpret the response into hdl->last* */
curl_error: /* (label for short-circuiting the curl_easy_perform call) */
- should_retry = interpret_response(hdl, curl_code, curl_error_buffer,
+ should_retry = interpret_response(hdl, curl_code, curl_error_buffer,
int_writedata.resp_buf.buffer, int_writedata.resp_buf.buffer_pos, int_writedata.etag, md5_hash_hex);
-
+
+ if (hdl->s3_api == S3_API_OAUTH2 &&
+ hdl->last_response_code == 401 &&
+ hdl->last_s3_error_code == S3_ERROR_AuthenticationRequired) {
+ should_retry = oauth2_get_access_token(hdl);
+ }
/* and, unless we know we need to retry, see what we're to do now */
if (!should_retry) {
- result = lookup_result(result_handling, hdl->last_response_code,
+ result = lookup_result(result_handling, hdl->last_response_code,
hdl->last_s3_error_code, hdl->last_curl_code);
/* break out of the while(1) unless we're retrying */
break;
}
+ if (retries >= EXPONENTIAL_BACKOFF_MAX_RETRIES &&
+ retry_after_close < 3 &&
+ hdl->last_s3_error_code == S3_ERROR_RequestTimeout) {
+ retries = -1;
+ retry_after_close++;
+ g_debug("Retry on a new connection");
+ }
if (retries >= EXPONENTIAL_BACKOFF_MAX_RETRIES) {
/* we're out of retries, so annotate hdl->last_message appropriately and bail
* out. */
if (result != S3_RESULT_OK) {
g_debug(_("%s %s failed with %d/%s"), verb, url,
hdl->last_response_code,
- s3_error_name_from_code(hdl->last_s3_error_code));
+ s3_error_name_from_code(hdl->last_s3_error_code));
}
cleanup:
if (headers) curl_slist_free_all(headers);
g_free(md5_hash_b64);
g_free(md5_hash_hex);
-
+
/* we don't deallocate the response body -- we keep it for later */
hdl->last_response_body = int_writedata.resp_buf.buffer;
hdl->last_response_body_size = int_writedata.resp_buf.buffer_pos;
s3_internal_header_func(void *ptr, size_t size, size_t nmemb, void * stream)
{
static const char *final_header = "\r\n";
+ time_t remote_time_in_sec,local_time;
char *header;
regmatch_t pmatch[2];
S3InternalData *data = (S3InternalData *) stream;
header = g_strndup((gchar *) ptr, (gsize) size*nmemb);
+
+ if (header[strlen(header)-1] == '\n')
+ header[strlen(header)-1] = '\0';
+ if (header[strlen(header)-1] == '\r')
+ header[strlen(header)-1] = '\0';
if (!s3_regexec_wrap(&etag_regex, header, 2, pmatch, 0))
- data->etag = find_regex_substring(header, pmatch[1]);
- if (!strcmp(final_header, header))
+ data->etag = find_regex_substring(header, pmatch[1]);
+ if (!s3_regexec_wrap(&x_auth_token_regex, header, 2, pmatch, 0))
+ data->hdl->x_auth_token = find_regex_substring(header, pmatch[1]);
+
+ if (!s3_regexec_wrap(&x_storage_url_regex, header, 2, pmatch, 0))
+ data->hdl->x_storage_url = find_regex_substring(header, pmatch[1]);
+
+ if (!s3_regexec_wrap(&content_type_regex, header, 2, pmatch, 0))
+ data->hdl->content_type = find_regex_substring(header, pmatch[1]);
+
+ if (strlen(header) == 0)
+ data->headers_done = TRUE;
+ if (g_str_equal(final_header, header))
data->headers_done = TRUE;
+ if (g_str_equal("\n", header))
+ data->headers_done = TRUE;
+
+ /* If date header is found */
+ if (!s3_regexec_wrap(&date_sync_regex, header, 2, pmatch, 0)){
+ char *date = find_regex_substring(header, pmatch[1]);
+
+ /* Remote time is always in GMT: RFC 2616 */
+ /* both curl_getdate and time operate in UTC, so no timezone math is necessary */
+ if ( (remote_time_in_sec = curl_getdate(date, NULL)) < 0 ){
+ g_debug("Error: Conversion of remote time to seconds failed.");
+ data->hdl->time_offset_with_s3 = 0;
+ }else{
+ local_time = time(NULL);
+ /* Offset time */
+ data->hdl->time_offset_with_s3 = remote_time_in_sec - local_time;
+
+ if (data->hdl->verbose)
+ g_debug("Time Offset (remote - local) :%ld",(long)data->hdl->time_offset_with_s3);
+ }
+ g_free(date);
+ }
+
+ g_free(header);
return size*nmemb;
}
struct {const char * str; int flags; regex_t *regex;} regexes[] = {
{"<Code>[[:space:]]*([^<]*)[[:space:]]*</Code>", REG_EXTENDED | REG_ICASE, &error_name_regex},
{"^ETag:[[:space:]]*\"([^\"]+)\"[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &etag_regex},
+ {"^X-Auth-Token:[[:space:]]*([^ ]+)[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &x_auth_token_regex},
+ {"^X-Storage-Url:[[:space:]]*([^ ]+)[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &x_storage_url_regex},
+ {"^Content-Type:[[:space:]]*([^ ;]+).*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &content_type_regex},
{"<Message>[[:space:]]*([^<]*)[[:space:]]*</Message>", REG_EXTENDED | REG_ICASE, &message_regex},
- {"^[a-z0-9]((-*[a-z0-9])|(\\.[a-z0-9])){2,62}$", REG_EXTENDED | REG_NOSUB, &subdomain_regex},
+ {"^[a-z0-9](-*[a-z0-9]){2,62}$", REG_EXTENDED | REG_NOSUB, &subdomain_regex},
{"(/>)|(>([^<]*)</LocationConstraint>)", REG_EXTENDED | REG_ICASE, &location_con_regex},
+ {"^Date:(.*)$",REG_EXTENDED | REG_ICASE | REG_NEWLINE, &date_sync_regex},
+ {"\"access_token\" : \"([^\"]*)\",", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &access_token_regex},
+ {"\"expires_in\" : (.*)", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &expires_in_regex},
+ {"\"details\": \"([^\"]*)\",", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &details_regex},
+ {"\"code\": (.*),", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &code_regex},
{NULL, 0, NULL}
};
char regmessage[1024];
- int size, i;
+ int i;
int reg_result;
for (i = 0; regexes[i].str; i++) {
reg_result = regcomp(regexes[i].regex, regexes[i].str, regexes[i].flags);
if (reg_result != 0) {
- size = regerror(reg_result, regexes[i].regex, regmessage, sizeof(regmessage));
+ regerror(reg_result, regexes[i].regex, regmessage, sizeof(regmessage));
g_error(_("Regex error: %s"), regmessage);
return FALSE;
}
{"^ETag:\\s*\"([^\"]+)\"\\s*$",
G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
&etag_regex},
+ {"^X-Auth-Token:\\s*([^ ]+)\\s*$",
+ G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
+ &x_auth_token_regex},
+ {"^X-Storage-Url:\\s*([^ ]+)\\s*$",
+ G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
+ &x_storage_url_regex},
+ {"^Content-Type:\\s*([^ ]+)\\s*$",
+ G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
+ &content_type_regex},
{"<Message>\\s*([^<]*)\\s*</Message>",
G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
&message_regex},
{"(/>)|(>([^<]*)</LocationConstraint>)",
G_REGEX_CASELESS,
&location_con_regex},
+ {"^Date:(.*)$",
+ G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
+ &date_sync_regex},
+ {"\"access_token\" : \"([^\"]*)\"",
+ G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
+ &access_token_regex},
+ {"\"expires_n\" : (.*)",
+ G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
+ &expires_in_regex},
+ {"\"details\" : \"([^\"]*)\"",
+ G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
+ &details_regex},
+ {"\"code\" : (.*)",
+ G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
+ &code_regex},
{NULL, 0, NULL}
};
int i;
* Public function implementations
*/
+#if (GLIB_MAJOR_VERSION > 2 || (GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION >= 31))
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wmissing-field-initializers"
+#endif
gboolean s3_init(void)
{
static GStaticMutex mutex = G_STATIC_MUTEX_INIT;
static gboolean init = FALSE, ret;
-
+
/* n.b. curl_global_init is called in common-src/glib-util.c:glib_init() */
g_static_mutex_lock (&mutex);
g_static_mutex_unlock(&mutex);
return ret;
}
+#if (GLIB_MAJOR_VERSION > 2 || (GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION >= 31))
+# pragma GCC diagnostic pop
+#endif
gboolean
s3_curl_location_compat(void)
return !s3_regexec_wrap(&subdomain_regex, bucket, 0, NULL, 0);
}
+/*
+ * Fetch the Swift v1 (auth-URL style) settings.
+ *
+ * Issues a bare GET (no bucket, key, or body); the X-Storage-Url and
+ * X-Auth-Token response headers are captured into the handle by
+ * s3_internal_header_func().
+ *
+ * Returns TRUE on a 200 response.
+ */
+static gboolean
+get_openstack_swift_api_v1_setting(
+        S3Handle *hdl)
+{
+    s3_result_t result = S3_RESULT_FAIL;
+    static result_handling_t result_handling[] = {
+        { 200,  0, 0, S3_RESULT_OK },
+        RESULT_HANDLING_ALWAYS_RETRY,
+        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
+    };
+
+    s3_verbose(hdl, 1);
+    result = perform_request(hdl, "GET", NULL, NULL, NULL, NULL, NULL, NULL,
+                             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+                             NULL, NULL, result_handling);
+
+    return result == S3_RESULT_OK;
+}
+
+/*
+ * Fetch the OpenStack identity (Keystone v2.0) settings.
+ *
+ * Builds an XML <auth> request -- passwordCredentials when a
+ * username/password pair is configured, apiAccessKeyCredentials (HP
+ * extension) otherwise -- POSTs it as application/xml, and lets the
+ * XML response parser in interpret_response() deliver the token id
+ * and service public URL into hdl->x_auth_token / hdl->x_storage_url.
+ *
+ * Returns TRUE on a 200 response.
+ */
+static gboolean
+get_openstack_swift_api_v2_setting(
+        S3Handle *hdl)
+{
+    s3_result_t result = S3_RESULT_FAIL;
+    static result_handling_t result_handling[] = {
+        { 200,  0, 0, S3_RESULT_OK },
+        RESULT_HANDLING_ALWAYS_RETRY,
+        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
+    };
+
+    CurlBuffer buf = {NULL, 0, 0, 0};
+    GString *body = g_string_new("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
+    /* NOTE(review): credentials are interpolated into the XML without
+     * escaping; a '"' or '&' in a password/key produces invalid XML */
+    if (hdl->username && hdl->password) {
+        g_string_append(body, "<auth xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://docs.openstack.org/identity/api/v2.0\"");
+    } else {
+        g_string_append(body, "<auth xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://www.hp.com/identity/api/ext/HP-IDM/v1.0\"");
+    }
+
+    if (hdl->tenant_id) {
+        g_string_append_printf(body, " tenantId=\"%s\"", hdl->tenant_id);
+    }
+    if (hdl->tenant_name) {
+        g_string_append_printf(body, " tenantName=\"%s\"", hdl->tenant_name);
+    }
+    g_string_append(body, ">");
+    if (hdl->username && hdl->password) {
+        g_string_append_printf(body, "<passwordCredentials username=\"%s\" password=\"%s\"/>", hdl->username, hdl->password);
+    } else {
+        g_string_append_printf(body, "<apiAccessKeyCredentials accessKey=\"%s\" secretKey=\"%s\"/>", hdl->access_key, hdl->secret_key);
+    }
+    g_string_append(body, "</auth>");
+
+    buf.buffer = g_string_free(body, FALSE);
+    buf.buffer_len = strlen(buf.buffer);
+    s3_verbose(hdl, 1);
+    /* guard against recursing into ourselves from perform_request() */
+    hdl->getting_swift_2_token = 1;
+    g_free(hdl->x_storage_url);
+    hdl->x_storage_url = NULL;
+    result = perform_request(hdl, "POST", NULL, NULL, NULL, NULL,
+                             "application/xml", NULL,
+                             S3_BUFFER_READ_FUNCS, &buf,
+                             NULL, NULL, NULL,
+                             NULL, NULL, result_handling);
+    hdl->getting_swift_2_token = 0;
+
+    /* the request body is no longer needed; don't leak it */
+    g_free(buf.buffer);
+
+    return result == S3_RESULT_OK;
+}
+
+/*
+ * Allocate and configure an S3Handle.
+ *
+ * Which credential parameters must be non-NULL depends on s3_api:
+ *   S3_API_S3:      access_key + secret_key
+ *   S3_API_SWIFT_1: swift_account_id + swift_access_key
+ *   S3_API_SWIFT_2: (username+password) or (access_key+secret_key),
+ *                   plus tenant_id or tenant_name
+ *   S3_API_OAUTH2:  client_id / client_secret / refresh_token
+ *   S3_API_CASTOR:  username / password / tenant_name / reps(+bucket)
+ * Most other parameters (bucket_location, storage_class, ca_info,
+ * server_side_encryption, proxy, user_token) may be NULL.
+ *
+ * 'host' defaults to s3.amazonaws.com and is lowercased; service_path
+ * is normalized to a leading-slash, no-trailing-slash form.
+ *
+ * Returns a new handle, or NULL if curl initialization fails.
+ */
S3Handle *
s3_open(const char *access_key,
        const char *secret_key,
+        const char *swift_account_id,
+        const char *swift_access_key,
+        const char *host,
+        const char *service_path,
+        const gboolean use_subdomain,
        const char *user_token,
-        const char *bucket_location
-        ) {
+        const char *bucket_location,
+        const char *storage_class,
+        const char *ca_info,
+        const char *server_side_encryption,
+        const char *proxy,
+        const S3_api s3_api,
+        const char *username,
+        const char *password,
+        const char *tenant_id,
+        const char *tenant_name,
+        const char *client_id,
+        const char *client_secret,
+        const char *refresh_token,
+        const gboolean reuse_connection,
+        const char *reps,
+        const char *reps_bucket)
+{
    S3Handle *hdl;
    hdl = g_new0(S3Handle, 1);
    /* NOTE(review): g_new0 aborts on OOM and never returns NULL, so
     * this guard is effectively dead code */
    if (!hdl) goto error;
-    hdl->verbose = FALSE;
+    /* NOTE(review): default verbosity flipped to TRUE by this change
+     * -- confirm this is intended outside of debugging */
+    hdl->verbose = TRUE;
    hdl->use_ssl = s3_curl_supports_ssl();
+    hdl->reuse_connection = reuse_connection;
+
+    /* per-API credential handling; see the contract above */
+    if (s3_api == S3_API_S3) {
+        g_assert(access_key);
+        hdl->access_key = g_strdup(access_key);
+        g_assert(secret_key);
+        hdl->secret_key = g_strdup(secret_key);
+    } else if (s3_api == S3_API_SWIFT_1) {
+        g_assert(swift_account_id);
+        hdl->swift_account_id = g_strdup(swift_account_id);
+        g_assert(swift_access_key);
+        hdl->swift_access_key = g_strdup(swift_access_key);
+    } else if (s3_api == S3_API_SWIFT_2) {
+        g_assert((username && password) || (access_key && secret_key));
+        hdl->username = g_strdup(username);
+        hdl->password = g_strdup(password);
+        hdl->access_key = g_strdup(access_key);
+        hdl->secret_key = g_strdup(secret_key);
+        g_assert(tenant_id || tenant_name);
+        hdl->tenant_id = g_strdup(tenant_id);
+        hdl->tenant_name = g_strdup(tenant_name);
+    } else if (s3_api == S3_API_OAUTH2) {
+        hdl->client_id = g_strdup(client_id);
+        hdl->client_secret = g_strdup(client_secret);
+        hdl->refresh_token = g_strdup(refresh_token);
+    } else if (s3_api == S3_API_CASTOR) {
+        hdl->username = g_strdup(username);
+        hdl->password = g_strdup(password);
+        hdl->tenant_name = g_strdup(tenant_name);
+        hdl->reps = g_strdup(reps);
+        hdl->reps_bucket = g_strdup(reps_bucket);
+    }
-    g_assert(access_key);
-    hdl->access_key = g_strdup(access_key);
-    g_assert(secret_key);
-    hdl->secret_key = g_strdup(secret_key);
    /* NULL is okay */
    hdl->user_token = g_strdup(user_token);
    /* NULL is okay */
    hdl->bucket_location = g_strdup(bucket_location);
+    /* NULL is ok */
+    hdl->storage_class = g_strdup(storage_class);
+
+    /* NULL is ok */
+    hdl->server_side_encryption = g_strdup(server_side_encryption);
+
+    /* NULL is ok */
+    hdl->proxy = g_strdup(proxy);
+
+    /* NULL is okay */
+    hdl->ca_info = g_strdup(ca_info);
+
+    if (!is_non_empty_string(host))
+        host = "s3.amazonaws.com";
+    hdl->host = g_ascii_strdown(host, -1);
+    /* a non-default bucket location on the AWS endpoint forces
+     * subdomain (virtual-host style) addressing */
+    hdl->use_subdomain = use_subdomain ||
+                         (strcmp(hdl->host, "s3.amazonaws.com") == 0 &&
+                          is_non_empty_string(hdl->bucket_location));
+    hdl->s3_api = s3_api;
+    /* normalize service_path: NULL for empty or "/", otherwise ensure a
+     * leading slash and strip any trailing slash */
+    if (service_path) {
+        if (strlen(service_path) == 0 ||
+            (strlen(service_path) == 1 && service_path[0] == '/')) {
+            hdl->service_path = NULL;
+        } else if (service_path[0] != '/') {
+            hdl->service_path = g_strdup_printf("/%s", service_path);
+        } else {
+            hdl->service_path = g_strdup(service_path);
+        }
+        if (hdl->service_path) {
+            /* remove trailing / */
+            size_t len = strlen(hdl->service_path) - 1;
+            if (hdl->service_path[len] == '/')
+                hdl->service_path[len] = '\0';
+        }
+    } else {
+        hdl->service_path = NULL;
+    }
+
    hdl->curl = curl_easy_init();
    if (!hdl->curl) goto error;
+    /* Set HTTP handling options for CAStor */
+    if (s3_api == S3_API_CASTOR) {
+#if LIBCURL_VERSION_NUM >= 0x071301
+        curl_version_info_data *info;
+        /* check the runtime version too */
+        info = curl_version_info(CURLVERSION_NOW);
+        if (info->version_num >= 0x071301) {
+            curl_easy_setopt(hdl->curl, CURLOPT_FOLLOWLOCATION, 1);
+            curl_easy_setopt(hdl->curl, CURLOPT_UNRESTRICTED_AUTH, 1);
+            curl_easy_setopt(hdl->curl, CURLOPT_MAXREDIRS, 5);
+            curl_easy_setopt(hdl->curl, CURLOPT_POSTREDIR, CURL_REDIR_POST_ALL);
+            curl_easy_setopt(hdl->curl, CURLOPT_HTTP_VERSION,
+                             CURL_HTTP_VERSION_1_1);
+            if (hdl->username)
+                curl_easy_setopt(hdl->curl, CURLOPT_USERNAME, hdl->username);
+            if (hdl->password)
+                curl_easy_setopt(hdl->curl, CURLOPT_PASSWORD, hdl->password);
+            curl_easy_setopt(hdl->curl, CURLOPT_HTTPAUTH,
+                             (CURLAUTH_BASIC | CURLAUTH_DIGEST));
+        }
+#endif
+    }
+
    return hdl;
error:
    return NULL;
}
+/*
+ * Second-stage open: for the Swift APIs, fetch the X-Storage-Url and
+ * X-Auth-Token before the first real request.  Other APIs need no
+ * second stage and report success.
+ */
+gboolean
+s3_open2(
+    S3Handle *hdl)
+{
+    gboolean ok = TRUE;
+
+    switch (hdl->s3_api) {
+    case S3_API_SWIFT_1:
+        ok = get_openstack_swift_api_v1_setting(hdl);
+        break;
+    case S3_API_SWIFT_2:
+        ok = get_openstack_swift_api_v2_setting(hdl);
+        break;
+    default:
+        break;
+    }
+
+    return ok;
+}
+
void
s3_free(S3Handle *hdl)
{
    if (hdl) {
        g_free(hdl->access_key);
        g_free(hdl->secret_key);
+        /* free the fields introduced with the multi-API support;
+         * user_token is NOT freed here -- it is already freed below,
+         * and freeing it twice was a double free */
+        g_free(hdl->swift_account_id);
+        g_free(hdl->swift_access_key);
+        g_free(hdl->content_type);
+        g_free(hdl->ca_info);
+        g_free(hdl->proxy);
+        g_free(hdl->username);
+        g_free(hdl->password);
+        g_free(hdl->tenant_id);
+        g_free(hdl->tenant_name);
+        g_free(hdl->client_id);
+        g_free(hdl->client_secret);
+        g_free(hdl->refresh_token);
+        g_free(hdl->access_token);
        if (hdl->user_token) g_free(hdl->user_token);
        if (hdl->bucket_location) g_free(hdl->bucket_location);
+        if (hdl->storage_class) g_free(hdl->storage_class);
+        if (hdl->server_side_encryption) g_free(hdl->server_side_encryption);
+        if (hdl->host) g_free(hdl->host);
+        if (hdl->service_path) g_free(hdl->service_path);
        if (hdl->curl) curl_easy_cleanup(hdl->curl);
        g_free(hdl);
g_free(hdl->last_response_body);
hdl->last_response_body = NULL;
}
+ if (hdl->content_type) {
+ g_free(hdl->content_type);
+ hdl->content_type = NULL;
+ }
hdl->last_response_body_size = 0;
}
hdl->verbose = verbose;
}
+/*
+ * Record the outbound bandwidth cap on the handle.
+ * Returns FALSE (and stores nothing) when the linked libcurl cannot
+ * throttle transfers.
+ */
+gboolean
+s3_set_max_send_speed(S3Handle *hdl, guint64 max_send_speed)
+{
+    gboolean can_throttle = s3_curl_throttling_compat();
+
+    if (can_throttle) {
+        hdl->max_send_speed = max_send_speed;
+    }
+    return can_throttle;
+}
+
+/*
+ * Record the inbound bandwidth cap on the handle.
+ * Returns FALSE (and stores nothing) when the linked libcurl cannot
+ * throttle transfers.
+ */
+gboolean
+s3_set_max_recv_speed(S3Handle *hdl, guint64 max_recv_speed)
+{
+    gboolean can_throttle = s3_curl_throttling_compat();
+
+    if (can_throttle) {
+        hdl->max_recv_speed = max_recv_speed;
+    }
+    return can_throttle;
+}
+
gboolean
s3_use_ssl(S3Handle *hdl, gboolean use_ssl)
{
s3_error(hdl, &message, &response_code, NULL, &s3_error_name, &curl_code, &num_retries);
- if (!message)
+ if (!message)
message = "Unknown S3 error";
if (s3_error_name)
g_snprintf(s3_info, sizeof(s3_info), " (%s)", s3_error_name);
g_snprintf(response_info, sizeof(response_info), " (HTTP %d)", response_code);
if (curl_code)
g_snprintf(curl_info, sizeof(curl_info), " (CURLcode %d)", curl_code);
- if (num_retries)
+ if (num_retries)
g_snprintf(retries_info, sizeof(retries_info), " (after %d retries)", num_retries);
return g_strdup_printf("%s%s%s%s%s", message, s3_info, curl_info, response_info, retries_info);
gboolean
s3_upload(S3Handle *hdl,
const char *bucket,
- const char *key,
+ const char *key,
s3_read_func read_func,
s3_reset_func reset_func,
s3_size_func size_func,
{
s3_result_t result = S3_RESULT_FAIL;
static result_handling_t result_handling[] = {
- { 200, 0, 0, S3_RESULT_OK },
+ { 200, 0, 0, S3_RESULT_OK },
+ { 201, 0, 0, S3_RESULT_OK },
RESULT_HANDLING_ALWAYS_RETRY,
- { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
+ { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
};
+ char *verb = "PUT";
+ char *content_type = NULL;
g_assert(hdl != NULL);
- result = perform_request(hdl, "PUT", bucket, key, NULL, NULL,
+ if (hdl->s3_api == S3_API_CASTOR) {
+ verb = "POST";
+ content_type = "application/x-amanda-backup-data";
+ }
+
+ result = perform_request(hdl, verb, bucket, key, NULL, NULL, content_type, NULL,
read_func, reset_func, size_func, md5_func, read_data,
NULL, NULL, NULL, progress_func, progress_data,
result_handling);
gboolean is_truncated;
gchar *next_marker;
+ guint64 size;
gboolean want_text;
-
+
gchar *text;
gsize text_len;
};
/* Functions for a SAX parser to parse the XML from Amazon */
static void
-list_start_element(GMarkupParseContext *context G_GNUC_UNUSED,
- const gchar *element_name,
- const gchar **attribute_names G_GNUC_UNUSED,
- const gchar **attribute_values G_GNUC_UNUSED,
- gpointer user_data,
+list_start_element(GMarkupParseContext *context G_GNUC_UNUSED,
+ const gchar *element_name,
+ const gchar **attribute_names G_GNUC_UNUSED,
+ const gchar **attribute_values G_GNUC_UNUSED,
+ gpointer user_data,
GError **error G_GNUC_UNUSED)
{
struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;
thunk->want_text = 0;
- if (g_strcasecmp(element_name, "contents") == 0) {
+ if (g_ascii_strcasecmp(element_name, "contents") == 0 ||
+ g_ascii_strcasecmp(element_name, "object") == 0) {
thunk->in_contents = 1;
- } else if (g_strcasecmp(element_name, "commonprefixes") == 0) {
+ } else if (g_ascii_strcasecmp(element_name, "commonprefixes") == 0) {
thunk->in_common_prefixes = 1;
- } else if (g_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
+ } else if (g_ascii_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
+ thunk->want_text = 1;
+ } else if ((g_ascii_strcasecmp(element_name, "key") == 0 ||
+ g_ascii_strcasecmp(element_name, "name") == 0) &&
+ thunk->in_contents) {
thunk->want_text = 1;
- } else if (g_strcasecmp(element_name, "key") == 0 && thunk->in_contents) {
+ } else if ((g_ascii_strcasecmp(element_name, "size") == 0 ||
+ g_ascii_strcasecmp(element_name, "bytes") == 0) &&
+ thunk->in_contents) {
thunk->want_text = 1;
- } else if (g_strcasecmp(element_name, "istruncated")) {
+ } else if (g_ascii_strcasecmp(element_name, "istruncated")) {
thunk->want_text = 1;
- } else if (g_strcasecmp(element_name, "nextmarker")) {
+ } else if (g_ascii_strcasecmp(element_name, "nextmarker")) {
thunk->want_text = 1;
}
}
static void
-list_end_element(GMarkupParseContext *context G_GNUC_UNUSED,
+list_end_element(GMarkupParseContext *context G_GNUC_UNUSED,
const gchar *element_name,
- gpointer user_data,
+ gpointer user_data,
GError **error G_GNUC_UNUSED)
{
struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;
- if (g_strcasecmp(element_name, "contents") == 0) {
+ if (g_ascii_strcasecmp(element_name, "contents") == 0) {
thunk->in_contents = 0;
- } else if (g_strcasecmp(element_name, "commonprefixes") == 0) {
+ } else if (g_ascii_strcasecmp(element_name, "commonprefixes") == 0) {
thunk->in_common_prefixes = 0;
- } else if (g_strcasecmp(element_name, "key") == 0 && thunk->in_contents) {
+ } else if ((g_ascii_strcasecmp(element_name, "key") == 0 ||
+ g_ascii_strcasecmp(element_name, "name") == 0) &&
+ thunk->in_contents) {
thunk->filename_list = g_slist_prepend(thunk->filename_list, thunk->text);
+ if (thunk->is_truncated) {
+ if (thunk->next_marker) g_free(thunk->next_marker);
+ thunk->next_marker = g_strdup(thunk->text);
+ }
thunk->text = NULL;
- } else if (g_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
+ } else if ((g_ascii_strcasecmp(element_name, "size") == 0 ||
+ g_ascii_strcasecmp(element_name, "bytes") == 0) &&
+ thunk->in_contents) {
+ thunk->size += g_ascii_strtoull (thunk->text, NULL, 10);
+ thunk->text = NULL;
+ } else if (g_ascii_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
thunk->filename_list = g_slist_prepend(thunk->filename_list, thunk->text);
thunk->text = NULL;
- } else if (g_strcasecmp(element_name, "istruncated") == 0) {
- if (thunk->text && g_strncasecmp(thunk->text, "false", 5) != 0)
+ } else if (g_ascii_strcasecmp(element_name, "istruncated") == 0) {
+ if (thunk->text && g_ascii_strncasecmp(thunk->text, "false", 5) != 0)
thunk->is_truncated = TRUE;
- } else if (g_strcasecmp(element_name, "nextmarker") == 0) {
+ } else if (g_ascii_strcasecmp(element_name, "nextmarker") == 0) {
if (thunk->next_marker) g_free(thunk->next_marker);
thunk->next_marker = thunk->text;
thunk->text = NULL;
static void
list_text(GMarkupParseContext *context G_GNUC_UNUSED,
- const gchar *text,
- gsize text_len,
- gpointer user_data,
+ const gchar *text,
+ gsize text_len,
+ gpointer user_data,
GError **error G_GNUC_UNUSED)
{
struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;
static s3_result_t
list_fetch(S3Handle *hdl,
const char *bucket,
- const char *prefix,
- const char *delimiter,
+ const char *prefix,
+ const char *delimiter,
const char *marker,
const char *max_keys,
CurlBuffer *buf)
{
- s3_result_t result = S3_RESULT_FAIL;
+ s3_result_t result = S3_RESULT_FAIL;
static result_handling_t result_handling[] = {
- { 200, 0, 0, S3_RESULT_OK },
+ { 200, 0, 0, S3_RESULT_OK },
+ { 204, 0, 0, S3_RESULT_OK },
RESULT_HANDLING_ALWAYS_RETRY,
- { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
+ { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
};
const char* pos_parts[][2] = {
{"prefix", prefix},
/* loop over possible parts to build query string */
query = g_string_new("");
for (i = 0; pos_parts[i][0]; i++) {
- if (pos_parts[i][1]) {
- if (have_prev_part)
- g_string_append(query, "&");
- else
- have_prev_part = TRUE;
- esc_value = curl_escape(pos_parts[i][1], 0);
- g_string_append_printf(query, "%s=%s", pos_parts[i][0], esc_value);
- curl_free(esc_value);
- }
+ if (pos_parts[i][1]) {
+ const char *keyword;
+ if (have_prev_part)
+ g_string_append(query, "&");
+ else
+ have_prev_part = TRUE;
+ esc_value = curl_escape(pos_parts[i][1], 0);
+ keyword = pos_parts[i][0];
+ if ((hdl->s3_api == S3_API_SWIFT_1 ||
+ hdl->s3_api == S3_API_SWIFT_2) &&
+ strcmp(keyword, "max-keys") == 0) {
+ keyword = "limit";
+ } else if ((hdl->s3_api == S3_API_CASTOR) &&
+ strcmp(keyword, "max-keys") == 0) {
+ keyword = "size";
+ }
+ g_string_append_printf(query, "%s=%s", keyword, esc_value);
+ curl_free(esc_value);
+ }
+ }
+ if (hdl->s3_api == S3_API_SWIFT_1 ||
+ hdl->s3_api == S3_API_SWIFT_2 ||
+ hdl->s3_api == S3_API_CASTOR) {
+ if (have_prev_part)
+ g_string_append(query, "&");
+ g_string_append(query, "format=xml");
}
/* and perform the request on that URI */
- result = perform_request(hdl, "GET", bucket, NULL, NULL, query->str,
+ result = perform_request(hdl, "GET", bucket, NULL, NULL, query->str, NULL,
+ NULL,
NULL, NULL, NULL, NULL, NULL,
S3_BUFFER_WRITE_FUNCS, buf, NULL, NULL,
result_handling);
const char *bucket,
const char *prefix,
const char *delimiter,
- GSList **list)
+ GSList **list,
+ guint64 *total_size)
{
/*
* max len of XML variables:
thunk.filename_list = NULL;
thunk.text = NULL;
thunk.next_marker = NULL;
+ thunk.size = 0;
/* Loop until S3 has given us the entire picture */
do {
/* get some data from S3 */
result = list_fetch(hdl, bucket, prefix, delimiter, thunk.next_marker, MAX_KEYS, &buf);
if (result != S3_RESULT_OK) goto cleanup;
+ if (buf.buffer_pos == 0) goto cleanup; /* no body */
/* run the parser over it */
thunk.in_contents = FALSE;
thunk.in_common_prefixes = FALSE;
thunk.is_truncated = FALSE;
+ if (thunk.next_marker) g_free(thunk.next_marker);
+ thunk.next_marker = NULL;
thunk.want_text = FALSE;
ctxt = g_markup_parse_context_new(&parser, 0, (gpointer)&thunk, NULL);
result = S3_RESULT_FAIL;
goto cleanup;
}
-
+
g_markup_parse_context_free(ctxt);
ctxt = NULL;
} while (thunk.next_marker);
return FALSE;
} else {
*list = thunk.filename_list;
+ if(total_size) {
+ *total_size = thunk.size;
+ }
return TRUE;
}
}
{
s3_result_t result = S3_RESULT_FAIL;
static result_handling_t result_handling[] = {
- { 200, 0, 0, S3_RESULT_OK },
+ { 200, 0, 0, S3_RESULT_OK },
RESULT_HANDLING_ALWAYS_RETRY,
- { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
+ { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
};
g_assert(hdl != NULL);
g_assert(write_func != NULL);
- result = perform_request(hdl, "GET", bucket, key, NULL, NULL,
+ result = perform_request(hdl, "GET", bucket, key, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, write_func, reset_func, write_data,
progress_func, progress_data, result_handling);
{
s3_result_t result = S3_RESULT_FAIL;
static result_handling_t result_handling[] = {
- { 204, 0, 0, S3_RESULT_OK },
+ { 200, 0, 0, S3_RESULT_OK },
+ { 204, 0, 0, S3_RESULT_OK },
+ { 404, 0, 0, S3_RESULT_OK },
+ { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_OK },
RESULT_HANDLING_ALWAYS_RETRY,
- { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
+ { 409, 0, 0, S3_RESULT_OK },
+ { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
};
g_assert(hdl != NULL);
- result = perform_request(hdl, "DELETE", bucket, key, NULL, NULL,
+ result = perform_request(hdl, "DELETE", bucket, key, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
result_handling);
return result == S3_RESULT_OK;
}
+/* Delete several keys from a bucket with a single S3 Multi-Object
+ * Delete request ("POST /?delete" with an XML body listing the keys).
+ *
+ * @param hdl: the S3Handle object
+ * @param bucket: the bucket to delete from
+ * @param key: NULL-terminated array of keys to delete
+ * @returns: 1 on success, 2 if the server does not implement
+ *           multi-object delete (HTTP 400 -> S3_RESULT_NOTIMPL),
+ *           0 on any other failure
+ */
+int
+s3_multi_delete(S3Handle *hdl,
+                const char *bucket,
+                const char **key)
+{
+    GString *query;
+    CurlBuffer data;
+    s3_result_t result = S3_RESULT_FAIL;
+    static result_handling_t result_handling[] = {
+        { 200,  0,                     0, S3_RESULT_OK },
+        { 204,  0,                     0, S3_RESULT_OK },
+        { 400,  0,                     0, S3_RESULT_NOTIMPL },
+        { 404,  S3_ERROR_NoSuchBucket, 0, S3_RESULT_OK },
+        RESULT_HANDLING_ALWAYS_RETRY,
+        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
+    };
+
+    g_assert(hdl != NULL);
+
+    query = g_string_new(NULL);
+    g_string_append(query, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
+    g_string_append(query, "<Delete>\n");
+    if (!hdl->verbose) {
+        g_string_append(query, " <Quiet>true</Quiet>\n");
+    }
+    while (*key != NULL) {
+        /* keys may contain XML metacharacters ('&', '<', ...); escape
+         * them so the request body stays well-formed */
+        char *esc_key = g_markup_escape_text(*key, -1);
+        g_string_append(query, " <Object>\n");
+        g_string_append(query, " <Key>");
+        g_string_append(query, esc_key);
+        g_string_append(query, "</Key>\n");
+        g_string_append(query, " </Object>\n");
+        g_free(esc_key);
+        key++;
+    }
+    g_string_append(query, "</Delete>\n");
+
+    /* hand the XML body to the request via a CurlBuffer; query->str
+     * stays valid until g_string_free below */
+    data.buffer_len = query->len;
+    data.buffer = query->str;
+    data.buffer_pos = 0;
+    data.max_buffer_size = data.buffer_len;
+
+    result = perform_request(hdl, "POST", bucket, NULL, "delete", NULL,
+                             "application/xml", NULL,
+                             s3_buffer_read_func, s3_buffer_reset_func,
+                             s3_buffer_size_func, s3_buffer_md5_func,
+                             &data, NULL, NULL, NULL, NULL, NULL,
+                             result_handling);
+
+    g_string_free(query, TRUE);
+    if (result == S3_RESULT_OK)
+        return 1;
+    else if (result == S3_RESULT_NOTIMPL)
+        return 2;
+    else
+        return 0;
+}
+
gboolean
s3_make_bucket(S3Handle *hdl,
- const char *bucket)
+ const char *bucket,
+ const char *project_id)
{
char *body = NULL;
+ char *verb = "PUT";
+ char *content_type = NULL;
s3_result_t result = S3_RESULT_FAIL;
static result_handling_t result_handling[] = {
- { 200, 0, 0, S3_RESULT_OK },
+ { 200, 0, 0, S3_RESULT_OK },
+ { 201, 0, 0, S3_RESULT_OK },
+ { 202, 0, 0, S3_RESULT_OK },
+ { 204, 0, 0, S3_RESULT_OK },
+ { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_RETRY },
RESULT_HANDLING_ALWAYS_RETRY,
- { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
+ { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
};
regmatch_t pmatch[4];
char *loc_end_open, *loc_content;
s3_size_func size_func = NULL;
g_assert(hdl != NULL);
-
- if (hdl->bucket_location && hdl->bucket_location[0]) {
+
+ if (is_non_empty_string(hdl->bucket_location) &&
+ 0 != strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location)) {
if (s3_bucket_location_compat(bucket)) {
ptr = &buf;
- buf.buffer = g_strdup_printf(AMAZON_BUCKET_CONF_TEMPLATE, hdl->bucket_location);
+ buf.buffer = g_strdup_printf(AMAZON_BUCKET_CONF_TEMPLATE,
+ g_str_equal(hdl->host, "gss.iijgio.com")?
+ " xmlns=\"http://acs.iijgio.com/doc/2006-03-01/\"":
+ "",
+ hdl->bucket_location);
buf.buffer_len = (guint) strlen(buf.buffer);
buf.buffer_pos = 0;
buf.max_buffer_size = buf.buffer_len;
}
}
- result = perform_request(hdl, "PUT", bucket, NULL, NULL, NULL,
+ if (hdl->s3_api == S3_API_CASTOR) {
+ verb = "POST";
+ content_type = "application/castorcontext";
+ }
+
+ result = perform_request(hdl, verb, bucket, NULL, NULL, NULL, content_type,
+ project_id,
read_func, reset_func, size_func, md5_func, ptr,
NULL, NULL, NULL, NULL, NULL, result_handling);
if (result == S3_RESULT_OK ||
- (hdl->bucket_location && result != S3_RESULT_OK
- && hdl->last_s3_error_code == S3_ERROR_BucketAlreadyOwnedByYou)) {
+ (result != S3_RESULT_OK &&
+ hdl->last_s3_error_code == S3_ERROR_BucketAlreadyOwnedByYou)) {
/* verify the that the location constraint on the existing bucket matches
* the one that's configured.
*/
- result = perform_request(hdl, "GET", bucket, NULL, "location", NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, result_handling);
-
- /* note that we can check only one of the three AND conditions above
- * and infer that the others are true
- */
- if (result == S3_RESULT_OK && hdl->bucket_location) {
+ if (is_non_empty_string(hdl->bucket_location)) {
+ result = perform_request(hdl, "GET", bucket, NULL, "location", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, result_handling);
+ } else {
+ result = perform_request(hdl, "GET", bucket, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, result_handling);
+ }
+
+ if (result == S3_RESULT_OK && is_non_empty_string(hdl->bucket_location)) {
/* return to the default state of failure */
result = S3_RESULT_FAIL;
- if (body) g_free(body);
/* use strndup to get a null-terminated string */
body = g_strndup(hdl->last_response_body, hdl->last_response_body_size);
if (!body) {
hdl->last_message = g_strdup(_("No body received for location request"));
goto cleanup;
+ } else if ('\0' == body[0]) {
+ hdl->last_message = g_strdup(_("Empty body received for location request"));
+ goto cleanup;
}
-
+
if (!s3_regexec_wrap(&location_con_regex, body, 4, pmatch, 0)) {
loc_end_open = find_regex_substring(body, pmatch[1]);
loc_content = find_regex_substring(body, pmatch[3]);
/* The case of an empty string is special because XML allows
* "self-closing" tags
*/
- if ('\0' == hdl->bucket_location[0] &&
- '/' != loc_end_open[0] && '\0' != hdl->bucket_location[0])
- hdl->last_message = g_strdup(_("An empty location constraint is "
+ if (0 == strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location) &&
+ '/' != loc_end_open[0])
+ hdl->last_message = g_strdup(_("A wildcard location constraint is "
"configured, but the bucket has a non-empty location constraint"));
- else if (strncmp(loc_content, hdl->bucket_location, strlen(hdl->bucket_location)))
+ else if (strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location)?
+ strncmp(loc_content, hdl->bucket_location, strlen(hdl->bucket_location)) :
+ ('\0' != loc_content[0]))
hdl->last_message = g_strdup(_("The location constraint configured "
"does not match the constraint currently on the bucket"));
else
cleanup:
if (body) g_free(body);
-
+
return result == S3_RESULT_OK;
}
+
+/* Fetch a fresh OAuth2 access token from Google's token endpoint,
+ * using the refresh token stored in the handle.
+ *
+ * On success, fills in hdl->access_token, hdl->x_auth_token, and
+ * hdl->expires (expiry time minus a 10-minute safety margin).
+ *
+ * NOTE(review): declared as s3_result_t but it returns the boolean
+ * expression (result == S3_RESULT_OK); callers appear to treat the
+ * value as a gboolean -- confirm and align the declaration.
+ */
+static s3_result_t
+oauth2_get_access_token(
+    S3Handle *hdl)
+{
+    GString *query;
+    CurlBuffer data;
+    s3_result_t result = S3_RESULT_FAIL;
+    static result_handling_t result_handling[] = {
+        { 200,  0, 0, S3_RESULT_OK },
+        { 204,  0, 0, S3_RESULT_OK },
+        RESULT_HANDLING_ALWAYS_RETRY,
+        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
+    };
+    char *body = NULL;
+    regmatch_t pmatch[2];
+
+    g_assert(hdl != NULL);
+
+    /* NOTE(review): client_id/client_secret/refresh_token are appended
+     * without URL-encoding; assumed free of reserved characters --
+     * confirm, or escape with curl_escape like list_fetch does. */
+    query = g_string_new(NULL);
+    g_string_append(query, "client_id=");
+    g_string_append(query, hdl->client_id);
+    g_string_append(query, "&client_secret=");
+    g_string_append(query, hdl->client_secret);
+    g_string_append(query, "&refresh_token=");
+    g_string_append(query, hdl->refresh_token);
+    g_string_append(query, "&grant_type=refresh_token");
+
+    data.buffer_len = query->len;
+    data.buffer = query->str;
+    data.buffer_pos = 0;
+    data.max_buffer_size = data.buffer_len;
+
+    /* temporarily aim the handle at the token endpoint; the literal is
+     * never freed through x_storage_url since it is reset to NULL below */
+    hdl->x_storage_url = "https://accounts.google.com/o/oauth2/token";
+    hdl->getting_oauth2_access_token = 1;
+    result = perform_request(hdl, "POST", NULL, NULL, NULL, NULL,
+                             "application/x-www-form-urlencoded", NULL,
+                             s3_buffer_read_func, s3_buffer_reset_func,
+                             s3_buffer_size_func, s3_buffer_md5_func,
+                             &data, NULL, NULL, NULL, NULL, NULL,
+                             result_handling);
+    hdl->x_storage_url = NULL;
+    hdl->getting_oauth2_access_token = 0;
+
+    /* use strndup to get a null-terminated string */
+    body = g_strndup(hdl->last_response_body, hdl->last_response_body_size);
+    if (!body) {
+        hdl->last_message = g_strdup(_("No body received for access-token request"));
+        result = S3_RESULT_FAIL; /* don't report success with no token */
+        goto cleanup;
+    } else if ('\0' == body[0]) {
+        hdl->last_message = g_strdup(_("Empty body received for access-token request"));
+        result = S3_RESULT_FAIL;
+        goto cleanup;
+    }
+
+    if (!s3_regexec_wrap(&access_token_regex, body, 2, pmatch, 0)) {
+        hdl->access_token = find_regex_substring(body, pmatch[1]);
+        hdl->x_auth_token = g_strdup(hdl->access_token);
+    }
+    if (!s3_regexec_wrap(&expires_in_regex, body, 2, pmatch, 0)) {
+        char *expires_in = find_regex_substring(body, pmatch[1]);
+        /* refresh 10 minutes before the advertised expiry */
+        hdl->expires = time(NULL) + atoi(expires_in) - 600;
+        g_free(expires_in);
+    }
+
+cleanup:
+    g_string_free(query, TRUE); /* was leaked in the original */
+    g_free(body);
+    return result == S3_RESULT_OK;
+}
+
+/* Check whether a bucket (or container) exists and is accessible, by
+ * issuing the cheapest possible listing request against it.
+ *
+ * @param hdl: the S3Handle object
+ * @param bucket: the bucket to probe
+ * @param project_id: project id, for APIs that require one
+ * @returns: TRUE if the listing request succeeded
+ */
+gboolean
+s3_is_bucket_exists(S3Handle *hdl,
+                    const char *bucket,
+                    const char *project_id)
+{
+    s3_result_t result = S3_RESULT_FAIL;
+    const char *query; /* points at string literals; must stay const */
+    static result_handling_t result_handling[] = {
+        { 200,  0, 0, S3_RESULT_OK },
+        { 204,  0, 0, S3_RESULT_OK },
+        RESULT_HANDLING_ALWAYS_RETRY,
+        { 0, 0, 0, /* default: */ S3_RESULT_FAIL }
+    };
+
+    /* each backend API spells "list at most one key" differently */
+    if (hdl->s3_api == S3_API_SWIFT_1 ||
+        hdl->s3_api == S3_API_SWIFT_2) {
+        query = "limit=1";
+    } else if (hdl->s3_api == S3_API_CASTOR) {
+        query = "format=xml&size=0";
+    } else {
+        query = "max-keys=1";
+    }
+
+    result = perform_request(hdl, "GET", bucket, NULL, NULL, query,
+                             NULL, project_id,
+                             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+                             NULL, NULL, result_handling);
+
+    return result == S3_RESULT_OK;
+}
+
+/* Delete a bucket (or container).
+ *
+ * Thin wrapper: deleting with a NULL key addresses the bucket itself,
+ * so this simply delegates to s3_delete().
+ *
+ * @param hdl: the S3Handle object
+ * @param bucket: the bucket to delete
+ * @returns: TRUE on success, per s3_delete's result handling
+ */
+gboolean
+s3_delete_bucket(S3Handle *hdl,
+                 const char *bucket)
+{
+    return s3_delete(hdl, bucket, NULL);
+}