Merge tag 'upstream/3.3.1'
[debian/amanda] / device-src / s3.c
index 1658a37fa3f496c02533e1ad27ed19eb2198904c..1c6129156a74c74c66ff2e625bb761e7f8824882 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008,2009 Zmanda, Inc.  All Rights Reserved.
+ * Copyright (c) 2008, 2009, 2010 Zmanda, Inc.  All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
 
 #define AMAZON_SECURITY_HEADER "x-amz-security-token"
 #define AMAZON_BUCKET_CONF_TEMPLATE "\
-  <CreateBucketConfiguration>\n\
+  <CreateBucketConfiguration%s>\n\
     <LocationConstraint>%s</LocationConstraint>\n\
   </CreateBucketConfiguration>"
 
+#define AMAZON_STORAGE_CLASS_HEADER "x-amz-storage-class"
+
+#define AMAZON_SERVER_SIDE_ENCRYPTION_HEADER "x-amz-server-side-encryption"
+
 #define AMAZON_WILDCARD_LOCATION "*"
 
 /* parameters for exponential backoff in the face of retriable errors */
 /* Results which should always be retried */
 #define RESULT_HANDLING_ALWAYS_RETRY \
         { 400,  S3_ERROR_RequestTimeout,     0,                          S3_RESULT_RETRY }, \
+        { 403,  S3_ERROR_RequestTimeTooSkewed,0,                          S3_RESULT_RETRY }, \
         { 409,  S3_ERROR_OperationAborted,   0,                          S3_RESULT_RETRY }, \
         { 412,  S3_ERROR_PreconditionFailed, 0,                          S3_RESULT_RETRY }, \
         { 500,  S3_ERROR_InternalError,      0,                          S3_RESULT_RETRY }, \
         { 0,    0,                           CURLE_COULDNT_RESOLVE_HOST, S3_RESULT_RETRY }, \
         { 0,    0,                           CURLE_PARTIAL_FILE,         S3_RESULT_RETRY }, \
         { 0,    0,                           CURLE_OPERATION_TIMEOUTED,  S3_RESULT_RETRY }, \
+        { 0,    0,                           CURLE_SSL_CONNECT_ERROR,    S3_RESULT_RETRY }, \
         { 0,    0,                           CURLE_SEND_ERROR,           S3_RESULT_RETRY }, \
         { 0,    0,                           CURLE_RECV_ERROR,           S3_RESULT_RETRY }, \
         { 0,    0,                           CURLE_GOT_NOTHING,          S3_RESULT_RETRY }
@@ -131,10 +137,20 @@ struct S3Handle {
     char *access_key;
     char *secret_key;
     char *user_token;
+    char *swift_account_id;
+    char *swift_access_key;
 
+    /* attributes for new objects */
     char *bucket_location;
-
+    char *storage_class;
+    char *server_side_encryption;
+    char *host;
+    char *service_path;
+    gboolean use_subdomain;
+    gboolean openstack_swift_api;
     char *ca_info;
+    char *x_auth_token;
+    char *x_storage_url;
 
     CURL *curl;
 
@@ -152,6 +168,9 @@ struct S3Handle {
     guint last_num_retries;
     void *last_response_body;
     guint last_response_body_size;
+
+    /* offset with s3 */
+    time_t time_offset_with_s3;
 };
 
 typedef struct {
@@ -163,6 +182,8 @@ typedef struct {
     gboolean headers_done;
     gboolean int_write_done;
     char *etag;
+    /* Points to current handle: Added to get hold of s3 offset */
+    struct S3Handle *hdl;
 } S3InternalData;
 
 /* Callback function to examine headers one-at-a-time
@@ -254,7 +275,9 @@ lookup_result(const result_handling_t *result_handling,
 /*
  * Precompiled regular expressions */
 static regex_t etag_regex, error_name_regex, message_regex, subdomain_regex,
-    location_con_regex;
+    location_con_regex, date_sync_regex, x_auth_token_regex,
+    x_storage_url_regex;
+
 
 /*
  * Utility functions
@@ -272,19 +295,23 @@ static gboolean is_non_empty_string(const char *str);
  * A new string is allocated and returned; it is the responsiblity of the caller.
  *
  * @param hdl: the S3Handle object
- * @param verb: capitalized verb for this request ('PUT', 'GET', etc.)
+ * @param service_path: A path to add in the URL, or NULL for none.
  * @param bucket: the bucket being accessed, or NULL for none
  * @param key: the key being accessed, or NULL for none
  * @param subresource: the sub-resource being accessed (e.g. "acl"), or NULL for none
- * @param use_subdomain: if TRUE, a subdomain of s3.amazonaws.com will be used
+ * @param query: the query being accessed (e.g. "acl"), or NULL for none
+ *
+ * !use_subdomain: http://host/service_path/bucket/key
+ * use_subdomain : http://bucket.host/service_path/key
+ *
  */
 static char *
-build_url(const char *bucket,
+build_url(
+      S3Handle *hdl,
+      const char *bucket,
       const char *key,
       const char *subresource,
-      const char *query,
-      gboolean use_subdomain,
-      gboolean use_ssl);
+      const char *query);
 
 /* Create proper authorization headers for an Amazon S3 REST
  * request to C{headers}.
@@ -301,7 +328,6 @@ build_url(const char *bucket,
  * @param key: the key being accessed, or NULL for none
  * @param subresource: the sub-resource being accessed (e.g. "acl"), or NULL for none
  * @param md5_hash: the MD5 hash of the request body, or NULL for none
- * @param use_subdomain: if TRUE, a subdomain of s3.amazonaws.com will be used
  */
 static struct curl_slist *
 authenticate_request(S3Handle *hdl,
@@ -309,8 +335,7 @@ authenticate_request(S3Handle *hdl,
                      const char *bucket,
                      const char *key,
                      const char *subresource,
-                     const char *md5_hash,
-                     gboolean use_subdomain);
+                     const char *md5_hash);
 
 
 
@@ -422,7 +447,7 @@ s3_error_code_from_name(char *s3_error_name)
 
     /* do a brute-force search through the list, since it's not sorted */
     for (i = 0; i < S3_ERROR_END; i++) {
-        if (g_strcasecmp(s3_error_name, s3_error_code_names[i]) == 0)
+        if (g_ascii_strcasecmp(s3_error_name, s3_error_code_names[i]) == 0)
             return i;
     }
 
@@ -502,42 +527,77 @@ is_non_empty_string(const char *str)
 }
 
 static char *
-build_url(const char *bucket,
+build_url(
+      S3Handle   *hdl,
+      const char *bucket,
       const char *key,
       const char *subresource,
-      const char *query,
-      gboolean use_subdomain,
-      gboolean use_ssl)
+      const char *query)
 {
     GString *url = NULL;
     char *esc_bucket = NULL, *esc_key = NULL;
 
-    /* scheme */
-    url = g_string_new("http");
-    if (use_ssl)
-        g_string_append(url, "s");
-
-    g_string_append(url, "://");
-
-    /* domain */
-    if (use_subdomain && bucket)
-        g_string_append_printf(url, "%s.s3.amazonaws.com/", bucket);
-    else
-        g_string_append(url, "s3.amazonaws.com/");
+    if (hdl->openstack_swift_api && hdl->x_storage_url) {
+       url = g_string_new(hdl->x_storage_url);
+       g_string_append(url, "/");
+    } else {
+       /* scheme */
+       url = g_string_new("http");
+       if (hdl->use_ssl)
+            g_string_append(url, "s");
+
+       g_string_append(url, "://");
+
+       /* domain */
+       if (hdl->use_subdomain && bucket)
+            g_string_append_printf(url, "%s.%s", bucket, hdl->host);
+       else
+            g_string_append_printf(url, "%s", hdl->host);
+
+       if (hdl->service_path) {
+            g_string_append_printf(url, "%s/", hdl->service_path);
+       } else {
+           g_string_append(url, "/");
+       }
+    }
 
     /* path */
-    if (!use_subdomain && bucket) {
-        esc_bucket = curl_escape(bucket, 0);
-    if (!esc_bucket) goto cleanup;
+    if (!hdl->use_subdomain && bucket) {
+       /* curl_easy_escape added in 7.15.4 */
+       #if LIBCURL_VERSION_NUM >= 0x070f04
+           curl_version_info_data *info;
+           /* check the runtime version too */
+           info = curl_version_info(CURLVERSION_NOW);
+           if (info->version_num >= 0x070f04)
+               esc_bucket = curl_easy_escape(hdl->curl, bucket, 0);
+           else
+               esc_bucket = curl_escape(bucket, 0);
+       #else
+           esc_bucket = curl_escape(bucket, 0);
+       #endif
+        if (!esc_bucket) goto cleanup;
         g_string_append_printf(url, "%s", esc_bucket);
         if (key)
             g_string_append(url, "/");
+       curl_free(esc_bucket);
     }
 
     if (key) {
-        esc_key = curl_escape(key, 0);
-    if (!esc_key) goto cleanup;
+       /* curl_easy_escape added in 7.15.4 */
+       #if LIBCURL_VERSION_NUM >= 0x070f04
+           curl_version_info_data *info;
+           /* check the runtime version too */
+           info = curl_version_info(CURLVERSION_NOW);
+           if (info->version_num >= 0x070f04)
+               esc_key = curl_easy_escape(hdl->curl, key, 0);
+           else
+               esc_key = curl_escape(key, 0);
+       #else
+           esc_key = curl_escape(key, 0);
+       #endif
+        if (!esc_key) goto cleanup;
         g_string_append_printf(url, "%s", esc_key);
+       curl_free(esc_key);
     }
 
     /* query string */
@@ -554,8 +614,6 @@ build_url(const char *bucket,
         g_string_append(url, query);
 
 cleanup:
-    if (esc_bucket) curl_free(esc_bucket);
-    if (esc_key) curl_free(esc_key);
 
     return g_string_free(url, FALSE);
 }
@@ -566,8 +624,7 @@ authenticate_request(S3Handle *hdl,
                      const char *bucket,
                      const char *key,
                      const char *subresource,
-                     const char *md5_hash,
-                     gboolean use_subdomain)
+                     const char *md5_hash)
 {
     time_t t;
     struct tm tmp;
@@ -586,98 +643,154 @@ authenticate_request(S3Handle *hdl,
         "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
 
 
-
-    /* Build the string to sign, per the S3 spec.
-     * See: "Authenticating REST Requests" - API Version 2006-03-01 pg 58
-     */
-
-    /* verb */
-    auth_string = g_string_new(verb);
-    g_string_append(auth_string, "\n");
-
-    /* Content-MD5 header */
-    if (md5_hash)
-        g_string_append(auth_string, md5_hash);
-    g_string_append(auth_string, "\n");
-
-    /* Content-Type is empty*/
-    g_string_append(auth_string, "\n");
-
-
     /* calculate the date */
     t = time(NULL);
+
+    /* sync clock with amazon s3 */
+    t = t + hdl->time_offset_with_s3;
+
 #ifdef _WIN32
     if (!gmtime_s(&tmp, &t)) g_debug("localtime error");
 #else
     if (!gmtime_r(&t, &tmp)) perror("localtime");
 #endif
+
+
     date = g_strdup_printf("%s, %02d %s %04d %02d:%02d:%02d GMT",
         wkday[tmp.tm_wday], tmp.tm_mday, month[tmp.tm_mon], 1900+tmp.tm_year,
         tmp.tm_hour, tmp.tm_min, tmp.tm_sec);
 
-    g_string_append(auth_string, date);
-    g_string_append(auth_string, "\n");
+    if (hdl->openstack_swift_api) {
+       if (!bucket) {
+            buf = g_strdup_printf("X-Auth-User: %s", hdl->swift_account_id);
+            headers = curl_slist_append(headers, buf);
+            g_free(buf);
+            buf = g_strdup_printf("X-Auth-Key: %s", hdl->swift_access_key);
+            headers = curl_slist_append(headers, buf);
+            g_free(buf);
+       } else {
+            buf = g_strdup_printf("X-Auth-Token: %s", hdl->x_auth_token);
+            headers = curl_slist_append(headers, buf);
+            g_free(buf);
+       }
+    } else {
+       /* Build the string to sign, per the S3 spec.
+        * See: "Authenticating REST Requests" - API Version 2006-03-01 pg 58
+        */
+
+       /* verb */
+       auth_string = g_string_new(verb);
+       g_string_append(auth_string, "\n");
+
+       /* Content-MD5 header */
+       if (md5_hash)
+            g_string_append(auth_string, md5_hash);
+       g_string_append(auth_string, "\n");
+
+       /* Content-Type is empty */
+       g_string_append(auth_string, "\n");
+
+       /* Date */
+       g_string_append(auth_string, date);
+       g_string_append(auth_string, "\n");
+
+       /* CanonicalizedAmzHeaders, sorted lexicographically */
+       if (is_non_empty_string(hdl->user_token)) {
+           g_string_append(auth_string, AMAZON_SECURITY_HEADER);
+           g_string_append(auth_string, ":");
+           g_string_append(auth_string, hdl->user_token);
+           g_string_append(auth_string, ",");
+           g_string_append(auth_string, STS_PRODUCT_TOKEN);
+           g_string_append(auth_string, "\n");
+       }
 
-    if (is_non_empty_string(hdl->user_token)) {
-        g_string_append(auth_string, AMAZON_SECURITY_HEADER);
-        g_string_append(auth_string, ":");
-        g_string_append(auth_string, hdl->user_token);
-        g_string_append(auth_string, ",");
-        g_string_append(auth_string, STS_PRODUCT_TOKEN);
-        g_string_append(auth_string, "\n");
-    }
+       if (g_str_equal(verb,"PUT") &&
+           is_non_empty_string(hdl->server_side_encryption)) {
+           g_string_append(auth_string, AMAZON_SERVER_SIDE_ENCRYPTION_HEADER);
+           g_string_append(auth_string, ":");
+           g_string_append(auth_string, hdl->server_side_encryption);
+           g_string_append(auth_string, "\n");
+       }
 
-    /* CanonicalizedResource */
-    g_string_append(auth_string, "/");
-    if (bucket) {
-        if (use_subdomain)
-            g_string_append(auth_string, bucket);
-        else {
-            esc_bucket = curl_escape(bucket, 0);
-            if (!esc_bucket) goto cleanup;
-            g_string_append(auth_string, esc_bucket);
-        }
-    }
+       if (is_non_empty_string(hdl->storage_class)) {
+           g_string_append(auth_string, AMAZON_STORAGE_CLASS_HEADER);
+           g_string_append(auth_string, ":");
+           g_string_append(auth_string, hdl->storage_class);
+           g_string_append(auth_string, "\n");
+       }
 
-    if (bucket && (use_subdomain || key))
-        g_string_append(auth_string, "/");
+       /* CanonicalizedResource */
+       if (hdl->service_path) {
+           g_string_append(auth_string, hdl->service_path);
+       }
+       g_string_append(auth_string, "/");
+       if (bucket) {
+           if (hdl->use_subdomain)
+               g_string_append(auth_string, bucket);
+           else {
+               esc_bucket = curl_escape(bucket, 0);
+               if (!esc_bucket) goto cleanup;
+               g_string_append(auth_string, esc_bucket);
+           }
+       }
 
-    if (key) {
-            esc_key = curl_escape(key, 0);
-            if (!esc_key) goto cleanup;
-            g_string_append(auth_string, esc_key);
-    }
+       if (bucket && (hdl->use_subdomain || key))
+           g_string_append(auth_string, "/");
 
-    if (subresource) {
-        g_string_append(auth_string, "?");
-        g_string_append(auth_string, subresource);
-    }
+       if (key) {
+           esc_key = curl_escape(key, 0);
+           if (!esc_key) goto cleanup;
+           g_string_append(auth_string, esc_key);
+       }
 
-    /* run HMAC-SHA1 on the canonicalized string */
-    md = g_byte_array_sized_new(EVP_MAX_MD_SIZE+1);
-    HMAC_CTX_init(&ctx);
-    HMAC_Init_ex(&ctx, hdl->secret_key, (int) strlen(hdl->secret_key), EVP_sha1(), NULL);
-    HMAC_Update(&ctx, (unsigned char*) auth_string->str, auth_string->len);
-    HMAC_Final(&ctx, md->data, &md->len);
-    HMAC_CTX_cleanup(&ctx);
-    auth_base64 = s3_base64_encode(md);
-
-    /* append the new headers */
-    if (is_non_empty_string(hdl->user_token)) {
-        /* Devpay headers are included in hash. */
-        buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s", hdl->user_token);
-        headers = curl_slist_append(headers, buf);
-        g_free(buf);
+       if (subresource) {
+           g_string_append(auth_string, "?");
+           g_string_append(auth_string, subresource);
+       }
 
-        buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s", STS_PRODUCT_TOKEN);
-        headers = curl_slist_append(headers, buf);
-        g_free(buf);
-    }
+       /* run HMAC-SHA1 on the canonicalized string */
+       md = g_byte_array_sized_new(EVP_MAX_MD_SIZE+1);
+       HMAC_CTX_init(&ctx);
+       HMAC_Init_ex(&ctx, hdl->secret_key, (int) strlen(hdl->secret_key),
+                    EVP_sha1(), NULL);
+       HMAC_Update(&ctx, (unsigned char*) auth_string->str, auth_string->len);
+       HMAC_Final(&ctx, md->data, &md->len);
+       HMAC_CTX_cleanup(&ctx);
+       auth_base64 = s3_base64_encode(md);
+       /* append the new headers */
+       if (is_non_empty_string(hdl->user_token)) {
+           /* Devpay headers are included in hash. */
+           buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s",
+                                 hdl->user_token);
+           headers = curl_slist_append(headers, buf);
+           g_free(buf);
+
+           buf = g_strdup_printf(AMAZON_SECURITY_HEADER ": %s",
+                                 STS_PRODUCT_TOKEN);
+           headers = curl_slist_append(headers, buf);
+           g_free(buf);
+       }
 
-    buf = g_strdup_printf("Authorization: AWS %s:%s",
+       if (g_str_equal(verb,"PUT") &&
+           is_non_empty_string(hdl->server_side_encryption)) {
+           buf = g_strdup_printf(AMAZON_SERVER_SIDE_ENCRYPTION_HEADER ": %s",
+                                 hdl->server_side_encryption);
+           headers = curl_slist_append(headers, buf);
+           g_free(buf);
+       }
+
+       if (is_non_empty_string(hdl->storage_class)) {
+           buf = g_strdup_printf(AMAZON_STORAGE_CLASS_HEADER ": %s",
+                                 hdl->storage_class);
+           headers = curl_slist_append(headers, buf);
+           g_free(buf);
+       }
+
+       buf = g_strdup_printf("Authorization: AWS %s:%s",
                           hdl->access_key, auth_base64);
-    headers = curl_slist_append(headers, buf);
-    g_free(buf);
+       headers = curl_slist_append(headers, buf);
+       g_free(buf);
+    }
 
     if (md5_hash && '\0' != md5_hash[0]) {
         buf = g_strdup_printf("Content-MD5: %s", md5_hash);
@@ -688,17 +801,128 @@ authenticate_request(S3Handle *hdl,
     buf = g_strdup_printf("Date: %s", date);
     headers = curl_slist_append(headers, buf);
     g_free(buf);
+
 cleanup:
     g_free(date);
     g_free(esc_bucket);
     g_free(esc_key);
-    g_byte_array_free(md, TRUE);
+    if (md) g_byte_array_free(md, TRUE);
     g_free(auth_base64);
-    g_string_free(auth_string, TRUE);
+    if (auth_string) g_string_free(auth_string, TRUE);
 
     return headers;
 }
 
+/* Functions for a SAX parser to parse the XML failure from Amazon */
+
+/* Private structure for our "thunk", which tracks where the user is in the list
+ * of keys. */
+struct failure_thunk {
+    gboolean want_text;
+
+    gboolean in_title;
+    gboolean in_body;
+    gboolean in_code;
+    gboolean in_message;
+    gint     in_others;
+
+    gchar *text;
+    gsize text_len;
+
+    gchar *message;
+    gchar *error_name;
+};
+
+static void
+failure_start_element(GMarkupParseContext *context G_GNUC_UNUSED,
+                   const gchar *element_name,
+                   const gchar **attribute_names G_GNUC_UNUSED,
+                   const gchar **attribute_values G_GNUC_UNUSED,
+                   gpointer user_data,
+                   GError **error G_GNUC_UNUSED)
+{
+    struct failure_thunk *thunk = (struct failure_thunk *)user_data;
+
+    if (g_ascii_strcasecmp(element_name, "title") == 0) {
+        thunk->in_title = 1;
+       thunk->in_others = 0;
+        thunk->want_text = 1;
+    } else if (g_ascii_strcasecmp(element_name, "body") == 0) {
+        thunk->in_body = 1;
+       thunk->in_others = 0;
+        thunk->want_text = 1;
+    } else if (g_ascii_strcasecmp(element_name, "code") == 0) {
+        thunk->in_code = 1;
+       thunk->in_others = 0;
+        thunk->want_text = 1;
+    } else if (g_ascii_strcasecmp(element_name, "message") == 0) {
+        thunk->in_message = 1;
+       thunk->in_others = 0;
+        thunk->want_text = 1;
+    } else {
+       thunk->in_others++;
+    }
+}
+
+static void
+failure_end_element(GMarkupParseContext *context G_GNUC_UNUSED,
+                 const gchar *element_name,
+                 gpointer user_data,
+                 GError **error G_GNUC_UNUSED)
+{
+    struct failure_thunk *thunk = (struct failure_thunk *)user_data;
+
+    if (g_ascii_strcasecmp(element_name, "title") == 0) {
+       char *p = strchr(thunk->text, ' ');
+       if (p) {
+           p++;
+           if (*p) {
+               thunk->error_name = g_strdup(p);
+           }
+       }
+       g_free(thunk->text);
+       thunk->text = NULL;
+        thunk->in_title = 0;
+    } else if (g_ascii_strcasecmp(element_name, "body") == 0) {
+       thunk->message = thunk->text;
+       g_strstrip(thunk->message);
+       thunk->text = NULL;
+        thunk->in_body = 0;
+    } else if (g_ascii_strcasecmp(element_name, "code") == 0) {
+       thunk->error_name = thunk->text;
+       thunk->text = NULL;
+        thunk->in_code = 0;
+    } else if (g_ascii_strcasecmp(element_name, "message") == 0) {
+       thunk->message = thunk->text;
+       thunk->text = NULL;
+        thunk->in_message = 0;
+    } else {
+       thunk->in_others--;
+    }
+}
+
+static void
+failure_text(GMarkupParseContext *context G_GNUC_UNUSED,
+          const gchar *text,
+          gsize text_len,
+          gpointer user_data,
+          GError **error G_GNUC_UNUSED)
+{
+    struct failure_thunk *thunk = (struct failure_thunk *)user_data;
+
+    if (thunk->want_text && thunk->in_others == 0) {
+        char *new_text;
+
+        new_text = g_strndup(text, text_len);
+       if (thunk->text) {
+           strappend(thunk->text, new_text);
+           g_free(new_text);
+       } else {
+           thunk->text = new_text;
+       }
+    }
+}
+
 static gboolean
 interpret_response(S3Handle *hdl,
                    CURLcode curl_code,
@@ -709,10 +933,11 @@ interpret_response(S3Handle *hdl,
                    const char *content_md5)
 {
     long response_code = 0;
-    regmatch_t pmatch[2];
-    char *error_name = NULL, *message = NULL;
-    char *body_copy = NULL;
     gboolean ret = TRUE;
+    struct failure_thunk thunk;
+    GMarkupParseContext *ctxt = NULL;
+    static GMarkupParser parser = { failure_start_element, failure_end_element, failure_text, NULL, NULL };
+    GError *err = NULL;
 
     if (!hdl) return FALSE;
 
@@ -732,21 +957,19 @@ interpret_response(S3Handle *hdl,
 
     /* check ETag, if present */
     if (etag && content_md5 && 200 == response_code) {
-        if (etag && g_strcasecmp(etag, content_md5))
+        if (etag && g_ascii_strcasecmp(etag, content_md5))
             hdl->last_message = g_strdup("S3 Error: Possible data corruption (ETag returned by Amazon did not match the MD5 hash of the data sent)");
         else
             ret = FALSE;
         return ret;
     }
-
     if (200 <= response_code && response_code < 400) {
         /* 2xx and 3xx codes won't have a response body we care about */
         hdl->last_s3_error_code = S3_ERROR_None;
         return FALSE;
     }
 
-    /* Now look at the body to try to get the actual Amazon error message. Rather
-     * than parse out the XML, just use some regexes. */
+    /* Now look at the body to try to get the actual Amazon error message. */
 
     /* impose a reasonable limit on body size */
     if (body_len > MAX_ERROR_RESPONSE_LEN) {
@@ -757,30 +980,81 @@ interpret_response(S3Handle *hdl,
         return TRUE; /* perhaps a network error; retry the request */
     }
 
-    /* use strndup to get a zero-terminated string */
-    body_copy = g_strndup(body, body_len);
-    if (!body_copy) goto cleanup;
+    thunk.in_title = FALSE;
+    thunk.in_body = FALSE;
+    thunk.in_code = FALSE;
+    thunk.in_message = FALSE;
+    thunk.in_others = 0;
+    thunk.text = NULL;
+    thunk.want_text = FALSE;
+    thunk.text_len = 0;
+    thunk.message = NULL;
+    thunk.error_name = NULL;
+
+    if (hdl->openstack_swift_api &&
+       !g_strstr_len(body, body_len, "xml version") &&
+       !g_strstr_len(body, body_len, "<html>")) {
+       char *body_copy = g_strndup(body, body_len);
+       char *b = body_copy;
+       char *p = strchr(b, '\n');
+       char *p1;
+       if (p) { /* first line: error code */
+           *p = '\0';
+           p++;
+           p1 = strchr(b, ' ');
+           if (p1) {
+               p1++;
+               if (*p1) {
+                   thunk.error_name = g_strdup(p1);
+               }
+           }
+           b = p;
+       }
+       p = strchr(b, '\n');
+       if (p) { /* second line: error message */
+           *p = '\0';
+           p++;
+            thunk.message = g_strdup(p);
+           g_strstrip(thunk.message);
+           b = p;
+       }
+       goto parsing_done;
+    }
+
+    /* run the parser over it */
+    ctxt = g_markup_parse_context_new(&parser, 0, (gpointer)&thunk, NULL);
+    if (!g_markup_parse_context_parse(ctxt, body, body_len, &err)) {
+           if (hdl->last_message) g_free(hdl->last_message);
+           hdl->last_message = g_strdup(err->message);
+           goto cleanup;
+    }
 
-    if (!s3_regexec_wrap(&error_name_regex, body_copy, 2, pmatch, 0))
-        error_name = find_regex_substring(body_copy, pmatch[1]);
+    if (!g_markup_parse_context_end_parse(ctxt, &err)) {
+           if (hdl->last_message) g_free(hdl->last_message);
+           hdl->last_message = g_strdup(err->message);
+           goto cleanup;
+    }
 
-    if (!s3_regexec_wrap(&message_regex, body_copy, 2, pmatch, 0))
-        message = find_regex_substring(body_copy, pmatch[1]);
+    g_markup_parse_context_free(ctxt);
+    ctxt = NULL;
 
-    if (error_name) {
-        hdl->last_s3_error_code = s3_error_code_from_name(error_name);
+parsing_done:
+    if (thunk.error_name) {
+        hdl->last_s3_error_code = s3_error_code_from_name(thunk.error_name);
+       g_free(thunk.error_name);
+       thunk.error_name = NULL;
     }
 
-    if (message) {
-        hdl->last_message = message;
-        message = NULL; /* steal the reference to the string */
+    if (thunk.message) {
+       g_free(hdl->last_message);
+        hdl->last_message = thunk.message;
+        thunk.message = NULL; /* steal the reference to the string */
     }
 
 cleanup:
-    g_free(body_copy);
-    g_free(message);
-    g_free(error_name);
-
+    g_free(thunk.text);
+    g_free(thunk.message);
+    g_free(thunk.error_name);
     return FALSE;
 }
 
@@ -885,7 +1159,7 @@ size_t
 s3_counter_write_func(G_GNUC_UNUSED void *ptr, size_t size, size_t nmemb, void *stream)
 {
     gint64 *count = (gint64*) stream, inc = nmemb*size;
-    
+
     if (count) *count += inc;
     return inc;
 }
@@ -990,7 +1264,17 @@ curl_debug_message(CURL *curl G_GNUC_UNUSED,
     case CURLINFO_HEADER_OUT:
         lineprefix="Hdr Out: ";
         break;
+/*
+    case CURLINFO_DATA_IN:
+       if (len > 1000) return 0;
+        lineprefix="Data In: ";
+        break;
 
+    case CURLINFO_DATA_OUT:
+       if (len > 1000) return 0;
+        lineprefix="Data Out: ";
+        break;
+*/
     default:
         /* ignore data in/out -- nobody wants to see that in the
          * debug logs! */
@@ -1030,13 +1314,13 @@ perform_request(S3Handle *hdl,
                 gpointer progress_data,
                 const result_handling_t *result_handling)
 {
-    gboolean use_subdomain;
     char *url = NULL;
     s3_result_t result = S3_RESULT_FAIL; /* assume the worst.. */
     CURLcode curl_code = CURLE_OK;
     char curl_error_buffer[CURL_ERROR_SIZE] = "";
     struct curl_slist *headers = NULL;
-    S3InternalData int_writedata = {{NULL, 0, 0, MAX_ERROR_RESPONSE_LEN}, NULL, NULL, NULL, FALSE, FALSE, NULL};
+    /* Set S3Internal Data */
+    S3InternalData int_writedata = {{NULL, 0, 0, MAX_ERROR_RESPONSE_LEN}, NULL, NULL, NULL, FALSE, FALSE, NULL, hdl};
     gboolean should_retry;
     guint retries = 0;
     gulong backoff = EXPONENTIAL_BACKOFF_START_USEC;
@@ -1053,8 +1337,7 @@ perform_request(S3Handle *hdl,
 
     s3_reset(hdl);
 
-    use_subdomain = is_non_empty_string(hdl->bucket_location);
-    url = build_url(bucket, key, subresource, query, use_subdomain, hdl->use_ssl);
+    url = build_url(hdl, bucket, key, subresource, query);
     if (!url) goto cleanup;
 
     /* libcurl may behave strangely if these are not set correctly */
@@ -1112,7 +1395,7 @@ perform_request(S3Handle *hdl,
 
         /* set up the request */
         headers = authenticate_request(hdl, verb, bucket, key, subresource,
-            md5_hash_b64, is_non_empty_string(hdl->bucket_location));
+            md5_hash_b64);
 
         if (hdl->use_ssl && hdl->ca_info) {
             if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_CAINFO, hdl->ca_info)))
@@ -1169,7 +1452,7 @@ perform_request(S3Handle *hdl,
                    goto curl_error;
 
            if (hdl->max_recv_speed)
-               if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_MAX_SEND_SPEED_LARGE, (curl_off_t)hdl->max_recv_speed)))
+               if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_MAX_RECV_SPEED_LARGE, (curl_off_t)hdl->max_recv_speed)))
                    goto curl_error;
        }
 #endif
@@ -1301,16 +1584,54 @@ static size_t
 s3_internal_header_func(void *ptr, size_t size, size_t nmemb, void * stream)
 {
     static const char *final_header = "\r\n";
+    time_t remote_time_in_sec,local_time;
     char *header;
     regmatch_t pmatch[2];
     S3InternalData *data = (S3InternalData *) stream;
 
     header = g_strndup((gchar *) ptr, (gsize) size*nmemb);
+
+    if (header[strlen(header)-1] == '\n')
+       header[strlen(header)-1] = '\0';
+    if (header[strlen(header)-1] == '\r')
+       header[strlen(header)-1] = '\0';
     if (!s3_regexec_wrap(&etag_regex, header, 2, pmatch, 0))
-            data->etag = find_regex_substring(header, pmatch[1]);
-    if (!strcmp(final_header, header))
+        data->etag = find_regex_substring(header, pmatch[1]);
+    if (!s3_regexec_wrap(&x_auth_token_regex, header, 2, pmatch, 0))
+       data->hdl->x_auth_token = find_regex_substring(header, pmatch[1]);
+
+    if (!s3_regexec_wrap(&x_storage_url_regex, header, 2, pmatch, 0))
+       data->hdl->x_storage_url = find_regex_substring(header, pmatch[1]);
+
+    if (strlen(header) == 0)
+       data->headers_done = TRUE;
+    if (g_str_equal(final_header, header))
         data->headers_done = TRUE;
+    if (g_str_equal("\n", header))
+       data->headers_done = TRUE;
+
+    /* If date header is found */
+    if (!s3_regexec_wrap(&date_sync_regex, header, 2, pmatch, 0)){
+        char *date = find_regex_substring(header, pmatch[1]);
+
+        /* Remote time is always in GMT: RFC 2616 */
+        /* both curl_getdate and time operate in UTC, so no timezone math is necessary */
+        if ( (remote_time_in_sec = curl_getdate(date, NULL)) < 0 ){
+            g_debug("Error: Conversion of remote time to seconds failed.");
+            data->hdl->time_offset_with_s3 = 0;
+        }else{
+            local_time = time(NULL);
+            /* Offset time */
+            data->hdl->time_offset_with_s3 = remote_time_in_sec - local_time;
+
+           if (data->hdl->verbose)
+               g_debug("Time Offset (remote - local) :%ld",(long)data->hdl->time_offset_with_s3);
+        }
 
+        g_free(date);
+    }
+
+    g_free(header);
     return size*nmemb;
 }
 
@@ -1323,19 +1644,22 @@ compile_regexes(void)
   struct {const char * str; int flags; regex_t *regex;} regexes[] = {
         {"<Code>[[:space:]]*([^<]*)[[:space:]]*</Code>", REG_EXTENDED | REG_ICASE, &error_name_regex},
         {"^ETag:[[:space:]]*\"([^\"]+)\"[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &etag_regex},
+        {"^X-Auth-Token:[[:space:]]*([^ ]+)[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &x_auth_token_regex},
+        {"^X-Storage-Url:[[:space:]]*([^ ]+)[[:space:]]*$", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &x_storage_url_regex},
         {"<Message>[[:space:]]*([^<]*)[[:space:]]*</Message>", REG_EXTENDED | REG_ICASE, &message_regex},
         {"^[a-z0-9](-*[a-z0-9]){2,62}$", REG_EXTENDED | REG_NOSUB, &subdomain_regex},
         {"(/>)|(>([^<]*)</LocationConstraint>)", REG_EXTENDED | REG_ICASE, &location_con_regex},
+        {"^Date:(.*)\r",REG_EXTENDED | REG_ICASE | REG_NEWLINE, &date_sync_regex},
         {NULL, 0, NULL}
     };
     char regmessage[1024];
-    int size, i;
+    int i;
     int reg_result;
 
     for (i = 0; regexes[i].str; i++) {
         reg_result = regcomp(regexes[i].regex, regexes[i].str, regexes[i].flags);
         if (reg_result != 0) {
-            size = regerror(reg_result, regexes[i].regex, regmessage, sizeof(regmessage));
+            regerror(reg_result, regexes[i].regex, regmessage, sizeof(regmessage));
             g_error(_("Regex error: %s"), regmessage);
             return FALSE;
         }
@@ -1358,6 +1682,9 @@ compile_regexes(void)
         {"(/>)|(>([^<]*)</LocationConstraint>)",
          G_REGEX_CASELESS,
          &location_con_regex},
+        {"^Date:(.*)\\r",
+         G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
+         &date_sync_regex},
         {NULL, 0, NULL}
   };
   int i;
@@ -1410,13 +1737,41 @@ s3_bucket_location_compat(const char *bucket)
     return !s3_regexec_wrap(&subdomain_regex, bucket, 0, NULL, 0);
 }
 
+static gboolean
+get_openstack_swift_api_setting(
+       S3Handle *hdl)
+{
+    s3_result_t result = S3_RESULT_FAIL;
+    static result_handling_t result_handling[] = {
+       { 200,  0,                    0, S3_RESULT_OK },
+       RESULT_HANDLING_ALWAYS_RETRY,
+       { 0, 0,                       0, /* default: */ S3_RESULT_FAIL  }
+       };
+
+    s3_verbose(hdl, 1);
+    result = perform_request(hdl, "GET", NULL, NULL, NULL, NULL,
+                             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+                             NULL, NULL, result_handling);
+
+    return result == S3_RESULT_OK;
+
+}
+
 S3Handle *
 s3_open(const char *access_key,
         const char *secret_key,
+        const char *swift_account_id,
+        const char *swift_access_key,
+        const char *host,
+        const char *service_path,
+        const gboolean use_subdomain,
         const char *user_token,
         const char *bucket_location,
-        const char *ca_info
-        ) {
+        const char *storage_class,
+        const char *ca_info,
+        const char *server_side_encryption,
+        const gboolean openstack_swift_api)
+{
     S3Handle *hdl;
 
     hdl = g_new0(S3Handle, 1);
@@ -1425,22 +1780,65 @@ s3_open(const char *access_key,
     hdl->verbose = FALSE;
     hdl->use_ssl = s3_curl_supports_ssl();
 
-    g_assert(access_key);
-    hdl->access_key = g_strdup(access_key);
-    g_assert(secret_key);
-    hdl->secret_key = g_strdup(secret_key);
+    if (!openstack_swift_api) {
+       g_assert(access_key);
+       hdl->access_key = g_strdup(access_key);
+       g_assert(secret_key);
+       hdl->secret_key = g_strdup(secret_key);
+    } else {
+       g_assert(swift_account_id);
+       hdl->swift_account_id = g_strdup(swift_account_id);
+       g_assert(swift_access_key);
+       hdl->swift_access_key = g_strdup(swift_access_key);
+    }
+
     /* NULL is okay */
     hdl->user_token = g_strdup(user_token);
 
     /* NULL is okay */
     hdl->bucket_location = g_strdup(bucket_location);
 
+    /* NULL is okay */
+    hdl->storage_class = g_strdup(storage_class);
+
+    /* NULL is okay */
+    hdl->server_side_encryption = g_strdup(server_side_encryption);
+
     /* NULL is okay */
     hdl->ca_info = g_strdup(ca_info);
 
+    if (!is_non_empty_string(host))
+       host = "s3.amazonaws.com";
+    hdl->host = g_ascii_strdown(host, -1);
+    hdl->use_subdomain = use_subdomain ||
+                        (strcmp(hdl->host, "s3.amazonaws.com") == 0 &&
+                         is_non_empty_string(hdl->bucket_location));
+    hdl->openstack_swift_api = openstack_swift_api;
+    if (service_path) {
+       if (strlen(service_path) == 0 ||
+           (strlen(service_path) == 1 && service_path[0] == '/')) {
+           hdl->service_path = NULL;
+       } else if (service_path[0] != '/') {
+           hdl->service_path = g_strdup_printf("/%s", service_path);
+       } else {
+           hdl->service_path = g_strdup(service_path);
+       }
+       if (hdl->service_path) {
+           /* remove trailing / */
+           size_t len = strlen(hdl->service_path) - 1;
+           if (hdl->service_path[len] == '/')
+               hdl->service_path[len] = '\0';
+       }
+    } else {
+       hdl->service_path = NULL;
+    }
+
     hdl->curl = curl_easy_init();
     if (!hdl->curl) goto error;
 
+    if (openstack_swift_api) { /* get the X-Storage-Url and X-Auth-Token */
+       get_openstack_swift_api_setting(hdl);
+    }
     return hdl;
 
 error:
@@ -1456,8 +1854,14 @@ s3_free(S3Handle *hdl)
     if (hdl) {
         g_free(hdl->access_key);
         g_free(hdl->secret_key);
+        g_free(hdl->swift_account_id);
+        g_free(hdl->swift_access_key);
         if (hdl->user_token) g_free(hdl->user_token);
         if (hdl->bucket_location) g_free(hdl->bucket_location);
+        if (hdl->storage_class) g_free(hdl->storage_class);
+        if (hdl->server_side_encryption) g_free(hdl->server_side_encryption);
+        if (hdl->host) g_free(hdl->host);
+        if (hdl->service_path) g_free(hdl->service_path);
         if (hdl->curl) curl_easy_cleanup(hdl->curl);
 
         g_free(hdl);
@@ -1612,6 +2016,7 @@ s3_upload(S3Handle *hdl,
     s3_result_t result = S3_RESULT_FAIL;
     static result_handling_t result_handling[] = {
         { 200,  0, 0, S3_RESULT_OK },
+        { 201,  0, 0, S3_RESULT_OK },
         RESULT_HANDLING_ALWAYS_RETRY,
         { 0,    0, 0, /* default: */ S3_RESULT_FAIL }
         };
@@ -1637,6 +2042,7 @@ struct list_keys_thunk {
 
     gboolean is_truncated;
     gchar *next_marker;
+    guint64 size;
 
     gboolean want_text;
 
@@ -1657,17 +2063,24 @@ list_start_element(GMarkupParseContext *context G_GNUC_UNUSED,
     struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;
 
     thunk->want_text = 0;
-    if (g_strcasecmp(element_name, "contents") == 0) {
+    if (g_ascii_strcasecmp(element_name, "contents") == 0 ||
+       g_ascii_strcasecmp(element_name, "object") == 0) {
         thunk->in_contents = 1;
-    } else if (g_strcasecmp(element_name, "commonprefixes") == 0) {
+    } else if (g_ascii_strcasecmp(element_name, "commonprefixes") == 0) {
         thunk->in_common_prefixes = 1;
-    } else if (g_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
+    } else if (g_ascii_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
         thunk->want_text = 1;
-    } else if (g_strcasecmp(element_name, "key") == 0 && thunk->in_contents) {
+    } else if ((g_ascii_strcasecmp(element_name, "key") == 0 ||
+               g_ascii_strcasecmp(element_name, "name") == 0) &&
+              thunk->in_contents) {
         thunk->want_text = 1;
-    } else if (g_strcasecmp(element_name, "istruncated")) {
+    } else if ((g_ascii_strcasecmp(element_name, "size") == 0 ||
+               g_ascii_strcasecmp(element_name, "bytes") == 0) &&
+              thunk->in_contents) {
         thunk->want_text = 1;
-    } else if (g_strcasecmp(element_name, "nextmarker")) {
+    } else if (g_ascii_strcasecmp(element_name, "istruncated")) {
+        thunk->want_text = 1;
+    } else if (g_ascii_strcasecmp(element_name, "nextmarker")) {
         thunk->want_text = 1;
     }
 }
@@ -1680,20 +2093,31 @@ list_end_element(GMarkupParseContext *context G_GNUC_UNUSED,
 {
     struct list_keys_thunk *thunk = (struct list_keys_thunk *)user_data;
 
-    if (g_strcasecmp(element_name, "contents") == 0) {
+    if (g_ascii_strcasecmp(element_name, "contents") == 0) {
         thunk->in_contents = 0;
-    } else if (g_strcasecmp(element_name, "commonprefixes") == 0) {
+    } else if (g_ascii_strcasecmp(element_name, "commonprefixes") == 0) {
         thunk->in_common_prefixes = 0;
-    } else if (g_strcasecmp(element_name, "key") == 0 && thunk->in_contents) {
+    } else if ((g_ascii_strcasecmp(element_name, "key") == 0 ||
+               g_ascii_strcasecmp(element_name, "name") == 0) &&
+              thunk->in_contents) {
         thunk->filename_list = g_slist_prepend(thunk->filename_list, thunk->text);
+       if (thunk->is_truncated) {
+           if (thunk->next_marker) g_free(thunk->next_marker);
+           thunk->next_marker = g_strdup(thunk->text);
+       }
         thunk->text = NULL;
-    } else if (g_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
+    } else if ((g_ascii_strcasecmp(element_name, "size") == 0 ||
+               g_ascii_strcasecmp(element_name, "bytes") == 0) &&
+              thunk->in_contents) {
+        thunk->size += g_ascii_strtoull (thunk->text, NULL, 10);
+        thunk->text = NULL;
+    } else if (g_ascii_strcasecmp(element_name, "prefix") == 0 && thunk->in_common_prefixes) {
         thunk->filename_list = g_slist_prepend(thunk->filename_list, thunk->text);
         thunk->text = NULL;
-    } else if (g_strcasecmp(element_name, "istruncated") == 0) {
-        if (thunk->text && g_strncasecmp(thunk->text, "false", 5) != 0)
+    } else if (g_ascii_strcasecmp(element_name, "istruncated") == 0) {
+        if (thunk->text && g_ascii_strncasecmp(thunk->text, "false", 5) != 0)
             thunk->is_truncated = TRUE;
-    } else if (g_strcasecmp(element_name, "nextmarker") == 0) {
+    } else if (g_ascii_strcasecmp(element_name, "nextmarker") == 0) {
         if (thunk->next_marker) g_free(thunk->next_marker);
         thunk->next_marker = thunk->text;
         thunk->text = NULL;
@@ -1729,6 +2153,7 @@ list_fetch(S3Handle *hdl,
     s3_result_t result = S3_RESULT_FAIL;
     static result_handling_t result_handling[] = {
         { 200, 0, 0, S3_RESULT_OK },
+        { 204, 0, 0, S3_RESULT_OK },
         RESULT_HANDLING_ALWAYS_RETRY,
         { 0,   0, 0, /* default: */ S3_RESULT_FAIL  }
         };
@@ -1747,15 +2172,25 @@ list_fetch(S3Handle *hdl,
     /* loop over possible parts to build query string */
     query = g_string_new("");
     for (i = 0; pos_parts[i][0]; i++) {
-      if (pos_parts[i][1]) {
-          if (have_prev_part)
-              g_string_append(query, "&");
-          else
-              have_prev_part = TRUE;
-          esc_value = curl_escape(pos_parts[i][1], 0);
-          g_string_append_printf(query, "%s=%s", pos_parts[i][0], esc_value);
-          curl_free(esc_value);
-      }
+       if (pos_parts[i][1]) {
+           const char *keyword;
+            if (have_prev_part)
+               g_string_append(query, "&");
+            else
+               have_prev_part = TRUE;
+            esc_value = curl_escape(pos_parts[i][1], 0);
+           keyword = pos_parts[i][0];
+           if (hdl->openstack_swift_api && strcmp(keyword, "max-keys") == 0) {
+               keyword = "limit";
+           }
+            g_string_append_printf(query, "%s=%s", keyword, esc_value);
+            curl_free(esc_value);
+       }
+    }
+    if (hdl->openstack_swift_api) {
+       if (have_prev_part)
+           g_string_append(query, "&");
+       g_string_append(query, "format=xml");
     }
 
     /* and perform the request on that URI */
@@ -1774,7 +2209,8 @@ s3_list_keys(S3Handle *hdl,
               const char *bucket,
               const char *prefix,
               const char *delimiter,
-              GSList **list)
+              GSList **list,
+              guint64 *total_size)
 {
     /*
      * max len of XML variables:
@@ -1801,6 +2237,7 @@ s3_list_keys(S3Handle *hdl,
     thunk.filename_list = NULL;
     thunk.text = NULL;
     thunk.next_marker = NULL;
+    thunk.size = 0;
 
     /* Loop until S3 has given us the entire picture */
     do {
@@ -1808,11 +2245,14 @@ s3_list_keys(S3Handle *hdl,
         /* get some data from S3 */
         result = list_fetch(hdl, bucket, prefix, delimiter, thunk.next_marker, MAX_KEYS, &buf);
         if (result != S3_RESULT_OK) goto cleanup;
+       if (buf.buffer_pos == 0) goto cleanup; /* no body */
 
         /* run the parser over it */
         thunk.in_contents = FALSE;
         thunk.in_common_prefixes = FALSE;
         thunk.is_truncated = FALSE;
+        if (thunk.next_marker) g_free(thunk.next_marker);
+       thunk.next_marker = NULL;
         thunk.want_text = FALSE;
 
         ctxt = g_markup_parse_context_new(&parser, 0, (gpointer)&thunk, NULL);
@@ -1847,6 +2287,9 @@ cleanup:
         return FALSE;
     } else {
         *list = thunk.filename_list;
+        if(total_size) {
+            *total_size = thunk.size;
+        }
         return TRUE;
     }
 }
@@ -1870,7 +2313,6 @@ s3_read(S3Handle *hdl,
 
     g_assert(hdl != NULL);
     g_assert(write_func != NULL);
-
     result = perform_request(hdl, "GET", bucket, key, NULL, NULL,
         NULL, NULL, NULL, NULL, NULL, write_func, reset_func, write_data,
         progress_func, progress_data, result_handling);
@@ -1886,8 +2328,10 @@ s3_delete(S3Handle *hdl,
     s3_result_t result = S3_RESULT_FAIL;
     static result_handling_t result_handling[] = {
         { 204,  0,                     0, S3_RESULT_OK },
+        { 404,  0,                     0, S3_RESULT_OK },
         { 404,  S3_ERROR_NoSuchBucket, 0, S3_RESULT_OK },
         RESULT_HANDLING_ALWAYS_RETRY,
+        { 409,  0,                     0, S3_RESULT_OK },
         { 0,    0,                     0, /* default: */ S3_RESULT_FAIL  }
         };
 
@@ -1908,6 +2352,9 @@ s3_make_bucket(S3Handle *hdl,
     s3_result_t result = S3_RESULT_FAIL;
     static result_handling_t result_handling[] = {
         { 200,  0,                    0, S3_RESULT_OK },
+        { 201,  0,                    0, S3_RESULT_OK },
+        { 202,  0,                    0, S3_RESULT_OK },
+        { 204,  0,                    0, S3_RESULT_OK },
         { 404, S3_ERROR_NoSuchBucket, 0, S3_RESULT_RETRY },
         RESULT_HANDLING_ALWAYS_RETRY,
         { 0, 0,                       0, /* default: */ S3_RESULT_FAIL  }
@@ -1926,7 +2373,11 @@ s3_make_bucket(S3Handle *hdl,
         0 != strcmp(AMAZON_WILDCARD_LOCATION, hdl->bucket_location)) {
         if (s3_bucket_location_compat(bucket)) {
             ptr = &buf;
-            buf.buffer = g_strdup_printf(AMAZON_BUCKET_CONF_TEMPLATE, hdl->bucket_location);
+            buf.buffer = g_strdup_printf(AMAZON_BUCKET_CONF_TEMPLATE,
+                g_str_equal(hdl->host, "gss.iijgio.com")?
+                       " xmlns=\"http://acs.iijgio.com/doc/2006-03-01/\"":
+                       "",
+               hdl->bucket_location);
             buf.buffer_len = (guint) strlen(buf.buffer);
             buf.buffer_pos = 0;
             buf.max_buffer_size = buf.buffer_len;
@@ -1947,18 +2398,21 @@ s3_make_bucket(S3Handle *hdl,
                  NULL, NULL, NULL, NULL, NULL, result_handling);
 
    if (result == S3_RESULT_OK ||
-       (is_non_empty_string(hdl->bucket_location) && result != S3_RESULT_OK
-         && hdl->last_s3_error_code == S3_ERROR_BucketAlreadyOwnedByYou)) {
+       (result != S3_RESULT_OK &&
+        hdl->last_s3_error_code == S3_ERROR_BucketAlreadyOwnedByYou)) {
         /* verify the that the location constraint on the existing bucket matches
          * the one that's configured.
          */
-        result = perform_request(hdl, "GET", bucket, NULL, "location", NULL,
-                                 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-                                 NULL, NULL, result_handling);
+       if (is_non_empty_string(hdl->bucket_location)) {
+            result = perform_request(hdl, "GET", bucket, NULL, "location", NULL,
+                                     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+                                     NULL, NULL, result_handling);
+       } else {
+            result = perform_request(hdl, "GET", bucket, NULL, NULL, NULL,
+                                     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+                                     NULL, NULL, result_handling);
+       }
 
-        /* note that we can check only one of the three AND conditions above 
-         * and infer that the others are true
-         */
         if (result == S3_RESULT_OK && is_non_empty_string(hdl->bucket_location)) {
             /* return to the default state of failure */
             result = S3_RESULT_FAIL;
@@ -2005,6 +2459,26 @@ cleanup:
 
 }
 
+gboolean
+s3_is_bucket_exists(S3Handle *hdl,
+                     const char *bucket)
+{
+    s3_result_t result = S3_RESULT_FAIL;
+    static result_handling_t result_handling[] = {
+        { 200,  0,                    0, S3_RESULT_OK },
+        { 204,  0,                    0, S3_RESULT_OK },
+        RESULT_HANDLING_ALWAYS_RETRY,
+        { 0, 0,                       0, /* default: */ S3_RESULT_FAIL  }
+        };
+
+    result = perform_request(hdl, "GET", bucket, NULL, NULL,
+                            hdl->openstack_swift_api?"limit=1":"max-keys=1",
+                             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+                             NULL, NULL, result_handling);
+
+    return result == S3_RESULT_OK;
+}
+
 gboolean
 s3_delete_bucket(S3Handle *hdl,
                  const char *bucket)