Imported Upstream version 3.3.3
[debian/amanda] / device-src / s3.c
index a3c030b58d52c746d3868b3238754c1466fc28da..453fabc946dc2dde4612d7fec91c3921316e1377 100644
@@ -1,9 +1,10 @@
 /*
  * Copyright (c) 2008-2012 Zmanda, Inc.  All Rights Reserved.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
  *
  * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
@@ -186,6 +187,10 @@ struct S3Handle {
     char *content_type;
 
     gboolean reuse_connection;
+
+    /* CAStor */
+    char *reps;
+    char *reps_bucket;
 };
 
 typedef struct {
@@ -761,7 +766,7 @@ build_url(
     }
 
     /* query string */
-    if (subresource || query)
+    if (subresource || query || (hdl->s3_api == S3_API_CASTOR && hdl->tenant_name))
         g_string_append(url, "?");
 
     if (subresource)
@@ -773,6 +778,14 @@ build_url(
     if (query)
         g_string_append(url, query);
 
+    /* add CAStor tenant domain override query arg */
+    if (hdl->s3_api == S3_API_CASTOR && hdl->tenant_name) {
+        if (subresource || query) {
+            g_string_append(url, "&");
+        }
+        g_string_append_printf(url, "domain=%s", hdl->tenant_name);
+    }
+
 cleanup:
 
     return g_string_free(url, FALSE);
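
For context, a minimal standalone sketch (not part of the patch) of the query-string layout this build_url() change produces for CAStor; the host, bucket, key and tenant names below are hypothetical placeholders, only the trailing domain=<tenant> argument is what the hunk adds:

    /* Hedged sketch: mirrors the appending order above (subresource, query,
     * then the CAStor domain override).  Build: gcc `pkg-config --cflags
     * --libs glib-2.0` */
    #include <glib.h>
    #include <stdio.h>

    int main(void)
    {
        GString *url = g_string_new("http://castor.example.com/mybucket/mykey");
        const char *subresource = NULL;      /* e.g. "uploads" */
        const char *query = NULL;            /* e.g. "format=xml" */
        const char *tenant_name = "tenant1"; /* stands in for hdl->tenant_name */

        if (subresource || query || tenant_name)
            g_string_append(url, "?");
        if (subresource)
            g_string_append(url, subresource);
        if (subresource && query)
            g_string_append(url, "&");
        if (query)
            g_string_append(url, query);
        if (tenant_name) {
            if (subresource || query)
                g_string_append(url, "&");
            g_string_append_printf(url, "domain=%s", tenant_name);
        }

        printf("%s\n", url->str);   /* .../mybucket/mykey?domain=tenant1 */
        g_string_free(url, TRUE);
        return 0;
    }
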
@@ -799,6 +812,7 @@ authenticate_request(S3Handle *hdl,
     struct curl_slist *headers = NULL;
     char *esc_bucket = NULL, *esc_key = NULL;
     GString *auth_string = NULL;
+    char *reps = NULL;
 
     /* From RFC 2616 */
     static const char *wkday[] = {"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"};
@@ -850,6 +864,23 @@ authenticate_request(S3Handle *hdl,
             headers = curl_slist_append(headers, buf);
             g_free(buf);
        }
+    } else if (hdl->s3_api == S3_API_CASTOR) {
+        if (g_str_equal(verb, "PUT") || g_str_equal(verb, "POST")) {
+            if (key) {
+                buf = g_strdup("CAStor-Application: Amanda");
+                headers = curl_slist_append(headers, buf);
+                g_free(buf);
+                reps = g_strdup(hdl->reps); /* object replication level */
+            } else {
+                reps = g_strdup(hdl->reps_bucket); /* bucket replication level */
+            }
+
+            /* set object replicas in lifepoint */
+            buf = g_strdup_printf("lifepoint: [] reps=%s", reps);
+            headers = curl_slist_append(headers, buf);
+            g_free(buf);
+            g_free(reps);
+        }
     } else {
        /* Build the string to sign, per the S3 spec.
         * See: "Authenticating REST Requests" - API Version 2006-03-01 pg 58
@@ -1249,8 +1280,9 @@ interpret_response(S3Handle *hdl,
     curl_easy_getinfo(hdl->curl, CURLINFO_RESPONSE_CODE, &response_code);
     hdl->last_response_code = response_code;
 
-    /* check ETag, if present */
-    if (etag && content_md5 && 200 == response_code) {
+    /* check ETag, if present and not CAStor */
+    if (etag && content_md5 && 200 == response_code &&
+        hdl->s3_api != S3_API_CASTOR) {
         if (etag && g_ascii_strcasecmp(etag, content_md5))
             hdl->last_message = g_strdup("S3 Error: Possible data corruption (ETag returned by Amazon did not match the MD5 hash of the data sent)");
         else
@@ -1357,6 +1389,10 @@ interpret_response(S3Handle *hdl,
        g_free(details);
        g_free(body_copy);
        return FALSE;
+    } else if (hdl->s3_api == S3_API_CASTOR) {
+       /* The error message is the body */
+        hdl->last_message = g_strndup(body, body_len);
+        return FALSE;
     } else if (!hdl->content_type ||
               !g_str_equal(hdl->content_type, "application/xml")) {
        return FALSE;
@@ -1700,7 +1736,8 @@ perform_request(S3Handle *hdl,
     /* Set S3Internal Data */
     S3InternalData int_writedata = {{NULL, 0, 0, MAX_ERROR_RESPONSE_LEN}, NULL, NULL, NULL, FALSE, FALSE, NULL, hdl};
     gboolean should_retry;
-    guint retries = 0;
+    gint retries = 0;
+    gint retry_after_close = 0;
     gulong backoff = EXPONENTIAL_BACKOFF_START_USEC;
     /* corresponds to PUT, HEAD, GET, and POST */
     int curlopt_upload = 0, curlopt_nobody = 0, curlopt_httpget = 0, curlopt_post = 0;
@@ -1898,7 +1935,7 @@ perform_request(S3Handle *hdl,
        }
 
        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_FRESH_CONNECT,
-               (long)(hdl->reuse_connection? 0 : 1)))) {
+               (long)(hdl->reuse_connection && retry_after_close == 0 ? 0 : 1)))) {
            goto curl_error;
        }
        if ((curl_code = curl_easy_setopt(hdl->curl, CURLOPT_FORBID_REUSE,
@@ -1930,6 +1967,13 @@ perform_request(S3Handle *hdl,
                 break;
         }
 
+        if (retries >= EXPONENTIAL_BACKOFF_MAX_RETRIES &&
+           retry_after_close < 3 &&
+           hdl->last_s3_error_code == S3_ERROR_RequestTimeout) {
+           retries = -1;
+           retry_after_close++;
+           g_debug("Retry on a new connection");
+       }
         if (retries >= EXPONENTIAL_BACKOFF_MAX_RETRIES) {
             /* we're out of retries, so annotate hdl->last_message appropriately and bail
              * out. */
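
A rough sketch of the retry control flow this hunk introduces (simplified; do_request(), MAX_RETRIES and the error constant are placeholders, not names from this file): once the exponential-backoff retries are exhausted, a RequestTimeout error restarts the retry counter up to three more times, each time forcing a fresh connection, which is what the CURLOPT_FRESH_CONNECT change above keys on.

    /* Hedged sketch of the loop shape after this change; terminates after at
     * most MAX_RETRIES * 4 attempts in the worst case. */
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_RETRIES 5

    enum s3_error { S3_ERR_NONE, S3_ERR_REQUEST_TIMEOUT };

    static enum s3_error do_request(bool fresh_connection)
    {
        /* placeholder for curl_easy_perform() + interpret_response() */
        (void) fresh_connection;
        return S3_ERR_REQUEST_TIMEOUT;
    }

    int main(void)
    {
        int retries = 0;
        int retry_after_close = 0;
        enum s3_error err;

        while (1) {
            /* force a new TCP connection once in the "retry after close"
             * phase, mirroring the CURLOPT_FRESH_CONNECT change above */
            err = do_request(retry_after_close > 0);
            if (err == S3_ERR_NONE)
                break;

            retries++;

            if (retries >= MAX_RETRIES &&
                retry_after_close < 3 &&
                err == S3_ERR_REQUEST_TIMEOUT) {
                retries = 0;              /* start a fresh round of retries */
                retry_after_close++;
                printf("Retry on a new connection\n");
            }
            if (retries >= MAX_RETRIES)
                break;                    /* out of retries, give up */
        }
        return 0;
    }
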
@@ -2079,7 +2123,7 @@ compile_regexes(void)
         {"<Message>[[:space:]]*([^<]*)[[:space:]]*</Message>", REG_EXTENDED | REG_ICASE, &message_regex},
         {"^[a-z0-9](-*[a-z0-9]){2,62}$", REG_EXTENDED | REG_NOSUB, &subdomain_regex},
         {"(/>)|(>([^<]*)</LocationConstraint>)", REG_EXTENDED | REG_ICASE, &location_con_regex},
-        {"^Date:(.*)\r",REG_EXTENDED | REG_ICASE | REG_NEWLINE, &date_sync_regex},
+        {"^Date:(.*)$",REG_EXTENDED | REG_ICASE | REG_NEWLINE, &date_sync_regex},
         {"\"access_token\" : \"([^\"]*)\",", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &access_token_regex},
        {"\"expires_in\" : (.*)", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &expires_in_regex},
         {"\"details\": \"([^\"]*)\",", REG_EXTENDED | REG_ICASE | REG_NEWLINE, &details_regex},
@@ -2125,7 +2169,7 @@ compile_regexes(void)
         {"(/>)|(>([^<]*)</LocationConstraint>)",
          G_REGEX_CASELESS,
          &location_con_regex},
-        {"^Date:(.*)\\r",
+        {"^Date:(.*)$",
          G_REGEX_OPTIMIZE | G_REGEX_CASELESS,
          &date_sync_regex},
         {"\"access_token\" : \"([^\"]*)\"",
@@ -2289,7 +2333,9 @@ s3_open(const char *access_key,
        const char *client_id,
        const char *client_secret,
        const char *refresh_token,
-       const gboolean reuse_connection)
+       const gboolean reuse_connection,
+        const char *reps,
+        const char *reps_bucket)
 {
     S3Handle *hdl;
 
@@ -2323,6 +2369,12 @@ s3_open(const char *access_key,
        hdl->client_id = g_strdup(client_id);
        hdl->client_secret = g_strdup(client_secret);
        hdl->refresh_token = g_strdup(refresh_token);
+    } else if (s3_api == S3_API_CASTOR) {
+       hdl->username = g_strdup(username);
+       hdl->password = g_strdup(password);
+       hdl->tenant_name = g_strdup(tenant_name);
+        hdl->reps = g_strdup(reps);
+        hdl->reps_bucket = g_strdup(reps_bucket);
     }
 
     /* NULL is okay */
@@ -2372,6 +2424,29 @@ s3_open(const char *access_key,
     hdl->curl = curl_easy_init();
     if (!hdl->curl) goto error;
 
+    /* Set HTTP handling options for CAStor */
+    if (s3_api == S3_API_CASTOR) {
+#if LIBCURL_VERSION_NUM >= 0x071301
+       curl_version_info_data *info;
+       /* check the runtime version too */
+       info = curl_version_info(CURLVERSION_NOW);
+       if (info->version_num >= 0x071301) {
+            curl_easy_setopt(hdl->curl, CURLOPT_FOLLOWLOCATION, 1);
+            curl_easy_setopt(hdl->curl, CURLOPT_UNRESTRICTED_AUTH, 1);
+            curl_easy_setopt(hdl->curl, CURLOPT_MAXREDIRS, 5);
+            curl_easy_setopt(hdl->curl, CURLOPT_POSTREDIR, CURL_REDIR_POST_ALL);
+            curl_easy_setopt(hdl->curl, CURLOPT_HTTP_VERSION,
+                                       CURL_HTTP_VERSION_1_1);
+            if (hdl->username)
+                curl_easy_setopt(hdl->curl, CURLOPT_USERNAME, hdl->username);
+            if (hdl->password)
+                curl_easy_setopt(hdl->curl, CURLOPT_PASSWORD, hdl->password);
+            curl_easy_setopt(hdl->curl, CURLOPT_HTTPAUTH,
+                            (CURLAUTH_BASIC | CURLAUTH_DIGEST));
+       }
+#endif
+    }
+
     return hdl;
 
 error:
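
The CAStor block above gates the redirect/auth options both at compile time (LIBCURL_VERSION_NUM) and at run time (curl_version_info()), since the binary may be linked against an older libcurl than it was built with. A minimal standalone sketch of that pattern, assuming nothing beyond stock libcurl (0x071301 is 7.19.1, the release that introduced CURLOPT_USERNAME, CURLOPT_PASSWORD and CURLOPT_POSTREDIR):

    /* Hedged sketch of the compile-time + run-time libcurl version gate. */
    #include <curl/curl.h>
    #include <stdio.h>

    int main(void)
    {
        curl_global_init(CURL_GLOBAL_DEFAULT);
        CURL *curl = curl_easy_init();
        if (!curl)
            return 1;

    #if LIBCURL_VERSION_NUM >= 0x071301
        /* header is new enough; still check the library actually loaded */
        curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);
        if (info->version_num >= 0x071301) {
            curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
            curl_easy_setopt(curl, CURLOPT_MAXREDIRS, 5L);
            curl_easy_setopt(curl, CURLOPT_POSTREDIR, CURL_REDIR_POST_ALL);
        } else {
            fprintf(stderr, "libcurl runtime older than 7.19.1\n");
        }
    #endif

        curl_easy_cleanup(curl);
        curl_global_cleanup();
        return 0;
    }
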
@@ -2585,10 +2660,17 @@ s3_upload(S3Handle *hdl,
         RESULT_HANDLING_ALWAYS_RETRY,
         { 0,    0, 0, /* default: */ S3_RESULT_FAIL }
         };
+    char *verb = "PUT";
+    char *content_type = NULL;
 
     g_assert(hdl != NULL);
 
-    result = perform_request(hdl, "PUT", bucket, key, NULL, NULL, NULL, NULL,
+    if (hdl->s3_api == S3_API_CASTOR) {
+        verb = "POST";
+       content_type = "application/x-amanda-backup-data";
+    }
+
+    result = perform_request(hdl, verb, bucket, key, NULL, NULL, content_type, NULL,
                  read_func, reset_func, size_func, md5_func, read_data,
                  NULL, NULL, NULL, progress_func, progress_data,
                  result_handling);
@@ -2749,12 +2831,17 @@ list_fetch(S3Handle *hdl,
                 hdl->s3_api == S3_API_SWIFT_2) &&
                strcmp(keyword, "max-keys") == 0) {
                keyword = "limit";
-           }
+           } else if ((hdl->s3_api == S3_API_CASTOR) &&
+                strcmp(keyword, "max-keys") == 0) {
+                keyword = "size";
+            }
             g_string_append_printf(query, "%s=%s", keyword, esc_value);
             curl_free(esc_value);
        }
     }
-    if (hdl->s3_api == S3_API_SWIFT_1 || hdl->s3_api == S3_API_SWIFT_2) {
+    if (hdl->s3_api == S3_API_SWIFT_1 ||
+        hdl->s3_api == S3_API_SWIFT_2 ||
+        hdl->s3_api == S3_API_CASTOR) {
        if (have_prev_part)
            g_string_append(query, "&");
        g_string_append(query, "format=xml");
@@ -2896,6 +2983,7 @@ s3_delete(S3Handle *hdl,
 {
     s3_result_t result = S3_RESULT_FAIL;
     static result_handling_t result_handling[] = {
+        { 200,  0,                     0, S3_RESULT_OK },
         { 204,  0,                     0, S3_RESULT_OK },
         { 404,  0,                     0, S3_RESULT_OK },
         { 404,  S3_ERROR_NoSuchBucket, 0, S3_RESULT_OK },
@@ -2975,6 +3063,8 @@ s3_make_bucket(S3Handle *hdl,
               const char *project_id)
 {
     char *body = NULL;
+    char *verb = "PUT";
+    char *content_type = NULL;
     s3_result_t result = S3_RESULT_FAIL;
     static result_handling_t result_handling[] = {
         { 200,  0,                    0, S3_RESULT_OK },
@@ -3019,7 +3109,12 @@ s3_make_bucket(S3Handle *hdl,
         }
     }
 
-    result = perform_request(hdl, "PUT", bucket, NULL, NULL, NULL, NULL,
+    if (hdl->s3_api == S3_API_CASTOR) {
+        verb = "POST";
+        content_type = "application/castorcontext";
+    }
+
+    result = perform_request(hdl, verb, bucket, NULL, NULL, NULL, content_type,
                 project_id,
                  read_func, reset_func, size_func, md5_func, ptr,
                  NULL, NULL, NULL, NULL, NULL, result_handling);
@@ -3044,7 +3139,6 @@ s3_make_bucket(S3Handle *hdl,
             /* return to the default state of failure */
             result = S3_RESULT_FAIL;
 
-            if (body) g_free(body);
             /* use strndup to get a null-terminated string */
             body = g_strndup(hdl->last_response_body, hdl->last_response_body_size);
             if (!body) {
@@ -3171,6 +3265,8 @@ s3_is_bucket_exists(S3Handle *hdl,
     if (hdl->s3_api == S3_API_SWIFT_1 ||
        hdl->s3_api == S3_API_SWIFT_2) {
        query = "limit=1";
+    } else if (hdl->s3_api == S3_API_CASTOR) {
+        query = "format=xml&size=0";
     } else {
        query = "max-keys=1";
     }