spaces.php

Devang Srivastava 2017-12-07 21:23:18 +05:30
commit eefa32741e
845 changed files with 50409 additions and 0 deletions

@@ -0,0 +1,60 @@
<?php
namespace Aws\S3;
use Aws\Api\Parser\AbstractParser;
use Aws\CommandInterface;
use Aws\Exception\AwsException;
use Psr\Http\Message\ResponseInterface;
/**
* Converts errors returned with a status code of 200 to a retryable error type.
*
* @internal
*/
class AmbiguousSuccessParser extends AbstractParser
{
private static $ambiguousSuccesses = [
'UploadPartCopy' => true,
'CopyObject' => true,
'CompleteMultipartUpload' => true,
];
/** @var callable */
private $parser;
/** @var callable */
private $errorParser;
/** @var string */
private $exceptionClass;
public function __construct(
callable $parser,
callable $errorParser,
$exceptionClass = AwsException::class
) {
$this->parser = $parser;
$this->errorParser = $errorParser;
$this->exceptionClass = $exceptionClass;
}
public function __invoke(
CommandInterface $command,
ResponseInterface $response
) {
if (200 === $response->getStatusCode()
&& isset(self::$ambiguousSuccesses[$command->getName()])
) {
$errorParser = $this->errorParser;
$parsed = $errorParser($response);
if (isset($parsed['code']) && isset($parsed['message'])) {
throw new $this->exceptionClass(
$parsed['message'],
$command,
['connection_error' => true]
);
}
}
$fn = $this->parser;
return $fn($command, $response);
}
}

@@ -0,0 +1,75 @@
<?php
namespace Aws\S3;
use Aws\CommandInterface;
use GuzzleHttp\Psr7;
use Psr\Http\Message\RequestInterface;
/**
* Apply required or optional MD5s to requests before sending.
*
* IMPORTANT: This middleware must be added after the "build" step.
*
* @internal
*/
class ApplyChecksumMiddleware
{
private static $md5 = [
'DeleteObjects',
'PutBucketCors',
'PutBucketLifecycle',
'PutBucketLifecycleConfiguration',
'PutBucketPolicy',
'PutBucketTagging',
'PutBucketReplication',
];
private static $sha256 = [
'PutObject',
'UploadPart',
];
private $nextHandler;
/**
* Create a middleware wrapper function.
*
* @return callable
*/
public static function wrap()
{
return function (callable $handler) {
return new self($handler);
};
}
public function __construct(callable $nextHandler)
{
$this->nextHandler = $nextHandler;
}
public function __invoke(
CommandInterface $command,
RequestInterface $request
) {
$next = $this->nextHandler;
$name = $command->getName();
$body = $request->getBody();
if (in_array($name, self::$md5) && !$request->hasHeader('Content-MD5')) {
// Set the content MD5 header for operations that require it.
$request = $request->withHeader(
'Content-MD5',
base64_encode(Psr7\hash($body, 'md5', true))
);
} elseif (in_array($name, self::$sha256) && $command['ContentSHA256']) {
// Set the content hash header if provided in the parameters.
$request = $request->withHeader(
'X-Amz-Content-Sha256',
$command['ContentSHA256']
);
}
return $next($command, $request);
}
}
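
A minimal wiring sketch, assuming the usual AWS SDK handler-list API; the step name 's3.checksum' and the client settings are illustrative, not taken from this commit:

<?php
use Aws\S3\ApplyChecksumMiddleware;
use Aws\S3\S3Client;

$s3 = new S3Client(['region' => 'us-west-2', 'version' => 'latest']);

// Append the wrapper after the "build" step, as the docblock above requires,
// so the request body is final when it is hashed.
$s3->getHandlerList()->appendBuild(ApplyChecksumMiddleware::wrap(), 's3.checksum');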

aws/Aws/S3/BatchDelete.php (new file, 237 lines)

@@ -0,0 +1,237 @@
<?php
namespace Aws\S3;
use Aws\AwsClientInterface;
use Aws\S3\Exception\DeleteMultipleObjectsException;
use GuzzleHttp\Promise;
use GuzzleHttp\Promise\PromisorInterface;
use GuzzleHttp\Promise\PromiseInterface;
/**
* Efficiently deletes many objects from a single Amazon S3 bucket using an
* iterator that yields keys. Deletes are made using the DeleteObjects API
* operation.
*
* $s3 = new Aws\S3\Client([
* 'region' => 'us-west-2',
* 'version' => 'latest'
* ]);
*
* $listObjectsParams = ['Bucket' => 'foo', 'Prefix' => 'starts/with/'];
* $delete = Aws\S3\BatchDelete::fromListObjects($s3, $listObjectsParams);
* // Asynchronously delete
* $promise = $delete->promise();
* // Force synchronous completion
* $delete->delete();
*
* When using one of the batch delete creational static methods, you can supply
* an associative array of options:
*
* - before: Function invoked before executing a command. The function is
* passed the command that is about to be executed. This can be useful
* for logging, adding custom request headers, etc.
* - batch_size: The size of each delete batch. Defaults to 1000.
*
* @link http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html
*/
class BatchDelete implements PromisorInterface
{
private $bucket;
/** @var AwsClientInterface */
private $client;
/** @var callable */
private $before;
/** @var PromiseInterface */
private $cachedPromise;
/** @var callable */
private $promiseCreator;
private $batchSize = 1000;
private $queue = [];
/**
* Creates a BatchDelete object from all of the paginated results of a
* ListObjects operation. Each result that is returned by the ListObjects
* operation will be deleted.
*
* @param AwsClientInterface $client AWS Client to use.
* @param array $listObjectsParams ListObjects API parameters
* @param array $options BatchDelete options.
*
* @return BatchDelete
*/
public static function fromListObjects(
AwsClientInterface $client,
array $listObjectsParams,
array $options = []
) {
$iter = $client->getPaginator('ListObjects', $listObjectsParams);
$bucket = $listObjectsParams['Bucket'];
$fn = function (BatchDelete $that) use ($iter) {
return $iter->each(function ($result) use ($that) {
$promises = [];
if (is_array($result['Contents'])) {
foreach ($result['Contents'] as $object) {
if ($promise = $that->enqueue($object)) {
$promises[] = $promise;
}
}
}
return $promises ? Promise\all($promises) : null;
});
};
return new self($client, $bucket, $fn, $options);
}
/**
* Creates a BatchDelete object from an iterator that yields results.
*
* @param AwsClientInterface $client AWS Client to use to execute commands
* @param string $bucket Bucket where the objects are stored
* @param \Iterator $iter Iterator that yields assoc arrays
* @param array $options BatchDelete options
*
* @return BatchDelete
*/
public static function fromIterator(
AwsClientInterface $client,
$bucket,
\Iterator $iter,
array $options = []
) {
$fn = function (BatchDelete $that) use ($iter) {
return \GuzzleHttp\Promise\coroutine(function () use ($that, $iter) {
foreach ($iter as $obj) {
if ($promise = $that->enqueue($obj)) {
yield $promise;
}
}
});
};
return new self($client, $bucket, $fn, $options);
}
public function promise()
{
if (!$this->cachedPromise) {
$this->cachedPromise = $this->createPromise();
}
return $this->cachedPromise;
}
/**
* Synchronously deletes all of the objects.
*
* @throws DeleteMultipleObjectsException on error.
*/
public function delete()
{
$this->promise()->wait();
}
/**
* @param AwsClientInterface $client Client used to transfer the requests
* @param string $bucket Bucket to delete from.
* @param callable $promiseFn Creates a promise.
* @param array $options Hash of options used with the batch
*
* @throws \InvalidArgumentException if the provided batch_size is <= 0
*/
private function __construct(
AwsClientInterface $client,
$bucket,
callable $promiseFn,
array $options = []
) {
$this->client = $client;
$this->bucket = $bucket;
$this->promiseCreator = $promiseFn;
if (isset($options['before'])) {
if (!is_callable($options['before'])) {
throw new \InvalidArgumentException('before must be callable');
}
$this->before = $options['before'];
}
if (isset($options['batch_size'])) {
if ($options['batch_size'] <= 0) {
throw new \InvalidArgumentException('batch_size is not > 0');
}
$this->batchSize = min($options['batch_size'], 1000);
}
}
private function enqueue(array $obj)
{
$this->queue[] = $obj;
return count($this->queue) >= $this->batchSize
? $this->flushQueue()
: null;
}
private function flushQueue()
{
static $validKeys = ['Key' => true, 'VersionId' => true];
if (count($this->queue) === 0) {
return null;
}
$batch = [];
while ($obj = array_shift($this->queue)) {
$batch[] = array_intersect_key($obj, $validKeys);
}
$command = $this->client->getCommand('DeleteObjects', [
'Bucket' => $this->bucket,
'Delete' => ['Objects' => $batch]
]);
if ($this->before) {
call_user_func($this->before, $command);
}
return $this->client->executeAsync($command)
->then(function ($result) {
if (!empty($result['Errors'])) {
throw new DeleteMultipleObjectsException(
$result['Deleted'] ?: [],
$result['Errors']
);
}
return $result;
});
}
/**
* Returns a promise that will clean up any references when it completes.
*
* @return PromiseInterface
*/
private function createPromise()
{
// Create the promise
$promise = call_user_func($this->promiseCreator, $this);
$this->promiseCreator = null;
// Cleans up the promise state and references.
$cleanup = function () {
$this->before = $this->client = $this->queue = null;
};
// When done, ensure cleanup and that any remaining are processed.
return $promise->then(
function () use ($cleanup) {
return Promise\promise_for($this->flushQueue())
->then($cleanup);
},
function ($reason) use ($cleanup) {
$cleanup();
return Promise\rejection_for($reason);
}
);
}
}
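
A short sketch of the iterator-based factory described above; the bucket name, keys, and batch size are illustrative:

<?php
use Aws\S3\BatchDelete;
use Aws\S3\S3Client;

$s3 = new S3Client(['region' => 'us-west-2', 'version' => 'latest']);

// Any \Iterator yielding associative arrays with at least a 'Key' works.
$keys = new ArrayIterator([
    ['Key' => 'photos/a.jpg'],
    ['Key' => 'photos/b.jpg', 'VersionId' => 'abc123'],
]);

$delete = BatchDelete::fromIterator($s3, 'my-bucket', $keys, [
    'batch_size' => 500,                           // flush a DeleteObjects call every 500 keys
    'before'     => function ($cmd) { /* log */ },
]);

$delete->delete(); // blocks; throws DeleteMultipleObjectsException on failures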

@@ -0,0 +1,75 @@
<?php
namespace Aws\S3;
use Aws\CommandInterface;
use Psr\Http\Message\RequestInterface;
/**
* Used to update the host used for S3 requests in the case of using a
* "bucket endpoint" or CNAME bucket.
*
* IMPORTANT: this middleware must be added after the "build" step.
*
* @internal
*/
class BucketEndpointMiddleware
{
private static $exclusions = ['GetBucketLocation' => true];
private $nextHandler;
/**
* Create a middleware wrapper function.
*
* @return callable
*/
public static function wrap()
{
return function (callable $handler) {
return new self($handler);
};
}
public function __construct(callable $nextHandler)
{
$this->nextHandler = $nextHandler;
}
public function __invoke(CommandInterface $command, RequestInterface $request)
{
$nextHandler = $this->nextHandler;
$bucket = $command['Bucket'];
if ($bucket && !isset(self::$exclusions[$command->getName()])) {
$request = $this->modifyRequest($request, $command);
}
return $nextHandler($command, $request);
}
private function removeBucketFromPath($path, $bucket)
{
$len = strlen($bucket) + 1;
if (substr($path, 0, $len) === "/{$bucket}") {
$path = substr($path, $len);
}
return $path ?: '/';
}
private function modifyRequest(
RequestInterface $request,
CommandInterface $command
) {
$uri = $request->getUri();
$path = $uri->getPath();
$bucket = $command['Bucket'];
$path = $this->removeBucketFromPath($path, $bucket);
// Modify the Key to make sure the key is encoded, but slashes are not.
if ($command['Key']) {
$path = S3Client::encodeKey(rawurldecode($path));
}
return $request->withUri($uri->withPath($path));
}
}
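
A hedged configuration sketch: this middleware matters when the client is pointed at a bucket-specific (CNAME) endpoint. The 'bucket_endpoint' option name and the hostname are assumptions for illustration:

<?php
use Aws\S3\S3Client;

// With a CNAME such as files.example.com aliasing the bucket, the bucket name
// must not be duplicated in the request path; this middleware strips it.
$s3 = new S3Client([
    'region'          => 'us-east-1',
    'version'         => 'latest',
    'endpoint'        => 'https://files.example.com',
    'bucket_endpoint' => true, // assumed client option enabling this behavior
]);

$s3->getObject(['Bucket' => 'files.example.com', 'Key' => 'docs/readme.txt']);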

@@ -0,0 +1,52 @@
<?php
namespace Aws\S3\Crypto;
use \Aws\Crypto\MetadataStrategyInterface;
use \Aws\Crypto\MetadataEnvelope;
class HeadersMetadataStrategy implements MetadataStrategyInterface
{
/**
* Places the information in the MetadataEnvelope into the Metadata for
* the PutObject request of the encrypted object.
*
* @param MetadataEnvelope $envelope Encryption data to save according to
* the strategy.
* @param array $args Arguments for PutObject that can be manipulated to
* store strategy related information.
*
* @return array Updated arguments for PutObject.
*/
public function save(MetadataEnvelope $envelope, array $args)
{
foreach ($envelope as $header=>$value) {
$args['Metadata'][$header] = $value;
}
return $args;
}
/**
* Generates a MetadataEnvelope according to the Metadata headers from the
* GetObject result.
*
* @param array $args Arguments from Command and Result that contains
* S3 Object information, relevant headers, and command
* configuration.
*
* @return MetadataEnvelope
*/
public function load(array $args)
{
$envelope = new MetadataEnvelope();
$constantValues = MetadataEnvelope::getConstantValues();
foreach ($constantValues as $constant) {
if (!empty($args['Metadata'][$constant])) {
$envelope[$constant] = $args['Metadata'][$constant];
}
}
return $envelope;
}
}

@@ -0,0 +1,90 @@
<?php
namespace Aws\S3\Crypto;
use \Aws\Crypto\MetadataStrategyInterface;
use \Aws\Crypto\MetadataEnvelope;
use \Aws\S3\S3Client;
/**
* Stores and reads encryption MetadataEnvelope information in a file on Amazon
* S3.
*
* A file with the contents of a MetadataEnvelope will be created or read from
* alongside the base file on Amazon S3. The provided client will be used for
* reading or writing this object. A specified suffix (default of '.instruction')
* will be applied to each of the operations involved with the instruction file.
*
* If there is a failure after an instruction file has been uploaded, it will
* not be automatically deleted.
*/
class InstructionFileMetadataStrategy implements MetadataStrategyInterface
{
const DEFAULT_FILE_SUFFIX = '.instruction';
private $client;
private $suffix;
/**
* @param S3Client $client Client for use in uploading the instruction file.
* @param string|null $suffix Optional override suffix for instruction file
* object keys.
*/
public function __construct(S3Client $client, $suffix = null)
{
$this->suffix = empty($suffix)
? self::DEFAULT_FILE_SUFFIX
: $suffix;
$this->client = $client;
}
/**
* Places the information in the MetadataEnvelope to a location on S3.
*
* @param MetadataEnvelope $envelope Encryption data to save according to
* the strategy.
* @param array $args Starting arguments for PutObject, used for saving
* the extra instruction file.
*
* @return array Updated arguments for PutObject.
*/
public function save(MetadataEnvelope $envelope, array $args)
{
$this->client->putObject([
'Bucket' => $args['Bucket'],
'Key' => $args['Key'] . $this->suffix,
'Body' => json_encode($envelope)
]);
return $args;
}
/**
* Uses the strategy's client to retrieve the instruction file from S3 and generates
* a MetadataEnvelope from its contents.
*
* @param array $args Arguments from Command and Result that contains
* S3 Object information, relevant headers, and command
* configuration.
*
* @return MetadataEnvelope
*/
public function load(array $args)
{
$result = $this->client->getObject([
'Bucket' => $args['Bucket'],
'Key' => $args['Key'] . $this->suffix
]);
$metadataHeaders = json_decode($result['Body'], true);
$envelope = new MetadataEnvelope();
$constantValues = MetadataEnvelope::getConstantValues();
foreach ($constantValues as $constant) {
if (!empty($metadataHeaders[$constant])) {
$envelope[$constant] = $metadataHeaders[$constant];
}
}
return $envelope;
}
}
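
A minimal sketch of using the strategy directly; the suffix, bucket, and key are illustrative, and in practice the strategy is normally selected through the encryption client shown later in this commit:

<?php
use Aws\Crypto\MetadataEnvelope;
use Aws\S3\Crypto\InstructionFileMetadataStrategy;
use Aws\S3\S3Client;

$s3 = new S3Client(['region' => 'us-east-1', 'version' => 'latest']);
$strategy = new InstructionFileMetadataStrategy($s3, '.enc-meta');

// Persist an (empty, illustrative) envelope as my-bucket/report.csv.enc-meta.
$args = $strategy->save(new MetadataEnvelope(), [
    'Bucket' => 'my-bucket',
    'Key'    => 'report.csv',
]);

// Later, rebuild the envelope from the instruction file alongside the object.
$envelope = $strategy->load(['Bucket' => 'my-bucket', 'Key' => 'report.csv']);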

@@ -0,0 +1,381 @@
<?php
namespace Aws\S3\Crypto;
use Aws\HashingStream;
use Aws\PhpHash;
use Aws\Crypto\AbstractCryptoClient;
use Aws\Crypto\EncryptionTrait;
use Aws\Crypto\DecryptionTrait;
use Aws\Crypto\MetadataEnvelope;
use Aws\Crypto\MaterialsProvider;
use Aws\Crypto\MetadataStrategyInterface;
use Aws\S3\S3Client;
use GuzzleHttp\Promise;
use GuzzleHttp\Promise\PromiseInterface;
use GuzzleHttp\Psr7;
/**
* Provides a wrapper for an S3Client that supplies functionality to encrypt
* data on putObject[Async] calls and decrypt data on getObject[Async] calls.
*/
class S3EncryptionClient extends AbstractCryptoClient
{
use EncryptionTrait, DecryptionTrait;
private $client;
private $instructionFileSuffix;
/**
* @param S3Client $client The S3Client to be used for true uploading and
* retrieving objects from S3 when using the
* encryption client.
* @param string|null $instructionFileSuffix Suffix for a client wide
* default when using instruction
* files for metadata storage.
*/
public function __construct(
S3Client $client,
$instructionFileSuffix = null
) {
$this->client = $client;
$this->instructionFileSuffix = $instructionFileSuffix;
}
private function getMaterialsProvider(array $args)
{
if ($args['@MaterialsProvider'] instanceof MaterialsProvider) {
return $args['@MaterialsProvider'];
} else {
throw new \InvalidArgumentException('An instance of MaterialsProvider'
. ' must be passed in the "MaterialsProvider" field.');
}
}
private function getInstructionFileSuffix(array $args)
{
return !empty($args['@InstructionFileSuffix'])
? $args['@InstructionFileSuffix']
: $this->instructionFileSuffix;
}
private static function getDefaultStrategy()
{
return new HeadersMetadataStrategy();
}
private function determineGetObjectStrategy(
$result,
$instructionFileSuffix
) {
if (isset($result['Metadata'][MetadataEnvelope::CONTENT_KEY_V2_HEADER])) {
return new HeadersMetadataStrategy();
} else {
return new InstructionFileMetadataStrategy(
$this->client,
$instructionFileSuffix
);
}
}
private function getMetadataStrategy(array $args, $instructionFileSuffix)
{
if (!empty($args['@MetadataStrategy'])) {
if ($args['@MetadataStrategy'] instanceof MetadataStrategyInterface) {
return $args['@MetadataStrategy'];
} elseif (is_string($args['@MetadataStrategy'])) {
switch ($args['@MetadataStrategy']) {
case HeadersMetadataStrategy::class:
return new HeadersMetadataStrategy();
case InstructionFileMetadataStrategy::class:
return new InstructionFileMetadataStrategy(
$this->client,
$instructionFileSuffix
);
default:
throw new \InvalidArgumentException('Could not match the'
. ' specified string in "MetadataStrategy" to a'
. ' predefined strategy.');
}
} else {
throw new \InvalidArgumentException('The metadata strategy that'
. ' was passed to "MetadataStrategy" was unrecognized.');
}
} elseif ($instructionFileSuffix) {
return new InstructionFileMetadataStrategy(
$this->client,
$instructionFileSuffix
);
}
return null;
}
/**
* Encrypts the data in the 'Body' field of $args and promises to upload it
* to the specified location on S3.
*
* @param array $args Arguments for encrypting an object and uploading it
* to S3 via PutObject.
*
* The required configuration arguments are as follows:
*
* - @MaterialsProvider: (MaterialsProvider) Provides Cek, Iv, and Cek
* encrypting/decrypting for encryption metadata.
* - @CipherOptions: (array) Cipher options for encrypting data. Only the
* Cipher option is required. Accepts the following:
* - Cipher: (string) cbc|gcm
* See also: AbstractCryptoClient::$supportedCiphers
* - KeySize: (int) 128|192|256
* See also: MaterialsProvider::$supportedKeySizes
* - Aad: (string) Additional authentication data. This option is
* passed directly to OpenSSL when using gcm. It is ignored when
* using cbc.
*
* The optional configuration arguments are as follows:
*
* - @MetadataStrategy: (MetadataStrategy|string|null) Strategy for storing
* MetadataEnvelope information. Defaults to using a
* HeadersMetadataStrategy. Can either be a class implementing
* MetadataStrategy, a class name of a predefined strategy, or empty/null
* to default.
* - @InstructionFileSuffix: (string|null) Suffix used when writing to an
* instruction file if using an InstructionFileMetadataHandler.
*
* @return PromiseInterface
*
* @throws \InvalidArgumentException Thrown when arguments above are not
* passed or are passed incorrectly.
*/
public function putObjectAsync(array $args)
{
$provider = $this->getMaterialsProvider($args);
unset($args['@MaterialsProvider']);
$instructionFileSuffix = $this->getInstructionFileSuffix($args);
unset($args['@InstructionFileSuffix']);
$strategy = $this->getMetadataStrategy($args, $instructionFileSuffix);
unset($args['@MetadataStrategy']);
$envelope = new MetadataEnvelope();
return Promise\promise_for($this->encrypt(
Psr7\stream_for($args['Body']),
$args['@CipherOptions'] ?: [],
$provider,
$envelope
))->then(
function ($encryptedBodyStream) use ($args) {
$hash = new PhpHash('sha256');
$hashingEncryptedBodyStream = new HashingStream(
$encryptedBodyStream,
$hash,
self::getContentShaDecorator($args)
);
return [$hashingEncryptedBodyStream, $args];
}
)->then(
function ($putObjectContents) use ($strategy, $envelope) {
list($bodyStream, $args) = $putObjectContents;
if ($strategy === null) {
$strategy = self::getDefaultStrategy();
}
$updatedArgs = $strategy->save($envelope, $args);
$updatedArgs['Body'] = $bodyStream;
return $updatedArgs;
}
)->then(
function ($args) {
unset($args['@CipherOptions']);
return $this->client->putObjectAsync($args);
}
);
}
private static function getContentShaDecorator(&$args)
{
return function ($hash) use (&$args) {
$args['ContentSHA256'] = bin2hex($hash);
};
}
/**
* Encrypts the data in the 'Body' field of $args and uploads it to the
* specified location on S3.
*
* @param array $args Arguments for encrypting an object and uploading it
* to S3 via PutObject.
*
* The required configuration arguments are as follows:
*
* - @MaterialsProvider: (MaterialsProvider) Provides Cek, Iv, and Cek
* encrypting/decrypting for encryption metadata.
* - @CipherOptions: (array) Cipher options for encrypting data. A Cipher
* is required. Accepts the following options:
* - Cipher: (string) cbc|gcm
* See also: AbstractCryptoClient::$supportedCiphers
* - KeySize: (int) 128|192|256
* See also: MaterialsProvider::$supportedKeySizes
* - Aad: (string) Additional authentication data. This option is
* passed directly to OpenSSL when using gcm. It is ignored when
* using cbc.
*
* The optional configuration arguments are as follows:
*
* - @MetadataStrategy: (MetadataStrategy|string|null) Strategy for storing
* MetadataEnvelope information. Defaults to using a
* HeadersMetadataStrategy. Can either be a class implementing
* MetadataStrategy, a class name of a predefined strategy, or empty/null
* to default.
* - @InstructionFileSuffix: (string|null) Suffix used when writing to an
* instruction file if an InstructionFileMetadataHandler was
* determined.
*
* @return \Aws\Result PutObject call result with the details of uploading
* the encrypted file.
*
* @throws \InvalidArgumentException Thrown when arguments above are not
* passed or are passed incorrectly.
*/
public function putObject(array $args)
{
return $this->putObjectAsync($args)->wait();
}
/**
* Promises to retrieve an object from S3 and decrypt the data in the
* 'Body' field.
*
* @param array $args Arguments for retrieving an object from S3 via
* GetObject and decrypting it.
*
* The required configuration argument is as follows:
*
* - @MaterialsProvider: (MaterialsProvider) Provides Cek, Iv, and Cek
* encrypting/decrypting for decryption metadata. May have data loaded
* from the MetadataEnvelope upon decryption.
*
* The optional configuration arguments are as follows:
*
* - SaveAs: (string) The path to a file on disk to save the decrypted
* object data. This will be handled by file_put_contents instead of the
* Guzzle sink.
*
* - @MetadataStrategy: (MetadataStrategy|string|null) Strategy for reading
* MetadataEnvelope information. Defaults to determining based on object
* response headers. Can either be a class implementing MetadataStrategy,
* a class name of a predefined strategy, or empty/null to default.
* - @InstructionFileSuffix: (string) Suffix used when looking for an
* instruction file if an InstructionFileMetadataHandler is being used.
* - @CipherOptions: (array) Cipher options for decrypting data. A Cipher
* is required. Accepts the following options:
* - Aad: (string) Additional authentication data. This option is
* passed directly to OpenSSL when using gcm. It is ignored when
* using cbc.
*
* @return PromiseInterface
*
* @throws \InvalidArgumentException Thrown when required arguments are not
* passed or are passed incorrectly.
*/
public function getObjectAsync(array $args)
{
$provider = $this->getMaterialsProvider($args);
unset($args['@MaterialsProvider']);
$instructionFileSuffix = $this->getInstructionFileSuffix($args);
unset($args['@InstructionFileSuffix']);
$strategy = $this->getMetadataStrategy($args, $instructionFileSuffix);
unset($args['@MetadataStrategy']);
$saveAs = null;
if (!empty($args['SaveAs'])) {
$saveAs = $args['SaveAs'];
}
$promise = $this->client->getObjectAsync($args)
->then(
function ($result) use (
$provider,
$instructionFileSuffix,
$strategy,
$args
) {
if ($strategy === null) {
$strategy = $this->determineGetObjectStrategy(
$result,
$instructionFileSuffix
);
}
$envelope = $strategy->load($args + [
'Metadata' => $result['Metadata']
]);
$provider = $provider->fromDecryptionEnvelope($envelope);
$result['Body'] = $this->decrypt(
$result['Body'],
$provider,
$envelope,
isset($args['@CipherOptions'])
? $args['@CipherOptions']
: []
);
return $result;
}
)->then(
function ($result) use ($saveAs) {
if (!empty($saveAs)) {
file_put_contents(
$saveAs,
(string)$result['Body'],
LOCK_EX
);
}
return $result;
}
);
return $promise;
}
/**
* Retrieves an object from S3 and decrypts the data in the 'Body' field.
*
* @param array $args Arguments for retrieving an object from S3 via
* GetObject and decrypting it.
*
* The required configuration argument is as follows:
*
* - @MaterialsProvider: (MaterialsProvider) Provides Cek, Iv, and Cek
* encrypting/decrypting for decryption metadata. May have data loaded
* from the MetadataEnvelope upon decryption.
*
* The optional configuration arguments are as follows:
*
* - SaveAs: (string) The path to a file on disk to save the decrypted
* object data. This will be handled by file_put_contents instead of the
* Guzzle sink.
* - @InstructionFileSuffix: (string|null) Suffix used when looking for an
* instruction file if an InstructionFileMetadataHandler was detected.
* - @CipherOptions: (array) Cipher options for encrypting data. A Cipher
* is required. Accepts the following options:
* - Aad: (string) Additional authentication data. This option is
* passed directly to OpenSSL when using gcm. It is ignored when
* using cbc.
*
* @return \Aws\Result GetObject call result with the 'Body' field
* wrapped in a decryption stream with its metadata
* information.
*
* @throws \InvalidArgumentException Thrown when arguments above are not
* passed or are passed incorrectly.
*/
public function getObject(array $args)
{
return $this->getObjectAsync($args)->wait();
}
}
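
A hedged end-to-end sketch based on the docblocks above. Aws\Crypto\KmsMaterialsProvider and the KMS key id are assumptions about the surrounding SDK; any MaterialsProvider implementation would do:

<?php
use Aws\Crypto\KmsMaterialsProvider; // assumed provider implementation
use Aws\Kms\KmsClient;
use Aws\S3\Crypto\S3EncryptionClient;
use Aws\S3\S3Client;

$region = 'us-east-1';
$s3  = new S3Client(['region' => $region, 'version' => 'latest']);
$kms = new KmsClient(['region' => $region, 'version' => 'latest']);

$provider  = new KmsMaterialsProvider($kms, 'alias/my-key'); // key id is illustrative
$encClient = new S3EncryptionClient($s3);

// Encrypt client-side, then upload; metadata defaults to object headers.
$encClient->putObject([
    '@MaterialsProvider' => $provider,
    '@CipherOptions'     => ['Cipher' => 'gcm', 'KeySize' => 256],
    'Bucket'             => 'my-bucket',
    'Key'                => 'secret.txt',
    'Body'               => 'plaintext data',
]);

// Download and decrypt; the strategy is inferred from the response headers.
$result = $encClient->getObject([
    '@MaterialsProvider' => $provider,
    'Bucket'             => 'my-bucket',
    'Key'                => 'secret.txt',
]);
echo (string) $result['Body'];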

@@ -0,0 +1,62 @@
<?php
namespace Aws\S3\Exception;
/**
* Exception thrown when errors occur while deleting objects using a
* {@see S3\BatchDelete} object.
*/
class DeleteMultipleObjectsException extends \Exception
{
private $deleted = [];
private $errors = [];
/**
* @param array $deleted Array of successfully deleted keys
* @param array $errors Array of errors that were encountered
*/
public function __construct(array $deleted, array $errors)
{
$this->deleted = array_values($deleted);
$this->errors = array_values($errors);
parent::__construct('Unable to delete certain keys when executing a'
. ' DeleteMultipleObjects request: '
. self::createMessageFromErrors($errors));
}
/**
* Create a single error message from multiple errors.
*
* @param array $errors Errors encountered
*
* @return string
*/
public static function createMessageFromErrors(array $errors)
{
return "\n- " . implode("\n- ", array_map(function ($key) {
return json_encode($key);
}, $errors));
}
/**
* Get the errored objects
*
* @return array Returns an array of associative arrays, each containing
* a 'Code', 'Message', and 'Key' key.
*/
public function getErrors()
{
return $this->errors;
}
/**
* Get the successfully deleted objects
*
* @return array Returns an array of associative arrays, each containing
* a 'Key' and optionally 'DeleteMarker' and
* 'DeleteMarkerVersionId'
*/
public function getDeleted()
{
return $this->deleted;
}
}
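
A brief handling sketch using the accessors documented above; $delete is a BatchDelete instance like the one built earlier in this commit:

<?php
use Aws\S3\Exception\DeleteMultipleObjectsException;

try {
    $delete->delete();
} catch (DeleteMultipleObjectsException $e) {
    foreach ($e->getErrors() as $error) {
        printf("Failed to delete %s: %s (%s)\n",
            $error['Key'], $error['Message'], $error['Code']);
    }
    printf("%d objects were still deleted.\n", count($e->getDeleted()));
}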

@@ -0,0 +1,4 @@
<?php
namespace Aws\S3\Exception;
class PermanentRedirectException extends S3Exception {}

@@ -0,0 +1,9 @@
<?php
namespace Aws\S3\Exception;
use Aws\Exception\AwsException;
/**
* Represents an error interacting with the Amazon Simple Storage Service.
*/
class S3Exception extends AwsException {}

@@ -0,0 +1,84 @@
<?php
namespace Aws\S3\Exception;
use Aws\CommandInterface;
use Aws\Exception\AwsException;
use Aws\Multipart\UploadState;
class S3MultipartUploadException extends \Aws\Exception\MultipartUploadException
{
/** @var string Bucket of the transfer object */
private $bucket;
/** @var string Key of the transfer object */
private $key;
/** @var string Source file name of the transfer object */
private $filename;
/**
* @param UploadState $state Upload state at time of the exception.
* @param \Exception|array $prev Exception being thrown. Could be an array of
* AwsExceptions being thrown when uploading parts
* for one object, or an instance of AwsException
* for a specific Multipart error being thrown in
* the MultipartUpload process.
*/
public function __construct(UploadState $state, $prev = null) {
if (is_array($prev) && $error = $prev[key($prev)]) {
$this->collectPathInfo($error->getCommand());
} elseif ($prev instanceof AwsException) {
$this->collectPathInfo($prev->getCommand());
}
parent::__construct($state, $prev);
}
/**
* Get the Bucket information of the transfer object
*
* @return string|null Returns null when 'Bucket' information
* is unavailable.
*/
public function getBucket()
{
return $this->bucket;
}
/**
* Get the Key information of the transfer object
*
* @return string|null Returns null when 'Key' information
* is unavailable.
*/
public function getKey()
{
return $this->key;
}
/**
* Get the source file name of the transfer object
*
* @return string|null Returns null when metadata of the stream
* wrapped in 'Body' parameter is unavailable.
*/
public function getSourceFileName()
{
return $this->filename;
}
/**
* Collect file path information when accessible. (Bucket, Key)
*
* @param CommandInterface $cmd
*/
private function collectPathInfo(CommandInterface $cmd)
{
if (empty($this->bucket) && isset($cmd['Bucket'])) {
$this->bucket = $cmd['Bucket'];
}
if (empty($this->key) && isset($cmd['Key'])) {
$this->key = $cmd['Key'];
}
if (empty($this->filename) && isset($cmd['Body'])) {
$this->filename = $cmd['Body']->getMetadata('uri');
}
}
}

@@ -0,0 +1,42 @@
<?php
namespace Aws\S3;
use Aws\Api\Parser\AbstractParser;
use Aws\CommandInterface;
use Psr\Http\Message\ResponseInterface;
/**
* @internal Decorates a parser for the S3 service to correctly handle the
* GetBucketLocation operation.
*/
class GetBucketLocationParser extends AbstractParser
{
/** @var callable */
private $parser;
/**
* @param callable $parser Parser to wrap.
*/
public function __construct(callable $parser)
{
$this->parser = $parser;
}
public function __invoke(
CommandInterface $command,
ResponseInterface $response
) {
$fn = $this->parser;
$result = $fn($command, $response);
if ($command->getName() === 'GetBucketLocation') {
$location = 'us-east-1';
if (preg_match('/>(.+?)<\/LocationConstraint>/', $response->getBody(), $matches)) {
$location = $matches[1] === 'EU' ? 'eu-west-1' : $matches[1];
}
$result['LocationConstraint'] = $location;
}
return $result;
}
}
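
A small sketch of the behavior this decorator guarantees, assuming $s3 is an S3Client as constructed in the earlier sketches; the bucket name is illustrative:

<?php
// Thanks to the parser above, an empty LocationConstraint becomes 'us-east-1'
// and the legacy 'EU' value becomes 'eu-west-1'.
$result = $s3->getBucketLocation(['Bucket' => 'my-bucket']);
echo $result['LocationConstraint']; // e.g. "eu-west-1"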

@@ -0,0 +1,183 @@
<?php
namespace Aws\S3;
use Aws\Multipart\AbstractUploadManager;
use Aws\ResultInterface;
use GuzzleHttp\Psr7;
class MultipartCopy extends AbstractUploadManager
{
use MultipartUploadingTrait;
/** @var string */
private $source;
/** @var ResultInterface */
private $sourceMetadata;
/**
* Creates a multipart upload for copying an S3 object.
*
* The valid configuration options are as follows:
*
* - acl: (string) ACL to set on the object being uploaded. Objects are
* private by default.
* - before_complete: (callable) Callback to invoke before the
* `CompleteMultipartUpload` operation. The callback should have a
* function signature like `function (Aws\Command $command) {...}`.
* - before_initiate: (callable) Callback to invoke before the
* `CreateMultipartUpload` operation. The callback should have a function
* signature like `function (Aws\Command $command) {...}`.
* - before_upload: (callable) Callback to invoke before `UploadPartCopy`
* operations. The callback should have a function signature like
* `function (Aws\Command $command) {...}`.
* - bucket: (string, required) Name of the bucket to which the object is
* being uploaded.
* - concurrency: (int, default=int(5)) Maximum number of concurrent
* `UploadPart` operations allowed during the multipart upload.
* - key: (string, required) Key to use for the object being uploaded.
* - params: (array) An array of key/value parameters that will be applied
* to each of the sub-commands run by the uploader as a base.
* Auto-calculated options will override these parameters. If you need
* more granularity over parameters to each sub-command, use the before_*
* options detailed above to update the commands directly.
* - part_size: (int, default=int(5242880)) Part size, in bytes, to use when
* doing a multipart upload. This must be between 5 MB and 5 GB, inclusive.
* - state: (Aws\Multipart\UploadState) An object that represents the state
* of the multipart upload and that is used to resume a previous upload.
* When this option is provided, the `bucket`, `key`, and `part_size`
* options are ignored.
* - source_metadata: (Aws\ResultInterface) An object that represents the
* result of executing a HeadObject command on the copy source.
*
* @param S3ClientInterface $client Client used for the upload.
* @param string $source Location of the data to be copied
* (in the form /<bucket>/<key>).
* @param array $config Configuration used to perform the upload.
*/
public function __construct(
S3ClientInterface $client,
$source,
array $config = []
) {
$this->source = '/' . ltrim($source, '/');
parent::__construct($client, array_change_key_case($config) + [
'source_metadata' => null
]);
}
/**
* An alias of the self::upload method.
*
* @see self::upload
*/
public function copy()
{
return $this->upload();
}
protected function loadUploadWorkflowInfo()
{
return [
'command' => [
'initiate' => 'CreateMultipartUpload',
'upload' => 'UploadPartCopy',
'complete' => 'CompleteMultipartUpload',
],
'id' => [
'bucket' => 'Bucket',
'key' => 'Key',
'upload_id' => 'UploadId',
],
'part_num' => 'PartNumber',
];
}
protected function getUploadCommands(callable $resultHandler)
{
$parts = ceil($this->getSourceSize() / $this->determinePartSize());
for ($partNumber = 1; $partNumber <= $parts; $partNumber++) {
// If we haven't already uploaded this part, yield a new part.
if (!$this->state->hasPartBeenUploaded($partNumber)) {
$command = $this->client->getCommand(
$this->info['command']['upload'],
$this->createPart($partNumber, $parts)
+ $this->getState()->getId()
);
$command->getHandlerList()->appendSign($resultHandler, 'mup');
yield $command;
}
}
}
private function createPart($partNumber, $partsCount)
{
$data = [];
// Apply custom params to UploadPartCopy data
$config = $this->getConfig();
$params = isset($config['params']) ? $config['params'] : [];
foreach ($params as $k => $v) {
$data[$k] = $v;
}
$data['CopySource'] = $this->source;
$data['PartNumber'] = $partNumber;
$defaultPartSize = $this->determinePartSize();
$startByte = $defaultPartSize * ($partNumber - 1);
$data['ContentLength'] = $partNumber < $partsCount
? $defaultPartSize
: $this->getSourceSize() - ($defaultPartSize * ($partsCount - 1));
$endByte = $startByte + $data['ContentLength'] - 1;
$data['CopySourceRange'] = "bytes=$startByte-$endByte";
return $data;
}
protected function extractETag(ResultInterface $result)
{
return $result->search('CopyPartResult.ETag');
}
protected function getSourceMimeType()
{
return $this->getSourceMetadata()['ContentType'];
}
protected function getSourceSize()
{
return $this->getSourceMetadata()['ContentLength'];
}
private function getSourceMetadata()
{
if (empty($this->sourceMetadata)) {
$this->sourceMetadata = $this->fetchSourceMetadata();
}
return $this->sourceMetadata;
}
private function fetchSourceMetadata()
{
if ($this->config['source_metadata'] instanceof ResultInterface) {
return $this->config['source_metadata'];
}
list($bucket, $key) = explode('/', ltrim($this->source, '/'), 2);
$headParams = [
'Bucket' => $bucket,
'Key' => $key,
];
if (strpos($key, '?')) {
list($key, $query) = explode('?', $key, 2);
$headParams['Key'] = $key;
$query = Psr7\parse_query($query, false);
if (isset($query['versionId'])) {
$headParams['VersionId'] = $query['versionId'];
}
}
return $this->client->headObject($headParams);
}
}
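
A usage sketch assembled from the configuration options listed above; the source path, destination, and part size are illustrative:

<?php
use Aws\S3\MultipartCopy;
use Aws\S3\S3Client;

$s3 = new S3Client(['region' => 'us-west-2', 'version' => 'latest']);

$copy = new MultipartCopy($s3, '/source-bucket/videos/large.mp4', [
    'bucket'    => 'destination-bucket',
    'key'       => 'videos/large-copy.mp4',
    'part_size' => 10 * 1024 * 1024, // 10 MB UploadPartCopy ranges
]);

$result = $copy->copy(); // alias of upload(); returns the CompleteMultipartUpload result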

@@ -0,0 +1,165 @@
<?php
namespace Aws\S3;
use Aws\HashingStream;
use Aws\Multipart\AbstractUploader;
use Aws\PhpHash;
use Aws\ResultInterface;
use GuzzleHttp\Psr7;
use Psr\Http\Message\StreamInterface as Stream;
use Aws\S3\Exception\S3MultipartUploadException;
/**
* Encapsulates the execution of a multipart upload to S3 or Glacier.
*/
class MultipartUploader extends AbstractUploader
{
use MultipartUploadingTrait;
const PART_MIN_SIZE = 5242880;
const PART_MAX_SIZE = 5368709120;
const PART_MAX_NUM = 10000;
/**
* Creates a multipart upload for an S3 object.
*
* The valid configuration options are as follows:
*
* - acl: (string) ACL to set on the object being uploaded. Objects are
* private by default.
* - before_complete: (callable) Callback to invoke before the
* `CompleteMultipartUpload` operation. The callback should have a
* function signature like `function (Aws\Command $command) {...}`.
* - before_initiate: (callable) Callback to invoke before the
* `CreateMultipartUpload` operation. The callback should have a function
* signature like `function (Aws\Command $command) {...}`.
* - before_upload: (callable) Callback to invoke before any `UploadPart`
* operations. The callback should have a function signature like
* `function (Aws\Command $command) {...}`.
* - bucket: (string, required) Name of the bucket to which the object is
* being uploaded.
* - concurrency: (int, default=int(5)) Maximum number of concurrent
* `UploadPart` operations allowed during the multipart upload.
* - key: (string, required) Key to use for the object being uploaded.
* - params: (array) An array of key/value parameters that will be applied
* to each of the sub-commands run by the uploader as a base.
* Auto-calculated options will override these parameters. If you need
* more granularity over parameters to each sub-command, use the before_*
* options detailed above to update the commands directly.
* - part_size: (int, default=int(5242880)) Part size, in bytes, to use when
* doing a multipart upload. This must be between 5 MB and 5 GB, inclusive.
* - state: (Aws\Multipart\UploadState) An object that represents the state
* of the multipart upload and that is used to resume a previous upload.
* When this option is provided, the `bucket`, `key`, and `part_size`
* options are ignored.
*
* @param S3ClientInterface $client Client used for the upload.
* @param mixed $source Source of the data to upload.
* @param array $config Configuration used to perform the upload.
*/
public function __construct(
S3ClientInterface $client,
$source,
array $config = []
) {
parent::__construct($client, $source, array_change_key_case($config) + [
'bucket' => null,
'key' => null,
'exception_class' => S3MultipartUploadException::class,
]);
}
protected function loadUploadWorkflowInfo()
{
return [
'command' => [
'initiate' => 'CreateMultipartUpload',
'upload' => 'UploadPart',
'complete' => 'CompleteMultipartUpload',
],
'id' => [
'bucket' => 'Bucket',
'key' => 'Key',
'upload_id' => 'UploadId',
],
'part_num' => 'PartNumber',
];
}
protected function createPart($seekable, $number)
{
// Initialize the array of part data that will be returned.
$data = [];
// Apply custom params to UploadPart data
$config = $this->getConfig();
$params = isset($config['params']) ? $config['params'] : [];
foreach ($params as $k => $v) {
$data[$k] = $v;
}
$data['PartNumber'] = $number;
// Read from the source to create the body stream.
if ($seekable) {
// Case 1: Source is seekable, use lazy stream to defer work.
$body = $this->limitPartStream(
new Psr7\LazyOpenStream($this->source->getMetadata('uri'), 'r')
);
} else {
// Case 2: Stream is not seekable; must store in temp stream.
$source = $this->limitPartStream($this->source);
$source = $this->decorateWithHashes($source, $data);
$body = Psr7\stream_for();
Psr7\copy_to_stream($source, $body);
}
$contentLength = $body->getSize();
// Do not create a part if the body size is zero.
if ($contentLength === 0) {
return false;
}
$body->seek(0);
$data['Body'] = $body;
$data['ContentLength'] = $contentLength;
return $data;
}
protected function extractETag(ResultInterface $result)
{
return $result['ETag'];
}
protected function getSourceMimeType()
{
if ($uri = $this->source->getMetadata('uri')) {
return Psr7\mimetype_from_filename($uri)
?: 'application/octet-stream';
}
}
protected function getSourceSize()
{
return $this->source->getSize();
}
/**
* Decorates a stream with a sha256 linear hashing stream.
*
* @param Stream $stream Stream to decorate.
* @param array $data Part data to augment with the hash result.
*
* @return Stream
*/
private function decorateWithHashes(Stream $stream, array &$data)
{
// Decorate source with a hashing stream
$hash = new PhpHash('sha256');
return new HashingStream($stream, $hash, function ($result) use (&$data) {
$data['ContentSHA256'] = bin2hex($result);
});
}
}
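
A usage sketch based on the options documented above; the file path, bucket, and key are illustrative, and the exception accessors come from S3MultipartUploadException earlier in this commit:

<?php
use Aws\S3\Exception\S3MultipartUploadException;
use Aws\S3\MultipartUploader;
use Aws\S3\S3Client;

$s3 = new S3Client(['region' => 'us-west-2', 'version' => 'latest']);

$uploader = new MultipartUploader($s3, '/tmp/backup.tar.gz', [
    'bucket'      => 'my-bucket',
    'key'         => 'backups/backup.tar.gz',
    'concurrency' => 5,
]);

try {
    $result = $uploader->upload();
    echo "Uploaded {$result['Key']}\n";
} catch (S3MultipartUploadException $e) {
    echo 'Upload of ' . $e->getSourceFileName()
        . ' to ' . $e->getBucket() . '/' . $e->getKey() . " failed\n";
}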

@@ -0,0 +1,132 @@
<?php
namespace Aws\S3;
use Aws\CommandInterface;
use Aws\Multipart\UploadState;
use Aws\ResultInterface;
trait MultipartUploadingTrait
{
/**
* Creates an UploadState object for a multipart upload by querying the
* service for the specified upload's information.
*
* @param S3ClientInterface $client S3Client used for the upload.
* @param string $bucket Bucket for the multipart upload.
* @param string $key Object key for the multipart upload.
* @param string $uploadId Upload ID for the multipart upload.
*
* @return UploadState
*/
public static function getStateFromService(
S3ClientInterface $client,
$bucket,
$key,
$uploadId
) {
$state = new UploadState([
'Bucket' => $bucket,
'Key' => $key,
'UploadId' => $uploadId,
]);
foreach ($client->getPaginator('ListParts', $state->getId()) as $result) {
// Get the part size from the first part in the first result.
if (!$state->getPartSize()) {
$state->setPartSize($result->search('Parts[0].Size'));
}
// Mark all the parts returned by ListParts as uploaded.
foreach ($result['Parts'] as $part) {
$state->markPartAsUploaded($part['PartNumber'], [
'PartNumber' => $part['PartNumber'],
'ETag' => $part['ETag']
]);
}
}
$state->setStatus(UploadState::INITIATED);
return $state;
}
protected function handleResult(CommandInterface $command, ResultInterface $result)
{
$this->getState()->markPartAsUploaded($command['PartNumber'], [
'PartNumber' => $command['PartNumber'],
'ETag' => $this->extractETag($result),
]);
}
abstract protected function extractETag(ResultInterface $result);
protected function getCompleteParams()
{
$config = $this->getConfig();
$params = isset($config['params']) ? $config['params'] : [];
$params['MultipartUpload'] = [
'Parts' => $this->getState()->getUploadedParts()
];
return $params;
}
protected function determinePartSize()
{
// Make sure the part size is set.
$partSize = $this->getConfig()['part_size'] ?: MultipartUploader::PART_MIN_SIZE;
// Adjust the part size to be larger for known, x-large uploads.
if ($sourceSize = $this->getSourceSize()) {
$partSize = (int) max(
$partSize,
ceil($sourceSize / MultipartUploader::PART_MAX_NUM)
);
}
// Ensure that the part size follows the rules: 5 MB <= size <= 5 GB.
if ($partSize < MultipartUploader::PART_MIN_SIZE || $partSize > MultipartUploader::PART_MAX_SIZE) {
throw new \InvalidArgumentException('The part size must be no less '
. 'than 5 MB and no greater than 5 GB.');
}
return $partSize;
}
protected function getInitiateParams()
{
$config = $this->getConfig();
$params = isset($config['params']) ? $config['params'] : [];
if (isset($config['acl'])) {
$params['ACL'] = $config['acl'];
}
// Set the ContentType if not already present
if (empty($params['ContentType']) && $type = $this->getSourceMimeType()) {
$params['ContentType'] = $type;
}
return $params;
}
/**
* @return UploadState
*/
abstract protected function getState();
/**
* @return array
*/
abstract protected function getConfig();
/**
* @return int
*/
abstract protected function getSourceSize();
/**
* @return string|null
*/
abstract protected function getSourceMimeType();
}
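
A resume sketch combining getStateFromService() with the 'state' option documented in the uploader; $uploadId would come from the original CreateMultipartUpload response and is assumed here:

<?php
use Aws\S3\MultipartUploader;
use Aws\S3\S3Client;

$s3 = new S3Client(['region' => 'us-west-2', 'version' => 'latest']);

// Rebuild the upload state from ListParts, then resume the remaining parts.
$state = MultipartUploader::getStateFromService(
    $s3,
    'my-bucket',
    'backups/backup.tar.gz',
    $uploadId // assumed: ID of the interrupted multipart upload
);

$uploader = new MultipartUploader($s3, '/tmp/backup.tar.gz', ['state' => $state]);
$result = $uploader->upload();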

aws/Aws/S3/ObjectCopier.php (new file, 150 lines)

@@ -0,0 +1,150 @@
<?php
namespace Aws\S3;
use Aws\Exception\MultipartUploadException;
use Aws\Result;
use Aws\S3\Exception\S3Exception;
use GuzzleHttp\Promise\PromisorInterface;
use InvalidArgumentException;
/**
* Copies objects from one S3 location to another, utilizing a multipart copy
* when appropriate.
*/
class ObjectCopier implements PromisorInterface
{
const DEFAULT_MULTIPART_THRESHOLD = MultipartUploader::PART_MAX_SIZE;
private $client;
private $source;
private $destination;
private $acl;
private $options;
private static $defaults = [
'before_lookup' => null,
'before_upload' => null,
'concurrency' => 5,
'mup_threshold' => self::DEFAULT_MULTIPART_THRESHOLD,
'params' => [],
'part_size' => null,
'version_id' => null,
];
/**
* @param S3ClientInterface $client The S3 Client used to execute
* the copy command(s).
* @param array $source The object to copy, specified as
* an array with 'Bucket' and
* 'Key' keys. Provide a
* 'VersionId' key to copy a
* specified version of an object.
* @param array $destination The bucket and key to which to
* copy the $source, specified as
* an array with 'Bucket' and
* 'Key' keys.
* @param string $acl ACL to apply to the copy
* (default: private).
* @param array $options Options used to configure the
* copy process. Options passed in
* through 'params' are added to
* the sub commands.
*
* @throws InvalidArgumentException
*/
public function __construct(
S3ClientInterface $client,
array $source,
array $destination,
$acl = 'private',
array $options = []
) {
$this->validateLocation($source);
$this->validateLocation($destination);
$this->client = $client;
$this->source = $source;
$this->destination = $destination;
$this->acl = $acl;
$this->options = $options + self::$defaults;
}
/**
* Perform the configured copy asynchronously. Returns a promise that is
* fulfilled with the result of the CompleteMultipartUpload or CopyObject
* operation or rejected with an exception.
*/
public function promise()
{
return \GuzzleHttp\Promise\coroutine(function () {
$headObjectCommand = $this->client->getCommand(
'HeadObject',
$this->options['params'] + $this->source
);
if (is_callable($this->options['before_lookup'])) {
$this->options['before_lookup']($headObjectCommand);
}
$objectStats = (yield $this->client->executeAsync(
$headObjectCommand
));
if ($objectStats['ContentLength'] > $this->options['mup_threshold']) {
$mup = new MultipartCopy(
$this->client,
$this->getSourcePath(),
['source_metadata' => $objectStats, 'acl' => $this->acl]
+ $this->destination
+ $this->options
);
yield $mup->promise();
} else {
$defaults = [
'ACL' => $this->acl,
'MetadataDirective' => 'COPY',
'CopySource' => $this->getSourcePath(),
];
$params = array_diff_key($this->options, self::$defaults)
+ $this->destination + $defaults + $this->options['params'];
yield $this->client->executeAsync(
$this->client->getCommand('CopyObject', $params)
);
}
});
}
/**
* Perform the configured copy synchronously. Returns the result of the
* CompleteMultipartUpload or CopyObject operation.
*
* @return Result
*
* @throws S3Exception
* @throws MultipartUploadException
*/
public function copy()
{
return $this->promise()->wait();
}
private function validateLocation(array $location)
{
if (empty($location['Bucket']) || empty($location['Key'])) {
throw new \InvalidArgumentException('Locations provided to an'
. ' Aws\S3\ObjectCopier must have a non-empty Bucket and Key');
}
}
private function getSourcePath()
{
$sourcePath = "/{$this->source['Bucket']}/"
. rawurlencode($this->source['Key']);
if (isset($this->source['VersionId'])) {
$sourcePath .= "?versionId={$this->source['VersionId']}";
}
return $sourcePath;
}
}
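
A usage sketch; bucket names, keys, and the multipart threshold are illustrative:

<?php
use Aws\S3\ObjectCopier;
use Aws\S3\S3Client;

$s3 = new S3Client(['region' => 'us-west-2', 'version' => 'latest']);

$copier = new ObjectCopier(
    $s3,
    ['Bucket' => 'source-bucket', 'Key' => 'data/original.bin'],
    ['Bucket' => 'dest-bucket',   'Key' => 'data/copy.bin'],
    'private',
    ['mup_threshold' => 100 * 1024 * 1024] // copy via multipart above ~100 MB
);

$result = $copier->copy(); // or $copier->promise() for the async form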

@@ -0,0 +1,140 @@
<?php
namespace Aws\S3;
use GuzzleHttp\Promise\PromisorInterface;
use GuzzleHttp\Psr7;
use Psr\Http\Message\StreamInterface;
/**
* Uploads an object to S3, using a PutObject command or a multipart upload as
* appropriate.
*/
class ObjectUploader implements PromisorInterface
{
const DEFAULT_MULTIPART_THRESHOLD = 16777216;
private $client;
private $bucket;
private $key;
private $body;
private $acl;
private $options;
private static $defaults = [
'before_upload' => null,
'concurrency' => 3,
'mup_threshold' => self::DEFAULT_MULTIPART_THRESHOLD,
'params' => [],
'part_size' => null,
];
/**
* @param S3ClientInterface $client The S3 Client used to execute
* the upload command(s).
* @param string $bucket Bucket to upload the object.
* @param string $key Key of the object.
* @param mixed $body Object data to upload. Can be a
* StreamInterface, PHP stream
* resource, or a string of data to
* upload.
* @param string $acl ACL to apply to the object
* (default: private).
* @param array $options Options used to configure the
* upload process. Options passed in
* through 'params' are added to
* the sub command(s).
*/
public function __construct(
S3ClientInterface $client,
$bucket,
$key,
$body,
$acl = 'private',
array $options = []
) {
$this->client = $client;
$this->bucket = $bucket;
$this->key = $key;
$this->body = Psr7\stream_for($body);
$this->acl = $acl;
$this->options = $options + self::$defaults;
}
public function promise()
{
/** @var int $mup_threshold */
$mup_threshold = $this->options['mup_threshold'];
if ($this->requiresMultipart($this->body, $mup_threshold)) {
// Perform a multipart upload.
return (new MultipartUploader($this->client, $this->body, [
'bucket' => $this->bucket,
'key' => $this->key,
'acl' => $this->acl
] + $this->options))->promise();
} else {
// Perform a regular PutObject operation.
$command = $this->client->getCommand('PutObject', [
'Bucket' => $this->bucket,
'Key' => $this->key,
'Body' => $this->body,
'ACL' => $this->acl,
] + $this->options['params']);
if (is_callable($this->options['before_upload'])) {
$this->options['before_upload']($command);
}
return $this->client->executeAsync($command);
}
}
public function upload()
{
return $this->promise()->wait();
}
/**
* Determines if the body should be uploaded using PutObject or the
* Multipart Upload System. It also modifies the passed-in $body as needed
* to support the upload.
*
* @param StreamInterface $body Stream representing the body.
* @param integer $threshold Minimum bytes before using Multipart.
*
* @return bool
*/
private function requiresMultipart(StreamInterface &$body, $threshold)
{
// If body size known, compare to threshold to determine if Multipart.
if ($body->getSize() !== null) {
return $body->getSize() >= $threshold;
}
/**
* Handle the situation where the body size is unknown.
* Read up to 5MB into a buffer to determine how to upload the body.
* @var StreamInterface $buffer
*/
$buffer = Psr7\stream_for();
Psr7\copy_to_stream($body, $buffer, MultipartUploader::PART_MIN_SIZE);
// If body < 5MB, use PutObject with the buffer.
if ($buffer->getSize() < MultipartUploader::PART_MIN_SIZE) {
$buffer->seek(0);
$body = $buffer;
return false;
}
// If body >= 5 MB, then use multipart.
if ($body->isSeekable()) {
// If the body is seekable, just rewind the body.
$body->seek(0);
} else {
// If the body is non-seekable, stitch the buffer and the partially
// read body together into one stream. This avoids unnecessary disk
// usage and does not require seeking on the
// original stream.
$buffer->seek(0);
$body = new Psr7\AppendStream([$buffer, $body]);
}
return true;
}
}
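
A usage sketch; the stream source, bucket, and key are illustrative, and ObjectURL is assumed to be injected by the PutObjectUrlMiddleware included later in this commit:

<?php
use Aws\S3\ObjectUploader;
use Aws\S3\S3Client;

$s3 = new S3Client(['region' => 'us-west-2', 'version' => 'latest']);

$uploader = new ObjectUploader(
    $s3,
    'my-bucket',
    'uploads/video.mp4',
    fopen('/tmp/video.mp4', 'r'), // StreamInterface, resource, or string
    'private'
);

$result = $uploader->upload(); // PutObject or multipart, chosen by size
echo $result['ObjectURL'];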

@@ -0,0 +1,62 @@
<?php
namespace Aws\S3;
use Aws\CommandInterface;
use Aws\ResultInterface;
use Aws\S3\Exception\PermanentRedirectException;
use Psr\Http\Message\RequestInterface;
/**
* Throws a PermanentRedirectException exception when a 301 redirect is
* encountered.
*
* @internal
*/
class PermanentRedirectMiddleware
{
/** @var callable */
private $nextHandler;
/**
* Create a middleware wrapper function.
*
* @return callable
*/
public static function wrap()
{
return function (callable $handler) {
return new self($handler);
};
}
/**
* @param callable $nextHandler Next handler to invoke.
*/
public function __construct(callable $nextHandler)
{
$this->nextHandler = $nextHandler;
}
public function __invoke(CommandInterface $command, RequestInterface $request = null)
{
$next = $this->nextHandler;
return $next($command, $request)->then(
function (ResultInterface $result) use ($command) {
$status = isset($result['@metadata']['statusCode'])
? $result['@metadata']['statusCode']
: null;
if ($status == 301) {
throw new PermanentRedirectException(
'Encountered a permanent redirect while requesting '
. $result->search('"@metadata".effectiveUri') . '. '
. 'Are you sure you are using the correct region for '
. 'this bucket?',
$command,
['result' => $result]
);
}
return $result;
}
);
}
}
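
A short handling sketch for the exception this middleware raises; the bucket name is illustrative and $s3 is assumed to be an S3Client configured for the wrong region:

<?php
use Aws\S3\Exception\PermanentRedirectException;

try {
    $s3->headBucket(['Bucket' => 'my-bucket']);
} catch (PermanentRedirectException $e) {
    // The bucket lives in a different region; rebuild the client with the
    // region reported by GetBucketLocation (or the x-amz-bucket-region header).
    echo $e->getMessage();
}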

aws/Aws/S3/PostObject.php (new file, 160 lines)

@@ -0,0 +1,160 @@
<?php
namespace Aws\S3;
use Aws\Credentials\CredentialsInterface;
use GuzzleHttp\Psr7\Uri;
/**
* @deprecated
*/
class PostObject
{
private $client;
private $bucket;
private $formAttributes;
private $formInputs;
private $jsonPolicy;
/**
* Constructs the PostObject.
*
* @param S3ClientInterface $client Client used with the POST object
* @param string $bucket Bucket to use
* @param array $formInputs Associative array of form input
* fields.
* @param string|array $jsonPolicy JSON encoded POST policy document.
* The policy will be base64 encoded
* and applied to the form on your
* behalf.
*/
public function __construct(
S3ClientInterface $client,
$bucket,
array $formInputs,
$jsonPolicy
) {
$this->client = $client;
$this->bucket = $bucket;
if (is_array($jsonPolicy)) {
$jsonPolicy = json_encode($jsonPolicy);
}
$this->jsonPolicy = $jsonPolicy;
$this->formAttributes = [
'action' => $this->generateUri(),
'method' => 'POST',
'enctype' => 'multipart/form-data'
];
$this->formInputs = $formInputs + ['key' => '${filename}'];
$credentials = $client->getCredentials()->wait();
$this->formInputs += $this->getPolicyAndSignature($credentials);
}
/**
* Gets the S3 client.
*
* @return S3ClientInterface
*/
public function getClient()
{
return $this->client;
}
/**
* Gets the bucket name.
*
* @return string
*/
public function getBucket()
{
return $this->bucket;
}
/**
* Gets the form attributes as an array.
*
* @return array
*/
public function getFormAttributes()
{
return $this->formAttributes;
}
/**
* Set a form attribute.
*
* @param string $attribute Form attribute to set.
* @param string $value Value to set.
*/
public function setFormAttribute($attribute, $value)
{
$this->formAttributes[$attribute] = $value;
}
/**
* Gets the form inputs as an array.
*
* @return array
*/
public function getFormInputs()
{
return $this->formInputs;
}
/**
* Set a form input.
*
* @param string $field Field name to set
* @param string $value Value to set.
*/
public function setFormInput($field, $value)
{
$this->formInputs[$field] = $value;
}
/**
* Gets the raw JSON policy.
*
* @return string
*/
public function getJsonPolicy()
{
return $this->jsonPolicy;
}
private function generateUri()
{
$uri = new Uri($this->client->getEndpoint());
if ($this->client->getConfig('use_path_style_endpoint') === true
|| ($uri->getScheme() === 'https'
&& strpos($this->bucket, '.') !== false)
) {
// Use path-style URLs
$uri = $uri->withPath("/{$this->bucket}");
} else {
// Use virtual-style URLs
$uri = $uri->withHost($this->bucket . '.' . $uri->getHost());
}
return (string) $uri;
}
protected function getPolicyAndSignature(CredentialsInterface $creds)
{
$jsonPolicy64 = base64_encode($this->jsonPolicy);
return [
'AWSAccessKeyId' => $creds->getAccessKeyId(),
'policy' => $jsonPolicy64,
'signature' => base64_encode(hash_hmac(
'sha1',
$jsonPolicy64,
$creds->getSecretKey(),
true
))
];
}
}

aws/Aws/S3/PostObjectV4.php (new file, 195 lines)

@@ -0,0 +1,195 @@
<?php
namespace Aws\S3;
use Aws\Credentials\CredentialsInterface;
use GuzzleHttp\Psr7\Uri;
use Aws\Signature\SignatureTrait;
use Aws\Signature\SignatureV4 as SignatureV4;
use Aws\Api\TimestampShape as TimestampShape;
/**
* Encapsulates the logic for getting the data for an S3 object POST upload form
*
* @link http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html
* @link http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-post-example.html
*/
class PostObjectV4
{
use SignatureTrait;
private $client;
private $bucket;
private $formAttributes;
private $formInputs;
/**
* Constructs the PostObject.
*
* The options array accepts the following keys:
* @link http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
*
* @param S3ClientInterface $client Client used with the POST object
* @param string $bucket Bucket to use
* @param array $formInputs Associative array of form input
* fields.
* @param array $options Policy condition options
* @param mixed $expiration Upload expiration time value. Defaults
* to a 1 hour validity period.
*/
public function __construct(
S3ClientInterface $client,
$bucket,
array $formInputs,
array $options = [],
$expiration = '+1 hours'
) {
$this->client = $client;
$this->bucket = $bucket;
// setup form attributes
$this->formAttributes = [
'action' => $this->generateUri(),
'method' => 'POST',
'enctype' => 'multipart/form-data'
];
$credentials = $this->client->getCredentials()->wait();
if ($securityToken = $credentials->getSecurityToken()) {
array_push($options, ['x-amz-security-token' => $securityToken]);
$formInputs['X-Amz-Security-Token'] = $securityToken;
}
// setup basic policy
$policy = [
'expiration' => TimestampShape::format($expiration, 'iso8601'),
'conditions' => $options,
];
// setup basic formInputs
$this->formInputs = $formInputs + ['key' => '${filename}'];
// finalize policy and signature
$this->formInputs += $this->getPolicyAndSignature(
$credentials,
$policy
);
}
/**
* Gets the S3 client.
*
* @return S3ClientInterface
*/
public function getClient()
{
return $this->client;
}
/**
* Gets the bucket name.
*
* @return string
*/
public function getBucket()
{
return $this->bucket;
}
/**
* Gets the form attributes as an array.
*
* @return array
*/
public function getFormAttributes()
{
return $this->formAttributes;
}
/**
* Set a form attribute.
*
* @param string $attribute Form attribute to set.
* @param string $value Value to set.
*/
public function setFormAttribute($attribute, $value)
{
$this->formAttributes[$attribute] = $value;
}
/**
* Gets the form inputs as an array.
*
* @return array
*/
public function getFormInputs()
{
return $this->formInputs;
}
/**
* Set a form input.
*
* @param string $field Field name to set
* @param string $value Value to set.
*/
public function setFormInput($field, $value)
{
$this->formInputs[$field] = $value;
}
private function generateUri()
{
$uri = new Uri($this->client->getEndpoint());
if ($this->client->getConfig('use_path_style_endpoint') === true
|| ($uri->getScheme() === 'https'
&& strpos($this->bucket, '.') !== false)
) {
// Use path-style URLs
$uri = $uri->withPath("/{$this->bucket}");
} else {
// Use virtual-hosted-style URLs if the host does not already include the bucket
if (strpos($uri->getHost(), $this->bucket . '.') !== 0) {
$uri = $uri->withHost($this->bucket . '.' . $uri->getHost());
}
}
return (string) $uri;
}
protected function getPolicyAndSignature(
CredentialsInterface $credentials,
array $policy
){
$ldt = gmdate(SignatureV4::ISO8601_BASIC);
$sdt = substr($ldt, 0, 8);
$policy['conditions'][] = ['X-Amz-Date' => $ldt];
$region = $this->client->getRegion();
$scope = $this->createScope($sdt, $region, 's3');
$creds = "{$credentials->getAccessKeyId()}/$scope";
$policy['conditions'][] = ['X-Amz-Credential' => $creds];
$policy['conditions'][] = ['X-Amz-Algorithm' => "AWS4-HMAC-SHA256"];
$jsonPolicy64 = base64_encode(json_encode($policy));
$key = $this->getSigningKey(
$sdt,
$region,
's3',
$credentials->getSecretKey()
);
return [
'X-Amz-Credential' => $creds,
'X-Amz-Algorithm' => "AWS4-HMAC-SHA256",
'X-Amz-Date' => $ldt,
'Policy' => $jsonPolicy64,
'X-Amz-Signature' => bin2hex(
hash_hmac('sha256', $jsonPolicy64, $key, true)
),
];
}
}


@ -0,0 +1,57 @@
<?php
namespace Aws\S3;
use Aws\CommandInterface;
use Aws\ResultInterface;
use Psr\Http\Message\RequestInterface;
/**
 * Injects ObjectURL into the results of the PutObject, CopyObject, and
 * CompleteMultipartUpload operations.
*
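 * With this middleware applied, callers can read the URL off the result; the
 * bucket and key below are illustrative:
 *
 *     $result = $s3Client->putObject([
 *         'Bucket' => 'example-bucket',
 *         'Key'    => 'example-key',
 *         'Body'   => 'hello world',
 *     ]);
 *     echo $result['ObjectURL'];
 *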
* @internal
*/
class PutObjectUrlMiddleware
{
/** @var callable */
private $nextHandler;
/**
* Create a middleware wrapper function.
*
* @return callable
*/
public static function wrap()
{
return function (callable $handler) {
return new self($handler);
};
}
/**
* @param callable $nextHandler Next handler to invoke.
*/
public function __construct(callable $nextHandler)
{
$this->nextHandler = $nextHandler;
}
public function __invoke(CommandInterface $command, RequestInterface $request = null)
{
$next = $this->nextHandler;
return $next($command, $request)->then(
function (ResultInterface $result) use ($command) {
$name = $command->getName();
switch ($name) {
case 'PutObject':
case 'CopyObject':
$result['ObjectURL'] = $result['@metadata']['effectiveUri'];
break;
case 'CompleteMultipartUpload':
$result['ObjectURL'] = $result['Location'];
break;
}
return $result;
}
);
}
}


@ -0,0 +1,48 @@
<?php
namespace Aws\S3;
use Aws\Api\Parser\AbstractParser;
use Aws\Api\Parser\Exception\ParserException;
use Aws\CommandInterface;
use Aws\Exception\AwsException;
use Psr\Http\Message\ResponseInterface;
/**
* Converts malformed responses to a retryable error type.
*
* @internal
*/
class RetryableMalformedResponseParser extends AbstractParser
{
/** @var callable */
private $parser;
/** @var string */
private $exceptionClass;
public function __construct(
callable $parser,
$exceptionClass = AwsException::class
) {
$this->parser = $parser;
$this->exceptionClass = $exceptionClass;
}
public function __invoke(
CommandInterface $command,
ResponseInterface $response
) {
$fn = $this->parser;
try {
return $fn($command, $response);
} catch (ParserException $e) {
throw new $this->exceptionClass(
"Error parsing response for {$command->getName()}:"
. " AWS parsing error: {$e->getMessage()}",
$command,
['connection_error' => true, 'exception' => $e],
$e
);
}
}
}

609
aws/Aws/S3/S3Client.php Normal file

@ -0,0 +1,609 @@
<?php
namespace Aws\S3;
use Aws\Api\ApiProvider;
use Aws\Api\DocModel;
use Aws\Api\Service;
use Aws\AwsClient;
use Aws\ClientResolver;
use Aws\Command;
use Aws\Exception\AwsException;
use Aws\HandlerList;
use Aws\Middleware;
use Aws\RetryMiddleware;
use Aws\ResultInterface;
use Aws\CommandInterface;
use GuzzleHttp\Exception\RequestException;
use GuzzleHttp\Promise;
use GuzzleHttp\Psr7;
use Psr\Http\Message\RequestInterface;
/**
* Client used to interact with **Amazon Simple Storage Service (Amazon S3)**.
*
* @method \Aws\Result abortMultipartUpload(array $args = [])
* @method \GuzzleHttp\Promise\Promise abortMultipartUploadAsync(array $args = [])
* @method \Aws\Result completeMultipartUpload(array $args = [])
* @method \GuzzleHttp\Promise\Promise completeMultipartUploadAsync(array $args = [])
* @method \Aws\Result copyObject(array $args = [])
* @method \GuzzleHttp\Promise\Promise copyObjectAsync(array $args = [])
* @method \Aws\Result createBucket(array $args = [])
* @method \GuzzleHttp\Promise\Promise createBucketAsync(array $args = [])
* @method \Aws\Result createMultipartUpload(array $args = [])
* @method \GuzzleHttp\Promise\Promise createMultipartUploadAsync(array $args = [])
* @method \Aws\Result deleteBucket(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketAsync(array $args = [])
* @method \Aws\Result deleteBucketAnalyticsConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketAnalyticsConfigurationAsync(array $args = [])
* @method \Aws\Result deleteBucketCors(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketCorsAsync(array $args = [])
* @method \Aws\Result deleteBucketEncryption(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketEncryptionAsync(array $args = [])
* @method \Aws\Result deleteBucketInventoryConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketInventoryConfigurationAsync(array $args = [])
* @method \Aws\Result deleteBucketLifecycle(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketLifecycleAsync(array $args = [])
* @method \Aws\Result deleteBucketMetricsConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketMetricsConfigurationAsync(array $args = [])
* @method \Aws\Result deleteBucketPolicy(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketPolicyAsync(array $args = [])
* @method \Aws\Result deleteBucketReplication(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketReplicationAsync(array $args = [])
* @method \Aws\Result deleteBucketTagging(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketTaggingAsync(array $args = [])
* @method \Aws\Result deleteBucketWebsite(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketWebsiteAsync(array $args = [])
* @method \Aws\Result deleteObject(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteObjectAsync(array $args = [])
* @method \Aws\Result deleteObjectTagging(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteObjectTaggingAsync(array $args = [])
* @method \Aws\Result deleteObjects(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteObjectsAsync(array $args = [])
* @method \Aws\Result getBucketAccelerateConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketAccelerateConfigurationAsync(array $args = [])
* @method \Aws\Result getBucketAcl(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketAclAsync(array $args = [])
* @method \Aws\Result getBucketAnalyticsConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketAnalyticsConfigurationAsync(array $args = [])
* @method \Aws\Result getBucketCors(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketCorsAsync(array $args = [])
* @method \Aws\Result getBucketEncryption(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketEncryptionAsync(array $args = [])
* @method \Aws\Result getBucketInventoryConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketInventoryConfigurationAsync(array $args = [])
* @method \Aws\Result getBucketLifecycle(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketLifecycleAsync(array $args = [])
* @method \Aws\Result getBucketLifecycleConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketLifecycleConfigurationAsync(array $args = [])
* @method \Aws\Result getBucketLocation(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketLocationAsync(array $args = [])
* @method \Aws\Result getBucketLogging(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketLoggingAsync(array $args = [])
* @method \Aws\Result getBucketMetricsConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketMetricsConfigurationAsync(array $args = [])
* @method \Aws\Result getBucketNotification(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketNotificationAsync(array $args = [])
* @method \Aws\Result getBucketNotificationConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketNotificationConfigurationAsync(array $args = [])
* @method \Aws\Result getBucketPolicy(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketPolicyAsync(array $args = [])
* @method \Aws\Result getBucketReplication(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketReplicationAsync(array $args = [])
* @method \Aws\Result getBucketRequestPayment(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketRequestPaymentAsync(array $args = [])
* @method \Aws\Result getBucketTagging(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketTaggingAsync(array $args = [])
* @method \Aws\Result getBucketVersioning(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketVersioningAsync(array $args = [])
* @method \Aws\Result getBucketWebsite(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketWebsiteAsync(array $args = [])
* @method \Aws\Result getObject(array $args = [])
* @method \GuzzleHttp\Promise\Promise getObjectAsync(array $args = [])
* @method \Aws\Result getObjectAcl(array $args = [])
* @method \GuzzleHttp\Promise\Promise getObjectAclAsync(array $args = [])
* @method \Aws\Result getObjectTagging(array $args = [])
* @method \GuzzleHttp\Promise\Promise getObjectTaggingAsync(array $args = [])
* @method \Aws\Result getObjectTorrent(array $args = [])
* @method \GuzzleHttp\Promise\Promise getObjectTorrentAsync(array $args = [])
* @method \Aws\Result headBucket(array $args = [])
* @method \GuzzleHttp\Promise\Promise headBucketAsync(array $args = [])
* @method \Aws\Result headObject(array $args = [])
* @method \GuzzleHttp\Promise\Promise headObjectAsync(array $args = [])
* @method \Aws\Result listBucketAnalyticsConfigurations(array $args = [])
* @method \GuzzleHttp\Promise\Promise listBucketAnalyticsConfigurationsAsync(array $args = [])
* @method \Aws\Result listBucketInventoryConfigurations(array $args = [])
* @method \GuzzleHttp\Promise\Promise listBucketInventoryConfigurationsAsync(array $args = [])
* @method \Aws\Result listBucketMetricsConfigurations(array $args = [])
* @method \GuzzleHttp\Promise\Promise listBucketMetricsConfigurationsAsync(array $args = [])
* @method \Aws\Result listBuckets(array $args = [])
* @method \GuzzleHttp\Promise\Promise listBucketsAsync(array $args = [])
* @method \Aws\Result listMultipartUploads(array $args = [])
* @method \GuzzleHttp\Promise\Promise listMultipartUploadsAsync(array $args = [])
* @method \Aws\Result listObjectVersions(array $args = [])
* @method \GuzzleHttp\Promise\Promise listObjectVersionsAsync(array $args = [])
* @method \Aws\Result listObjects(array $args = [])
* @method \GuzzleHttp\Promise\Promise listObjectsAsync(array $args = [])
* @method \Aws\Result listObjectsV2(array $args = [])
* @method \GuzzleHttp\Promise\Promise listObjectsV2Async(array $args = [])
* @method \Aws\Result listParts(array $args = [])
* @method \GuzzleHttp\Promise\Promise listPartsAsync(array $args = [])
* @method \Aws\Result putBucketAccelerateConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketAccelerateConfigurationAsync(array $args = [])
* @method \Aws\Result putBucketAcl(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketAclAsync(array $args = [])
* @method \Aws\Result putBucketAnalyticsConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketAnalyticsConfigurationAsync(array $args = [])
* @method \Aws\Result putBucketCors(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketCorsAsync(array $args = [])
* @method \Aws\Result putBucketEncryption(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketEncryptionAsync(array $args = [])
* @method \Aws\Result putBucketInventoryConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketInventoryConfigurationAsync(array $args = [])
* @method \Aws\Result putBucketLifecycle(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketLifecycleAsync(array $args = [])
* @method \Aws\Result putBucketLifecycleConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketLifecycleConfigurationAsync(array $args = [])
* @method \Aws\Result putBucketLogging(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketLoggingAsync(array $args = [])
* @method \Aws\Result putBucketMetricsConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketMetricsConfigurationAsync(array $args = [])
* @method \Aws\Result putBucketNotification(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketNotificationAsync(array $args = [])
* @method \Aws\Result putBucketNotificationConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketNotificationConfigurationAsync(array $args = [])
* @method \Aws\Result putBucketPolicy(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketPolicyAsync(array $args = [])
* @method \Aws\Result putBucketReplication(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketReplicationAsync(array $args = [])
* @method \Aws\Result putBucketRequestPayment(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketRequestPaymentAsync(array $args = [])
* @method \Aws\Result putBucketTagging(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketTaggingAsync(array $args = [])
* @method \Aws\Result putBucketVersioning(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketVersioningAsync(array $args = [])
* @method \Aws\Result putBucketWebsite(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketWebsiteAsync(array $args = [])
* @method \Aws\Result putObject(array $args = [])
* @method \GuzzleHttp\Promise\Promise putObjectAsync(array $args = [])
* @method \Aws\Result putObjectAcl(array $args = [])
* @method \GuzzleHttp\Promise\Promise putObjectAclAsync(array $args = [])
* @method \Aws\Result putObjectTagging(array $args = [])
* @method \GuzzleHttp\Promise\Promise putObjectTaggingAsync(array $args = [])
* @method \Aws\Result restoreObject(array $args = [])
* @method \GuzzleHttp\Promise\Promise restoreObjectAsync(array $args = [])
* @method \Aws\Result uploadPart(array $args = [])
* @method \GuzzleHttp\Promise\Promise uploadPartAsync(array $args = [])
* @method \Aws\Result uploadPartCopy(array $args = [])
* @method \GuzzleHttp\Promise\Promise uploadPartCopyAsync(array $args = [])
*/
class S3Client extends AwsClient implements S3ClientInterface
{
use S3ClientTrait;
public static function getArguments()
{
$args = parent::getArguments();
$args['retries']['fn'] = [__CLASS__, '_applyRetryConfig'];
$args['api_provider']['fn'] = [__CLASS__, '_applyApiProvider'];
return $args + [
'bucket_endpoint' => [
'type' => 'config',
'valid' => ['bool'],
'doc' => 'Set to true to send requests to a hardcoded '
. 'bucket endpoint rather than create an endpoint as a '
. 'result of injecting the bucket into the URL. This '
. 'option is useful for interacting with CNAME endpoints.',
],
'use_accelerate_endpoint' => [
'type' => 'config',
'valid' => ['bool'],
'doc' => 'Set to true to send requests to an S3 Accelerate'
. ' endpoint by default. Can be enabled or disabled on'
. ' individual operations by setting'
. ' \'@use_accelerate_endpoint\' to true or false. Note:'
. ' you must enable S3 Accelerate on a bucket before it can'
. ' be accessed via an Accelerate endpoint.',
'default' => false,
],
'use_dual_stack_endpoint' => [
'type' => 'config',
'valid' => ['bool'],
'doc' => 'Set to true to send requests to an S3 Dual Stack'
. ' endpoint by default, which enables IPv6 Protocol.'
. ' Can be enabled or disabled on individual operations by setting'
. ' \'@use_dual_stack_endpoint\' to true or false.',
'default' => false,
],
'use_path_style_endpoint' => [
'type' => 'config',
'valid' => ['bool'],
'doc' => 'Set to true to send requests to an S3 path style'
. ' endpoint by default.'
. ' Can be enabled or disabled on individual operations by setting'
. ' \'@use_path_style_endpoint\' to true or false.',
'default' => false,
],
];
}
/**
* {@inheritdoc}
*
* In addition to the options available to
* {@see Aws\AwsClient::__construct}, S3Client accepts the following
* options:
*
* - bucket_endpoint: (bool) Set to true to send requests to a
* hardcoded bucket endpoint rather than create an endpoint as a result
* of injecting the bucket into the URL. This option is useful for
* interacting with CNAME endpoints.
* - calculate_md5: (bool) Set to false to disable calculating an MD5
* for all Amazon S3 signed uploads.
* - use_accelerate_endpoint: (bool) Set to true to send requests to an S3
* Accelerate endpoint by default. Can be enabled or disabled on
* individual operations by setting '@use_accelerate_endpoint' to true or
* false. Note: you must enable S3 Accelerate on a bucket before it can be
* accessed via an Accelerate endpoint.
* - use_dual_stack_endpoint: (bool) Set to true to send requests to an S3
* Dual Stack endpoint by default, which enables IPv6 Protocol.
* Can be enabled or disabled on individual operations by setting
 * '@use_dual_stack_endpoint' to true or false. Note:
* you cannot use it together with an accelerate endpoint.
* - use_path_style_endpoint: (bool) Set to true to send requests to an S3
* path style endpoint by default.
* Can be enabled or disabled on individual operations by setting
 * '@use_path_style_endpoint' to true or false. Note:
* you cannot use it together with an accelerate endpoint.
*
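 * A minimal construction sketch (the region and option values below are
 * illustrative):
 *
 *     $s3 = new \Aws\S3\S3Client([
 *         'version' => 'latest',
 *         'region'  => 'us-west-2',
 *         'use_path_style_endpoint' => true,
 *     ]);
 *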
* @param array $args
*/
public function __construct(array $args)
{
parent::__construct($args);
$stack = $this->getHandlerList();
$stack->appendInit(SSECMiddleware::wrap($this->getEndpoint()->getScheme()), 's3.ssec');
$stack->appendBuild(ApplyChecksumMiddleware::wrap(), 's3.checksum');
$stack->appendBuild(
Middleware::contentType(['PutObject', 'UploadPart']),
's3.content_type'
);
// Use the bucket style middleware when using a "bucket_endpoint" (for cnames)
if ($this->getConfig('bucket_endpoint')) {
$stack->appendBuild(BucketEndpointMiddleware::wrap(), 's3.bucket_endpoint');
} else {
$stack->appendBuild(
S3EndpointMiddleware::wrap(
$this->getRegion(),
[
'dual_stack' => $this->getConfig('use_dual_stack_endpoint'),
'accelerate' => $this->getConfig('use_accelerate_endpoint'),
'path_style' => $this->getConfig('use_path_style_endpoint')
]
),
's3.endpoint_middleware'
);
}
$stack->appendSign(PutObjectUrlMiddleware::wrap(), 's3.put_object_url');
$stack->appendSign(PermanentRedirectMiddleware::wrap(), 's3.permanent_redirect');
$stack->appendInit(Middleware::sourceFile($this->getApi()), 's3.source_file');
$stack->appendInit($this->getSaveAsParameter(), 's3.save_as');
$stack->appendInit($this->getLocationConstraintMiddleware(), 's3.location');
$stack->appendInit($this->getEncodingTypeMiddleware(), 's3.auto_encode');
$stack->appendInit($this->getHeadObjectMiddleware(), 's3.head_object');
}
/**
* Determine if a string is a valid name for a DNS compatible Amazon S3
* bucket.
*
* DNS compatible bucket names can be used as a subdomain in a URL (e.g.,
* "<bucket>.s3.amazonaws.com").
*
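 * A few illustrative checks:
 *
 *     S3Client::isBucketDnsCompatible('my-logs-2017'); // true
 *     S3Client::isBucketDnsCompatible('My_Bucket');    // false (uppercase, underscore)
 *     S3Client::isBucketDnsCompatible('192.168.5.4');  // false (looks like an IP address)
 *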
* @param string $bucket Bucket name to check.
*
* @return bool
*/
public static function isBucketDnsCompatible($bucket)
{
$bucketLen = strlen($bucket);
return ($bucketLen >= 3 && $bucketLen <= 63) &&
// Cannot look like an IP address
!filter_var($bucket, FILTER_VALIDATE_IP) &&
preg_match('/^[a-z0-9]([a-z0-9\-\.]*[a-z0-9])?$/', $bucket);
}
public function createPresignedRequest(CommandInterface $command, $expires)
{
$command = clone $command;
$command->getHandlerList()->remove('signer');
/** @var \Aws\Signature\SignatureInterface $signer */
$signer = call_user_func(
$this->getSignatureProvider(),
$this->getConfig('signature_version'),
$this->getConfig('signing_name'),
$this->getConfig('signing_region')
);
return $signer->presign(
\Aws\serialize($command),
$this->getCredentials()->wait(),
$expires
);
}
public function getObjectUrl($bucket, $key)
{
$command = $this->getCommand('GetObject', [
'Bucket' => $bucket,
'Key' => $key
]);
return (string) \Aws\serialize($command)->getUri();
}
/**
* Raw URL encode a key and allow for '/' characters
*
* @param string $key Key to encode
*
* @return string Returns the encoded key
*/
public static function encodeKey($key)
{
return str_replace('%2F', '/', rawurlencode($key));
}
/**
* Provides a middleware that removes the need to specify LocationConstraint on CreateBucket.
*
* @return \Closure
*/
private function getLocationConstraintMiddleware()
{
$region = $this->getRegion();
return static function (callable $handler) use ($region) {
return function (Command $command, $request = null) use ($handler, $region) {
if ($command->getName() === 'CreateBucket') {
$locationConstraint = isset($command['CreateBucketConfiguration']['LocationConstraint'])
? $command['CreateBucketConfiguration']['LocationConstraint']
: null;
if ($locationConstraint === 'us-east-1') {
unset($command['CreateBucketConfiguration']);
} elseif ('us-east-1' !== $region && empty($locationConstraint)) {
$command['CreateBucketConfiguration'] = ['LocationConstraint' => $region];
}
}
return $handler($command, $request);
};
};
}
/**
* Provides a middleware that supports the `SaveAs` parameter.
*
* @return \Closure
*/
private function getSaveAsParameter()
{
return static function (callable $handler) {
return function (Command $command, $request = null) use ($handler) {
if ($command->getName() === 'GetObject' && isset($command['SaveAs'])) {
$command['@http']['sink'] = $command['SaveAs'];
unset($command['SaveAs']);
}
return $handler($command, $request);
};
};
}
/**
* Provides a middleware that disables content decoding on HeadObject
* commands.
*
* @return \Closure
*/
private function getHeadObjectMiddleware()
{
return static function (callable $handler) {
return function (
CommandInterface $command,
RequestInterface $request = null
) use ($handler) {
if ($command->getName() === 'HeadObject'
&& !isset($command['@http']['decode_content'])
) {
$command['@http']['decode_content'] = false;
}
return $handler($command, $request);
};
};
}
/**
* Provides a middleware that autopopulates the EncodingType parameter on
* ListObjects commands.
*
* @return \Closure
*/
private function getEncodingTypeMiddleware()
{
return static function (callable $handler) {
return function (Command $command, $request = null) use ($handler) {
$autoSet = false;
if ($command->getName() === 'ListObjects'
&& empty($command['EncodingType'])
) {
$command['EncodingType'] = 'url';
$autoSet = true;
}
return $handler($command, $request)
->then(function (ResultInterface $result) use ($autoSet) {
if ($result['EncodingType'] === 'url' && $autoSet) {
static $topLevel = [
'Delimiter',
'Marker',
'NextMarker',
'Prefix',
];
static $nested = [
['Contents', 'Key'],
['CommonPrefixes', 'Prefix'],
];
foreach ($topLevel as $key) {
if (isset($result[$key])) {
$result[$key] = urldecode($result[$key]);
}
}
foreach ($nested as $steps) {
if (isset($result[$steps[0]])) {
foreach ($result[$steps[0]] as $key => $part) {
if (isset($part[$steps[1]])) {
$result[$steps[0]][$key][$steps[1]]
= urldecode($part[$steps[1]]);
}
}
}
}
}
return $result;
});
};
};
}
/** @internal */
public static function _applyRetryConfig($value, $_, HandlerList $list)
{
if (!$value) {
return;
}
$decider = RetryMiddleware::createDefaultDecider($value);
$decider = function ($retries, $command, $request, $result, $error) use ($decider, $value) {
$maxRetries = null !== $command['@retries']
? $command['@retries']
: $value;
if ($decider($retries, $command, $request, $result, $error)) {
return true;
} elseif ($error instanceof AwsException
&& $retries < $maxRetries
) {
if (
$error->getResponse()
&& $error->getResponse()->getStatusCode() >= 400
) {
return strpos(
$error->getResponse()->getBody(),
'Your socket connection to the server'
) !== false;
} elseif ($error->getPrevious() instanceof RequestException) {
// All commands except CompleteMultipartUpload are
// idempotent and may be retried without worry if a
// networking error has occurred.
return $command->getName() !== 'CompleteMultipartUpload';
}
}
return false;
};
$delay = [RetryMiddleware::class, 'exponentialDelay'];
$list->appendSign(Middleware::retry($decider, $delay), 'retry');
}
/** @internal */
public static function _applyApiProvider($value, array &$args, HandlerList $list)
{
ClientResolver::_apply_api_provider($value, $args, $list);
$args['parser'] = new GetBucketLocationParser(
new AmbiguousSuccessParser(
new RetryableMalformedResponseParser(
$args['parser'],
$args['exception_class']
),
$args['error_parser'],
$args['exception_class']
)
);
}
/**
* @internal
* @codeCoverageIgnore
*/
public static function applyDocFilters(array $api, array $docs)
{
$b64 = '<div class="alert alert-info">This value will be base64 encoded on your behalf.</div>';
$opt = '<div class="alert alert-info">This value will be computed for you if it is not supplied.</div>';
// Add the SourceFile parameter.
$docs['shapes']['SourceFile']['base'] = 'The path to a file on disk to use instead of the Body parameter.';
$api['shapes']['SourceFile'] = ['type' => 'string'];
$api['shapes']['PutObjectRequest']['members']['SourceFile'] = ['shape' => 'SourceFile'];
$api['shapes']['UploadPartRequest']['members']['SourceFile'] = ['shape' => 'SourceFile'];
// Add the ContentSHA256 parameter.
$docs['shapes']['ContentSHA256']['base'] = 'A SHA256 hash of the body content of the request.';
$api['shapes']['ContentSHA256'] = ['type' => 'string'];
$api['shapes']['PutObjectRequest']['members']['ContentSHA256'] = ['shape' => 'ContentSHA256'];
$api['shapes']['UploadPartRequest']['members']['ContentSHA256'] = ['shape' => 'ContentSHA256'];
unset($api['shapes']['PutObjectRequest']['members']['ContentMD5']);
unset($api['shapes']['UploadPartRequest']['members']['ContentMD5']);
$docs['shapes']['ContentSHA256']['append'] = $opt;
// Add the SaveAs parameter.
$docs['shapes']['SaveAs']['base'] = 'The path to a file on disk to save the object data.';
$api['shapes']['SaveAs'] = ['type' => 'string'];
$api['shapes']['GetObjectRequest']['members']['SaveAs'] = ['shape' => 'SaveAs'];
// Several SSECustomerKey documentation updates.
$docs['shapes']['SSECustomerKey']['append'] = $b64;
$docs['shapes']['CopySourceSSECustomerKey']['append'] = $b64;
$docs['shapes']['SSECustomerKeyMd5']['append'] = $opt;
// Add the ObjectURL to various output shapes and documentation.
$docs['shapes']['ObjectURL']['base'] = 'The URI of the created object.';
$api['shapes']['ObjectURL'] = ['type' => 'string'];
$api['shapes']['PutObjectOutput']['members']['ObjectURL'] = ['shape' => 'ObjectURL'];
$api['shapes']['CopyObjectOutput']['members']['ObjectURL'] = ['shape' => 'ObjectURL'];
$api['shapes']['CompleteMultipartUploadOutput']['members']['ObjectURL'] = ['shape' => 'ObjectURL'];
// Fix references to Location Constraint.
unset($api['shapes']['CreateBucketRequest']['payload']);
$api['shapes']['BucketLocationConstraint']['enum'] = [
"ap-northeast-1",
"ap-southeast-2",
"ap-southeast-1",
"cn-north-1",
"eu-central-1",
"eu-west-1",
"us-east-1",
"us-west-1",
"us-west-2",
"sa-east-1",
];
// Add a note that the ContentMD5 is optional.
$docs['shapes']['ContentMD5']['append'] = '<div class="alert alert-info">The value will be computed on '
. 'your behalf.</div>';
return [
new Service($api, ApiProvider::defaultProvider()),
new DocModel($docs)
];
}
}


@ -0,0 +1,322 @@
<?php
namespace Aws\S3;
use Aws\AwsClientInterface;
use Aws\CommandInterface;
use Aws\ResultInterface;
use GuzzleHttp\Promise\PromiseInterface;
use Psr\Http\Message\RequestInterface;
interface S3ClientInterface extends AwsClientInterface
{
/**
* Create a pre-signed URL for the given S3 command object.
*
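 * A usage sketch; the bucket, key, and expiry below are illustrative:
 *
 *     $command = $s3Client->getCommand('GetObject', [
 *         'Bucket' => 'example-bucket',
 *         'Key'    => 'example-key',
 *     ]);
 *     $request = $s3Client->createPresignedRequest($command, '+20 minutes');
 *     $presignedUrl = (string) $request->getUri();
 *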
* @param CommandInterface $command Command to create a pre-signed
* URL for.
* @param int|string|\DateTime $expires The time at which the URL should
* expire. This can be a Unix
* timestamp, a PHP DateTime object,
* or a string that can be evaluated
* by strtotime().
*
* @return RequestInterface
*/
public function createPresignedRequest(CommandInterface $command, $expires);
/**
* Returns the URL to an object identified by its bucket and key.
*
 * The URL returned by this method is not signed, nor does it ensure that the
 * bucket and key given to the method exist. If you need a signed URL, then
* use the {@see \Aws\S3\S3Client::createPresignedRequest} method and get
* the URI of the signed request.
*
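 * For example (bucket and key are illustrative; the exact host depends on the
 * client's endpoint configuration):
 *
 *     $url = $s3Client->getObjectUrl('example-bucket', 'example-key');
 *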
* @param string $bucket The name of the bucket where the object is located
* @param string $key The key of the object
*
* @return string The URL to the object
*/
public function getObjectUrl($bucket, $key);
/**
* Determines whether or not a bucket exists by name.
*
* @param string $bucket The name of the bucket
*
* @return bool
*/
public function doesBucketExist($bucket);
/**
* Determines whether or not an object exists by name.
*
* @param string $bucket The name of the bucket
* @param string $key The key of the object
* @param array $options Additional options available in the HeadObject
* operation (e.g., VersionId).
*
* @return bool
*/
public function doesObjectExist($bucket, $key, array $options = []);
/**
* Register the Amazon S3 stream wrapper with this client instance.
*/
public function registerStreamWrapper();
/**
* Deletes objects from Amazon S3 that match the result of a ListObjects
* operation. For example, this allows you to do things like delete all
* objects that match a specific key prefix.
*
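 * For example (bucket and prefix are illustrative):
 *
 *     // Delete every object whose key starts with "logs/2016/"
 *     $s3Client->deleteMatchingObjects('example-bucket', 'logs/2016/');
 *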
* @param string $bucket Bucket that contains the object keys
* @param string $prefix Optionally delete only objects under this key prefix
* @param string $regex Delete only objects that match this regex
* @param array $options Aws\S3\BatchDelete options array.
*
* @see Aws\S3\S3Client::listObjects
* @throws \RuntimeException if no prefix and no regex is given
*/
public function deleteMatchingObjects(
$bucket,
$prefix = '',
$regex = '',
array $options = []
);
/**
* Deletes objects from Amazon S3 that match the result of a ListObjects
* operation. For example, this allows you to do things like delete all
* objects that match a specific key prefix.
*
* @param string $bucket Bucket that contains the object keys
* @param string $prefix Optionally delete only objects under this key prefix
* @param string $regex Delete only objects that match this regex
* @param array $options Aws\S3\BatchDelete options array.
*
* @see Aws\S3\S3Client::listObjects
*
* @return PromiseInterface A promise that is settled when matching
* objects are deleted.
*/
public function deleteMatchingObjectsAsync(
$bucket,
$prefix = '',
$regex = '',
array $options = []
);
/**
* Upload a file, stream, or string to a bucket.
*
* If the upload size exceeds the specified threshold, the upload will be
* performed using concurrent multipart uploads.
*
* The options array accepts the following options:
*
* - before_upload: (callable) Callback to invoke before any upload
* operations during the upload process. The callback should have a
* function signature like `function (Aws\Command $command) {...}`.
* - concurrency: (int, default=int(3)) Maximum number of concurrent
* `UploadPart` operations allowed during a multipart upload.
* - mup_threshold: (int, default=int(16777216)) The size, in bytes, allowed
* before the upload must be sent via a multipart upload. Default: 16 MB.
* - params: (array, default=array([])) Custom parameters to use with the
* upload. For single uploads, they must correspond to those used for the
* `PutObject` operation. For multipart uploads, they correspond to the
* parameters of the `CreateMultipartUpload` operation.
* - part_size: (int) Part size to use when doing a multipart upload.
*
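 * A usage sketch; the names and threshold value below are illustrative:
 *
 *     $result = $s3Client->upload(
 *         'example-bucket',
 *         'backups/archive.zip',
 *         fopen('/tmp/archive.zip', 'r'),
 *         'private',
 *         ['mup_threshold' => 32 * 1024 * 1024]
 *     );
 *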
* @param string $bucket Bucket to upload the object.
* @param string $key Key of the object.
* @param mixed $body Object data to upload. Can be a
* StreamInterface, PHP stream resource, or a
* string of data to upload.
* @param string $acl ACL to apply to the object (default: private).
* @param array $options Options used to configure the upload process.
*
* @see Aws\S3\MultipartUploader for more info about multipart uploads.
* @return ResultInterface Returns the result of the upload.
*/
public function upload(
$bucket,
$key,
$body,
$acl = 'private',
array $options = []
);
/**
* Upload a file, stream, or string to a bucket asynchronously.
*
* @param string $bucket Bucket to upload the object.
* @param string $key Key of the object.
* @param mixed $body Object data to upload. Can be a
* StreamInterface, PHP stream resource, or a
* string of data to upload.
* @param string $acl ACL to apply to the object (default: private).
* @param array $options Options used to configure the upload process.
*
* @see self::upload
* @return PromiseInterface Returns a promise that will be fulfilled
* with the result of the upload.
*/
public function uploadAsync(
$bucket,
$key,
$body,
$acl = 'private',
array $options = []
);
/**
* Copy an object of any size to a different location.
*
* If the upload size exceeds the maximum allowable size for direct S3
* copying, a multipart copy will be used.
*
* The options array accepts the following options:
*
* - before_upload: (callable) Callback to invoke before any upload
* operations during the upload process. The callback should have a
* function signature like `function (Aws\Command $command) {...}`.
* - concurrency: (int, default=int(5)) Maximum number of concurrent
* `UploadPart` operations allowed during a multipart upload.
* - params: (array, default=array([])) Custom parameters to use with the
* upload. For single uploads, they must correspond to those used for the
* `CopyObject` operation. For multipart uploads, they correspond to the
* parameters of the `CreateMultipartUpload` operation.
* - part_size: (int) Part size to use when doing a multipart upload.
*
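 * A usage sketch; the bucket and key names below are illustrative:
 *
 *     $result = $s3Client->copy(
 *         'source-bucket',
 *         'path/to/source-key',
 *         'destination-bucket',
 *         'path/to/destination-key'
 *     );
 *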
* @param string $fromBucket Bucket where the copy source resides.
* @param string $fromKey Key of the copy source.
* @param string $destBucket Bucket to which to copy the object.
* @param string $destKey Key to which to copy the object.
* @param string $acl ACL to apply to the copy (default: private).
* @param array $options Options used to configure the upload process.
*
* @see Aws\S3\MultipartCopy for more info about multipart uploads.
* @return ResultInterface Returns the result of the copy.
*/
public function copy(
$fromBucket,
$fromKey,
$destBucket,
$destKey,
$acl = 'private',
array $options = []
);
/**
* Copy an object of any size to a different location asynchronously.
*
* @param string $fromBucket Bucket where the copy source resides.
* @param string $fromKey Key of the copy source.
* @param string $destBucket Bucket to which to copy the object.
* @param string $destKey Key to which to copy the object.
* @param string $acl ACL to apply to the copy (default: private).
* @param array $options Options used to configure the upload process.
*
* @see self::copy for more info about the parameters above.
* @return PromiseInterface Returns a promise that will be fulfilled
* with the result of the copy.
*/
public function copyAsync(
$fromBucket,
$fromKey,
$destBucket,
$destKey,
$acl = 'private',
array $options = []
);
/**
* Recursively uploads all files in a given directory to a given bucket.
*
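 * For example (paths and prefix are illustrative):
 *
 *     $s3Client->uploadDirectory('/var/www/static', 'example-bucket', 'assets');
 *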
* @param string $directory Full path to a directory to upload
* @param string $bucket Name of the bucket
* @param string $keyPrefix Virtual directory key prefix to add to each upload
* @param array $options Options available in Aws\S3\Transfer::__construct
*
* @see Aws\S3\Transfer for more options and customization
*/
public function uploadDirectory(
$directory,
$bucket,
$keyPrefix = null,
array $options = []
);
/**
* Recursively uploads all files in a given directory to a given bucket.
*
* @param string $directory Full path to a directory to upload
* @param string $bucket Name of the bucket
* @param string $keyPrefix Virtual directory key prefix to add to each upload
* @param array $options Options available in Aws\S3\Transfer::__construct
*
* @see Aws\S3\Transfer for more options and customization
*
* @return PromiseInterface A promise that is settled when the upload is
* complete.
*/
public function uploadDirectoryAsync(
$directory,
$bucket,
$keyPrefix = null,
array $options = []
);
/**
* Downloads a bucket to the local filesystem
*
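 * For example (paths and prefix are illustrative):
 *
 *     $s3Client->downloadBucket('/tmp/bucket-copy', 'example-bucket', 'logs/');
 *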
* @param string $directory Directory to download to
* @param string $bucket Bucket to download from
* @param string $keyPrefix Only download objects that use this key prefix
* @param array $options Options available in Aws\S3\Transfer::__construct
*/
public function downloadBucket(
$directory,
$bucket,
$keyPrefix = '',
array $options = []
);
/**
* Downloads a bucket to the local filesystem
*
* @param string $directory Directory to download to
* @param string $bucket Bucket to download from
* @param string $keyPrefix Only download objects that use this key prefix
* @param array $options Options available in Aws\S3\Transfer::__construct
*
* @return PromiseInterface A promise that is settled when the download is
* complete.
*/
public function downloadBucketAsync(
$directory,
$bucket,
$keyPrefix = '',
array $options = []
);
/**
* Returns the region in which a given bucket is located.
*
* @param string $bucketName
*
* @return string
*/
public function determineBucketRegion($bucketName);
/**
* Returns a promise fulfilled with the region in which a given bucket is
* located.
*
* @param string $bucketName
*
* @return PromiseInterface
*/
public function determineBucketRegionAsync($bucketName);
}


@ -0,0 +1,322 @@
<?php
namespace Aws\S3;
use Aws\Api\Parser\PayloadParserTrait;
use Aws\CommandInterface;
use Aws\Exception\AwsException;
use Aws\HandlerList;
use Aws\ResultInterface;
use Aws\S3\Exception\S3Exception;
use GuzzleHttp\Promise\PromiseInterface;
use GuzzleHttp\Promise\RejectedPromise;
/**
* A trait providing S3-specific functionality. This is meant to be used in
* classes implementing \Aws\S3\S3ClientInterface
*/
trait S3ClientTrait
{
use PayloadParserTrait;
/**
* @see S3ClientInterface::upload()
*/
public function upload(
$bucket,
$key,
$body,
$acl = 'private',
array $options = []
) {
return $this
->uploadAsync($bucket, $key, $body, $acl, $options)
->wait();
}
/**
* @see S3ClientInterface::uploadAsync()
*/
public function uploadAsync(
$bucket,
$key,
$body,
$acl = 'private',
array $options = []
) {
return (new ObjectUploader($this, $bucket, $key, $body, $acl, $options))
->promise();
}
/**
* @see S3ClientInterface::copy()
*/
public function copy(
$fromB,
$fromK,
$destB,
$destK,
$acl = 'private',
array $opts = []
) {
return $this->copyAsync($fromB, $fromK, $destB, $destK, $acl, $opts)
->wait();
}
/**
* @see S3ClientInterface::copyAsync()
*/
public function copyAsync(
$fromB,
$fromK,
$destB,
$destK,
$acl = 'private',
array $opts = []
) {
$source = [
'Bucket' => $fromB,
'Key' => $fromK,
];
if (isset($opts['version_id'])) {
$source['VersionId'] = $opts['version_id'];
}
$destination = [
'Bucket' => $destB,
'Key' => $destK
];
return (new ObjectCopier($this, $source, $destination, $acl, $opts))
->promise();
}
/**
* @see S3ClientInterface::registerStreamWrapper()
*/
public function registerStreamWrapper()
{
StreamWrapper::register($this);
}
/**
* @see S3ClientInterface::deleteMatchingObjects()
*/
public function deleteMatchingObjects(
$bucket,
$prefix = '',
$regex = '',
array $options = []
) {
$this->deleteMatchingObjectsAsync($bucket, $prefix, $regex, $options)
->wait();
}
/**
* @see S3ClientInterface::deleteMatchingObjectsAsync()
*/
public function deleteMatchingObjectsAsync(
$bucket,
$prefix = '',
$regex = '',
array $options = []
) {
if (!$prefix && !$regex) {
return new RejectedPromise(
new \RuntimeException('A prefix or regex is required.')
);
}
$params = ['Bucket' => $bucket, 'Prefix' => $prefix];
$iter = $this->getIterator('ListObjects', $params);
if ($regex) {
$iter = \Aws\filter($iter, function ($c) use ($regex) {
return preg_match($regex, $c['Key']);
});
}
return BatchDelete::fromIterator($this, $bucket, $iter, $options)
->promise();
}
/**
* @see S3ClientInterface::uploadDirectory()
*/
public function uploadDirectory(
$directory,
$bucket,
$keyPrefix = null,
array $options = []
) {
$this->uploadDirectoryAsync($directory, $bucket, $keyPrefix, $options)
->wait();
}
/**
* @see S3ClientInterface::uploadDirectoryAsync()
*/
public function uploadDirectoryAsync(
$directory,
$bucket,
$keyPrefix = null,
array $options = []
) {
$d = "s3://$bucket" . ($keyPrefix ? '/' . ltrim($keyPrefix, '/') : '');
return (new Transfer($this, $directory, $d, $options))->promise();
}
/**
* @see S3ClientInterface::downloadBucket()
*/
public function downloadBucket(
$directory,
$bucket,
$keyPrefix = '',
array $options = []
) {
$this->downloadBucketAsync($directory, $bucket, $keyPrefix, $options)
->wait();
}
/**
* @see S3ClientInterface::downloadBucketAsync()
*/
public function downloadBucketAsync(
$directory,
$bucket,
$keyPrefix = '',
array $options = []
) {
$s = "s3://$bucket" . ($keyPrefix ? '/' . ltrim($keyPrefix, '/') : '');
return (new Transfer($this, $s, $directory, $options))->promise();
}
/**
* @see S3ClientInterface::determineBucketRegion()
*/
public function determineBucketRegion($bucketName)
{
return $this->determineBucketRegionAsync($bucketName)->wait();
}
/**
* @see S3ClientInterface::determineBucketRegionAsync()
*
* @param string $bucketName
*
* @return PromiseInterface
*/
public function determineBucketRegionAsync($bucketName)
{
$command = $this->getCommand('HeadBucket', ['Bucket' => $bucketName]);
$handlerList = clone $this->getHandlerList();
$handlerList->remove('s3.permanent_redirect');
$handlerList->remove('signer');
$handler = $handlerList->resolve();
return $handler($command)
->then(static function (ResultInterface $result) {
return $result['@metadata']['headers']['x-amz-bucket-region'];
}, function (AwsException $e) {
$response = $e->getResponse();
if ($response === null) {
throw $e;
}
if ($e->getAwsErrorCode() === 'AuthorizationHeaderMalformed') {
$region = $this->determineBucketRegionFromExceptionBody(
$response->getBody()
);
if (!empty($region)) {
return $region;
}
throw $e;
}
return $response->getHeaderLine('x-amz-bucket-region');
});
}
private function determineBucketRegionFromExceptionBody($responseBody)
{
try {
$element = $this->parseXml($responseBody);
if (!empty($element->Region)) {
return (string)$element->Region;
}
} catch (\Exception $e) {
// Fallthrough on exceptions from parsing
}
return false;
}
/**
* @see S3ClientInterface::doesBucketExist()
*/
public function doesBucketExist($bucket)
{
return $this->checkExistenceWithCommand(
$this->getCommand('HeadBucket', ['Bucket' => $bucket])
);
}
/**
* @see S3ClientInterface::doesObjectExist()
*/
public function doesObjectExist($bucket, $key, array $options = [])
{
return $this->checkExistenceWithCommand(
$this->getCommand('HeadObject', [
'Bucket' => $bucket,
'Key' => $key
] + $options)
);
}
/**
* Determines whether or not a resource exists using a command
*
* @param CommandInterface $command Command used to poll for the resource
*
* @return bool
* @throws S3Exception|\Exception if there is an unhandled exception
*/
private function checkExistenceWithCommand(CommandInterface $command)
{
try {
$this->execute($command);
return true;
} catch (S3Exception $e) {
if ($e->getAwsErrorCode() == 'AccessDenied') {
return true;
}
if ($e->getStatusCode() >= 500) {
throw $e;
}
return false;
}
}
/**
* @see S3ClientInterface::execute()
*/
abstract public function execute(CommandInterface $command);
/**
* @see S3ClientInterface::getCommand()
*/
abstract public function getCommand($name, array $args = []);
/**
* @see S3ClientInterface::getHandlerList()
*
* @return HandlerList
*/
abstract public function getHandlerList();
/**
* @see S3ClientInterface::getIterator()
*
* @return \Iterator
*/
abstract public function getIterator($name, array $args = []);
}


@ -0,0 +1,229 @@
<?php
namespace Aws\S3;
use Aws\CommandInterface;
use Aws\S3\Exception\S3Exception;
use Psr\Http\Message\RequestInterface;
/**
 * Updates the URL used for S3 requests to support S3 Accelerate,
 * S3 Dual Stack, or both. It builds host-style URLs unless path-style
 * is explicitly requested, including for S3 Dual Stack.
*
* IMPORTANT: this middleware must be added after the "build" step.
*
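 * For a bucket named "example-bucket" in us-west-2 (illustrative values),
 * the rewritten hosts look like:
 *
 *     dual stack:             example-bucket.s3.dualstack.us-west-2.amazonaws.com
 *     accelerate:             example-bucket.s3-accelerate.amazonaws.com
 *     accelerate + dualstack: example-bucket.s3-accelerate.dualstack.amazonaws.com
 *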
* @internal
*/
class S3EndpointMiddleware
{
private static $exclusions = [
'CreateBucket' => true,
'DeleteBucket' => true,
'ListBuckets' => true,
];
const NO_PATTERN = 0;
const DUALSTACK = 1;
const ACCELERATE = 2;
const ACCELERATE_DUALSTACK = 3;
const PATH_STYLE = 4;
const HOST_STYLE = 5;
/** @var bool */
private $accelerateByDefault;
/** @var bool */
private $dualStackByDefault;
/** @var bool */
private $pathStyleByDefault;
/** @var string */
private $region;
/** @var callable */
private $nextHandler;
/**
* Create a middleware wrapper function
*
* @param string $region
* @param array $options
*
* @return callable
*/
public static function wrap($region, array $options)
{
return function (callable $handler) use ($region, $options) {
return new self($handler, $region, $options);
};
}
public function __construct(
callable $nextHandler,
$region,
array $options
) {
$this->pathStyleByDefault = isset($options['path_style'])
? (bool) $options['path_style'] : false;
$this->dualStackByDefault = isset($options['dual_stack'])
? (bool) $options['dual_stack'] : false;
$this->accelerateByDefault = isset($options['accelerate'])
? (bool) $options['accelerate'] : false;
$this->region = (string) $region;
$this->nextHandler = $nextHandler;
}
public function __invoke(CommandInterface $command, RequestInterface $request)
{
switch ($this->endpointPatternDecider($command, $request)) {
case self::HOST_STYLE:
$request = $this->applyHostStyleEndpoint($command, $request);
break;
case self::NO_PATTERN:
case self::PATH_STYLE:
break;
case self::DUALSTACK:
$request = $this->applyDualStackEndpoint($command, $request);
break;
case self::ACCELERATE:
$request = $this->applyAccelerateEndpoint(
$command,
$request,
's3-accelerate'
);
break;
case self::ACCELERATE_DUALSTACK:
$request = $this->applyAccelerateEndpoint(
$command,
$request,
's3-accelerate.dualstack'
);
break;
}
$nextHandler = $this->nextHandler;
return $nextHandler($command, $request);
}
private static function isRequestHostStyleCompatible(
CommandInterface $command,
RequestInterface $request
) {
return S3Client::isBucketDnsCompatible($command['Bucket'])
&& (
$request->getUri()->getScheme() === 'http'
|| strpos($command['Bucket'], '.') === false
);
}
private function endpointPatternDecider(
CommandInterface $command,
RequestInterface $request
) {
$accelerate = isset($command['@use_accelerate_endpoint'])
? $command['@use_accelerate_endpoint'] : $this->accelerateByDefault;
$dualStack = isset($command['@use_dual_stack_endpoint'])
? $command['@use_dual_stack_endpoint'] : $this->dualStackByDefault;
$pathStyle = isset($command['@use_path_style_endpoint'])
? $command['@use_path_style_endpoint'] : $this->pathStyleByDefault;
if ($accelerate && $dualStack) {
// When both are requested, operations excluded from S3 Accelerate
// fall back to using only the dual-stack endpoint.
return $this->canAccelerate($command)
? self::ACCELERATE_DUALSTACK
: self::DUALSTACK;
} elseif ($accelerate && $this->canAccelerate($command)) {
return self::ACCELERATE;
} elseif ($dualStack) {
return self::DUALSTACK;
} elseif (!$pathStyle
&& self::isRequestHostStyleCompatible($command, $request)
) {
return self::HOST_STYLE;
} else {
return self::PATH_STYLE;
}
}
private function canAccelerate(CommandInterface $command)
{
return empty(self::$exclusions[$command->getName()])
&& S3Client::isBucketDnsCompatible($command['Bucket']);
}
private function getBucketStyleHost(CommandInterface $command, $host)
{
// For operations on the base host (e.g. ListBuckets)
if (!isset($command['Bucket'])) {
return $host;
}
return "{$command['Bucket']}.{$host}";
}
private function applyHostStyleEndpoint(
CommandInterface $command,
RequestInterface $request
) {
$uri = $request->getUri();
$request = $request->withUri(
$uri->withHost($this->getBucketStyleHost(
$command,
$uri->getHost()
))
->withPath($this->getBucketlessPath(
$uri->getPath(),
$command
))
);
return $request;
}
private function applyDualStackEndpoint(
CommandInterface $command,
RequestInterface $request
) {
$request = $request->withUri(
$request->getUri()
->withHost($this->getDualStackHost())
);
if (empty($command['@use_path_style_endpoint'])
&& !$this->pathStyleByDefault
&& self::isRequestHostStyleCompatible($command, $request)
) {
$request = $this->applyHostStyleEndpoint($command, $request);
}
return $request;
}
private function getDualStackHost()
{
return "s3.dualstack.{$this->region}.amazonaws.com";
}
private function applyAccelerateEndpoint(
CommandInterface $command,
RequestInterface $request,
$pattern
) {
$request = $request->withUri(
$request->getUri()
->withHost($this->getAccelerateHost($command, $pattern))
->withPath($this->getBucketlessPath(
$request->getUri()->getPath(),
$command
))
);
return $request;
}
private function getAccelerateHost(CommandInterface $command, $pattern)
{
return "{$command['Bucket']}.{$pattern}.amazonaws.com";
}
private function getBucketlessPath($path, CommandInterface $command)
{
$pattern = '/^\\/' . preg_quote($command['Bucket'], '/') . '/';
return preg_replace($pattern, '', $path) ?: '/';
}
}


@ -0,0 +1,318 @@
<?php
namespace Aws\S3;
use Aws\Api\Parser\PayloadParserTrait;
use Aws\CacheInterface;
use Aws\CommandInterface;
use Aws\LruArrayCache;
use Aws\MultiRegionClient as BaseClient;
use Aws\Exception\AwsException;
use Aws\S3\Exception\PermanentRedirectException;
use GuzzleHttp\Promise;
/**
* **Amazon Simple Storage Service** multi-region client.
*
* @method \Aws\Result abortMultipartUpload(array $args = [])
* @method \GuzzleHttp\Promise\Promise abortMultipartUploadAsync(array $args = [])
* @method \Aws\Result completeMultipartUpload(array $args = [])
* @method \GuzzleHttp\Promise\Promise completeMultipartUploadAsync(array $args = [])
* @method \Aws\Result copyObject(array $args = [])
* @method \GuzzleHttp\Promise\Promise copyObjectAsync(array $args = [])
* @method \Aws\Result createBucket(array $args = [])
* @method \GuzzleHttp\Promise\Promise createBucketAsync(array $args = [])
* @method \Aws\Result createMultipartUpload(array $args = [])
* @method \GuzzleHttp\Promise\Promise createMultipartUploadAsync(array $args = [])
* @method \Aws\Result deleteBucket(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketAsync(array $args = [])
* @method \Aws\Result deleteBucketAnalyticsConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketAnalyticsConfigurationAsync(array $args = [])
* @method \Aws\Result deleteBucketCors(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketCorsAsync(array $args = [])
* @method \Aws\Result deleteBucketEncryption(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketEncryptionAsync(array $args = [])
* @method \Aws\Result deleteBucketInventoryConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketInventoryConfigurationAsync(array $args = [])
* @method \Aws\Result deleteBucketLifecycle(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketLifecycleAsync(array $args = [])
* @method \Aws\Result deleteBucketMetricsConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketMetricsConfigurationAsync(array $args = [])
* @method \Aws\Result deleteBucketPolicy(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketPolicyAsync(array $args = [])
* @method \Aws\Result deleteBucketReplication(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketReplicationAsync(array $args = [])
* @method \Aws\Result deleteBucketTagging(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketTaggingAsync(array $args = [])
* @method \Aws\Result deleteBucketWebsite(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteBucketWebsiteAsync(array $args = [])
* @method \Aws\Result deleteObject(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteObjectAsync(array $args = [])
* @method \Aws\Result deleteObjectTagging(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteObjectTaggingAsync(array $args = [])
* @method \Aws\Result deleteObjects(array $args = [])
* @method \GuzzleHttp\Promise\Promise deleteObjectsAsync(array $args = [])
* @method \Aws\Result getBucketAccelerateConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketAccelerateConfigurationAsync(array $args = [])
* @method \Aws\Result getBucketAcl(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketAclAsync(array $args = [])
* @method \Aws\Result getBucketAnalyticsConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketAnalyticsConfigurationAsync(array $args = [])
* @method \Aws\Result getBucketCors(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketCorsAsync(array $args = [])
* @method \Aws\Result getBucketEncryption(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketEncryptionAsync(array $args = [])
* @method \Aws\Result getBucketInventoryConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketInventoryConfigurationAsync(array $args = [])
* @method \Aws\Result getBucketLifecycle(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketLifecycleAsync(array $args = [])
* @method \Aws\Result getBucketLifecycleConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketLifecycleConfigurationAsync(array $args = [])
* @method \Aws\Result getBucketLocation(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketLocationAsync(array $args = [])
* @method \Aws\Result getBucketLogging(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketLoggingAsync(array $args = [])
* @method \Aws\Result getBucketMetricsConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketMetricsConfigurationAsync(array $args = [])
* @method \Aws\Result getBucketNotification(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketNotificationAsync(array $args = [])
* @method \Aws\Result getBucketNotificationConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketNotificationConfigurationAsync(array $args = [])
* @method \Aws\Result getBucketPolicy(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketPolicyAsync(array $args = [])
* @method \Aws\Result getBucketReplication(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketReplicationAsync(array $args = [])
* @method \Aws\Result getBucketRequestPayment(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketRequestPaymentAsync(array $args = [])
* @method \Aws\Result getBucketTagging(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketTaggingAsync(array $args = [])
* @method \Aws\Result getBucketVersioning(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketVersioningAsync(array $args = [])
* @method \Aws\Result getBucketWebsite(array $args = [])
* @method \GuzzleHttp\Promise\Promise getBucketWebsiteAsync(array $args = [])
* @method \Aws\Result getObject(array $args = [])
* @method \GuzzleHttp\Promise\Promise getObjectAsync(array $args = [])
* @method \Aws\Result getObjectAcl(array $args = [])
* @method \GuzzleHttp\Promise\Promise getObjectAclAsync(array $args = [])
* @method \Aws\Result getObjectTagging(array $args = [])
* @method \GuzzleHttp\Promise\Promise getObjectTaggingAsync(array $args = [])
* @method \Aws\Result getObjectTorrent(array $args = [])
* @method \GuzzleHttp\Promise\Promise getObjectTorrentAsync(array $args = [])
* @method \Aws\Result headBucket(array $args = [])
* @method \GuzzleHttp\Promise\Promise headBucketAsync(array $args = [])
* @method \Aws\Result headObject(array $args = [])
* @method \GuzzleHttp\Promise\Promise headObjectAsync(array $args = [])
* @method \Aws\Result listBucketAnalyticsConfigurations(array $args = [])
* @method \GuzzleHttp\Promise\Promise listBucketAnalyticsConfigurationsAsync(array $args = [])
* @method \Aws\Result listBucketInventoryConfigurations(array $args = [])
* @method \GuzzleHttp\Promise\Promise listBucketInventoryConfigurationsAsync(array $args = [])
* @method \Aws\Result listBucketMetricsConfigurations(array $args = [])
* @method \GuzzleHttp\Promise\Promise listBucketMetricsConfigurationsAsync(array $args = [])
* @method \Aws\Result listBuckets(array $args = [])
* @method \GuzzleHttp\Promise\Promise listBucketsAsync(array $args = [])
* @method \Aws\Result listMultipartUploads(array $args = [])
* @method \GuzzleHttp\Promise\Promise listMultipartUploadsAsync(array $args = [])
* @method \Aws\Result listObjectVersions(array $args = [])
* @method \GuzzleHttp\Promise\Promise listObjectVersionsAsync(array $args = [])
* @method \Aws\Result listObjects(array $args = [])
* @method \GuzzleHttp\Promise\Promise listObjectsAsync(array $args = [])
* @method \Aws\Result listObjectsV2(array $args = [])
* @method \GuzzleHttp\Promise\Promise listObjectsV2Async(array $args = [])
* @method \Aws\Result listParts(array $args = [])
* @method \GuzzleHttp\Promise\Promise listPartsAsync(array $args = [])
* @method \Aws\Result putBucketAccelerateConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketAccelerateConfigurationAsync(array $args = [])
* @method \Aws\Result putBucketAcl(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketAclAsync(array $args = [])
* @method \Aws\Result putBucketAnalyticsConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketAnalyticsConfigurationAsync(array $args = [])
* @method \Aws\Result putBucketCors(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketCorsAsync(array $args = [])
* @method \Aws\Result putBucketEncryption(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketEncryptionAsync(array $args = [])
* @method \Aws\Result putBucketInventoryConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketInventoryConfigurationAsync(array $args = [])
* @method \Aws\Result putBucketLifecycle(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketLifecycleAsync(array $args = [])
* @method \Aws\Result putBucketLifecycleConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketLifecycleConfigurationAsync(array $args = [])
* @method \Aws\Result putBucketLogging(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketLoggingAsync(array $args = [])
* @method \Aws\Result putBucketMetricsConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketMetricsConfigurationAsync(array $args = [])
* @method \Aws\Result putBucketNotification(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketNotificationAsync(array $args = [])
* @method \Aws\Result putBucketNotificationConfiguration(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketNotificationConfigurationAsync(array $args = [])
* @method \Aws\Result putBucketPolicy(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketPolicyAsync(array $args = [])
* @method \Aws\Result putBucketReplication(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketReplicationAsync(array $args = [])
* @method \Aws\Result putBucketRequestPayment(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketRequestPaymentAsync(array $args = [])
* @method \Aws\Result putBucketTagging(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketTaggingAsync(array $args = [])
* @method \Aws\Result putBucketVersioning(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketVersioningAsync(array $args = [])
* @method \Aws\Result putBucketWebsite(array $args = [])
* @method \GuzzleHttp\Promise\Promise putBucketWebsiteAsync(array $args = [])
* @method \Aws\Result putObject(array $args = [])
* @method \GuzzleHttp\Promise\Promise putObjectAsync(array $args = [])
* @method \Aws\Result putObjectAcl(array $args = [])
* @method \GuzzleHttp\Promise\Promise putObjectAclAsync(array $args = [])
* @method \Aws\Result putObjectTagging(array $args = [])
* @method \GuzzleHttp\Promise\Promise putObjectTaggingAsync(array $args = [])
* @method \Aws\Result restoreObject(array $args = [])
* @method \GuzzleHttp\Promise\Promise restoreObjectAsync(array $args = [])
* @method \Aws\Result uploadPart(array $args = [])
* @method \GuzzleHttp\Promise\Promise uploadPartAsync(array $args = [])
* @method \Aws\Result uploadPartCopy(array $args = [])
* @method \GuzzleHttp\Promise\Promise uploadPartCopyAsync(array $args = [])
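 *
 * A minimal construction sketch (bucket and key names are illustrative); the
 * client resolves and caches each bucket's region on demand, so no region
 * needs to be supplied up front:
 *
 * $client = new S3MultiRegionClient(['version' => 'latest']);
 * $result = $client->getObject(['Bucket' => 'my-bucket', 'Key' => 'my-key']);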
*/
class S3MultiRegionClient extends BaseClient implements S3ClientInterface
{
use S3ClientTrait;
/** @var CacheInterface */
private $cache;
public static function getArguments()
{
$args = parent::getArguments();
$regionDef = $args['region'] + ['default' => function (array &$args) {
$availableRegions = array_keys($args['partition']['regions']);
return end($availableRegions);
}];
unset($args['region']);
return $args + [
'bucket_region_cache' => [
'type' => 'config',
'valid' => [CacheInterface::class],
'doc' => 'Cache of regions in which given buckets are located.',
'default' => function () { return new LruArrayCache; },
],
'region' => $regionDef,
];
}
public function __construct(array $args)
{
parent::__construct($args);
$this->cache = $this->getConfig('bucket_region_cache');
$this->getHandlerList()->prependInit(
$this->determineRegionMiddleware(),
'determine_region'
);
}
private function determineRegionMiddleware()
{
return function (callable $handler) {
return function (CommandInterface $command) use ($handler) {
$cacheKey = $this->getCacheKey($command['Bucket']);
if (
empty($command['@region']) &&
$region = $this->cache->get($cacheKey)
) {
$command['@region'] = $region;
}
return Promise\coroutine(function () use (
$handler,
$command,
$cacheKey
) {
try {
yield $handler($command);
} catch (PermanentRedirectException $e) {
if (empty($command['Bucket'])) {
throw $e;
}
$result = $e->getResult();
$region = null;
if (isset($result['@metadata']['headers']['x-amz-bucket-region'])) {
$region = $result['@metadata']['headers']['x-amz-bucket-region'];
$this->cache->set($cacheKey, $region);
} else {
$region = (yield $this->determineBucketRegionAsync(
$command['Bucket']
));
}
$command['@region'] = $region;
yield $handler($command);
} catch (AwsException $e) {
if ($e->getAwsErrorCode() === 'AuthorizationHeaderMalformed') {
$region = $this->determineBucketRegionFromExceptionBody(
$e->getResponse()->getBody()
);
if (!empty($region)) {
$this->cache->set($cacheKey, $region);
$command['@region'] = $region;
yield $handler($command);
} else {
throw $e;
}
} else {
throw $e;
}
}
});
};
};
}
public function createPresignedRequest(CommandInterface $command, $expires)
{
if (empty($command['Bucket'])) {
throw new \InvalidArgumentException('The S3\\MultiRegionClient'
. ' cannot create presigned requests for commands without a'
. ' specified bucket.');
}
/** @var S3ClientInterface $client */
$client = $this->getClientFromPool(
$this->determineBucketRegion($command['Bucket'])
);
return $client->createPresignedRequest(
$client->getCommand($command->getName(), $command->toArray()),
$expires
);
}
public function getObjectUrl($bucket, $key)
{
/** @var S3Client $regionalClient */
$regionalClient = $this->getClientFromPool(
$this->determineBucketRegion($bucket)
);
return $regionalClient->getObjectUrl($bucket, $key);
}
public function determineBucketRegionAsync($bucketName)
{
$cacheKey = $this->getCacheKey($bucketName);
if ($cached = $this->cache->get($cacheKey)) {
return Promise\promise_for($cached);
}
/** @var S3ClientInterface $regionalClient */
$regionalClient = $this->getClientFromPool();
return $regionalClient->determineBucketRegionAsync($bucketName)
->then(
function ($region) use ($cacheKey) {
$this->cache->set($cacheKey, $region);
return $region;
}
);
}
private function getCacheKey($bucketName)
{
return "aws:s3:{$bucketName}:location";
}
}

133
aws/Aws/S3/S3UriParser.php Normal file
View file

@ -0,0 +1,133 @@
<?php
namespace Aws\S3;
use GuzzleHttp\Psr7;
use Psr\Http\Message\UriInterface;
/**
 * Extracts a region, bucket, and key, and determines if a URI uses path-style addressing.
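 *
 * A usage sketch (the endpoint, bucket, and key are illustrative):
 *
 * $parser = new S3UriParser();
 * $parts = $parser->parse('https://s3-us-west-2.amazonaws.com/my-bucket/my-key');
 * // ['path_style' => true, 'bucket' => 'my-bucket',
 * //  'key' => 'my-key', 'region' => 'us-west-2']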
*/
class S3UriParser
{
private $pattern = '/^(.+\\.)?s3[.-]([A-Za-z0-9-]+)\\./';
private $streamWrapperScheme = 's3';
private static $defaultResult = [
'path_style' => true,
'bucket' => null,
'key' => null,
'region' => null
];
/**
* Parses a URL or S3 StreamWrapper Uri (s3://) into an associative array
* of Amazon S3 data including:
*
* - bucket: The Amazon S3 bucket (null if none)
* - key: The Amazon S3 key (null if none)
* - path_style: Set to true if using path style, or false if not
 * - region: Set to a string if a non-classic endpoint is used, or null.
*
* @param string|UriInterface $uri
*
* @return array
* @throws \InvalidArgumentException
*/
public function parse($uri)
{
$url = Psr7\uri_for($uri);
if ($url->getScheme() == $this->streamWrapperScheme) {
return $this->parseStreamWrapper($url);
}
if (!$url->getHost()) {
throw new \InvalidArgumentException('No hostname found in URI: '
. $uri);
}
if (!preg_match($this->pattern, $url->getHost(), $matches)) {
return $this->parseCustomEndpoint($url);
}
// Parse the URI based on the matched format (path / virtual)
$result = empty($matches[1])
? $this->parsePathStyle($url)
: $this->parseVirtualHosted($url, $matches);
// Add the region if one was found and not the classic endpoint
$result['region'] = $matches[2] == 'amazonaws' ? null : $matches[2];
return $result;
}
private function parseStreamWrapper(UriInterface $url)
{
$result = self::$defaultResult;
$result['path_style'] = false;
$result['bucket'] = $url->getHost();
if ($url->getPath()) {
$key = ltrim($url->getPath(), '/ ');
if (!empty($key)) {
$result['key'] = $key;
}
}
return $result;
}
private function parseCustomEndpoint(UriInterface $url)
{
$result = self::$defaultResult;
$path = ltrim($url->getPath(), '/ ');
$segments = explode('/', $path, 2);
if (isset($segments[0])) {
$result['bucket'] = $segments[0];
if (isset($segments[1])) {
$result['key'] = $segments[1];
}
}
return $result;
}
private function parsePathStyle(UriInterface $url)
{
$result = self::$defaultResult;
if ($url->getPath() != '/') {
$path = ltrim($url->getPath(), '/');
if ($path) {
$pathPos = strpos($path, '/');
if ($pathPos === false) {
// https://s3.amazonaws.com/bucket
$result['bucket'] = $path;
} elseif ($pathPos == strlen($path) - 1) {
// https://s3.amazonaws.com/bucket/
$result['bucket'] = substr($path, 0, -1);
} else {
// https://s3.amazonaws.com/bucket/key
$result['bucket'] = substr($path, 0, $pathPos);
$result['key'] = substr($path, $pathPos + 1) ?: null;
}
}
}
return $result;
}
private function parseVirtualHosted(UriInterface $url, array $matches)
{
$result = self::$defaultResult;
$result['path_style'] = false;
// Remove trailing "." from the prefix to get the bucket
$result['bucket'] = substr($matches[1], 0, -1);
$path = $url->getPath();
        // Check if a key was present, and if so, remove the leading "/"
$result['key'] = !$path || $path == '/' ? null : substr($path, 1);
return $result;
}
}

75
aws/Aws/S3/SSECMiddleware.php Normal file
View file

@ -0,0 +1,75 @@
<?php
namespace Aws\S3;
use Aws\CommandInterface;
use Psr\Http\Message\RequestInterface;
/**
* Simplifies the SSE-C process by encoding and hashing the key.
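 *
 * A usage sketch (client setup and key value are illustrative; the client
 * must be configured for HTTPS): pass the raw key with the command and this
 * middleware base64-encodes it and derives the MD5 header automatically.
 *
 * $s3->putObject([
 *     'Bucket'               => 'my-bucket',
 *     'Key'                  => 'my-key',
 *     'Body'                 => 'secret data',
 *     'SSECustomerAlgorithm' => 'AES256',
 *     'SSECustomerKey'       => $rawThirtyTwoByteKey,
 * ]);
 *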
* @internal
*/
class SSECMiddleware
{
private $endpointScheme;
private $nextHandler;
/**
* Provide the URI scheme of the client sending requests.
*
* @param string $endpointScheme URI scheme (http/https).
*
* @return callable
*/
public static function wrap($endpointScheme)
{
return function (callable $handler) use ($endpointScheme) {
return new self($endpointScheme, $handler);
};
}
public function __construct($endpointScheme, callable $nextHandler)
{
$this->nextHandler = $nextHandler;
$this->endpointScheme = $endpointScheme;
}
public function __invoke(
CommandInterface $command,
RequestInterface $request = null
) {
// Allows only HTTPS connections when using SSE-C
if (($command['SSECustomerKey'] || $command['CopySourceSSECustomerKey'])
&& $this->endpointScheme !== 'https'
) {
throw new \RuntimeException('You must configure your S3 client to '
. 'use HTTPS in order to use the SSE-C features.');
}
// Prepare the normal SSE-CPK headers
if ($command['SSECustomerKey']) {
$this->prepareSseParams($command);
}
// If it's a copy operation, prepare the SSE-CPK headers for the source.
if ($command['CopySourceSSECustomerKey']) {
$this->prepareSseParams($command, 'CopySource');
}
$f = $this->nextHandler;
return $f($command, $request);
}
private function prepareSseParams(CommandInterface $command, $prefix = '')
{
// Base64 encode the provided key
$key = $command[$prefix . 'SSECustomerKey'];
$command[$prefix . 'SSECustomerKey'] = base64_encode($key);
// Base64 the provided MD5 or, generate an MD5 if not provided
if ($md5 = $command[$prefix . 'SSECustomerKeyMD5']) {
$command[$prefix . 'SSECustomerKeyMD5'] = base64_encode($md5);
} else {
$command[$prefix . 'SSECustomerKeyMD5'] = base64_encode(md5($key, true));
}
}
}

950
aws/Aws/S3/StreamWrapper.php Normal file
View file

@ -0,0 +1,950 @@
<?php
namespace Aws\S3;
use Aws\CacheInterface;
use Aws\LruArrayCache;
use Aws\Result;
use Aws\S3\Exception\S3Exception;
use GuzzleHttp\Psr7;
use GuzzleHttp\Psr7\Stream;
use GuzzleHttp\Psr7\CachingStream;
use Psr\Http\Message\StreamInterface;
/**
* Amazon S3 stream wrapper to use "s3://<bucket>/<key>" files with PHP
* streams, supporting "r", "w", "a", "x".
*
* # Opening "r" (read only) streams:
*
* Read only streams are truly streaming by default and will not allow you to
* seek. This is because data read from the stream is not kept in memory or on
* the local filesystem. You can force a "r" stream to be seekable by setting
* the "seekable" stream context option true. This will allow true streaming of
* data from Amazon S3, but will maintain a buffer of previously read bytes in
* a 'php://temp' stream to allow seeking to previously read bytes from the
* stream.
*
* You may pass any GetObject parameters as 's3' stream context options. These
* options will affect how the data is downloaded from Amazon S3.
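 *
 * For example, a seekable read stream might be opened like this (bucket and
 * key are illustrative; the wrapper is assumed to be registered already):
 *
 * $context = stream_context_create(['s3' => ['seekable' => true]]);
 * $stream = fopen('s3://my-bucket/my-key', 'r', false, $context);
 * echo fread($stream, 1024);
 * fclose($stream);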
*
* # Opening "w" and "x" (write only) streams:
*
* Because Amazon S3 requires a Content-Length header, write only streams will
* maintain a 'php://temp' stream to buffer data written to the stream until
* the stream is flushed (usually by closing the stream with fclose).
*
* You may pass any PutObject parameters as 's3' stream context options. These
* options will affect how the data is uploaded to Amazon S3.
*
 * When opening an "x" stream, the file must not already exist on Amazon S3
 * for the stream to open successfully.
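 *
 * A write sketch (bucket and key are illustrative); data is buffered locally
 * and uploaded with a single PutObject call when the stream is flushed or
 * closed:
 *
 * $stream = fopen('s3://my-bucket/new-key', 'w');
 * fwrite($stream, 'hello world');
 * fclose($stream); // triggers the PutObject upload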
*
* # Opening "a" (write only append) streams:
*
* Similar to "w" streams, opening append streams requires that the data be
* buffered in a "php://temp" stream. Append streams will attempt to download
* the contents of an object in Amazon S3, seek to the end of the object, then
* allow you to append to the contents of the object. The data will then be
* uploaded using a PutObject operation when the stream is flushed (usually
* with fclose).
*
* You may pass any GetObject and/or PutObject parameters as 's3' stream
* context options. These options will affect how the data is downloaded and
* uploaded from Amazon S3.
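 *
 * An append sketch (key is illustrative); any existing object is downloaded
 * first and the combined contents are re-uploaded when the stream is flushed:
 *
 * $stream = fopen('s3://my-bucket/log.txt', 'a');
 * fwrite($stream, "another line\n");
 * fclose($stream);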
*
* Stream context options:
*
* - "seekable": Set to true to create a seekable "r" (read only) stream by
* using a php://temp stream buffer
* - For "unlink" only: Any option that can be passed to the DeleteObject
* operation
*/
class StreamWrapper
{
/** @var resource|null Stream context (this is set by PHP) */
public $context;
/** @var StreamInterface Underlying stream resource */
private $body;
/** @var int Size of the body that is opened */
private $size;
/** @var array Hash of opened stream parameters */
private $params = [];
/** @var string Mode in which the stream was opened */
private $mode;
/** @var \Iterator Iterator used with opendir() related calls */
private $objectIterator;
/** @var string The bucket that was opened when opendir() was called */
private $openedBucket;
/** @var string The prefix of the bucket that was opened with opendir() */
private $openedBucketPrefix;
/** @var string Opened bucket path */
private $openedPath;
/** @var CacheInterface Cache for object and dir lookups */
private $cache;
/** @var string The opened protocol (e.g., "s3") */
private $protocol = 's3';
/**
* Register the 's3://' stream wrapper
*
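     * A registration sketch (client configuration, bucket, and key are
     * illustrative):
     *
     * StreamWrapper::register(new S3Client([
     *     'region'  => 'eu-west-1',
     *     'version' => 'latest',
     * ]));
     * file_put_contents('s3://my-bucket/my-key', 'hello');
     *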
* @param S3ClientInterface $client Client to use with the stream wrapper
* @param string $protocol Protocol to register as.
* @param CacheInterface $cache Default cache for the protocol.
*/
public static function register(
S3ClientInterface $client,
$protocol = 's3',
CacheInterface $cache = null
) {
if (in_array($protocol, stream_get_wrappers())) {
stream_wrapper_unregister($protocol);
}
// Set the client passed in as the default stream context client
stream_wrapper_register($protocol, get_called_class(), STREAM_IS_URL);
$default = stream_context_get_options(stream_context_get_default());
$default[$protocol]['client'] = $client;
if ($cache) {
$default[$protocol]['cache'] = $cache;
} elseif (!isset($default[$protocol]['cache'])) {
// Set a default cache adapter.
$default[$protocol]['cache'] = new LruArrayCache();
}
stream_context_set_default($default);
}
public function stream_close()
{
$this->body = $this->cache = null;
}
public function stream_open($path, $mode, $options, &$opened_path)
{
$this->initProtocol($path);
$this->params = $this->getBucketKey($path);
$this->mode = rtrim($mode, 'bt');
if ($errors = $this->validate($path, $this->mode)) {
return $this->triggerError($errors);
}
return $this->boolCall(function() use ($path) {
switch ($this->mode) {
case 'r': return $this->openReadStream($path);
case 'a': return $this->openAppendStream($path);
default: return $this->openWriteStream($path);
}
});
}
public function stream_eof()
{
return $this->body->eof();
}
public function stream_flush()
{
if ($this->mode == 'r') {
return false;
}
if ($this->body->isSeekable()) {
$this->body->seek(0);
}
$params = $this->getOptions(true);
$params['Body'] = $this->body;
// Attempt to guess the ContentType of the upload based on the
// file extension of the key
if (!isset($params['ContentType']) &&
($type = Psr7\mimetype_from_filename($params['Key']))
) {
$params['ContentType'] = $type;
}
$this->clearCacheKey("s3://{$params['Bucket']}/{$params['Key']}");
return $this->boolCall(function () use ($params) {
return (bool) $this->getClient()->putObject($params);
});
}
public function stream_read($count)
{
return $this->body->read($count);
}
public function stream_seek($offset, $whence = SEEK_SET)
{
return !$this->body->isSeekable()
? false
: $this->boolCall(function () use ($offset, $whence) {
$this->body->seek($offset, $whence);
return true;
});
}
public function stream_tell()
{
return $this->boolCall(function() { return $this->body->tell(); });
}
public function stream_write($data)
{
return $this->body->write($data);
}
public function unlink($path)
{
$this->initProtocol($path);
return $this->boolCall(function () use ($path) {
$this->clearCacheKey($path);
$this->getClient()->deleteObject($this->withPath($path));
return true;
});
}
public function stream_stat()
{
$stat = $this->getStatTemplate();
$stat[7] = $stat['size'] = $this->getSize();
$stat[2] = $stat['mode'] = $this->mode;
return $stat;
}
/**
* Provides information for is_dir, is_file, filesize, etc. Works on
* buckets, keys, and prefixes.
* @link http://www.php.net/manual/en/streamwrapper.url-stat.php
*/
public function url_stat($path, $flags)
{
$this->initProtocol($path);
// Some paths come through as S3:// for some reason.
$split = explode('://', $path);
$path = strtolower($split[0]) . '://' . $split[1];
// Check if this path is in the url_stat cache
if ($value = $this->getCacheStorage()->get($path)) {
return $value;
}
$stat = $this->createStat($path, $flags);
if (is_array($stat)) {
$this->getCacheStorage()->set($path, $stat);
}
return $stat;
}
/**
* Parse the protocol out of the given path.
*
* @param $path
*/
private function initProtocol($path)
{
$parts = explode('://', $path, 2);
$this->protocol = $parts[0] ?: 's3';
}
private function createStat($path, $flags)
{
$this->initProtocol($path);
$parts = $this->withPath($path);
if (!$parts['Key']) {
return $this->statDirectory($parts, $path, $flags);
}
return $this->boolCall(function () use ($parts, $path) {
try {
$result = $this->getClient()->headObject($parts);
if (substr($parts['Key'], -1, 1) == '/' &&
$result['ContentLength'] == 0
) {
// Return as if it is a bucket to account for console
// bucket objects (e.g., zero-byte object "foo/")
return $this->formatUrlStat($path);
} else {
// Attempt to stat and cache regular object
return $this->formatUrlStat($result->toArray());
}
} catch (S3Exception $e) {
// Maybe this isn't an actual key, but a prefix. Do a prefix
// listing of objects to determine.
$result = $this->getClient()->listObjects([
'Bucket' => $parts['Bucket'],
'Prefix' => rtrim($parts['Key'], '/') . '/',
'MaxKeys' => 1
]);
if (!$result['Contents'] && !$result['CommonPrefixes']) {
throw new \Exception("File or directory not found: $path");
}
return $this->formatUrlStat($path);
}
}, $flags);
}
private function statDirectory($parts, $path, $flags)
{
// Stat "directories": buckets, or "s3://"
if (!$parts['Bucket'] ||
$this->getClient()->doesBucketExist($parts['Bucket'])
) {
return $this->formatUrlStat($path);
}
return $this->triggerError("File or directory not found: $path", $flags);
}
/**
* Support for mkdir().
*
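     * A sketch (bucket and prefix are illustrative); a 0700-style mode results
     * in a 'public-read' ACL on the created bucket or pseudo-folder:
     *
     * mkdir('s3://my-bucket/new-prefix', 0700);
     *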
* @param string $path Directory which should be created.
     * @param int    $mode    Permissions. 700-range permissions map to the
     *                        'public-read' ACL. 600-range permissions map to
     *                        'authenticated-read'. All other permissions map
     *                        to 'private'. Expects octal form.
* @param int $options A bitwise mask of values, such as
* STREAM_MKDIR_RECURSIVE.
*
* @return bool
* @link http://www.php.net/manual/en/streamwrapper.mkdir.php
*/
public function mkdir($path, $mode, $options)
{
$this->initProtocol($path);
$params = $this->withPath($path);
$this->clearCacheKey($path);
if (!$params['Bucket']) {
return false;
}
if (!isset($params['ACL'])) {
$params['ACL'] = $this->determineAcl($mode);
}
return empty($params['Key'])
? $this->createBucket($path, $params)
: $this->createSubfolder($path, $params);
}
public function rmdir($path, $options)
{
$this->initProtocol($path);
$this->clearCacheKey($path);
$params = $this->withPath($path);
$client = $this->getClient();
if (!$params['Bucket']) {
return $this->triggerError('You must specify a bucket');
}
return $this->boolCall(function () use ($params, $path, $client) {
if (!$params['Key']) {
$client->deleteBucket(['Bucket' => $params['Bucket']]);
return true;
}
return $this->deleteSubfolder($path, $params);
});
}
/**
* Support for opendir().
*
* The opendir() method of the Amazon S3 stream wrapper supports a stream
* context option of "listFilter". listFilter must be a callable that
* accepts an associative array of object data and returns true if the
* object should be yielded when iterating the keys in a bucket.
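     *
     * A filtering sketch (bucket, prefix, and the ".log" suffix are
     * illustrative):
     *
     * $context = stream_context_create(['s3' => [
     *     'listFilter' => function (array $object) {
     *         // Keep common prefixes ("directories") and only ".log" keys.
     *         return !isset($object['Key'])
     *             || substr($object['Key'], -4) === '.log';
     *     }
     * ]]);
     * $dh = opendir('s3://my-bucket/my-prefix', $context);
     * while (($entry = readdir($dh)) !== false) {
     *     echo $entry, PHP_EOL;
     * }
     * closedir($dh);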
*
* @param string $path The path to the directory
* (e.g. "s3://dir[</prefix>]")
* @param string $options Unused option variable
*
* @return bool true on success
* @see http://www.php.net/manual/en/function.opendir.php
*/
public function dir_opendir($path, $options)
{
$this->initProtocol($path);
$this->openedPath = $path;
$params = $this->withPath($path);
$delimiter = $this->getOption('delimiter');
/** @var callable $filterFn */
$filterFn = $this->getOption('listFilter');
$op = ['Bucket' => $params['Bucket']];
$this->openedBucket = $params['Bucket'];
if ($delimiter === null) {
$delimiter = '/';
}
if ($delimiter) {
$op['Delimiter'] = $delimiter;
}
if ($params['Key']) {
$params['Key'] = rtrim($params['Key'], $delimiter) . $delimiter;
$op['Prefix'] = $params['Key'];
}
$this->openedBucketPrefix = $params['Key'];
        // Filter out "/" keys added by the console as directories, and ensure
// that if a filter function is provided that it passes the filter.
$this->objectIterator = \Aws\flatmap(
$this->getClient()->getPaginator('ListObjects', $op),
function (Result $result) use ($filterFn) {
$contentsAndPrefixes = $result->search('[Contents[], CommonPrefixes[]][]');
// Filter out dir place holder keys and use the filter fn.
return array_filter(
$contentsAndPrefixes,
function ($key) use ($filterFn) {
return (!$filterFn || call_user_func($filterFn, $key))
&& (!isset($key['Key']) || substr($key['Key'], -1, 1) !== '/');
}
);
}
);
return true;
}
/**
* Close the directory listing handles
*
* @return bool true on success
*/
public function dir_closedir()
{
$this->objectIterator = null;
gc_collect_cycles();
return true;
}
/**
* This method is called in response to rewinddir()
*
* @return boolean true on success
*/
public function dir_rewinddir()
{
        return $this->boolCall(function() {
$this->objectIterator = null;
$this->dir_opendir($this->openedPath, null);
return true;
});
}
/**
* This method is called in response to readdir()
*
* @return string Should return a string representing the next filename, or
* false if there is no next file.
* @link http://www.php.net/manual/en/function.readdir.php
*/
public function dir_readdir()
{
// Skip empty result keys
if (!$this->objectIterator->valid()) {
return false;
}
        // First we need to create a cache key. This key is the full path to
        // the object in s3: protocol://bucket/key.
// Next we need to create a result value. The result value is the
// current value of the iterator without the opened bucket prefix to
// emulate how readdir() works on directories.
// The cache key and result value will depend on if this is a prefix
// or a key.
$cur = $this->objectIterator->current();
if (isset($cur['Prefix'])) {
// Include "directories". Be sure to strip a trailing "/"
// on prefixes.
$result = rtrim($cur['Prefix'], '/');
$key = $this->formatKey($result);
$stat = $this->formatUrlStat($key);
} else {
$result = $cur['Key'];
$key = $this->formatKey($cur['Key']);
$stat = $this->formatUrlStat($cur);
}
// Cache the object data for quick url_stat lookups used with
// RecursiveDirectoryIterator.
$this->getCacheStorage()->set($key, $stat);
$this->objectIterator->next();
// Remove the prefix from the result to emulate other stream wrappers.
return $this->openedBucketPrefix
? substr($result, strlen($this->openedBucketPrefix))
: $result;
}
private function formatKey($key)
{
$protocol = explode('://', $this->openedPath)[0];
return "{$protocol}://{$this->openedBucket}/{$key}";
}
/**
* Called in response to rename() to rename a file or directory. Currently
* only supports renaming objects.
*
* @param string $path_from the path to the file to rename
* @param string $path_to the new path to the file
*
* @return bool true if file was successfully renamed
* @link http://www.php.net/manual/en/function.rename.php
*/
public function rename($path_from, $path_to)
{
// PHP will not allow rename across wrapper types, so we can safely
// assume $path_from and $path_to have the same protocol
$this->initProtocol($path_from);
$partsFrom = $this->withPath($path_from);
$partsTo = $this->withPath($path_to);
$this->clearCacheKey($path_from);
$this->clearCacheKey($path_to);
if (!$partsFrom['Key'] || !$partsTo['Key']) {
return $this->triggerError('The Amazon S3 stream wrapper only '
. 'supports copying objects');
}
return $this->boolCall(function () use ($partsFrom, $partsTo) {
$options = $this->getOptions(true);
// Copy the object and allow overriding default parameters if
// desired, but by default copy metadata
$this->getClient()->copy(
$partsFrom['Bucket'],
$partsFrom['Key'],
$partsTo['Bucket'],
$partsTo['Key'],
isset($options['acl']) ? $options['acl'] : 'private',
$options
);
// Delete the original object
$this->getClient()->deleteObject([
'Bucket' => $partsFrom['Bucket'],
'Key' => $partsFrom['Key']
] + $options);
return true;
});
}
public function stream_cast($cast_as)
{
return false;
}
/**
* Validates the provided stream arguments for fopen and returns an array
* of errors.
*/
private function validate($path, $mode)
{
$errors = [];
if (!$this->getOption('Key')) {
$errors[] = 'Cannot open a bucket. You must specify a path in the '
. 'form of s3://bucket/key';
}
if (!in_array($mode, ['r', 'w', 'a', 'x'])) {
$errors[] = "Mode not supported: {$mode}. "
. "Use one 'r', 'w', 'a', or 'x'.";
}
// When using mode "x" validate if the file exists before attempting
// to read
if ($mode == 'x' &&
$this->getClient()->doesObjectExist(
$this->getOption('Bucket'),
$this->getOption('Key'),
$this->getOptions(true)
)
) {
$errors[] = "{$path} already exists on Amazon S3";
}
return $errors;
}
/**
* Get the stream context options available to the current stream
*
* @param bool $removeContextData Set to true to remove contextual kvp's
* like 'client' from the result.
*
* @return array
*/
private function getOptions($removeContextData = false)
{
// Context is not set when doing things like stat
if ($this->context === null) {
$options = [];
} else {
$options = stream_context_get_options($this->context);
$options = isset($options[$this->protocol])
? $options[$this->protocol]
: [];
}
$default = stream_context_get_options(stream_context_get_default());
$default = isset($default[$this->protocol])
? $default[$this->protocol]
: [];
$result = $this->params + $options + $default;
if ($removeContextData) {
unset($result['client'], $result['seekable'], $result['cache']);
}
return $result;
}
/**
* Get a specific stream context option
*
* @param string $name Name of the option to retrieve
*
* @return mixed|null
*/
private function getOption($name)
{
$options = $this->getOptions();
return isset($options[$name]) ? $options[$name] : null;
}
/**
* Gets the client from the stream context
*
* @return S3ClientInterface
* @throws \RuntimeException if no client has been configured
*/
private function getClient()
{
if (!$client = $this->getOption('client')) {
throw new \RuntimeException('No client in stream context');
}
return $client;
}
private function getBucketKey($path)
{
// Remove the protocol
$parts = explode('://', $path);
// Get the bucket, key
$parts = explode('/', $parts[1], 2);
return [
'Bucket' => $parts[0],
'Key' => isset($parts[1]) ? $parts[1] : null
];
}
/**
* Get the bucket and key from the passed path (e.g. s3://bucket/key)
*
* @param string $path Path passed to the stream wrapper
*
* @return array Hash of 'Bucket', 'Key', and custom params from the context
*/
private function withPath($path)
{
$params = $this->getOptions(true);
return $this->getBucketKey($path) + $params;
}
private function openReadStream()
{
$client = $this->getClient();
$command = $client->getCommand('GetObject', $this->getOptions(true));
$command['@http']['stream'] = true;
$result = $client->execute($command);
$this->size = $result['ContentLength'];
$this->body = $result['Body'];
// Wrap the body in a caching entity body if seeking is allowed
if ($this->getOption('seekable') && !$this->body->isSeekable()) {
$this->body = new CachingStream($this->body);
}
return true;
}
private function openWriteStream()
{
$this->body = new Stream(fopen('php://temp', 'r+'));
return true;
}
private function openAppendStream()
{
try {
// Get the body of the object and seek to the end of the stream
$client = $this->getClient();
$this->body = $client->getObject($this->getOptions(true))['Body'];
$this->body->seek(0, SEEK_END);
return true;
} catch (S3Exception $e) {
// The object does not exist, so use a simple write stream
return $this->openWriteStream();
}
}
/**
* Trigger one or more errors
*
* @param string|array $errors Errors to trigger
* @param mixed $flags If set to STREAM_URL_STAT_QUIET, then no
* error or exception occurs
*
* @return bool Returns false
* @throws \RuntimeException if throw_errors is true
*/
private function triggerError($errors, $flags = null)
{
// This is triggered with things like file_exists()
if ($flags & STREAM_URL_STAT_QUIET) {
return $flags & STREAM_URL_STAT_LINK
// This is triggered for things like is_link()
? $this->formatUrlStat(false)
: false;
}
// This is triggered when doing things like lstat() or stat()
trigger_error(implode("\n", (array) $errors), E_USER_WARNING);
return false;
}
/**
* Prepare a url_stat result array
*
* @param string|array $result Data to add
*
* @return array Returns the modified url_stat result
*/
private function formatUrlStat($result = null)
{
$stat = $this->getStatTemplate();
switch (gettype($result)) {
case 'NULL':
case 'string':
// Directory with 0777 access - see "man 2 stat".
$stat['mode'] = $stat[2] = 0040777;
break;
case 'array':
// Regular file with 0777 access - see "man 2 stat".
$stat['mode'] = $stat[2] = 0100777;
// Pluck the content-length if available.
if (isset($result['ContentLength'])) {
$stat['size'] = $stat[7] = $result['ContentLength'];
} elseif (isset($result['Size'])) {
$stat['size'] = $stat[7] = $result['Size'];
}
if (isset($result['LastModified'])) {
// ListObjects or HeadObject result
$stat['mtime'] = $stat[9] = $stat['ctime'] = $stat[10]
= strtotime($result['LastModified']);
}
}
return $stat;
}
/**
* Creates a bucket for the given parameters.
*
* @param string $path Stream wrapper path
* @param array $params A result of StreamWrapper::withPath()
*
* @return bool Returns true on success or false on failure
*/
private function createBucket($path, array $params)
{
if ($this->getClient()->doesBucketExist($params['Bucket'])) {
return $this->triggerError("Bucket already exists: {$path}");
}
return $this->boolCall(function () use ($params, $path) {
$this->getClient()->createBucket($params);
$this->clearCacheKey($path);
return true;
});
}
/**
* Creates a pseudo-folder by creating an empty "/" suffixed key
*
* @param string $path Stream wrapper path
* @param array $params A result of StreamWrapper::withPath()
*
* @return bool
*/
private function createSubfolder($path, array $params)
{
// Ensure the path ends in "/" and the body is empty.
$params['Key'] = rtrim($params['Key'], '/') . '/';
$params['Body'] = '';
// Fail if this pseudo directory key already exists
if ($this->getClient()->doesObjectExist(
$params['Bucket'],
$params['Key'])
) {
return $this->triggerError("Subfolder already exists: {$path}");
}
return $this->boolCall(function () use ($params, $path) {
$this->getClient()->putObject($params);
$this->clearCacheKey($path);
return true;
});
}
/**
* Deletes a nested subfolder if it is empty.
*
* @param string $path Path that is being deleted (e.g., 's3://a/b/c')
* @param array $params A result of StreamWrapper::withPath()
*
* @return bool
*/
private function deleteSubfolder($path, $params)
{
// Use a key that adds a trailing slash if needed.
$prefix = rtrim($params['Key'], '/') . '/';
$result = $this->getClient()->listObjects([
'Bucket' => $params['Bucket'],
'Prefix' => $prefix,
'MaxKeys' => 1
]);
// Check if the bucket contains keys other than the placeholder
if ($contents = $result['Contents']) {
return (count($contents) > 1 || $contents[0]['Key'] != $prefix)
? $this->triggerError('Subfolder is not empty')
: $this->unlink(rtrim($path, '/') . '/');
}
return $result['CommonPrefixes']
? $this->triggerError('Subfolder contains nested folders')
: true;
}
/**
* Determine the most appropriate ACL based on a file mode.
*
* @param int $mode File mode
*
* @return string
*/
private function determineAcl($mode)
{
switch (substr(decoct($mode), 0, 1)) {
case '7': return 'public-read';
case '6': return 'authenticated-read';
default: return 'private';
}
}
/**
* Gets a URL stat template with default values
*
* @return array
*/
private function getStatTemplate()
{
return [
0 => 0, 'dev' => 0,
1 => 0, 'ino' => 0,
2 => 0, 'mode' => 0,
3 => 0, 'nlink' => 0,
4 => 0, 'uid' => 0,
5 => 0, 'gid' => 0,
6 => -1, 'rdev' => -1,
7 => 0, 'size' => 0,
8 => 0, 'atime' => 0,
9 => 0, 'mtime' => 0,
10 => 0, 'ctime' => 0,
11 => -1, 'blksize' => -1,
12 => -1, 'blocks' => -1,
];
}
/**
* Invokes a callable and triggers an error if an exception occurs while
* calling the function.
*
* @param callable $fn
* @param int $flags
*
* @return bool
*/
private function boolCall(callable $fn, $flags = null)
{
try {
return $fn();
} catch (\Exception $e) {
return $this->triggerError($e->getMessage(), $flags);
}
}
/**
* @return LruArrayCache
*/
private function getCacheStorage()
{
if (!$this->cache) {
$this->cache = $this->getOption('cache') ?: new LruArrayCache();
}
return $this->cache;
}
/**
* Clears a specific stat cache value from the stat cache and LRU cache.
*
* @param string $key S3 path (s3://bucket/key).
*/
private function clearCacheKey($key)
{
clearstatcache(true, $key);
$this->getCacheStorage()->remove($key);
}
/**
* Returns the size of the opened object body.
*
* @return int|null
*/
private function getSize()
{
$size = $this->body->getSize();
return $size !== null ? $size : $this->size;
}
}

430
aws/Aws/S3/Transfer.php Normal file
View file

@ -0,0 +1,430 @@
<?php
namespace Aws\S3;
use Aws;
use Aws\CommandInterface;
use Aws\Exception\AwsException;
use GuzzleHttp\Promise;
use GuzzleHttp\Psr7;
use GuzzleHttp\Promise\PromisorInterface;
use Iterator;
/**
* Transfers files from the local filesystem to S3 or from S3 to the local
* filesystem.
*
* This class does not support copying from the local filesystem to somewhere
* else on the local filesystem or from one S3 bucket to another.
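 *
 * A minimal upload sketch ($s3Client, the local path, and the bucket are
 * illustrative); swapping the source and destination arguments downloads
 * instead:
 *
 * $transfer = new Transfer($s3Client, '/path/to/source/dir', 's3://my-bucket/prefix', [
 *     'concurrency' => 10,
 *     'debug'       => true,
 * ]);
 * $transfer->transfer();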
*/
class Transfer implements PromisorInterface
{
private $client;
private $promise;
private $source;
private $sourceMetadata;
private $destination;
private $concurrency;
private $mupThreshold;
private $before;
private $s3Args = [];
/**
* When providing the $source argument, you may provide a string referencing
* the path to a directory on disk to upload, an s3 scheme URI that contains
* the bucket and key (e.g., "s3://bucket/key"), or an \Iterator object
* that yields strings containing filenames that are the path to a file on
* disk or an s3 scheme URI. The "/key" portion of an s3 URI is optional.
*
* When providing an iterator for the $source argument, you must also
* provide a 'base_dir' key value pair in the $options argument.
*
* The $dest argument can be the path to a directory on disk or an s3
* scheme URI (e.g., "s3://bucket/key").
*
* The options array can contain the following key value pairs:
*
* - base_dir: (string) Base directory of the source, if $source is an
 *   iterator. If the $source argument is a string, then this option is
* ignored.
* - before: (callable) A callback to invoke before each transfer. The
* callback accepts the following positional arguments: string $source,
* string $dest, Aws\CommandInterface $command. The provided command will
* be either a GetObject, PutObject, InitiateMultipartUpload, or
* UploadPart command.
 * - mup_threshold: (int) Size in bytes at which a multipart upload should
 *   be used instead of PutObject. Defaults to 16777216 (16 MB).
* - concurrency: (int, default=5) Number of files to upload concurrently.
* The ideal concurrency value will vary based on the number of files
* being uploaded and the average size of each file. Generally speaking,
* smaller files benefit from a higher concurrency while larger files
* will not.
* - debug: (bool) Set to true to print out debug information for
* transfers. Set to an fopen() resource to write to a specific stream
* rather than writing to STDOUT.
*
* @param S3ClientInterface $client Client used for transfers.
* @param string|Iterator $source Where the files are transferred from.
* @param string $dest Where the files are transferred to.
* @param array $options Hash of options.
*/
public function __construct(
S3ClientInterface $client,
$source,
$dest,
array $options = []
) {
$this->client = $client;
// Prepare the destination.
$this->destination = $this->prepareTarget($dest);
if ($this->destination['scheme'] === 's3') {
$this->s3Args = $this->getS3Args($this->destination['path']);
}
// Prepare the source.
if (is_string($source)) {
$this->sourceMetadata = $this->prepareTarget($source);
$this->source = $source;
} elseif ($source instanceof Iterator) {
if (empty($options['base_dir'])) {
throw new \InvalidArgumentException('You must provide the source'
. ' argument as a string or provide the "base_dir" option.');
}
$this->sourceMetadata = $this->prepareTarget($options['base_dir']);
$this->source = $source;
} else {
throw new \InvalidArgumentException('source must be the path to a '
. 'directory or an iterator that yields file names.');
}
// Validate schemes.
if ($this->sourceMetadata['scheme'] === $this->destination['scheme']) {
throw new \InvalidArgumentException("You cannot copy from"
. " {$this->sourceMetadata['scheme']} to"
. " {$this->destination['scheme']}."
);
}
// Handle multipart-related options.
$this->concurrency = isset($options['concurrency'])
? $options['concurrency']
: MultipartUploader::DEFAULT_CONCURRENCY;
$this->mupThreshold = isset($options['mup_threshold'])
? $options['mup_threshold']
: 16777216;
if ($this->mupThreshold < MultipartUploader::PART_MIN_SIZE) {
throw new \InvalidArgumentException('mup_threshold must be >= 5MB');
}
// Handle "before" callback option.
if (isset($options['before'])) {
$this->before = $options['before'];
if (!is_callable($this->before)) {
throw new \InvalidArgumentException('before must be a callable.');
}
}
// Handle "debug" option.
if (isset($options['debug'])) {
if ($options['debug'] === true) {
$options['debug'] = fopen('php://output', 'w');
}
$this->addDebugToBefore($options['debug']);
}
}
/**
     * Transfers the files asynchronously and returns the underlying promise.
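     *
     * A composition sketch ($s3Client and the paths are illustrative):
     *
     * $promise = (new Transfer($s3Client, '/local/dir', 's3://my-bucket'))->promise();
     * $promise->then(function () { echo "Transfer complete\n"; })->wait();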
*/
public function promise()
{
// If the promise has been created, just return it.
if (!$this->promise) {
// Create an upload/download promise for the transfer.
$this->promise = $this->sourceMetadata['scheme'] === 'file'
? $this->createUploadPromise()
: $this->createDownloadPromise();
}
return $this->promise;
}
/**
* Transfers the files synchronously.
*/
public function transfer()
{
$this->promise()->wait();
}
private function prepareTarget($targetPath)
{
$target = [
'path' => $this->normalizePath($targetPath),
'scheme' => $this->determineScheme($targetPath),
];
if ($target['scheme'] !== 's3' && $target['scheme'] !== 'file') {
throw new \InvalidArgumentException('Scheme must be "s3" or "file".');
}
return $target;
}
/**
* Creates an array that contains Bucket and Key by parsing the filename.
*
* @param string $path Path to parse.
*
* @return array
*/
private function getS3Args($path)
{
$parts = explode('/', str_replace('s3://', '', $path), 2);
$args = ['Bucket' => $parts[0]];
if (isset($parts[1])) {
$args['Key'] = $parts[1];
}
return $args;
}
/**
* Parses the scheme from a filename.
*
* @param string $path Path to parse.
*
* @return string
*/
private function determineScheme($path)
{
return !strpos($path, '://') ? 'file' : explode('://', $path)[0];
}
/**
* Normalize a path so that it has UNIX-style directory separators and no trailing /
*
* @param string $path
*
* @return string
*/
private function normalizePath($path)
{
return rtrim(str_replace('\\', '/', $path), '/');
}
private function resolveUri($uri)
{
$resolved = [];
$sections = explode('/', $uri);
foreach ($sections as $section) {
if ($section === '.' || $section === '') {
continue;
}
if ($section === '..') {
array_pop($resolved);
} else {
$resolved []= $section;
}
}
return ($uri[0] === '/' ? '/' : '')
. implode('/', $resolved);
}
private function createDownloadPromise()
{
$parts = $this->getS3Args($this->sourceMetadata['path']);
$prefix = "s3://{$parts['Bucket']}/"
. (isset($parts['Key']) ? $parts['Key'] . '/' : '');
$commands = [];
foreach ($this->getDownloadsIterator() as $object) {
// Prepare the sink.
$objectKey = preg_replace('/^' . preg_quote($prefix, '/') . '/', '', $object);
$resolveSink = $this->destination['path'] . '/';
if (isset($parts['Key']) && strpos($objectKey, $parts['Key']) !== 0) {
$resolveSink .= $parts['Key'] . '/';
}
$resolveSink .= $objectKey;
$sink = $this->destination['path'] . '/' . $objectKey;
$command = $this->client->getCommand(
'GetObject',
$this->getS3Args($object) + ['@http' => ['sink' => $sink]]
);
if (strpos(
$this->resolveUri($resolveSink),
$this->destination['path']
) !== 0
) {
throw new AwsException(
'Cannot download key ' . $objectKey
. ', its relative path resolves outside the'
. ' parent directory', $command);
}
// Create the directory if needed.
$dir = dirname($sink);
if (!is_dir($dir) && !mkdir($dir, 0777, true)) {
throw new \RuntimeException("Could not create dir: {$dir}");
}
// Create the command.
$commands []= $command;
}
// Create a GetObject command pool and return the promise.
return (new Aws\CommandPool($this->client, $commands, [
'concurrency' => $this->concurrency,
'before' => $this->before,
'rejected' => function ($reason, $idx, Promise\PromiseInterface $p) {
$p->reject($reason);
}
]))->promise();
}
private function createUploadPromise()
{
// Map each file into a promise that performs the actual transfer.
$files = \Aws\map($this->getUploadsIterator(), function ($file) {
return (filesize($file) >= $this->mupThreshold)
? $this->uploadMultipart($file)
: $this->upload($file);
});
        // Create an EachPromise that will concurrently handle the upload
// operations' yielded promises from the iterator.
return Promise\each_limit_all($files, $this->concurrency);
}
/** @return Iterator */
private function getUploadsIterator()
{
if (is_string($this->source)) {
return Aws\filter(
Aws\recursive_dir_iterator($this->sourceMetadata['path']),
function ($file) { return !is_dir($file); }
);
}
return $this->source;
}
/** @return Iterator */
private function getDownloadsIterator()
{
if (is_string($this->source)) {
$listArgs = $this->getS3Args($this->sourceMetadata['path']);
if (isset($listArgs['Key'])) {
$listArgs['Prefix'] = $listArgs['Key'] . '/';
unset($listArgs['Key']);
}
$files = $this->client
->getPaginator('ListObjects', $listArgs)
->search('Contents[].Key');
$files = Aws\map($files, function ($key) use ($listArgs) {
return "s3://{$listArgs['Bucket']}/$key";
});
return Aws\filter($files, function ($key) {
return substr($key, -1, 1) !== '/';
});
}
return $this->source;
}
private function upload($filename)
{
$args = $this->s3Args;
$args['SourceFile'] = $filename;
$args['Key'] = $this->createS3Key($filename);
$command = $this->client->getCommand('PutObject', $args);
$this->before and call_user_func($this->before, $command);
return $this->client->executeAsync($command);
}
private function uploadMultipart($filename)
{
$args = $this->s3Args;
$args['Key'] = $this->createS3Key($filename);
return (new MultipartUploader($this->client, $filename, [
'bucket' => $args['Bucket'],
'key' => $args['Key'],
'before_initiate' => $this->before,
'before_upload' => $this->before,
'before_complete' => $this->before,
'concurrency' => $this->concurrency,
]))->promise();
}
private function createS3Key($filename)
{
$filename = $this->normalizePath($filename);
$relative_file_path = ltrim(
preg_replace('#^' . preg_quote($this->sourceMetadata['path']) . '#', '', $filename),
'/\\'
);
if (isset($this->s3Args['Key'])) {
return rtrim($this->s3Args['Key'], '/').'/'.$relative_file_path;
}
return $relative_file_path;
}
private function addDebugToBefore($debug)
{
$before = $this->before;
$sourcePath = $this->sourceMetadata['path'];
$s3Args = $this->s3Args;
$this->before = static function (
CommandInterface $command
) use ($before, $debug, $sourcePath, $s3Args) {
// Call the composed before function.
$before and $before($command);
// Determine the source and dest values based on operation.
switch ($operation = $command->getName()) {
case 'GetObject':
$source = "s3://{$command['Bucket']}/{$command['Key']}";
$dest = $command['@http']['sink'];
break;
case 'PutObject':
$source = $command['SourceFile'];
$dest = "s3://{$command['Bucket']}/{$command['Key']}";
break;
case 'UploadPart':
$part = $command['PartNumber'];
case 'CreateMultipartUpload':
case 'CompleteMultipartUpload':
$sourceKey = $command['Key'];
if (isset($s3Args['Key']) && strpos($sourceKey, $s3Args['Key']) === 0) {
$sourceKey = substr($sourceKey, strlen($s3Args['Key']) + 1);
}
$source = "{$sourcePath}/{$sourceKey}";
$dest = "s3://{$command['Bucket']}/{$command['Key']}";
break;
default:
throw new \UnexpectedValueException(
"Transfer encountered an unexpected operation: {$operation}."
);
}
// Print the debugging message.
$context = sprintf('%s -> %s (%s)', $source, $dest, $operation);
if (isset($part)) {
$context .= " : Part={$part}";
}
fwrite($debug, "Transferring {$context}\n");
};
}
}