Update cloudinary to 1.20.0

This commit is contained in:
JonnyWong16 2020-03-21 19:11:41 -07:00
parent 1c56d9c513
commit 2984629b39
27 changed files with 2865 additions and 923 deletions

View file

@ -1,29 +1,43 @@
from __future__ import absolute_import
from copy import deepcopy
import os
import re
import logging
import numbers
import certifi
from math import ceil
from six import python_2_unicode_compatible
logger = logging.getLogger("Cloudinary")
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
import os
import re
from six import python_2_unicode_compatible
from cloudinary import utils
from cloudinary.exceptions import GeneralError
from cloudinary.cache import responsive_breakpoints_cache
from cloudinary.http_client import HttpClient
from cloudinary.compat import urlparse, parse_qs
from cloudinary.search import Search
from platform import python_version
CERT_KWARGS = {
'cert_reqs': 'CERT_REQUIRED',
'ca_certs': certifi.where(),
}
CF_SHARED_CDN = "d3jpl91pxevbkh.cloudfront.net"
OLD_AKAMAI_SHARED_CDN = "cloudinary-a.akamaihd.net"
AKAMAI_SHARED_CDN = "res.cloudinary.com"
SHARED_CDN = AKAMAI_SHARED_CDN
CL_BLANK = "data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7"
URI_SCHEME = "cloudinary"
VERSION = "1.11.0"
USER_AGENT = "CloudinaryPython/" + VERSION
VERSION = "1.20.0"
USER_AGENT = "CloudinaryPython/{} (Python {})".format(VERSION, python_version())
""" :const: USER_AGENT """
USER_PLATFORM = ""
@ -39,7 +53,8 @@ The format of the value should be <ProductName>/Version[ (comment)].
def get_user_agent():
"""Provides the `USER_AGENT` string that is passed to the Cloudinary servers.
"""
Provides the `USER_AGENT` string that is passed to the Cloudinary servers.
Prepends `USER_PLATFORM` if it is defined.
:returns: the user agent
@ -54,15 +69,27 @@ def get_user_agent():
def import_django_settings():
try:
import django.conf
from django.core.exceptions import ImproperlyConfigured
try:
if 'CLOUDINARY' in dir(django.conf.settings):
return django.conf.settings.CLOUDINARY
from django.conf import settings as _django_settings
# We can get a situation when Django module is installed in the system, but not initialized,
# which means we are running not in a Django process.
# In this case the following line throws ImproperlyConfigured exception
if 'cloudinary' in _django_settings.INSTALLED_APPS:
from django import get_version as _get_django_version
global USER_PLATFORM
USER_PLATFORM = "Django/{django_version}".format(django_version=_get_django_version())
if 'CLOUDINARY' in dir(_django_settings):
return _django_settings.CLOUDINARY
else:
return None
except ImproperlyConfigured:
return None
except ImportError:
return None
@ -78,14 +105,18 @@ class Config(object):
api_key=os.environ.get("CLOUDINARY_API_KEY"),
api_secret=os.environ.get("CLOUDINARY_API_SECRET"),
secure_distribution=os.environ.get("CLOUDINARY_SECURE_DISTRIBUTION"),
private_cdn=os.environ.get("CLOUDINARY_PRIVATE_CDN") == 'true'
private_cdn=os.environ.get("CLOUDINARY_PRIVATE_CDN") == 'true',
api_proxy=os.environ.get("CLOUDINARY_API_PROXY"),
)
elif os.environ.get("CLOUDINARY_URL"):
cloudinary_url = os.environ.get("CLOUDINARY_URL")
self._parse_cloudinary_url(cloudinary_url)
def _parse_cloudinary_url(self, cloudinary_url):
uri = urlparse(cloudinary_url.replace("cloudinary://", "http://"))
uri = urlparse(cloudinary_url)
if not self._is_url_scheme_valid(uri):
raise ValueError("Invalid CLOUDINARY_URL scheme. Expecting to start with 'cloudinary://'")
for k, v in parse_qs(uri.query).items():
if self._is_nested_key(k):
self._put_nested_key(k, v)
@ -115,7 +146,7 @@ class Config(object):
def _put_nested_key(self, key, value):
chain = re.split(r'[\[\]]+', key)
chain = [key for key in chain if key]
chain = [k for k in chain if k]
outer = self.__dict__
last_key = chain.pop()
for inner_key in chain:
@ -128,7 +159,21 @@ class Config(object):
if isinstance(value, list):
value = value[0]
outer[last_key] = value
@staticmethod
def _is_url_scheme_valid(url):
    """
    Check that a parsed URL uses the expected cloudinary scheme.

    :param url: A named tuple of URL components (as returned by urlparse)

    :return: True when a scheme is present and matches URI_SCHEME, False otherwise
    """
    scheme = url.scheme
    return bool(scheme) and scheme.lower() == URI_SCHEME
_config = Config()
@ -143,8 +188,35 @@ def reset_config():
_config = Config()
_http_client = HttpClient()
# FIXME: circular import issue
from cloudinary.search import Search
@python_2_unicode_compatible
class CloudinaryResource(object):
"""
Recommended sources for video tag
"""
default_video_sources = [
{
"type": "mp4",
"codecs": "hev1",
"transformations": {"video_codec": "h265"}
}, {
"type": "webm",
"codecs": "vp9",
"transformations": {"video_codec": "vp9"}
}, {
"type": "mp4",
"transformations": {"video_codec": "auto"}
}, {
"type": "webm",
"transformations": {"video_codec": "auto"}
},
]
def __init__(self, public_id=None, format=None, version=None,
signature=None, url_options=None, metadata=None, type=None, resource_type=None,
default_resource_type=None):
@ -174,9 +246,11 @@ class CloudinaryResource(object):
return None
prep = ''
prep = prep + self.resource_type + '/' + self.type + '/'
if self.version: prep = prep + 'v' + str(self.version) + '/'
if self.version:
prep = prep + 'v' + str(self.version) + '/'
prep = prep + self.public_id
if self.format: prep = prep + '.' + self.format
if self.format:
prep = prep + '.' + self.format
return prep
def get_presigned(self):
@ -199,28 +273,283 @@ class CloudinaryResource(object):
def build_url(self, **options):
    """Build and return the delivery URL of this resource (URL only, without attributes)."""
    url, _attributes = self.__build_url(**options)
    return url
def default_poster_options(self, options):
@staticmethod
def default_poster_options(options):
options["format"] = options.get("format", "jpg")
def default_source_types(self):
@staticmethod
def default_source_types():
return ['webm', 'mp4', 'ogv']
@staticmethod
def _validate_srcset_data(srcset_data):
"""
Helper function. Validates srcset_data parameters
:param srcset_data: A dictionary containing the following keys:
breakpoints A list of breakpoints.
min_width Minimal width of the srcset images
max_width Maximal width of the srcset images.
max_images Number of srcset images to generate.
:return: bool True on success or False on failure
"""
if not all(k in srcset_data and isinstance(srcset_data[k], numbers.Number) for k in ("min_width", "max_width",
"max_images")):
logger.warning("Either valid (min_width, max_width, max_images)" +
"or breakpoints must be provided to the image srcset attribute")
return False
if srcset_data["min_width"] > srcset_data["max_width"]:
logger.warning("min_width must be less than max_width")
return False
if srcset_data["max_images"] <= 0:
logger.warning("max_images must be a positive integer")
return False
return True
def _generate_breakpoints(self, srcset_data):
"""
Helper function. Calculates static responsive breakpoints using provided parameters.
Either the breakpoints or min_width, max_width, max_images must be provided.
:param srcset_data: A dictionary containing the following keys:
breakpoints A list of breakpoints.
min_width Minimal width of the srcset images
max_width Maximal width of the srcset images.
max_images Number of srcset images to generate.
:return: A list of breakpoints
:raises ValueError: In case of invalid or missing parameters
"""
breakpoints = srcset_data.get("breakpoints", list())
if breakpoints:
return breakpoints
if not self._validate_srcset_data(srcset_data):
return None
min_width, max_width, max_images = srcset_data["min_width"], srcset_data["max_width"], srcset_data["max_images"]
if max_images == 1:
# if user requested only 1 image in srcset, we return max_width one
min_width = max_width
step_size = int(ceil(float(max_width - min_width) / (max_images - 1 if max_images > 1 else 1)))
curr_breakpoint = min_width
while curr_breakpoint < max_width:
breakpoints.append(curr_breakpoint)
curr_breakpoint += step_size
breakpoints.append(max_width)
return breakpoints
def _fetch_breakpoints(self, srcset_data=None, **options):
    """
    Helper function. Retrieves responsive breakpoints list from cloudinary server

    When passing special string to transformation `width` parameter of form
    `auto:breakpoints{parameters}:json`, the response contains JSON with data of
    the responsive breakpoints

    :param srcset_data: A dictionary containing the following keys:
        min_width   Minimal width of the srcset images
        max_width   Maximal width of the srcset images
        bytes_step  Minimal bytes step between images
        max_images  Number of srcset images to generate
    :param options: Additional options

    :return: Resulting breakpoints
    """
    data = srcset_data if srcset_data is not None else dict()

    # Parameters of the server-side breakpoint calculation, with library defaults.
    params = {
        "min_width": data.get("min_width", 50),
        "max_width": data.get("max_width", 1000),
        "max_images": data.get("max_images", 20),
        # The special width parameter expects the step in rounded-up kilobytes.
        "kbytes_step": int(ceil(float(data.get("bytes_step", 20000)) / 1024)),
    }

    width_param = "auto:breakpoints_{min_width}_{max_width}_{kbytes_step}_{max_images}:json".format(**params)
    url = utils.cloudinary_scaled_url(self.public_id, width_param, data.get("transformation"), options)
    return _http_client.get_json(url).get("breakpoints", None)
def _get_or_generate_breakpoints(self, srcset_data, **options):
"""
Helper function. Gets from cache or calculates srcset breakpoints using provided parameters
:param srcset_data: A dictionary containing the following keys:
breakpoints A list of breakpoints.
min_width Minimal width of the srcset images
max_width Maximal width of the srcset images
max_images Number of srcset images to generate
:param options: Additional options
:return: Resulting breakpoints
"""
breakpoints = srcset_data.get("breakpoints")
if breakpoints:
return breakpoints
if srcset_data.get("use_cache"):
breakpoints = responsive_breakpoints_cache.instance.get(self.public_id, **options)
if not breakpoints:
try:
breakpoints = self._fetch_breakpoints(srcset_data, **options)
except GeneralError as e:
logger.warning("Failed getting responsive breakpoints: {error}".format(error=e.message))
if breakpoints:
responsive_breakpoints_cache.instance.set(self.public_id, breakpoints, **options)
if not breakpoints:
# Static calculation if cache is not enabled or we failed to fetch breakpoints
breakpoints = self._generate_breakpoints(srcset_data)
return breakpoints
def _generate_srcset_attribute(self, breakpoints, transformation=None, **options):
    """
    Helper function. Generates srcset attribute value of the HTML img tag.

    :param breakpoints: A list of breakpoints.
    :param transformation: Custom transformation
    :param options: Additional options

    :return: Resulting srcset attribute value, or None when no breakpoints given
    """
    if not breakpoints:
        return None

    transformation = dict() if transformation is None else transformation

    entries = []
    for width in breakpoints:
        scaled_url = utils.cloudinary_scaled_url(self.public_id, width, transformation, options)
        entries.append("{0} {1}w".format(scaled_url, width))
    return ", ".join(entries)
@staticmethod
def _generate_sizes_attribute(breakpoints):
"""
Helper function. Generates sizes attribute value of the HTML img tag.
:param breakpoints: A list of breakpoints.
:return: Resulting 'sizes' attribute value
"""
if not breakpoints:
return None
return ", ".join("(max-width: {bp}px) {bp}px".format(bp=bp) for bp in breakpoints)
def _generate_image_responsive_attributes(self, attributes, srcset_data, **options):
"""
Helper function. Generates srcset and sizes attributes of the image tag
Create both srcset and sizes here to avoid fetching breakpoints twice
:param attributes: Existing attributes
:param srcset_data: A dictionary containing the following keys:
breakpoints A list of breakpoints.
min_width Minimal width of the srcset images
max_width Maximal width of the srcset images.
max_images Number of srcset images to generate.
:param options: Additional options
:return: The responsive attributes
"""
responsive_attributes = dict()
if not srcset_data:
return responsive_attributes
breakpoints = None
if "srcset" not in attributes:
breakpoints = self._get_or_generate_breakpoints(srcset_data, **options)
transformation = srcset_data.get("transformation")
srcset_attr = self._generate_srcset_attribute(breakpoints, transformation, **options)
if srcset_attr:
responsive_attributes["srcset"] = srcset_attr
if "sizes" not in attributes and srcset_data.get("sizes") is True:
if not breakpoints:
breakpoints = self._get_or_generate_breakpoints(srcset_data, **options)
sizes_attr = self._generate_sizes_attribute(breakpoints)
if sizes_attr:
responsive_attributes["sizes"] = sizes_attr
return responsive_attributes
def image(self, **options):
"""
Generates HTML img tag
:param options: Additional options
:return: Resulting img tag
"""
if options.get("resource_type", self.resource_type) == "video":
self.default_poster_options(options)
custom_attributes = options.pop("attributes", dict())
srcset_option = options.pop("srcset", dict())
srcset_data = dict()
if isinstance(srcset_option, dict):
srcset_data = config().srcset or dict()
srcset_data = srcset_data.copy()
srcset_data.update(srcset_option)
else:
if "srcset" not in custom_attributes:
custom_attributes["srcset"] = srcset_option
src, attrs = self.__build_url(**options)
client_hints = attrs.pop("client_hints", config().client_hints)
responsive = attrs.pop("responsive", False)
hidpi = attrs.pop("hidpi", False)
if (responsive or hidpi) and not client_hints:
attrs["data-src"] = src
classes = "cld-responsive" if responsive else "cld-hidpi"
if "class" in attrs: classes += " " + attrs["class"]
attrs["class"] = classes
src = attrs.pop("responsive_placeholder", config().responsive_placeholder)
if src == "blank": src = CL_BLANK
if src: attrs["src"] = src
classes = "cld-responsive" if responsive else "cld-hidpi"
if "class" in attrs:
classes += " " + attrs["class"]
attrs["class"] = classes
src = attrs.pop("responsive_placeholder", config().responsive_placeholder)
if src == "blank":
src = CL_BLANK
responsive_attrs = self._generate_image_responsive_attributes(custom_attributes, srcset_data, **options)
if responsive_attrs:
# width and height attributes override srcset behavior, they should be removed from html attributes.
for key in {"width", "height"}:
attrs.pop(key, None)
attrs.update(responsive_attrs)
# Explicitly provided attributes override options
attrs.update(custom_attributes)
if src:
attrs["src"] = src
return u"<img {0}/>".format(utils.html_attrs(attrs))
@ -228,69 +557,231 @@ class CloudinaryResource(object):
self.default_poster_options(options)
return self.build_url(**options)
# Creates an HTML video tag for the provided +source+
#
# ==== Options
# * <tt>source_types</tt> - Specify which source type the tag should include. defaults to webm, mp4 and ogv.
# * <tt>source_transformation</tt> - specific transformations to use for a specific source type.
# * <tt>poster</tt> - override default thumbnail:
# * url: provide an ad hoc url
# * options: with specific poster transformations and/or Cloudinary +:public_id+
#
# ==== Examples
# CloudinaryResource("mymovie.mp4").video()
# CloudinaryResource("mymovie.mp4").video(source_types = 'webm')
# CloudinaryResource("mymovie.ogv").video(poster = "myspecialplaceholder.jpg")
# CloudinaryResource("mymovie.webm").video(source_types = ['webm', 'mp4'], poster = {'effect': 'sepia'})
def video(self, **options):
public_id = options.get('public_id', self.public_id)
source = re.sub("\.({0})$".format("|".join(self.default_source_types())), '', public_id)
@staticmethod
def _video_mime_type(video_type, codecs=None):
"""
Helper function for video(), generates video MIME type string from video_type and codecs.
Example: video/mp4; codecs=mp4a.40.2
:param video_type: mp4, webm, ogg etc.
:param codecs: List or string of codecs. E.g.: "avc1.42E01E" or "avc1.42E01E, mp4a.40.2" or
["avc1.42E01E", "mp4a.40.2"]
:return: Resulting mime type
"""
video_type = 'ogg' if video_type == 'ogv' else video_type
if not video_type:
return ""
codecs_str = ", ".join(codecs) if isinstance(codecs, (list, tuple)) else codecs
codecs_attr = "; codecs={codecs_str}".format(codecs_str=codecs_str) if codecs_str else ""
return "video/{}{}".format(video_type, codecs_attr)
@staticmethod
def _collect_video_tag_attributes(video_options):
"""
Helper function for video tag, collects remaining options and returns them as attributes
:param video_options: Remaining options
:return: Resulting attributes
"""
attributes = video_options.copy()
if 'html_width' in attributes:
attributes['width'] = attributes.pop('html_width')
if 'html_height' in attributes:
attributes['height'] = attributes.pop('html_height')
if "poster" in attributes and not attributes["poster"]:
attributes.pop("poster", None)
return attributes
def _generate_video_poster_attr(self, source, video_options):
"""
Helper function for video tag, generates video poster URL
:param source: The public ID of the resource
:param video_options: Additional options
:return: Resulting video poster URL
"""
if 'poster' not in video_options:
return self.video_thumbnail(public_id=source, **video_options)
poster_options = video_options['poster']
if not isinstance(poster_options, dict):
return poster_options
if 'public_id' not in poster_options:
return self.video_thumbnail(public_id=source, **poster_options)
return utils.cloudinary_url(poster_options['public_id'], **poster_options)[0]
def _populate_video_source_tags(self, source, options):
"""
Helper function for video tag, populates source tags from provided options.
source_types and sources are mutually exclusive, only one of them can be used.
If both are not provided, source types are used (for backwards compatibility)
:param source: The public ID of the video
:param options: Additional options
:return: Resulting source tags (may be empty)
"""
source_tags = []
# Consume all relevant options, otherwise they are left and passed as attributes
video_sources = options.pop('sources', [])
source_types = options.pop('source_types', [])
source_transformation = options.pop('source_transformation', {})
if video_sources and isinstance(video_sources, list):
# processing new source structure with codecs
for source_data in video_sources:
transformation = options.copy()
transformation.update(source_data.get("transformations", {}))
source_type = source_data.get("type", '')
src = utils.cloudinary_url(source, format=source_type, **transformation)[0]
codecs = source_data.get("codecs", [])
source_tags.append("<source {attributes}>".format(
attributes=utils.html_attrs({'src': src, 'type': self._video_mime_type(source_type, codecs)})))
return source_tags
# processing old source_types structure with out codecs
if not source_types:
source_types = self.default_source_types()
if not isinstance(source_types, (list, tuple)):
return source_tags
for source_type in source_types:
transformation = options.copy()
transformation.update(source_transformation.get(source_type, {}))
src = utils.cloudinary_url(source, format=source_type, **transformation)[0]
source_tags.append("<source {attributes}>".format(
attributes=utils.html_attrs({'src': src, 'type': self._video_mime_type(source_type)})))
return source_tags
def video(self, **options):
"""
Creates an HTML video tag for the provided +source+
Examples:
CloudinaryResource("mymovie.mp4").video()
CloudinaryResource("mymovie.mp4").video(source_types = 'webm')
CloudinaryResource("mymovie.ogv").video(poster = "myspecialplaceholder.jpg")
CloudinaryResource("mymovie.webm").video(source_types = ['webm', 'mp4'], poster = {'effect': 'sepia'})
:param options:
* <tt>source_types</tt> - Specify which source type the tag should include.
defaults to webm, mp4 and ogv.
* <tt>sources</tt> - Similar to source_types, but may contain codecs list.
source_types and sources are mutually exclusive, only one of
them can be used. If both are not provided, default source types
are used.
* <tt>source_transformation</tt> - specific transformations to use
for a specific source type.
* <tt>poster</tt> - override default thumbnail:
* url: provide an ad hoc url
* options: with specific poster transformations and/or Cloudinary +:public_id+
:return: Video tag
"""
public_id = options.get('public_id', self.public_id)
source = re.sub(r"\.({0})$".format("|".join(self.default_source_types())), '', public_id)
custom_attributes = options.pop("attributes", dict())
fallback = options.pop('fallback_content', '')
options['resource_type'] = options.pop('resource_type', self.resource_type or 'video')
if not source_types: source_types = self.default_source_types()
video_options = options.copy()
# Save source types for a single video source handling (it can be a single type)
source_types = options.get('source_types', "")
if 'poster' in video_options:
poster_options = video_options['poster']
if isinstance(poster_options, dict):
if 'public_id' in poster_options:
video_options['poster'] = utils.cloudinary_url(poster_options['public_id'], **poster_options)[0]
else:
video_options['poster'] = self.video_thumbnail(public_id=source, **poster_options)
else:
video_options['poster'] = self.video_thumbnail(public_id=source, **options)
poster_options = options.copy()
if "poster" not in custom_attributes:
options["poster"] = self._generate_video_poster_attr(source, poster_options)
if not video_options['poster']: del video_options['poster']
if "resource_type" not in options:
options["resource_type"] = self.resource_type or "video"
nested_source_types = isinstance(source_types, list) and len(source_types) > 1
if not nested_source_types:
# populate video source tags
source_tags = self._populate_video_source_tags(source, options)
if not source_tags:
source = source + '.' + utils.build_array(source_types)[0]
video_url = utils.cloudinary_url(source, **video_options)
video_options = video_url[1]
if not nested_source_types:
video_options['src'] = video_url[0]
if 'html_width' in video_options: video_options['width'] = video_options.pop('html_width')
if 'html_height' in video_options: video_options['height'] = video_options.pop('html_height')
video_url, video_options = utils.cloudinary_url(source, **options)
sources = ""
if nested_source_types:
for source_type in source_types:
transformation = options.copy()
transformation.update(source_transformation.get(source_type, {}))
src = utils.cloudinary_url(source, format=source_type, **transformation)[0]
video_type = "ogg" if source_type == 'ogv' else source_type
mime_type = "video/" + video_type
sources += "<source {attributes}>".format(attributes=utils.html_attrs({'src': src, 'type': mime_type}))
if not source_tags:
custom_attributes['src'] = video_url
attributes = self._collect_video_tag_attributes(video_options)
attributes.update(custom_attributes)
sources_str = ''.join(str(x) for x in source_tags)
html = "<video {attributes}>{sources}{fallback}</video>".format(
attributes=utils.html_attrs(video_options), sources=sources, fallback=fallback)
attributes=utils.html_attrs(attributes), sources=sources_str, fallback=fallback)
return html
@staticmethod
def __generate_media_attr(**media_options):
media_query_conditions = []
if "min_width" in media_options:
media_query_conditions.append("(min-width: {}px)".format(media_options["min_width"]))
if "max_width" in media_options:
media_query_conditions.append("(max-width: {}px)".format(media_options["max_width"]))
return " and ".join(media_query_conditions)
def source(self, **options):
    """
    Generates an HTML <source> tag (for use inside a <picture> tag)

    :param options: Additional options (attributes, srcset, media, transformations)

    :return: Resulting source tag
    """
    attributes = options.get("attributes") or {}

    # Merge globally configured srcset settings with the per-call ones.
    srcset_data = dict(config().srcset or dict())
    srcset_data.update(options.pop("srcset", dict()))

    attributes.update(self._generate_image_responsive_attributes(attributes, srcset_data, **options))

    # `source` tag under `picture` tag uses `srcset` attribute for both `srcset` and `src` urls
    if "srcset" not in attributes:
        attributes["srcset"], _ = self.__build_url(**options)

    if "media" not in attributes:
        media_value = self.__generate_media_attr(**(options.get("media", {})))
        if media_value:
            attributes["media"] = media_value

    return u"<source {0}>".format(utils.html_attrs(attributes))
def picture(self, **options):
    """
    Generates an HTML <picture> tag: one <source> tag per provided source,
    followed by a fallback <img> tag.

    :param options: Additional options.
        sources - a list of dicts, each optionally containing
                  "transformation", "min_width" and "max_width"

    :return: Resulting picture tag
    """
    sub_tags = []
    # FIX: use a pop default so a missing "sources" option no longer raises KeyError.
    sources = options.pop("sources", None) or list()
    for source in sources:
        curr_options = deepcopy(options)
        if "transformation" in source:
            curr_options = utils.chain_transformations(curr_options, source["transformation"])
        # Only the width bounds of the source are used for the media query.
        curr_options["media"] = dict((k, source[k]) for k in ['min_width', 'max_width'] if k in source)
        sub_tags.append(self.source(**curr_options))
    sub_tags.append(self.image(**options))
    return u"<picture>{}</picture>".format("".join(sub_tags))
class CloudinaryImage(CloudinaryResource):
def __init__(self, public_id=None, **kwargs):

View file

@ -4,28 +4,24 @@ import email.utils
import json
import socket
import cloudinary
from six import string_types
import urllib3
import certifi
from cloudinary import utils
from six import string_types
from urllib3.exceptions import HTTPError
import cloudinary
from cloudinary import utils
from cloudinary.exceptions import (
BadRequest,
AuthorizationRequired,
NotAllowed,
NotFound,
AlreadyExists,
RateLimited,
GeneralError
)
logger = cloudinary.logger
# intentionally one-liners
class Error(Exception): pass
class NotFound(Error): pass
class NotAllowed(Error): pass
class AlreadyExists(Error): pass
class RateLimited(Error): pass
class BadRequest(Error): pass
class GeneralError(Error): pass
class AuthorizationRequired(Error): pass
EXCEPTION_CODES = {
400: BadRequest,
401: AuthorizationRequired,
@ -45,10 +41,8 @@ class Response(dict):
self.rate_limit_reset_at = email.utils.parsedate(response.headers["x-featureratelimit-reset"])
self.rate_limit_remaining = int(response.headers["x-featureratelimit-remaining"])
_http = urllib3.PoolManager(
cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where()
)
_http = utils.get_http_connector(cloudinary.config(), cloudinary.CERT_KWARGS)
def ping(**options):
@ -67,23 +61,26 @@ def resources(**options):
resource_type = options.pop("resource_type", "image")
upload_type = options.pop("type", None)
uri = ["resources", resource_type]
if upload_type: uri.append(upload_type)
params = only(options,
"next_cursor", "max_results", "prefix", "tags", "context", "moderations", "direction", "start_at")
if upload_type:
uri.append(upload_type)
params = only(options, "next_cursor", "max_results", "prefix", "tags",
"context", "moderations", "direction", "start_at")
return call_api("get", uri, params, **options)
def resources_by_tag(tag, **options):
resource_type = options.pop("resource_type", "image")
uri = ["resources", resource_type, "tags", tag]
params = only(options, "next_cursor", "max_results", "tags", "context", "moderations", "direction")
params = only(options, "next_cursor", "max_results", "tags",
"context", "moderations", "direction")
return call_api("get", uri, params, **options)
def resources_by_moderation(kind, status, **options):
resource_type = options.pop("resource_type", "image")
uri = ["resources", resource_type, "moderations", kind, status]
params = only(options, "next_cursor", "max_results", "tags", "context", "moderations", "direction")
params = only(options, "next_cursor", "max_results", "tags",
"context", "moderations", "direction")
return call_api("get", uri, params, **options)
@ -99,7 +96,8 @@ def resource(public_id, **options):
resource_type = options.pop("resource_type", "image")
upload_type = options.pop("type", "upload")
uri = ["resources", resource_type, upload_type, public_id]
params = only(options, "exif", "faces", "colors", "image_metadata", "pages", "phash", "coordinates", "max_results")
params = only(options, "exif", "faces", "colors", "image_metadata", "cinemagraph_analysis",
"pages", "phash", "coordinates", "max_results", "quality_analysis", "derived_next_cursor")
return call_api("get", uri, params, **options)
@ -114,9 +112,11 @@ def update(public_id, **options):
if "tags" in options:
params["tags"] = ",".join(utils.build_array(options["tags"]))
if "face_coordinates" in options:
params["face_coordinates"] = utils.encode_double_array(options.get("face_coordinates"))
params["face_coordinates"] = utils.encode_double_array(
options.get("face_coordinates"))
if "custom_coordinates" in options:
params["custom_coordinates"] = utils.encode_double_array(options.get("custom_coordinates"))
params["custom_coordinates"] = utils.encode_double_array(
options.get("custom_coordinates"))
if "context" in options:
params["context"] = utils.encode_context(options.get("context"))
if "auto_tagging" in options:
@ -167,8 +167,7 @@ def delete_derived_resources(derived_resource_ids, **options):
def delete_derived_by_transformation(public_ids, transformations,
resource_type='image', type='upload', invalidate=None,
**options):
"""
Delete derived resources of public ids, identified by transformations
"""Delete derived resources of public ids, identified by transformations
:param public_ids: the base resources
:type public_ids: list of str
@ -202,33 +201,49 @@ def tags(**options):
def transformations(**options):
uri = ["transformations"]
return call_api("get", uri, only(options, "next_cursor", "max_results"), **options)
params = only(options, "named", "next_cursor", "max_results")
return call_api("get", uri, params, **options)
def transformation(transformation, **options):
uri = ["transformations", transformation_string(transformation)]
return call_api("get", uri, only(options, "next_cursor", "max_results"), **options)
uri = ["transformations"]
params = only(options, "next_cursor", "max_results")
params["transformation"] = utils.build_single_eager(transformation)
return call_api("get", uri, params, **options)
def delete_transformation(transformation, **options):
uri = ["transformations", transformation_string(transformation)]
return call_api("delete", uri, {}, **options)
uri = ["transformations"]
params = {"transformation": utils.build_single_eager(transformation)}
return call_api("delete", uri, params, **options)
# updates - currently only supported update is the "allowed_for_strict" boolean flag and unsafe_update
# updates - currently only supported update is the "allowed_for_strict"
# boolean flag and unsafe_update
def update_transformation(transformation, **options):
uri = ["transformations", transformation_string(transformation)]
uri = ["transformations"]
updates = only(options, "allowed_for_strict")
if "unsafe_update" in options:
updates["unsafe_update"] = transformation_string(options.get("unsafe_update"))
if not updates: raise Exception("No updates given")
updates["transformation"] = utils.build_single_eager(transformation)
return call_api("put", uri, updates, **options)
def create_transformation(name, definition, **options):
uri = ["transformations", name]
return call_api("post", uri, {"transformation": transformation_string(definition)}, **options)
uri = ["transformations"]
params = {"name": name, "transformation": utils.build_single_eager(definition)}
return call_api("post", uri, params, **options)
def publish_by_ids(public_ids, **options):
@ -271,7 +286,7 @@ def update_upload_preset(name, **options):
uri = ["upload_presets", name]
params = utils.build_upload_params(**options)
params = utils.cleanup_params(params)
params.update(only(options, "unsigned", "disallow_public_id"))
params.update(only(options, "unsigned", "disallow_public_id", "live"))
return call_api("put", uri, params, **options)
@ -279,16 +294,33 @@ def create_upload_preset(**options):
uri = ["upload_presets"]
params = utils.build_upload_params(**options)
params = utils.cleanup_params(params)
params.update(only(options, "unsigned", "disallow_public_id", "name"))
params.update(only(options, "unsigned", "disallow_public_id", "name", "live"))
return call_api("post", uri, params, **options)
def create_folder(path, **options):
    """Creates a new folder at the given path.

    :param path: The full path of the folder to create
    :param options: Additional options

    :rtype: Response
    """
    uri = ["folders", path]
    return call_api("post", uri, {}, **options)
def root_folders(**options):
return call_api("get", ["folders"], {}, **options)
return call_api("get", ["folders"], only(options, "next_cursor", "max_results"), **options)
def subfolders(of_folder_path, **options):
return call_api("get", ["folders", of_folder_path], {}, **options)
return call_api("get", ["folders", of_folder_path], only(options, "next_cursor", "max_results"), **options)
def delete_folder(path, **options):
"""Deletes folder
Deleted folder must be empty, but can have descendant empty sub folders
:param path: The folder to delete
:param options: Additional options
:rtype: Response
"""
return call_api("delete", ["folders", path], {}, **options)
def restore(public_ids, **options):
@ -361,29 +393,48 @@ def update_streaming_profile(name, **options):
def call_json_api(method, uri, jsonBody, **options):
logger.debug(jsonBody)
data = json.dumps(jsonBody).encode('utf-8')
return _call_api(method, uri, body=data, headers={'Content-Type': 'application/json'}, **options)
return _call_api(method, uri, body=data,
headers={'Content-Type': 'application/json'}, **options)
def call_api(method, uri, params, **options):
    """
    Performs an Admin API call with form-encoded parameters.

    :param method: The HTTP method. Valid methods: get, post, put, delete
    :param uri: REST endpoint of the API, as a list of path components
    :param params: Query/body parameters passed to the method
    :param options: Additional options

    :rtype: Response
    """
    return _call_api(method, uri, params=params, **options)
def call_metadata_api(method, uri, params, **options):
    """
    Private function that assists with performing an API call to the
    metadata_fields part of the Admin API.

    :param method: The HTTP method. Valid methods: get, post, put, delete
    :param uri: REST endpoint of the API (without 'metadata_fields')
    :param params: Query/body parameters passed to the method
    :param options: Additional options

    :rtype: Response
    """
    # All metadata endpoints live under /metadata_fields; payload is sent as JSON.
    uri = ["metadata_fields"] + (uri or [])
    return call_json_api(method, uri, params, **options)
def _call_api(method, uri, params=None, body=None, headers=None, **options):
prefix = options.pop("upload_prefix",
cloudinary.config().upload_prefix) or "https://api.cloudinary.com"
cloud_name = options.pop("cloud_name", cloudinary.config().cloud_name)
if not cloud_name: raise Exception("Must supply cloud_name")
if not cloud_name:
raise Exception("Must supply cloud_name")
api_key = options.pop("api_key", cloudinary.config().api_key)
if not api_key: raise Exception("Must supply api_key")
if not api_key:
raise Exception("Must supply api_key")
api_secret = options.pop("api_secret", cloudinary.config().api_secret)
if not cloud_name: raise Exception("Must supply api_secret")
if not cloud_name:
raise Exception("Must supply api_secret")
api_url = "/".join([prefix, "v1_1", cloud_name] + uri)
processed_params = None
if isinstance(params, dict):
processed_params = {}
for key, value in params.items():
if isinstance(value, list):
if isinstance(value, list) or isinstance(value, tuple):
value_list = {"{}[{}]".format(key, i): i_value for i, i_value in enumerate(value)}
processed_params.update(value_list)
elif value:
@ -437,12 +488,166 @@ def transformation_string(transformation):
def __prepare_streaming_profile_params(**options):
params = only(options, "display_name")
if "representations" in options:
representations = [{"transformation": transformation_string(trans)} for trans in options["representations"]]
representations = [{"transformation": transformation_string(trans)}
for trans in options["representations"]]
params["representations"] = json.dumps(representations)
return params
def __delete_resource_params(options, **params):
    """
    Builds the parameter dict shared by the resource-deletion endpoints.

    :param options: Additional options; "transformations", "keep_original",
        "next_cursor" and "invalidate" are extracted from it
    :param params: Extra parameters merged into the result (these take
        precedence over values derived from options)

    :rtype: dict
    """
    p = dict(transformations=utils.build_eager(options.get('transformations')),
             **only(options, "keep_original", "next_cursor", "invalidate"))
    p.update(params)
    return p
def list_metadata_fields(**options):
    """
    Returns a list of all metadata field definitions.

    See: `Get metadata fields API reference <https://cloudinary.com/documentation/admin_api#get_metadata_fields>`_

    :param options: Additional options

    :rtype: Response
    """
    # Root of the metadata_fields endpoint: empty sub-path, no parameters.
    return call_metadata_api("get", [], {}, **options)
def metadata_field_by_field_id(field_external_id, **options):
    """
    Gets a single metadata field definition by its external id.

    See: `Get metadata field by external ID API reference
    <https://cloudinary.com/documentation/admin_api#get_a_metadata_field_by_external_id>`_

    :param field_external_id: The ID of the metadata field to retrieve
    :param options: Additional options

    :rtype: Response
    """
    return call_metadata_api("get", [field_external_id], {}, **options)
def add_metadata_field(field, **options):
    """
    Creates a new metadata field definition.

    See: `Create metadata field API reference <https://cloudinary.com/documentation/admin_api#create_a_metadata_field>`_

    :param field: The field to add

    :param options: Additional options

    :rtype: Response
    """
    # Only the documented field-definition keys are forwarded to the API.
    allowed_keys = ("type", "external_id", "label", "mandatory",
                    "default_value", "validation", "datasource")
    body = only(field, *allowed_keys)
    return call_metadata_api("post", [], body, **options)
def update_metadata_field(field_external_id, field, **options):
    """
    Updates a metadata field by external id.

    Updates a metadata field definition (partially, no need to pass the entire
    object) passed as JSON data.

    See `Generic structure of a metadata field
    <https://cloudinary.com/documentation/admin_api#generic_structure_of_a_metadata_field>`_ for details.

    :param field_external_id: The id of the metadata field to update
    :param field: The field definition
    :param options: Additional options

    :rtype: Response
    """
    # Only the mutable attributes of a field definition may be sent on update.
    updatable = only(field, "label", "mandatory", "default_value", "validation")
    return call_metadata_api("put", [field_external_id], updatable, **options)
def delete_metadata_field(field_external_id, **options):
    """
    Deletes a metadata field definition.

    The field should no longer be considered a valid candidate for all other endpoints.

    See: `Delete metadata field API reference
    <https://cloudinary.com/documentation/admin_api#delete_a_metadata_field_by_external_id>`_

    :param field_external_id: The external id of the field to delete
    :param options: Additional options

    :return: An array with a "message" key. "ok" value indicates a successful deletion.
    :rtype: Response
    """
    return call_metadata_api("delete", [field_external_id], {}, **options)
def delete_datasource_entries(field_external_id, entries_external_id, **options):
    """
    Deletes entries in a metadata field datasource.

    Deletes (blocks) the datasource entries for a specified metadata field
    definition. Sets the state of the entries to inactive. This is a soft
    delete; the entries still exist under the hood and can be activated again
    with the restore datasource entries method.

    See: `Delete entries in a metadata field datasource API reference
    <https://cloudinary.com/documentation/admin_api#delete_entries_in_a_metadata_field_datasource>`_

    :param field_external_id: The id of the field to update
    :param entries_external_id: The ids of all the entries to delete from the
        datasource
    :param options: Additional options

    :rtype: Response
    """
    endpoint = [field_external_id, "datasource"]
    body = {"external_ids": entries_external_id}
    return call_metadata_api("delete", endpoint, body, **options)
def update_metadata_field_datasource(field_external_id, entries_external_id, **options):
    """
    Updates a metadata field datasource.

    Updates the datasource of a supported field type (currently only enum and
    set), passed as JSON data. The update is partial: datasource entries with
    an existing external_id will be updated and entries with new external_id's
    (or without external_id's) will be appended.

    See: `Update a metadata field datasource API reference
    <https://cloudinary.com/documentation/admin_api#update_a_metadata_field_datasource>`_

    :param field_external_id: The external id of the field to update
    :param entries_external_id: The datasource entries to update/append
    :param options: Additional options

    :rtype: Response
    """
    # Keep only the recognised keys of each entry; drop entries with neither.
    values = [entry
              for entry in (only(item, "external_id", "value") for item in entries_external_id)
              if entry]

    return call_metadata_api("put", [field_external_id, "datasource"],
                             {"values": values}, **options)
def restore_metadata_field_datasource(field_external_id, entries_external_ids, **options):
    """
    Restores entries in a metadata field datasource.

    Restores (unblocks) any previously deleted datasource entries for a
    specified metadata field definition. Sets the state of the entries to
    active.

    See: `Restore entries in a metadata field datasource API reference
    <https://cloudinary.com/documentation/admin_api#restore_entries_in_a_metadata_field_datasource>`_

    :param field_external_id: The ID of the metadata field
    :param entries_external_ids: An array of IDs of datasource entries to
        restore (unblock)
    :param options: Additional options

    :rtype: Response
    """
    body = {"external_ids": entries_external_ids}
    return call_metadata_api("post", [field_external_id, "datasource_restore"], body, **options)

View file

@ -3,33 +3,37 @@ import hmac
import re
import time
from binascii import a2b_hex
from cloudinary.compat import quote_plus
AUTH_TOKEN_NAME = "__cld_token__"
AUTH_TOKEN_SEPARATOR = "~"
AUTH_TOKEN_UNSAFE_RE = r'([ "#%&\'\/:;<=>?@\[\\\]^`{\|}~]+)'
def generate(url=None, acl=None, start_time=None, duration=None, expiration=None, ip=None, key=None,
token_name=AUTH_TOKEN_NAME):
def generate(url=None, acl=None, start_time=None, duration=None,
expiration=None, ip=None, key=None, token_name=AUTH_TOKEN_NAME):
if expiration is None:
if duration is not None:
start = start_time if start_time is not None else int(time.mktime(time.gmtime()))
start = start_time if start_time is not None else int(time.time())
expiration = start + duration
else:
raise Exception("Must provide either expiration or duration")
token_parts = []
if ip is not None: token_parts.append("ip=" + ip)
if start_time is not None: token_parts.append("st=%d" % start_time)
if ip is not None:
token_parts.append("ip=" + ip)
if start_time is not None:
token_parts.append("st=%d" % start_time)
token_parts.append("exp=%d" % expiration)
if acl is not None: token_parts.append("acl=%s" % _escape_to_lower(acl))
if acl is not None:
token_parts.append("acl=%s" % _escape_to_lower(acl))
to_sign = list(token_parts)
if url is not None:
if url is not None and acl is None:
to_sign.append("url=%s" % _escape_to_lower(url))
auth = _digest("~".join(to_sign), key)
auth = _digest(AUTH_TOKEN_SEPARATOR.join(to_sign), key)
token_parts.append("hmac=%s" % auth)
return "%(token_name)s=%(token)s" % {"token_name": token_name, "token": "~".join(token_parts)}
return "%(token_name)s=%(token)s" % {"token_name": token_name, "token": AUTH_TOKEN_SEPARATOR.join(token_parts)}
def _digest(message, key):
@ -38,10 +42,8 @@ def _digest(message, key):
def _escape_to_lower(url):
escaped_url = quote_plus(url)
def toLowercase(match):
return match.group(0).lower()
escaped_url = re.sub(r'%..', toLowercase, escaped_url)
# There is a circular import issue in this file, need to resolve it in the next major release
from cloudinary.utils import smart_escape
escaped_url = smart_escape(url, unsafe=AUTH_TOKEN_UNSAFE_RE)
escaped_url = re.sub(r"%[0-9A-F]{2}", lambda x: x.group(0).lower(), escaped_url)
return escaped_url

0
lib/cloudinary/cache/__init__.py vendored Normal file
View file

View file

View file

@ -0,0 +1,63 @@
from abc import ABCMeta, abstractmethod
class CacheAdapter:
    """
    CacheAdapter Abstract Base Class

    Defines the interface a cache backend must implement in order to store
    values keyed by the identifying parameters of a derived resource.

    NOTE(review): ``__metaclass__ = ABCMeta`` is the Python 2 spelling and is
    inert on Python 3, so instantiation is not blocked there; the abstract
    methods still raise ``NotImplementedError`` when invoked.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def get(self, public_id, type, resource_type, transformation, format):
        """
        Gets value specified by parameters

        :param public_id: The public ID of the resource
        :param type: The storage type
        :param resource_type: The type of the resource
        :param transformation: The transformation string
        :param format: The format of the resource

        :return: None|mixed value, None if not found
        """
        raise NotImplementedError

    @abstractmethod
    def set(self, public_id, type, resource_type, transformation, format, value):
        """
        Sets value specified by parameters

        :param public_id: The public ID of the resource
        :param type: The storage type
        :param resource_type: The type of the resource
        :param transformation: The transformation string
        :param format: The format of the resource
        :param value: The value to set

        :return: bool True on success or False on failure
        """
        raise NotImplementedError

    @abstractmethod
    def delete(self, public_id, type, resource_type, transformation, format):
        """
        Deletes entry specified by parameters

        :param public_id: The public ID of the resource
        :param type: The storage type
        :param resource_type: The type of the resource
        :param transformation: The transformation string
        :param format: The format of the resource

        :return: bool True on success or False on failure
        """
        raise NotImplementedError

    @abstractmethod
    def flush_all(self):
        """
        Flushes all entries from cache

        :return: bool True on success or False on failure
        """
        raise NotImplementedError

View file

@ -0,0 +1,61 @@
import json
from hashlib import sha1
from cloudinary.cache.adapter.cache_adapter import CacheAdapter
from cloudinary.cache.storage.key_value_storage import KeyValueStorage
from cloudinary.utils import check_property_enabled
class KeyValueCacheAdapter(CacheAdapter):
    """
    A cache adapter backed by a simple key-value storage.

    Cache keys are derived deterministically from the identifying parameters
    of a resource; values are serialized to and from JSON strings.
    """

    def __init__(self, storage):
        """
        Create a new adapter for the provided storage interface.

        :param storage: A KeyValueStorage instance used as the backing store
        :raises ValueError: If storage is not a KeyValueStorage
        """
        if not isinstance(storage, KeyValueStorage):
            raise ValueError("An instance of valid KeyValueStorage must be provided")
        self._key_value_storage = storage

    @property
    def enabled(self):
        """Whether the adapter has a usable backing store."""
        return self._key_value_storage is not None

    @check_property_enabled
    def get(self, public_id, type, resource_type, transformation, format):
        """Retrieves and JSON-decodes the cached value, or None if absent."""
        cache_key = self.generate_cache_key(public_id, type, resource_type, transformation, format)
        raw = self._key_value_storage.get(cache_key)
        if not raw:
            return raw
        return json.loads(raw)

    @check_property_enabled
    def set(self, public_id, type, resource_type, transformation, format, value):
        """JSON-encodes value and stores it under the derived key."""
        cache_key = self.generate_cache_key(public_id, type, resource_type, transformation, format)
        return self._key_value_storage.set(cache_key, json.dumps(value))

    @check_property_enabled
    def delete(self, public_id, type, resource_type, transformation, format):
        """Removes the entry for the derived key from the backing store."""
        cache_key = self.generate_cache_key(public_id, type, resource_type, transformation, format)
        return self._key_value_storage.delete(cache_key)

    @check_property_enabled
    def flush_all(self):
        """Empties the backing store entirely."""
        return self._key_value_storage.clear()

    @staticmethod
    def generate_cache_key(public_id, type, resource_type, transformation, format):
        """
        Generates a key-value storage key from the given parameters.

        Empty/None parameters are skipped; the rest are joined with "/" and
        hashed with SHA-1.

        :param public_id: The public ID of the resource
        :param type: The storage type
        :param resource_type: The type of the resource
        :param transformation: The transformation string
        :param format: The format of the resource

        :return: Resulting cache key (hex digest string)
        """
        components = (public_id, type, resource_type, transformation, format)
        joined = "/".join(c for c in components if c)
        return sha1(joined.encode("utf-8")).hexdigest()

View file

@ -0,0 +1,124 @@
import copy
import collections
import cloudinary
from cloudinary.cache.adapter.cache_adapter import CacheAdapter
from cloudinary.utils import check_property_enabled
class ResponsiveBreakpointsCache:
    """
    Caches breakpoint values for image resources
    """
    def __init__(self, **cache_options):
        """
        Initialize the cache

        :param cache_options: Cache configuration options; "cache_adapter"
            (a CacheAdapter instance) is read from it
        """
        self._cache_adapter = None

        cache_adapter = cache_options.get("cache_adapter")
        self.set_cache_adapter(cache_adapter)

    def set_cache_adapter(self, cache_adapter):
        """
        Assigns cache adapter

        :param cache_adapter: The cache adapter used to store and retrieve values

        :return: True if the cache_adapter is valid, False otherwise
        """
        if cache_adapter is None or not isinstance(cache_adapter, CacheAdapter):
            return False

        self._cache_adapter = cache_adapter

        return True

    @property
    def enabled(self):
        """
        Indicates whether cache is enabled or not

        :return: True if a _cache_adapter has been set
        """
        return self._cache_adapter is not None

    @staticmethod
    def _options_to_parameters(**options):
        """
        Extract the parameters required in order to calculate the key of the cache.

        :param options: Input options

        :return: A tuple (storage_type, resource_type, transformation, file_format)
            used to calculate the cache key
        """
        # deepcopy first: presumably generate_transformation_string mutates
        # the options it receives — TODO confirm
        options_copy = copy.deepcopy(options)
        transformation, _ = cloudinary.utils.generate_transformation_string(**options_copy)
        file_format = options.get("format", "")
        storage_type = options.get("type", "upload")
        resource_type = options.get("resource_type", "image")

        return storage_type, resource_type, transformation, file_format

    @check_property_enabled
    def get(self, public_id, **options):
        """
        Retrieve the breakpoints of a particular derived resource identified by the public_id and options

        :param public_id: The public ID of the resource
        :param options: Additional options used to derive the cache key

        :return: Array of responsive breakpoints, None if not found
        """
        params = self._options_to_parameters(**options)

        return self._cache_adapter.get(public_id, *params)

    @check_property_enabled
    def set(self, public_id, value, **options):
        """
        Set responsive breakpoints identified by public ID and options

        :param public_id: The public ID of the resource
        :param value: Array of responsive breakpoints to set
        :param options: Additional options

        :return: True on success or False on failure
        :raises ValueError: If value is not a list or tuple
        """
        if not (isinstance(value, (list, tuple))):
            raise ValueError("A list of breakpoints is expected")

        storage_type, resource_type, transformation, file_format = self._options_to_parameters(**options)

        return self._cache_adapter.set(public_id, storage_type, resource_type, transformation, file_format, value)

    @check_property_enabled
    def delete(self, public_id, **options):
        """
        Delete responsive breakpoints identified by public ID and options

        :param public_id: The public ID of the resource
        :param options: Additional options

        :return: True on success or False on failure
        """
        params = self._options_to_parameters(**options)

        return self._cache_adapter.delete(public_id, *params)

    @check_property_enabled
    def flush_all(self):
        """
        Flush all entries from cache

        :return: True on success or False on failure
        """
        return self._cache_adapter.flush_all()


# Module-level singleton; disabled until a cache adapter is assigned.
instance = ResponsiveBreakpointsCache()

View file

View file

@ -0,0 +1,79 @@
import glob
from tempfile import gettempdir
import os
import errno
from cloudinary.cache.storage.key_value_storage import KeyValueStorage
class FileSystemKeyValueStorage(KeyValueStorage):
    """
    File-based key-value storage.

    Each key is stored as a separate file named ``<key><_item_ext>`` under the
    configured root path; the file contents are the value.
    """
    # Extension used for every cache item file (also used to find them in clear())
    _item_ext = ".cldci"

    def __init__(self, root_path):
        """
        Create a new Storage object.

        All files will be stored under the root_path location

        :param root_path: The base folder for all storage files.
            When None, the system temporary directory is used.
        """
        if root_path is None:
            root_path = gettempdir()

        # Create the directory if missing. EAFP instead of an isdir()
        # pre-check avoids a race when two processes initialize concurrently.
        try:
            os.makedirs(root_path)
        except OSError as e:
            if e.errno != errno.EEXIST or not os.path.isdir(root_path):
                raise

        self._root_path = root_path

    def get(self, key):
        """
        Get a value identified by the given key

        :param key: The unique identifier

        :return: The value identified by key or None if no value was found
        """
        if not self._exists(key):
            return None

        with open(self._get_key_full_path(key), 'r') as f:
            value = f.read()

        return value

    def set(self, key, value):
        """
        Store the value identified by the key

        :param key: The unique identifier
        :param value: Value to store

        :return: bool True on success
        """
        with open(self._get_key_full_path(key), 'w') as f:
            f.write(value)

        return True

    def delete(self, key):
        """
        Delete the item by key; a missing item is not an error.

        :param key: The unique identifier

        :return: bool True on success
        """
        try:
            os.remove(self._get_key_full_path(key))
        except OSError as e:
            if e.errno != errno.ENOENT:  # errno.ENOENT - no such file or directory
                raise  # re-raise exception if a different error occurred

        return True

    def clear(self):
        """
        Remove every stored item (all files matching the storage extension).

        :return: bool True on success
        """
        for cache_item_path in glob.iglob(os.path.join(self._root_path, '*' + self._item_ext)):
            os.remove(cache_item_path)

        return True

    def _get_key_full_path(self, key):
        """
        Generate the file path for the key

        :param key: The key

        :return: The path of the value file associated with the key
        """
        return os.path.join(self._root_path, key + self._item_ext)

    def _exists(self, key):
        """
        Indicate whether key exists

        :param key: The key

        :return: bool True if the file for the given key exists
        """
        return os.path.isfile(self._get_key_full_path(key))

View file

@ -0,0 +1,51 @@
from abc import ABCMeta, abstractmethod
class KeyValueStorage:
    """
    A simple key-value storage abstract base class

    NOTE(review): ``__metaclass__ = ABCMeta`` is the Python 2 spelling and is
    inert on Python 3; the abstract methods still raise
    ``NotImplementedError`` when invoked.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def get(self, key):
        """
        Get a value identified by the given key

        :param key: The unique identifier

        :return: The value identified by key or None if no value was found
        """
        raise NotImplementedError

    @abstractmethod
    def set(self, key, value):
        """
        Store the value identified by the key

        :param key: The unique identifier
        :param value: Value to store

        :return: bool True on success or False on failure
        """
        raise NotImplementedError

    @abstractmethod
    def delete(self, key):
        """
        Deletes item by key

        :param key: The unique identifier

        :return: bool True on success or False on failure
        """
        raise NotImplementedError

    @abstractmethod
    def clear(self):
        """
        Clears all entries

        :return: bool True on success or False on failure
        """
        raise NotImplementedError

View file

@ -1,5 +1,7 @@
# Copyright Cloudinary
import six.moves.urllib.parse
from six import PY3, string_types, StringIO, BytesIO
urlencode = six.moves.urllib.parse.urlencode
unquote = six.moves.urllib.parse.unquote
urlparse = six.moves.urllib.parse.urlparse
@ -7,7 +9,6 @@ parse_qs = six.moves.urllib.parse.parse_qs
parse_qsl = six.moves.urllib.parse.parse_qsl
quote_plus = six.moves.urllib.parse.quote_plus
httplib = six.moves.http_client
from six import PY3, string_types, StringIO, BytesIO
urllib2 = six.moves.urllib.request
NotConnected = six.moves.http_client.NotConnected

View file

@ -0,0 +1,33 @@
# Copyright Cloudinary
class Error(Exception):
    """Base class for all Cloudinary API errors."""
    pass


class NotFound(Error):
    """The requested entity was not found."""
    pass


class NotAllowed(Error):
    """The requested operation is not allowed."""
    pass


class AlreadyExists(Error):
    """An entity with the same identifier already exists."""
    pass


class RateLimited(Error):
    """The account's API request rate limit has been exceeded."""
    pass


class BadRequest(Error):
    """The request was malformed or contained invalid parameters."""
    pass


class GeneralError(Error):
    """Generic failure, e.g. transport or response-parsing errors."""
    pass


class AuthorizationRequired(Error):
    """Authorization is required to perform the requested operation."""
    pass

View file

@ -1,9 +1,10 @@
from django import forms
from cloudinary import CloudinaryResource
import json
import re
import cloudinary.uploader
import cloudinary.utils
import re
import json
from cloudinary import CloudinaryResource
from django import forms
from django.utils.translation import ugettext_lazy as _
@ -16,8 +17,8 @@ def cl_init_js_callbacks(form, request):
class CloudinaryInput(forms.TextInput):
input_type = 'file'
def render(self, name, value, attrs=None):
attrs = self.build_attrs(attrs)
def render(self, name, value, attrs=None, renderer=None):
attrs = dict(self.attrs, **attrs)
options = attrs.get('options', {})
attrs["options"] = ''
@ -27,14 +28,16 @@ class CloudinaryInput(forms.TextInput):
else:
params = cloudinary.utils.sign_request(params, options)
if 'resource_type' not in options: options['resource_type'] = 'auto'
if 'resource_type' not in options:
options['resource_type'] = 'auto'
cloudinary_upload_url = cloudinary.utils.cloudinary_api_url("upload", **options)
attrs["data-url"] = cloudinary_upload_url
attrs["data-form-data"] = json.dumps(params)
attrs["data-cloudinary-field"] = name
chunk_size = options.get("chunk_size", None)
if chunk_size: attrs["data-max-chunk-size"] = chunk_size
if chunk_size:
attrs["data-max-chunk-size"] = chunk_size
attrs["class"] = " ".join(["cloudinary-fileupload", attrs.get("class", "")])
widget = super(CloudinaryInput, self).render("file", None, attrs=attrs)
@ -53,8 +56,10 @@ class CloudinaryJsFileField(forms.Field):
}
def __init__(self, attrs=None, options=None, autosave=True, *args, **kwargs):
if attrs is None: attrs = {}
if options is None: options = {}
if attrs is None:
attrs = {}
if options is None:
options = {}
self.autosave = autosave
attrs = attrs.copy()
attrs["options"] = options.copy()
@ -70,7 +75,8 @@ class CloudinaryJsFileField(forms.Field):
def to_python(self, value):
"""Convert to CloudinaryResource"""
if not value: return None
if not value:
return None
m = re.search(r'^([^/]+)/([^/]+)/v(\d+)/([^#]+)#([^/]+)$', value)
if not m:
raise forms.ValidationError("Invalid format")
@ -95,7 +101,8 @@ class CloudinaryJsFileField(forms.Field):
"""Validate the signature"""
# Use the parent's handling of required fields, etc.
super(CloudinaryJsFileField, self).validate(value)
if not value: return
if not value:
return
if not value.validate():
raise forms.ValidationError("Signature mismatch")
@ -108,7 +115,8 @@ class CloudinaryUnsignedJsFileField(CloudinaryJsFileField):
options = {}
options = options.copy()
options.update({"unsigned": True, "upload_preset": upload_preset})
super(CloudinaryUnsignedJsFileField, self).__init__(attrs, options, autosave, *args, **kwargs)
super(CloudinaryUnsignedJsFileField, self).__init__(
attrs, options, autosave, *args, **kwargs)
class CloudinaryFileField(forms.FileField):
@ -117,7 +125,7 @@ class CloudinaryFileField(forms.FileField):
}
default_error_messages = forms.FileField.default_error_messages.copy()
default_error_messages.update(my_default_error_messages)
def __init__(self, options=None, autosave=True, *args, **kwargs):
self.autosave = autosave
self.options = options or {}

View file

@ -0,0 +1,43 @@
import json
import socket
import certifi
from urllib3 import PoolManager
from urllib3.exceptions import HTTPError
from cloudinary.exceptions import GeneralError
class HttpClient:
    """Minimal JSON-over-HTTP helper used internally by the SDK."""

    # Default request timeout, in seconds
    DEFAULT_HTTP_TIMEOUT = 60

    def __init__(self, **options):
        """
        :param options: Supported option: ``timeout`` — request timeout in
            seconds (defaults to DEFAULT_HTTP_TIMEOUT)
        """
        # Lazy initialization of the client, to improve performance when HttpClient is initialized but not used
        self._http_client_instance = None
        self.timeout = options.get("timeout", self.DEFAULT_HTTP_TIMEOUT)

    @property
    def _http_client(self):
        # Pool is created on first use; certificate verification is always on.
        if self._http_client_instance is None:
            self._http_client_instance = PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())

        return self._http_client_instance

    def get_json(self, url):
        """
        Performs an HTTP GET and parses the response body as JSON.

        :param url: The URL to fetch

        :return: The decoded JSON document
        :raises GeneralError: On transport/socket errors, a non-200 status
            code, or a body that is not valid JSON
        """
        try:
            response = self._http_client.request("GET", url, timeout=self.timeout)
            body = response.data
        except HTTPError as e:
            raise GeneralError("Unexpected error %s" % str(e))
        except socket.error as e:
            raise GeneralError("Socket Error: %s" % str(e))

        if response.status != 200:
            raise GeneralError("Server returned unexpected status code - {} - {}".format(response.status,
                                                                                         response.data))
        try:
            result = json.loads(body.decode('utf-8'))
        except Exception as e:
            # Body was not valid JSON (or not UTF-8 decodable)
            raise GeneralError("Error parsing server response (%d) - %s. Got - %s" % (response.status, body, e))

        return result

View file

@ -1,10 +1,10 @@
import re
from cloudinary import CloudinaryResource, forms, uploader
from django.core.files.uploadedfile import UploadedFile
from django.db import models
from cloudinary.uploader import upload_options
from cloudinary.utils import upload_params
# Add introspection rules for South, if it's installed.
try:
@ -13,15 +13,23 @@ try:
except ImportError:
pass
CLOUDINARY_FIELD_DB_RE = r'(?:(?P<resource_type>image|raw|video)/(?P<type>upload|private|authenticated)/)?(?:v(?P<version>\d+)/)?(?P<public_id>.*?)(\.(?P<format>[^.]+))?$'
CLOUDINARY_FIELD_DB_RE = r'(?:(?P<resource_type>image|raw|video)/' \
r'(?P<type>upload|private|authenticated)/)?' \
r'(?:v(?P<version>\d+)/)?' \
r'(?P<public_id>.*?)' \
r'(\.(?P<format>[^.]+))?$'
# Taken from six - https://pythonhosted.org/six/
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
"""
Create a base class with a metaclass.
This requires a bit of explanation: the basic idea is to make a dummy
metaclass for one level of class instantiation that replaces itself with
the actual metaclass.
Taken from six - https://pythonhosted.org/six/
"""
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@ -32,23 +40,32 @@ class CloudinaryField(models.Field):
description = "A resource stored in Cloudinary"
def __init__(self, *args, **kwargs):
options = {'max_length': 255}
self.default_form_class = kwargs.pop("default_form_class", forms.CloudinaryFileField)
options.update(kwargs)
self.type = options.pop("type", "upload")
self.resource_type = options.pop("resource_type", "image")
self.width_field = options.pop("width_field", None)
self.height_field = options.pop("height_field", None)
super(CloudinaryField, self).__init__(*args, **options)
self.type = kwargs.pop("type", "upload")
self.resource_type = kwargs.pop("resource_type", "image")
self.width_field = kwargs.pop("width_field", None)
self.height_field = kwargs.pop("height_field", None)
# Collect all options related to Cloudinary upload
self.options = {key: kwargs.pop(key) for key in set(kwargs.keys()) if key in upload_params + upload_options}
field_options = kwargs
field_options['max_length'] = 255
super(CloudinaryField, self).__init__(*args, **field_options)
def get_internal_type(self):
return 'CharField'
def value_to_string(self, obj):
# We need to support both legacy `_get_val_from_obj` and new `value_from_object` models.Field methods.
# It would be better to wrap it with try -> except AttributeError -> fallback to legacy.
# Unfortunately, we can catch AttributeError exception from `value_from_object` function itself.
# Parsing exception string is an overkill here, that's why we check for attribute existence
"""
We need to support both legacy `_get_val_from_obj` and new `value_from_object` models.Field methods.
It would be better to wrap it with try -> except AttributeError -> fallback to legacy.
Unfortunately, we can catch AttributeError exception from `value_from_object` function itself.
Parsing exception string is an overkill here, that's why we check for attribute existence
:param obj: Value to serialize
:return: Serialized value
"""
if hasattr(self, 'value_from_object'):
value = self.value_from_object(obj)
@ -69,38 +86,33 @@ class CloudinaryField(models.Field):
format=m.group('format')
)
def from_db_value(self, value, expression, connection, context):
if value is None:
return value
return self.parse_cloudinary_resource(value)
def from_db_value(self, value, expression, connection, *args, **kwargs):
# TODO: when dropping support for versions prior to 2.0, you may return
# the signature to from_db_value(value, expression, connection)
if value is not None:
return self.parse_cloudinary_resource(value)
def to_python(self, value):
if isinstance(value, CloudinaryResource):
return value
elif isinstance(value, UploadedFile):
return value
elif value is None:
elif value is None or value is False:
return value
else:
return self.parse_cloudinary_resource(value)
def upload_options_with_filename(self, model_instance, filename):
return self.upload_options(model_instance)
def upload_options(self, model_instance):
return {}
def pre_save(self, model_instance, add):
value = super(CloudinaryField, self).pre_save(model_instance, add)
if isinstance(value, UploadedFile):
options = {"type": self.type, "resource_type": self.resource_type}
options.update(self.upload_options_with_filename(model_instance, value.name))
options.update(self.options)
instance_value = uploader.upload_resource(value, **options)
setattr(model_instance, self.attname, instance_value)
if self.width_field:
setattr(model_instance, self.width_field, instance_value.metadata['width'])
setattr(model_instance, self.width_field, instance_value.metadata.get('width'))
if self.height_field:
setattr(model_instance, self.height_field, instance_value.metadata['height'])
setattr(model_instance, self.height_field, instance_value.metadata.get('height'))
return self.get_prep_value(instance_value)
else:
return value

View file

@ -1,17 +1,17 @@
# MIT licensed code copied from https://bitbucket.org/chrisatlee/poster
#
# Copyright (c) 2011 Chris AtLee
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
@ -28,7 +28,4 @@ New releases of poster will always have a version number that compares greater
than an older version of poster.
New in version 0.6."""
import cloudinary.poster.streaminghttp
import cloudinary.poster.encode
version = (0, 8, 2) # Thanks JP!
version = (0, 8, 2) # Thanks JP!

View file

@ -6,9 +6,17 @@ as multipart/form-data suitable for a HTTP POST or PUT request.
multipart/form-data is the standard way to upload files over HTTP"""
__all__ = ['gen_boundary', 'encode_and_quote', 'MultipartParam',
'encode_string', 'encode_file_header', 'get_body_size', 'get_headers',
'multipart_encode']
import mimetypes
import os
import re
from email.header import Header
from cloudinary.compat import (PY3, advance_iterator, quote_plus, to_bytearray,
to_bytes, to_string)
__all__ = [
'gen_boundary', 'encode_and_quote', 'MultipartParam', 'encode_string',
'encode_file_header', 'get_body_size', 'get_headers', 'multipart_encode']
try:
from io import UnsupportedOperation
@ -17,25 +25,19 @@ except ImportError:
try:
import uuid
def gen_boundary():
"""Returns a random string to use as the boundary for a message"""
return uuid.uuid4().hex
except ImportError:
import random, sha
import random
import sha
def gen_boundary():
"""Returns a random string to use as the boundary for a message"""
bits = random.getrandbits(160)
return sha.new(str(bits)).hexdigest()
import re, os, mimetypes
from cloudinary.compat import (PY3, string_types, to_bytes, to_string,
to_bytearray, quote_plus, advance_iterator)
try:
from email.header import Header
except ImportError:
# Python 2.4
from email.Header import Header
if PY3:
def encode_and_quote(data):
if data is None:
@ -47,7 +49,7 @@ else:
"""If ``data`` is unicode, return quote_plus(data.encode("utf-8")) otherwise return quote_plus(data)"""
if data is None:
return None
if isinstance(data, unicode):
data = data.encode("utf-8")
return quote_plus(data)
@ -65,13 +67,15 @@ if PY3:
return to_bytes(str(s))
else:
def _strify(s):
"""If s is a unicode string, encode it to UTF-8 and return the results, otherwise return str(s), or None if s is None"""
"""If s is a unicode string, encode it to UTF-8 and return the results,
otherwise return str(s), or None if s is None"""
if s is None:
return None
if isinstance(s, unicode):
return s.encode("utf-8")
return str(s)
class MultipartParam(object):
"""Represents a single parameter in a multipart/form-data request
@ -105,7 +109,7 @@ class MultipartParam(object):
transferred, and the total size.
"""
def __init__(self, name, value=None, filename=None, filetype=None,
filesize=None, fileobj=None, cb=None):
filesize=None, fileobj=None, cb=None):
self.name = Header(name).encode()
self.value = _strify(value)
if filename is None:
@ -141,7 +145,7 @@ class MultipartParam(object):
fileobj.seek(0, 2)
self.filesize = fileobj.tell()
fileobj.seek(0)
except:
except Exception:
raise ValueError("Could not determine filesize")
def __cmp__(self, other):
@ -169,9 +173,9 @@ class MultipartParam(object):
"""
return cls(paramname, filename=os.path.basename(filename),
filetype=mimetypes.guess_type(filename)[0],
filesize=os.path.getsize(filename),
fileobj=open(filename, "rb"))
filetype=mimetypes.guess_type(filename)[0],
filesize=os.path.getsize(filename),
fileobj=open(filename, "rb"))
@classmethod
def from_params(cls, params):
@ -204,7 +208,7 @@ class MultipartParam(object):
filetype = None
retval.append(cls(name=name, filename=filename,
filetype=filetype, fileobj=value))
filetype=filetype, fileobj=value))
else:
retval.append(cls(name, value))
return retval
@ -216,8 +220,8 @@ class MultipartParam(object):
headers = ["--%s" % boundary]
if self.filename:
disposition = 'form-data; name="%s"; filename="%s"' % (self.name,
to_string(self.filename))
disposition = 'form-data; name="%s"; filename="%s"' % (
self.name, to_string(self.filename))
else:
disposition = 'form-data; name="%s"' % self.name
@ -267,8 +271,8 @@ class MultipartParam(object):
self.cb(self, current, total)
last_block = to_bytearray("")
encoded_boundary = "--%s" % encode_and_quote(boundary)
boundary_exp = re.compile(to_bytes("^%s$" % re.escape(encoded_boundary)),
re.M)
boundary_exp = re.compile(
to_bytes("^%s$" % re.escape(encoded_boundary)), re.M)
while True:
block = self.fileobj.read(blocksize)
if not block:
@ -296,6 +300,7 @@ class MultipartParam(object):
return len(self.encode_hdr(boundary)) + 2 + valuesize
def encode_string(boundary, name, value):
"""Returns ``name`` and ``value`` encoded as a multipart/form-data
variable. ``boundary`` is the boundary string used throughout
@ -303,8 +308,8 @@ def encode_string(boundary, name, value):
return MultipartParam(name, value).encode(boundary)
def encode_file_header(boundary, paramname, filesize, filename=None,
filetype=None):
def encode_file_header(boundary, paramname, filesize, filename=None, filetype=None):
"""Returns the leading data for a multipart/form-data field that contains
file data.
@ -324,7 +329,8 @@ def encode_file_header(boundary, paramname, filesize, filename=None,
"""
return MultipartParam(paramname, filesize=filesize, filename=filename,
filetype=filetype).encode_hdr(boundary)
filetype=filetype).encode_hdr(boundary)
def get_body_size(params, boundary):
"""Returns the number of bytes that the multipart/form-data encoding
@ -332,6 +338,7 @@ def get_body_size(params, boundary):
size = sum(p.get_size(boundary) for p in MultipartParam.from_params(params))
return size + len(boundary) + 6
def get_headers(params, boundary):
"""Returns a dictionary with Content-Type and Content-Length headers
for the multipart/form-data encoding of ``params``."""
@ -341,6 +348,7 @@ def get_headers(params, boundary):
headers['Content-Length'] = str(get_body_size(params, boundary))
return headers
class multipart_yielder:
def __init__(self, params, boundary, cb):
self.params = params
@ -396,6 +404,7 @@ class multipart_yielder:
for param in self.params:
param.reset()
def multipart_encode(params, boundary=None, cb=None):
"""Encode ``params`` as multipart/form-data.

View file

@ -27,15 +27,18 @@ Example usage:
... {'Content-Length': str(len(s))})
"""
import sys, socket
from cloudinary.compat import httplib, urllib2, NotConnected
import socket
import sys
from cloudinary.compat import NotConnected, httplib, urllib2
__all__ = ['StreamingHTTPConnection', 'StreamingHTTPRedirectHandler',
'StreamingHTTPHandler', 'register_openers']
'StreamingHTTPHandler', 'register_openers']
if hasattr(httplib, 'HTTPS'):
__all__.extend(['StreamingHTTPSHandler', 'StreamingHTTPSConnection'])
class _StreamingHTTPMixin:
"""Mixin class for HTTP and HTTPS connections that implements a streaming
send method."""
@ -62,7 +65,7 @@ class _StreamingHTTPMixin:
print("send:", repr(value))
try:
blocksize = 8192
if hasattr(value, 'read') :
if hasattr(value, 'read'):
if hasattr(value, 'seek'):
value.seek(0)
if self.debuglevel > 0:
@ -86,10 +89,12 @@ class _StreamingHTTPMixin:
self.close()
raise
class StreamingHTTPConnection(_StreamingHTTPMixin, httplib.HTTPConnection):
"""Subclass of `httplib.HTTPConnection` that overrides the `send()` method
to support iterable body objects"""
class StreamingHTTPRedirectHandler(urllib2.HTTPRedirectHandler):
"""Subclass of `urllib2.HTTPRedirectHandler` that overrides the
`redirect_request` method to properly handle redirected POST requests
@ -114,7 +119,7 @@ class StreamingHTTPRedirectHandler(urllib2.HTTPRedirectHandler):
"""
m = req.get_method()
if (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
or code in (301, 302, 303) and m == "POST"):
or code in (301, 302, 303) and m == "POST"):
# Strictly (according to RFC 2616), 301 or 302 in response
# to a POST MUST NOT cause a redirection without confirmation
# from the user (of urllib2, in this case). In practice,
@ -125,14 +130,16 @@ class StreamingHTTPRedirectHandler(urllib2.HTTPRedirectHandler):
newheaders = dict((k, v) for k, v in req.headers.items()
if k.lower() not in (
"content-length", "content-type")
)
return urllib2.Request(newurl,
headers=newheaders,
origin_req_host=req.get_origin_req_host(),
unverifiable=True)
)
return urllib2.Request(
newurl,
headers=newheaders,
origin_req_host=req.get_origin_req_host(),
unverifiable=True)
else:
raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
class StreamingHTTPHandler(urllib2.HTTPHandler):
"""Subclass of `urllib2.HTTPHandler` that uses
StreamingHTTPConnection as its http connection class."""
@ -156,9 +163,9 @@ class StreamingHTTPHandler(urllib2.HTTPHandler):
"No Content-Length specified for iterable body")
return urllib2.HTTPHandler.do_request_(self, req)
if hasattr(httplib, 'HTTPS'):
class StreamingHTTPSConnection(_StreamingHTTPMixin,
httplib.HTTPSConnection):
class StreamingHTTPSConnection(_StreamingHTTPMixin, httplib.HTTPSConnection):
"""Subclass of `httplib.HTTSConnection` that overrides the `send()`
method to support iterable body objects"""
@ -179,7 +186,7 @@ if hasattr(httplib, 'HTTPS'):
if hasattr(data, 'read') or hasattr(data, 'next'):
if not req.has_header('Content-length'):
raise ValueError(
"No Content-Length specified for iterable body")
"No Content-Length specified for iterable body")
return urllib2.HTTPSHandler.do_request_(self, req)
@ -188,7 +195,8 @@ def get_handlers():
if hasattr(httplib, "HTTPS"):
handlers.append(StreamingHTTPSHandler)
return handlers
def register_openers():
"""Register the streaming http handlers in the global urllib2 default
opener object.

View file

@ -1,6 +1,7 @@
import json
from copy import deepcopy
from . import api
from cloudinary.api import call_json_api
class Search:
@ -46,8 +47,8 @@ class Search:
def execute(self, **options):
"""Execute the search and return results."""
options["content_type"] = 'application/json'
uri = ['resources','search']
return api.call_json_api('post', uri, self.as_dict(), **options)
uri = ['resources', 'search']
return call_json_api('post', uri, self.as_dict(), **options)
def _add(self, name, value):
if name not in self.query:
@ -56,4 +57,4 @@ class Search:
return self
def as_dict(self):
return deepcopy(self.query)
return deepcopy(self.query)

View file

@ -802,7 +802,7 @@ var slice = [].slice,
function TextLayer(options) {
var keys;
TextLayer.__super__.constructor.call(this, options);
keys = ["resourceType", "resourceType", "fontFamily", "fontSize", "fontWeight", "fontStyle", "textDecoration", "textAlign", "stroke", "letterSpacing", "lineSpacing", "text"];
keys = ["resourceType", "resourceType", "fontFamily", "fontSize", "fontWeight", "fontStyle", "textDecoration", "textAlign", "stroke", "letterSpacing", "lineSpacing", "fontHinting", "fontAntialiasing", "text"];
if (options != null) {
keys.forEach((function(_this) {
return function(key) {
@ -871,6 +871,16 @@ var slice = [].slice,
return this;
};
TextLayer.prototype.fontAntialiasing = function(fontAntialiasing){
this.options.fontAntialiasing = fontAntialiasing;
return this;
};
TextLayer.prototype.fontHinting = function(fontHinting ){
this.options.fontHinting = fontHinting ;
return this;
};
TextLayer.prototype.text = function(text) {
this.options.text = text;
return this;
@ -932,6 +942,12 @@ var slice = [].slice,
if (!(Util.isEmpty(this.options.lineSpacing) && !Util.isNumberLike(this.options.lineSpacing))) {
components.push("line_spacing_" + this.options.lineSpacing);
}
if (this.options.fontAntialiasing !== "none") {
components.push("antialias_"+this.options.fontAntialiasing);
}
if (this.options.fontHinting !== "none") {
components.push("hinting_"+this.options.fontHinting);
}
if (!Util.isEmpty(Util.compact(components))) {
if (Util.isEmpty(this.options.fontFamily)) {
throw "Must supply fontFamily. " + components;
@ -2780,6 +2796,20 @@ var slice = [].slice,
return this.param(value, "gravity", "g");
};
Transformation.prototype.fps = function(value) {
return this.param(value, "fps", "fps", (function(_this) {
return function(fps) {
if (Util.isString(fps)) {
return fps;
} else if (Util.isArray(fps)) {
return fps.join("-");
} else {
return fps;
}
};
})(this));
};
Transformation.prototype.height = function(value) {
return this.param(value, "height", "h", (function(_this) {
return function() {

View file

@ -43,7 +43,7 @@
'|(Kindle/(1\\.0|2\\.[05]|3\\.0))'
).test(window.navigator.userAgent) ||
// Feature detection for all other devices:
$('<input type="file">').prop('disabled'));
$('<input type="file"/>').prop('disabled'));
// The FileReader API is not actually used, but works as feature detection,
// as some Safari versions (5?) support XHR file uploads via the FormData API,
@ -261,6 +261,9 @@
// Callback for dragover events of the dropZone(s):
// dragover: function (e) {}, // .bind('fileuploaddragover', func);
// Callback before the start of each chunk upload request (before form data initialization):
// chunkbeforesend: function (e, data) {}, // .bind('fileuploadchunkbeforesend', func);
// Callback for the start of each chunk upload request:
// chunksend: function (e, data) {}, // .bind('fileuploadchunksend', func);
@ -434,6 +437,13 @@
}
},
_deinitProgressListener: function (options) {
var xhr = options.xhr ? options.xhr() : $.ajaxSettings.xhr();
if (xhr.upload) {
$(xhr.upload).unbind('progress');
}
},
_isInstanceOf: function (type, obj) {
// Cross-frame instanceof check
return Object.prototype.toString.call(obj) === '[object ' + type + ']';
@ -453,7 +463,7 @@
}
if (!multipart || options.blob || !this._isInstanceOf('File', file)) {
options.headers['Content-Disposition'] = 'attachment; filename="' +
encodeURI(file.name) + '"';
encodeURI(file.uploadName || file.name) + '"';
}
if (!multipart) {
options.contentType = file.type || 'application/octet-stream';
@ -489,7 +499,11 @@
});
}
if (options.blob) {
formData.append(paramName, options.blob, file.name);
formData.append(
paramName,
options.blob,
file.uploadName || file.name
);
} else {
$.each(options.files, function (index, file) {
// This check allows the tests to run with
@ -762,6 +776,8 @@
// Expose the chunk bytes position range:
o.contentRange = 'bytes ' + ub + '-' +
(ub + o.chunkSize - 1) + '/' + fs;
// Trigger chunkbeforesend to allow form data to be updated for this chunk
that._trigger('chunkbeforesend', null, o);
// Process the upload data (the blob and potential form data):
that._initXHRData(o);
// Add progress listeners for this chunk upload:
@ -808,6 +824,9 @@
o.context,
[jqXHR, textStatus, errorThrown]
);
})
.always(function () {
that._deinitProgressListener(o);
});
};
this._enhancePromise(promise);
@ -909,6 +928,7 @@
}).fail(function (jqXHR, textStatus, errorThrown) {
that._onFail(jqXHR, textStatus, errorThrown, options);
}).always(function (jqXHRorResult, textStatus, jqXHRorError) {
that._deinitProgressListener(options);
that._onAlways(
jqXHRorResult,
textStatus,
@ -1126,7 +1146,7 @@
dirReader = entry.createReader();
readEntries();
} else {
// Return an empy list for file system items
// Return an empty list for file system items
// other than files or directories:
dfd.resolve([]);
}

File diff suppressed because it is too large Load diff

File diff suppressed because one or more lines are too long

View file

@ -2,15 +2,14 @@ from __future__ import absolute_import
import json
import cloudinary
from cloudinary import CloudinaryResource, utils
from cloudinary.compat import PY3
from cloudinary.forms import CloudinaryJsFileField, cl_init_js_callbacks
from django import template
from django.forms import Form
from django.utils.safestring import mark_safe
import cloudinary
from cloudinary import CloudinaryResource, utils, uploader
from cloudinary.forms import CloudinaryJsFileField, cl_init_js_callbacks
from cloudinary.compat import PY3
register = template.Library()
@ -57,9 +56,9 @@ def cloudinary_direct_upload_field(field_name="image", request=None):
return value
"""Deprecated - please use cloudinary_direct_upload_field, or a proper form"""
@register.inclusion_tag('cloudinary_direct_upload.html')
def cloudinary_direct_upload(callback_url, **options):
"""Deprecated - please use cloudinary_direct_upload_field, or a proper form"""
params = utils.build_upload_params(callback=callback_url, **options)
params = utils.sign_request(params, options)
@ -75,6 +74,8 @@ def cloudinary_includes(processing=False):
CLOUDINARY_JS_CONFIG_PARAMS = ("api_key", "cloud_name", "private_cdn", "secure_distribution", "cdn_subdomain")
@register.inclusion_tag('cloudinary_js_config.html')
def cloudinary_js_config():
config = cloudinary.config()

View file

@ -1,17 +1,17 @@
# Copyright Cloudinary
import json
import re
import os
import socket
from os.path import getsize
import certifi
from six import string_types
from urllib3 import PoolManager, ProxyManager
from urllib3.exceptions import HTTPError
import cloudinary
import urllib3
import certifi
from cloudinary import utils
from cloudinary.api import Error
from cloudinary.compat import string_types
from urllib3.exceptions import HTTPError
from urllib3 import PoolManager
from cloudinary.exceptions import Error
from cloudinary.cache.responsive_breakpoints_cache import instance as responsive_breakpoints_cache_instance
try:
from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
@ -29,15 +29,21 @@ if is_appengine_sandbox():
_http = AppEngineManager()
else:
# PoolManager uses a socket-level API behind the scenes
_http = PoolManager(
cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where()
)
_http = utils.get_http_connector(cloudinary.config(), cloudinary.CERT_KWARGS)
upload_options = [
"filename",
"timeout",
"chunk_size",
"use_cache"
]
UPLOAD_LARGE_CHUNK_SIZE = 20000000
def upload(file, **options):
params = utils.build_upload_params(**options)
return call_api("upload", params, file=file, **options)
return call_cacheable_api("upload", params, file=file, **options)
def unsigned_upload(file, upload_preset, **options):
@ -55,35 +61,56 @@ def upload_resource(file, **options):
result = upload(file, **options)
return cloudinary.CloudinaryResource(
result["public_id"], version=str(result["version"]),
format=result.get("format"), type=result["type"], resource_type=result["resource_type"], metadata=result)
format=result.get("format"), type=result["type"],
resource_type=result["resource_type"], metadata=result)
def upload_large(file, **options):
""" Upload large files. """
upload_id = utils.random_public_id()
with open(file, 'rb') as file_io:
results = None
current_loc = 0
chunk_size = options.get("chunk_size", 20000000)
file_size = getsize(file)
chunk = file_io.read(chunk_size)
while chunk:
range = "bytes {0}-{1}/{2}".format(current_loc, current_loc + len(chunk) - 1, file_size)
current_loc += len(chunk)
if utils.is_remote_url(file):
return upload(file, **options)
if hasattr(file, 'read') and callable(file.read):
file_io = file
else:
file_io = open(file, 'rb')
upload_result = None
with file_io:
upload_id = utils.random_public_id()
current_loc = 0
chunk_size = options.get("chunk_size", UPLOAD_LARGE_CHUNK_SIZE)
file_size = utils.file_io_size(file_io)
file_name = options.get(
"filename",
file_io.name if hasattr(file_io, 'name') and isinstance(file_io.name, str) else "stream")
chunk = file_io.read(chunk_size)
while chunk:
content_range = "bytes {0}-{1}/{2}".format(current_loc, current_loc + len(chunk) - 1, file_size)
current_loc += len(chunk)
http_headers = {"Content-Range": content_range, "X-Unique-Upload-Id": upload_id}
upload_result = upload_large_part((file_name, chunk), http_headers=http_headers, **options)
options["public_id"] = upload_result.get("public_id")
results = upload_large_part((file, chunk),
http_headers={"Content-Range": range, "X-Unique-Upload-Id": upload_id},
**options)
options["public_id"] = results.get("public_id")
chunk = file_io.read(chunk_size)
return results
return upload_result
def upload_large_part(file, **options):
""" Upload large files. """
params = utils.build_upload_params(**options)
if 'resource_type' not in options: options['resource_type'] = "raw"
return call_api("upload", params, file=file, **options)
if 'resource_type' not in options:
options['resource_type'] = "raw"
return call_cacheable_api("upload", params, file=file, **options)
def destroy(public_id, **options):
@ -91,7 +118,7 @@ def destroy(public_id, **options):
"timestamp": utils.now(),
"type": options.get("type"),
"invalidate": options.get("invalidate"),
"public_id": public_id
"public_id": public_id
}
return call_api("destroy", params, **options)
@ -103,15 +130,43 @@ def rename(from_public_id, to_public_id, **options):
"overwrite": options.get("overwrite"),
"invalidate": options.get("invalidate"),
"from_public_id": from_public_id,
"to_public_id": to_public_id
"to_public_id": to_public_id,
"to_type": options.get("to_type")
}
return call_api("rename", params, **options)
def update_metadata(metadata, public_ids, **options):
"""
Populates metadata fields with the given values. Existing values will be overwritten.
Any metadata-value pairs given are merged with any existing metadata-value pairs
(an empty value for an existing metadata field clears the value)
:param metadata: A list of custom metadata fields (by external_id) and the values to assign to each
of them.
:param public_ids: An array of Public IDs of assets uploaded to Cloudinary.
:param options: Options such as
*resource_type* (the type of file. Default: image. Valid values: image, raw, or video) and
*type* (The storage type. Default: upload. Valid values: upload, private, or authenticated.)
:return: A list of public IDs that were updated
:rtype: mixed
"""
params = {
"timestamp": utils.now(),
"metadata": utils.encode_context(metadata),
"public_ids": utils.build_array(public_ids),
"type": options.get("type")
}
return call_api("metadata", params, **options)
def explicit(public_id, **options):
params = utils.build_upload_params(**options)
params["public_id"] = public_id
return call_api("explicit", params, **options)
return call_cacheable_api("explicit", params, **options)
def create_archive(**options):
@ -131,7 +186,8 @@ def generate_sprite(tag, **options):
"tag": tag,
"async": options.get("async"),
"notification_url": options.get("notification_url"),
"transformation": utils.generate_transformation_string(fetch_format=options.get("format"), **options)[0]
"transformation": utils.generate_transformation_string(
fetch_format=options.get("format"), **options)[0]
}
return call_api("sprite", params, **options)
@ -177,8 +233,10 @@ def replace_tag(tag, public_ids=None, **options):
def remove_all_tags(public_ids, **options):
"""
Remove all tags from the specified public IDs.
:param public_ids: the public IDs of the resources to update
:param options: additional options passed to the request
:return: dictionary with a list of public IDs that were updated
"""
return call_tags_api(None, "remove_all", public_ids, **options)
@ -187,9 +245,11 @@ def remove_all_tags(public_ids, **options):
def add_context(context, public_ids, **options):
"""
Add a context keys and values. If a particular key already exists, the value associated with the key is updated.
:param context: dictionary of context
:param public_ids: the public IDs of the resources to update
:param options: additional options passed to the request
:return: dictionary with a list of public IDs that were updated
"""
return call_context_api(context, "add", public_ids, **options)
@ -198,8 +258,10 @@ def add_context(context, public_ids, **options):
def remove_all_context(public_ids, **options):
"""
Remove all custom context from the specified public IDs.
:param public_ids: the public IDs of the resources to update
:param options: additional options passed to the request
:return: dictionary with a list of public IDs that were updated
"""
return call_context_api(None, "remove_all", public_ids, **options)
@ -227,17 +289,18 @@ def call_context_api(context, command, public_ids=None, **options):
return call_api("context", params, **options)
TEXT_PARAMS = ["public_id",
"font_family",
"font_size",
"font_color",
"text_align",
"font_weight",
"font_style",
"background",
"opacity",
"text_decoration"
]
TEXT_PARAMS = [
"public_id",
"font_family",
"font_size",
"font_color",
"text_align",
"font_weight",
"font_style",
"background",
"opacity",
"text_decoration"
]
def text(text, **options):
@ -247,6 +310,42 @@ def text(text, **options):
return call_api("text", params, **options)
def _save_responsive_breakpoints_to_cache(result):
"""
Saves responsive breakpoints parsed from upload result to cache
:param result: Upload result
"""
if "responsive_breakpoints" not in result:
return
if "public_id" not in result:
# We have some faulty result, nothing to cache
return
options = dict((k, result[k]) for k in ["type", "resource_type"] if k in result)
for transformation in result.get("responsive_breakpoints", []):
options["raw_transformation"] = transformation.get("transformation", "")
options["format"] = os.path.splitext(transformation["breakpoints"][0]["url"])[1][1:]
breakpoints = [bp["width"] for bp in transformation["breakpoints"]]
responsive_breakpoints_cache_instance.set(result["public_id"], breakpoints, **options)
def call_cacheable_api(action, params, http_headers=None, return_error=False, unsigned=False, file=None, timeout=None,
**options):
"""
Calls Upload API and saves results to cache (if enabled)
"""
result = call_api(action, params, http_headers, return_error, unsigned, file, timeout, **options)
if "use_cache" in options or cloudinary.config().use_cache:
_save_responsive_breakpoints_to_cache(result)
return result
def call_api(action, params, http_headers=None, return_error=False, unsigned=False, file=None, timeout=None, **options):
if http_headers is None:
http_headers = {}
@ -267,26 +366,27 @@ def call_api(action, params, http_headers=None, return_error=False, unsigned=Fal
api_url = utils.cloudinary_api_url(action, **options)
if file:
filename = options.get("filename") # Custom filename provided by user (relevant only for streams and files)
if isinstance(file, string_types):
if re.match(r'ftp:|https?:|s3:|data:[^;]*;base64,([a-zA-Z0-9\/+\n=]+)$', file):
if utils.is_remote_url(file):
# URL
name = None
data = file
else:
# file path
name = file
name = filename or file
with open(file, "rb") as opened:
data = opened.read()
elif hasattr(file, 'read') and callable(file.read):
# stream
data = file.read()
name = file.name if hasattr(file, 'name') and isinstance(file.name, str) else "stream"
name = filename or (file.name if hasattr(file, 'name') and isinstance(file.name, str) else "stream")
elif isinstance(file, tuple):
name = None
data = file
name, data = file
else:
# Not a string, not a stream
name = "file"
name = filename or "file"
data = file
param_list["file"] = (name, data) if name else data
@ -310,16 +410,17 @@ def call_api(action, params, http_headers=None, return_error=False, unsigned=Fal
result = json.loads(response.data.decode('utf-8'))
except Exception as e:
# Error is parsing json
raise Error("Error parsing server response (%d) - %s. Got - %s", response.status, response, e)
raise Error("Error parsing server response (%d) - %s. Got - %s" % (response.status, response.data, e))
if "error" in result:
if response.status not in [200, 400, 401, 403, 404, 500]:
code = response.status
if return_error:
result["error"]["http_code"] = code
result["error"]["http_code"] = code
else:
raise Error(result["error"]["message"])
return result
finally:
if file_io: file_io.close()
if file_io:
file_io.close()

View file

@ -3,15 +3,19 @@ import base64
import copy
import hashlib
import json
import os
import random
import re
import string
import struct
import time
import urllib
import zlib
from collections import OrderedDict
from datetime import datetime, date
from fractions import Fraction
from numbers import Number
from urllib3 import ProxyManager, PoolManager
import six.moves.urllib.parse
from six import iteritems
@ -33,12 +37,101 @@ DEFAULT_RESPONSIVE_WIDTH_TRANSFORMATION = {"width": "auto", "crop": "limit"}
RANGE_VALUE_RE = r'^(?P<value>(\d+\.)?\d+)(?P<modifier>[%pP])?$'
RANGE_RE = r'^(\d+\.)?\d+[%pP]?\.\.(\d+\.)?\d+[%pP]?$'
FLOAT_RE = r'^(\d+)\.(\d+)?$'
REMOTE_URL_RE = r'ftp:|https?:|s3:|gs:|data:([\w-]+\/[\w-]+)?(;[\w-]+=[\w-]+)*;base64,([a-zA-Z0-9\/+\n=]+)$'
__LAYER_KEYWORD_PARAMS = [("font_weight", "normal"),
("font_style", "normal"),
("text_decoration", "none"),
("text_align", None),
("stroke", "none")]
# a list of keys used by the cloudinary_url function
__URL_KEYS = [
'api_secret',
'auth_token',
'cdn_subdomain',
'cloud_name',
'cname',
'format',
'private_cdn',
'resource_type',
'secure',
'secure_cdn_subdomain',
'secure_distribution',
'shorten',
'sign_url',
'ssl_detected',
'type',
'url_suffix',
'use_root_path',
'version'
]
__SIMPLE_UPLOAD_PARAMS = [
"public_id",
"callback",
"format",
"type",
"backup",
"faces",
"image_metadata",
"exif",
"colors",
"use_filename",
"unique_filename",
"discard_original_filename",
"invalidate",
"notification_url",
"eager_notification_url",
"eager_async",
"proxy",
"folder",
"overwrite",
"moderation",
"raw_convert",
"quality_override",
"quality_analysis",
"ocr",
"categorization",
"detection",
"similarity_search",
"background_removal",
"upload_preset",
"phash",
"return_delete_token",
"auto_tagging",
"async",
"cinemagraph_analysis",
]
__SERIALIZED_UPLOAD_PARAMS = [
"timestamp",
"transformation",
"headers",
"eager",
"tags",
"allowed_formats",
"face_coordinates",
"custom_coordinates",
"context",
"auto_tagging",
"responsive_breakpoints",
"access_control",
"metadata",
]
upload_params = __SIMPLE_UPLOAD_PARAMS + __SERIALIZED_UPLOAD_PARAMS
def compute_hex_hash(s):
"""
Compute hash and convert the result to HEX string
:param s: string to process
:return: HEX string
"""
return hashlib.sha1(to_bytes(s)).hexdigest()
def build_array(arg):
if isinstance(arg, list):
@ -133,6 +226,22 @@ def json_encode(value):
return json.dumps(value, default=__json_serializer, separators=(',', ':'))
def patch_fetch_format(options):
"""
When upload type is fetch, remove the format options.
In addition, set the fetch_format options to the format value unless it was already set.
Mutates the options parameter!
:param options: URL and transformation options
"""
if options.get("type", "upload") != "fetch":
return
resource_format = options.pop("format", None)
if "fetch_format" not in options:
options["fetch_format"] = resource_format
def generate_transformation_string(**options):
responsive_width = options.pop("responsive_width", cloudinary.config().responsive_width)
size = options.pop("size", None)
@ -165,6 +274,7 @@ def generate_transformation_string(**options):
return generate_transformation_string(**bs)[0]
else:
return generate_transformation_string(transformation=bs)[0]
base_transformations = list(map(recurse, base_transformations))
named_transformation = None
else:
@ -186,11 +296,11 @@ def generate_transformation_string(**options):
flags = ".".join(build_array(options.pop("flags", None)))
dpr = options.pop("dpr", cloudinary.config().dpr)
duration = norm_range_value(options.pop("duration", None))
start_offset = norm_range_value(options.pop("start_offset", None))
start_offset = norm_auto_range_value(options.pop("start_offset", None))
end_offset = norm_range_value(options.pop("end_offset", None))
offset = split_range(options.pop("offset", None))
if offset:
start_offset = norm_range_value(offset[0])
start_offset = norm_auto_range_value(offset[0])
end_offset = norm_range_value(offset[1])
video_codec = process_video_codec_param(options.pop("video_codec", None))
@ -202,6 +312,9 @@ def generate_transformation_string(**options):
overlay = process_layer(options.pop("overlay", None), "overlay")
underlay = process_layer(options.pop("underlay", None), "underlay")
if_value = process_conditional(options.pop("if", None))
custom_function = process_custom_function(options.pop("custom_function", None))
custom_pre_function = process_custom_pre_function(options.pop("custom_pre_function", None))
fps = process_fps(options.pop("fps", None))
params = {
"a": normalize_expression(angle),
@ -215,19 +328,22 @@ def generate_transformation_string(**options):
"e": normalize_expression(effect),
"eo": normalize_expression(end_offset),
"fl": flags,
"fn": custom_function or custom_pre_function,
"fps": fps,
"h": normalize_expression(height),
"ki": process_ki(options.pop("keyframe_interval", None)),
"l": overlay,
"o": normalize_expression(options.pop('opacity',None)),
"q": normalize_expression(options.pop('quality',None)),
"r": normalize_expression(options.pop('radius',None)),
"o": normalize_expression(options.pop('opacity', None)),
"q": normalize_expression(options.pop('quality', None)),
"r": process_radius(options.pop('radius', None)),
"so": normalize_expression(start_offset),
"t": named_transformation,
"u": underlay,
"w": normalize_expression(width),
"x": normalize_expression(options.pop('x',None)),
"y": normalize_expression(options.pop('y',None)),
"x": normalize_expression(options.pop('x', None)),
"y": normalize_expression(options.pop('y', None)),
"vc": video_codec,
"z": normalize_expression(options.pop('zoom',None))
"z": normalize_expression(options.pop('zoom', None))
}
simple_params = {
"ac": "audio_codec",
@ -239,7 +355,6 @@ def generate_transformation_string(**options):
"dn": "density",
"f": "fetch_format",
"g": "gravity",
"ki": "keyframe_interval",
"p": "prefix",
"pg": "page",
"sp": "streaming_profile",
@ -249,9 +364,9 @@ def generate_transformation_string(**options):
for param, option in simple_params.items():
params[param] = options.pop(option, None)
variables = options.pop('variables',{})
variables = options.pop('variables', {})
var_params = []
for key,value in options.items():
for key, value in options.items():
if re.match(r'^\$', key):
var_params.append(u"{0}_{1}".format(key, normalize_expression(str(value))))
@ -261,7 +376,6 @@ def generate_transformation_string(**options):
for var in variables:
var_params.append(u"{0}_{1}".format(var[0], normalize_expression(str(var[1]))))
variables = ','.join(var_params)
sorted_params = sorted([param + "_" + str(value) for param, value in params.items() if (value or value == 0)])
@ -270,10 +384,14 @@ def generate_transformation_string(**options):
if if_value is not None:
sorted_params.insert(0, "if_" + str(if_value))
if "raw_transformation" in options and (options["raw_transformation"] or options["raw_transformation"] == 0):
sorted_params.append(options.pop("raw_transformation"))
transformation = ",".join(sorted_params)
if "raw_transformation" in options:
transformation = transformation + "," + options.pop("raw_transformation")
transformations = base_transformations + [transformation]
if responsive_width:
responsive_width_transformation = cloudinary.config().responsive_width_transformation \
or DEFAULT_RESPONSIVE_WIDTH_TRANSFORMATION
@ -287,6 +405,31 @@ def generate_transformation_string(**options):
return url, options
def chain_transformations(options, transformations):
    """
    Helper function, allows chaining transformations to the end of transformations list

    The result of this function is an updated options parameter

    :param options: Original options
    :param transformations: Transformations to chain at the end

    :return: Resulting options
    """
    # Work on a copy so the caller's transformation list is never mutated.
    chained = build_array(copy.deepcopy(transformations))

    # Keep only the URL-level options; everything else is transformation state.
    url_options = {key: options[key] for key in __URL_KEYS if key in options}

    # The original options become the first transformation of the chain.
    chained.insert(0, options)
    url_options["transformation"] = chained

    return url_options
def is_fraction(width):
    """Return truthy when *width* parses as a float strictly below 1 (a relative width)."""
    text = str(width)
    return re.match(FLOAT_RE, text) and float(text) < 1
@ -302,18 +445,26 @@ def split_range(range):
def norm_range_value(value):
    """
    Normalize a single offset-range endpoint.

    :param value: A number or string, optionally carrying a percent modifier
        recognized by RANGE_VALUE_RE
    :return: Normalized string such as "10" or "10p", or None when value is
        None or does not match RANGE_VALUE_RE
    """
    if value is None:
        return None

    match = re.match(RANGE_VALUE_RE, str(value))
    if match is None:
        return None

    # A percent modifier is always serialized as the single letter "p".
    modifier = 'p' if match.group('modifier') is not None else ''
    return match.group('value') + modifier
def norm_auto_range_value(value):
    """Normalize a range endpoint, letting the special value "auto" pass through unchanged."""
    return value if value == "auto" else norm_range_value(value)
def process_video_codec_param(param):
out_param = param
if isinstance(out_param, dict):
@ -325,15 +476,29 @@ def process_video_codec_param(param):
return out_param
def process_radius(param):
    """
    Serialize the radius transformation parameter.

    :param param: None, a single value, or a list/tuple of 1 to 4 per-corner
        values (numbers or expressions)
    :return: Serialized radius string, or None when param is None
    :raises ValueError: when a list/tuple holds fewer than 1 or more than 4 items
    """
    if param is None:
        return None
    if isinstance(param, (list, tuple)):
        if not 1 <= len(param) <= 4:
            raise ValueError("Invalid radius param")
        # str() guards against normalize_expression returning a non-string for
        # falsy numeric corner values such as 0, which would break join().
        return ':'.join(str(normalize_expression(t)) for t in param)
    return str(param)
def cleanup_params(params):
    """Drop params whose value is None or "", applying __safe_value to what remains."""
    return {key: __safe_value(value)
            for key, value in params.items()
            if value is not None and not value == ""}
def sign_request(params, options):
api_key = options.get("api_key", cloudinary.config().api_key)
if not api_key: raise ValueError("Must supply api_key")
if not api_key:
raise ValueError("Must supply api_key")
api_secret = options.get("api_secret", cloudinary.config().api_secret)
if not api_secret: raise ValueError("Must supply api_secret")
if not api_secret:
raise ValueError("Must supply api_secret")
params = cleanup_params(params)
params["signature"] = api_sign_request(params, api_secret)
@ -345,7 +510,7 @@ def sign_request(params, options):
def api_sign_request(params_to_sign, api_secret):
    """
    Compute the authentication signature for an API request.

    :param params_to_sign: Dict of request parameters; falsy values are skipped
        and list values are joined with ","
    :param api_secret: Account API secret appended to the string being hashed
    :return: Hex digest signature string
    """
    params = [(k + "=" + (",".join(v) if isinstance(v, list) else str(v)))
              for k, v in params_to_sign.items() if v]
    to_sign = "&".join(sorted(params))
    return compute_hex_hash(to_sign + api_secret)
def breakpoint_settings_mapper(breakpoint_settings):
@ -370,11 +535,13 @@ def finalize_source(source, format, url_suffix):
source_to_sign = source
else:
source = unquote(source)
if not PY3: source = source.encode('utf8')
if not PY3:
source = source.encode('utf8')
source = smart_escape(source)
source_to_sign = source
if url_suffix is not None:
if re.search(r'[\./]', url_suffix): raise ValueError("url_suffix should not include . or /")
if re.search(r'[\./]', url_suffix):
raise ValueError("url_suffix should not include . or /")
source = source + "/" + url_suffix
if format is not None:
source = source + "." + format
@ -396,7 +563,8 @@ def finalize_resource_type(resource_type, type, url_suffix, use_root_path, short
raise ValueError("URL Suffix only supported for image/upload and raw/upload")
if use_root_path:
if (resource_type == "image" and upload_type == "upload") or (resource_type == "images" and upload_type is None):
if (resource_type == "image" and upload_type == "upload") or (
resource_type == "images" and upload_type is None):
resource_type = None
upload_type = None
else:
@ -409,28 +577,33 @@ def finalize_resource_type(resource_type, type, url_suffix, use_root_path, short
return resource_type, upload_type
def unsigned_download_url_prefix(source, cloud_name, private_cdn, cdn_subdomain, secure_cdn_subdomain, cname, secure,
secure_distribution):
def unsigned_download_url_prefix(source, cloud_name, private_cdn, cdn_subdomain,
secure_cdn_subdomain, cname, secure, secure_distribution):
"""cdn_subdomain and secure_cdn_subdomain
1) Customers in shared distribution (e.g. res.cloudinary.com)
if cdn_domain is true uses res-[1-5].cloudinary.com for both http and https. Setting secure_cdn_subdomain to false disables this for https.
if cdn_domain is true uses res-[1-5].cloudinary.com for both http and https.
Setting secure_cdn_subdomain to false disables this for https.
2) Customers with private cdn
if cdn_domain is true uses cloudname-res-[1-5].cloudinary.com for http
if secure_cdn_domain is true uses cloudname-res-[1-5].cloudinary.com for https (please contact support if you require this)
if secure_cdn_domain is true uses cloudname-res-[1-5].cloudinary.com for https
(please contact support if you require this)
3) Customers with cname
if cdn_domain is true uses a[1-5].cname for http. For https, uses the same naming scheme as 1 for shared distribution and as 2 for private distribution."""
if cdn_domain is true uses a[1-5].cname for http. For https, uses the same naming scheme
as 1 for shared distribution and as 2 for private distribution."""
shared_domain = not private_cdn
shard = __crc(source)
if secure:
if secure_distribution is None or secure_distribution == cloudinary.OLD_AKAMAI_SHARED_CDN:
secure_distribution = cloud_name + "-res.cloudinary.com" if private_cdn else cloudinary.SHARED_CDN
secure_distribution = cloud_name + "-res.cloudinary.com" \
if private_cdn else cloudinary.SHARED_CDN
shared_domain = shared_domain or secure_distribution == cloudinary.SHARED_CDN
if secure_cdn_subdomain is None and shared_domain:
secure_cdn_subdomain = cdn_subdomain
if secure_cdn_subdomain:
secure_distribution = re.sub('res.cloudinary.com', "res-" + shard + ".cloudinary.com", secure_distribution)
secure_distribution = re.sub('res.cloudinary.com', "res-" + shard + ".cloudinary.com",
secure_distribution)
prefix = "https://" + secure_distribution
elif cname:
@ -438,10 +611,12 @@ def unsigned_download_url_prefix(source, cloud_name, private_cdn, cdn_subdomain,
prefix = "http://" + subdomain + cname
else:
subdomain = cloud_name + "-res" if private_cdn else "res"
if cdn_subdomain: subdomain = subdomain + "-" + shard
if cdn_subdomain:
subdomain = subdomain + "-" + shard
prefix = "http://" + subdomain + ".cloudinary.com"
if shared_domain: prefix += "/" + cloud_name
if shared_domain:
prefix += "/" + cloud_name
return prefix
@ -460,16 +635,23 @@ def merge(*dict_args):
def cloudinary_url(source, **options):
original_source = source
patch_fetch_format(options)
type = options.pop("type", "upload")
if type == 'fetch':
options["fetch_format"] = options.get("fetch_format", options.pop("format", None))
transformation, options = generate_transformation_string(**options)
resource_type = options.pop("resource_type", "image")
force_version = options.pop("force_version", cloudinary.config().force_version)
if force_version is None:
force_version = True
version = options.pop("version", None)
format = options.pop("format", None)
cdn_subdomain = options.pop("cdn_subdomain", cloudinary.config().cdn_subdomain)
secure_cdn_subdomain = options.pop("secure_cdn_subdomain", cloudinary.config().secure_cdn_subdomain)
secure_cdn_subdomain = options.pop("secure_cdn_subdomain",
cloudinary.config().secure_cdn_subdomain)
cname = options.pop("cname", cloudinary.config().cname)
shorten = options.pop("shorten", cloudinary.config().shorten)
@ -478,7 +660,8 @@ def cloudinary_url(source, **options):
raise ValueError("Must supply cloud_name in tag or in configuration")
secure = options.pop("secure", cloudinary.config().secure)
private_cdn = options.pop("private_cdn", cloudinary.config().private_cdn)
secure_distribution = options.pop("secure_distribution", cloudinary.config().secure_distribution)
secure_distribution = options.pop("secure_distribution",
cloudinary.config().secure_distribution)
sign_url = options.pop("sign_url", cloudinary.config().sign_url)
api_secret = options.pop("api_secret", cloudinary.config().api_secret)
url_suffix = options.pop("url_suffix", None)
@ -490,15 +673,19 @@ def cloudinary_url(source, **options):
if (not source) or type == "upload" and re.match(r'^https?:', source):
return original_source, options
resource_type, type = finalize_resource_type(resource_type, type, url_suffix, use_root_path, shorten)
resource_type, type = finalize_resource_type(
resource_type, type, url_suffix, use_root_path, shorten)
source, source_to_sign = finalize_source(source, format, url_suffix)
if source_to_sign.find("/") >= 0 \
if not version and force_version \
and source_to_sign.find("/") >= 0 \
and not re.match(r'^https?:/', source_to_sign) \
and not re.match(r'^v[0-9]+', source_to_sign) \
and not version:
and not re.match(r'^v[0-9]+', source_to_sign):
version = "1"
if version: version = "v" + str(version)
if version:
version = "v" + str(version)
else:
version = None
transformation = re.sub(r'([^:])/+', r'\1/', transformation)
@ -506,35 +693,84 @@ def cloudinary_url(source, **options):
if sign_url and not auth_token:
to_sign = "/".join(__compact([transformation, source_to_sign]))
signature = "s--" + to_string(
base64.urlsafe_b64encode(hashlib.sha1(to_bytes(to_sign + api_secret)).digest())[0:8]) + "--"
base64.urlsafe_b64encode(
hashlib.sha1(to_bytes(to_sign + api_secret)).digest())[0:8]) + "--"
prefix = unsigned_download_url_prefix(source, cloud_name, private_cdn, cdn_subdomain, secure_cdn_subdomain, cname,
secure, secure_distribution)
source = "/".join(__compact([prefix, resource_type, type, signature, transformation, version, source]))
prefix = unsigned_download_url_prefix(
source, cloud_name, private_cdn, cdn_subdomain, secure_cdn_subdomain,
cname, secure, secure_distribution)
source = "/".join(__compact(
[prefix, resource_type, type, signature, transformation, version, source]))
if sign_url and auth_token:
path = urlparse(source).path
token = cloudinary.auth_token.generate( **merge(auth_token, {"url": path}))
token = cloudinary.auth_token.generate(**merge(auth_token, {"url": path}))
source = "%s?%s" % (source, token)
return source, options
def cloudinary_api_url(action='upload', **options):
    """
    Build the REST API endpoint URL for the given action.

    :param action: API action, e.g. "upload" or "generate_archive"
    :param options: May override "upload_prefix", "cloud_name" and "resource_type"
    :return: Full API URL string
    :raises ValueError: when no cloud_name is supplied or configured
    """
    cloudinary_prefix = options.get("upload_prefix", cloudinary.config().upload_prefix) \
        or "https://api.cloudinary.com"
    cloud_name = options.get("cloud_name", cloudinary.config().cloud_name)
    if not cloud_name:
        raise ValueError("Must supply cloud_name")
    resource_type = options.get("resource_type", "image")
    # encode_unicode_url keeps unicode components usable on python2.
    return encode_unicode_url("/".join([cloudinary_prefix, "v1_1", cloud_name, resource_type, action]))
# Based on ruby's CGI::unescape. In addition does not escape / :
def smart_escape(source,unsafe = r"([^a-zA-Z0-9_.\-\/:]+)"):
def cloudinary_scaled_url(source, width, transformation, options):
    """
    Generates a cloudinary url scaled to specified width.

    :param source: The resource
    :param width: Width in pixels of the srcset item
    :param transformation: Custom transformation that overrides transformations provided in options
    :param options: A dict with additional options

    :return: Resulting URL of the item
    """
    # Deep-copy so the caller's options dict survives untouched.
    url_options = copy.deepcopy(options)

    if transformation:
        if isinstance(transformation, string_types):
            transformation = {"raw_transformation": transformation}
        # A custom transformation replaces all transformation-related options;
        # only URL-level options are kept, then the override is merged in.
        url_options = {key: url_options[key] for key in __URL_KEYS if key in url_options}
        url_options.update(transformation)

    patch_fetch_format(url_options)
    url_options = chain_transformations(url_options, {"crop": "scale", "width": width})

    return cloudinary_url(source, **url_options)[0]
def smart_escape(source, unsafe=r"([^a-zA-Z0-9_.\-\/:]+)"):
    """
    Based on ruby's CGI::unescape. In addition does not escape / :

    :param source: Source string to escape
    :param unsafe: Regex (as a string) matching runs of characters to escape

    :return: Escaped string
    """
    def pack(m):
        # Percent-encode every byte of the unsafe run, e.g. b" " -> "%20".
        return to_bytes('%' + "%".join(
            ["%02X" % x for x in struct.unpack('B' * len(m.group(1)), m.group(1))]
        ).upper())

    return to_string(re.sub(to_bytes(unsafe), pack, to_bytes(source)))
def random_public_id():
    """Return a random 16-character public id over [a-z0-9], drawn from a CSPRNG."""
    alphabet = string.ascii_lowercase + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(16))
def signed_preloaded_image(result):
@ -584,7 +820,8 @@ def download_archive_url(**options):
params = options.copy()
params.update(mode="download")
cloudinary_params = sign_request(archive_params(**params), options)
return cloudinary_api_url("generate_archive", **options) + "?" + urlencode(bracketize_seq(cloudinary_params), True)
return cloudinary_api_url("generate_archive", **options) + "?" + \
urlencode(bracketize_seq(cloudinary_params), True)
def download_zip_url(**options):
@ -592,10 +829,12 @@ def download_zip_url(**options):
new_options.update(target_format="zip")
return download_archive_url(**new_options)
def generate_auth_token(**options):
    """Generate an auth token, merging configured auth_token defaults with per-call overrides."""
    return auth_token.generate(**merge(cloudinary.config().auth_token, options))
def archive_params(**options):
if options.get("timestamp") is None:
timestamp = now()
@ -613,6 +852,8 @@ def archive_params(**options):
"phash": options.get("phash"),
"prefixes": options.get("prefixes") and build_array(options.get("prefixes")),
"public_ids": options.get("public_ids") and build_array(options.get("public_ids")),
"fully_qualified_public_ids": options.get("fully_qualified_public_ids") and build_array(
options.get("fully_qualified_public_ids")),
"skip_transformation_name": options.get("skip_transformation_name"),
"tags": options.get("tags") and build_array(options.get("tags")),
"target_format": options.get("target_format"),
@ -629,15 +870,32 @@ def archive_params(**options):
def build_eager(transformations):
    """
    Serialize eager transformations into the "|"-separated API format.

    :param transformations: None, a single transformation, or a list of
        transformations (strings or option dicts)
    :return: Serialized string, or None when transformations is None
    """
    if transformations is None:
        return None
    # Each entry is serialized independently by build_single_eager.
    return "|".join(build_single_eager(et) for et in build_array(transformations))
def build_single_eager(options):
    """
    Builds a single eager transformation which consists of transformation and (optionally) format joined by "/"

    :param options: Options containing transformation parameters and (optionally) a "format" key
        format can be a string value (jpg, gif, etc) or can be set to "" (empty string).
        The latter leads to transformation ending with "/", which means "No extension, use original format"
        If format is not provided or set to None, only transformation is used (without the trailing "/")

    :return: Resulting eager transformation string
    """
    if isinstance(options, string_types):
        return options

    transformation_str = generate_transformation_string(**options)[0]
    if not transformation_str:
        return ""

    file_format = options.get("format")
    if file_format is None:
        return transformation_str
    return transformation_str + "/" + file_format
def build_custom_headers(headers):
@ -653,49 +911,30 @@ def build_custom_headers(headers):
def build_upload_params(**options):
    """
    Build the parameter dict sent with an upload API call.

    Pass-through params are copied verbatim from *options*; params that need
    serialization (arrays joined with ",", coordinate encoding, JSON, etc.)
    are built separately and merged on top.

    :param options: Upload options as accepted by the upload API
    :return: Dict of upload parameters ready for signing/sending
    """
    params = {param_name: options.get(param_name) for param_name in __SIMPLE_UPLOAD_PARAMS}

    serialized_params = {
        "timestamp": now(),
        "metadata": encode_context(options.get("metadata")),
        "transformation": generate_transformation_string(**options)[0],
        "headers": build_custom_headers(options.get("headers")),
        "eager": build_eager(options.get("eager")),
        "tags": options.get("tags") and ",".join(build_array(options["tags"])),
        "allowed_formats": options.get("allowed_formats") and ",".join(build_array(options["allowed_formats"])),
        "face_coordinates": encode_double_array(options.get("face_coordinates")),
        "custom_coordinates": encode_double_array(options.get("custom_coordinates")),
        "context": encode_context(options.get("context")),
        "auto_tagging": options.get("auto_tagging") and str(options.get("auto_tagging")),
        "responsive_breakpoints": generate_responsive_breakpoints_string(options.get("responsive_breakpoints")),
        "access_control": options.get("access_control") and json_encode(
            build_list_of_dicts(options.get("access_control")))
    }

    # make sure that we are in-sync with __SERIALIZED_UPLOAD_PARAMS which are in use by other methods
    serialized_params = {param_name: serialized_params[param_name] for param_name in __SERIALIZED_UPLOAD_PARAMS}

    params.update(serialized_params)

    return params
@ -716,6 +955,14 @@ def __process_text_options(layer, layer_parameter):
if line_spacing is not None:
keywords.append("line_spacing_" + str(line_spacing))
font_antialiasing = layer.get("font_antialiasing")
if font_antialiasing is not None:
keywords.append("antialias_" + str(font_antialiasing))
font_hinting = layer.get("font_hinting")
if font_hinting is not None:
keywords.append("hinting_" + str(font_hinting))
if font_size is None and font_family is None and len(keywords) == 0:
return None
@ -778,12 +1025,12 @@ def process_layer(layer, layer_parameter):
if text is not None:
var_pattern = VAR_NAME_RE
match = re.findall(var_pattern,text)
match = re.findall(var_pattern, text)
parts= filter(lambda p: p is not None, re.split(var_pattern,text))
parts = filter(lambda p: p is not None, re.split(var_pattern, text))
encoded_text = []
for part in parts:
if re.match(var_pattern,part):
if re.match(var_pattern, part):
encoded_text.append(part)
else:
encoded_text.append(smart_escape(smart_escape(part, r"([,/])")))
@ -801,6 +1048,7 @@ def process_layer(layer, layer_parameter):
return ':'.join(components)
IF_OPERATORS = {
"=": 'eq',
"!=": 'ne',
@ -813,7 +1061,8 @@ IF_OPERATORS = {
"*": 'mul',
"/": 'div',
"+": 'add',
"-": 'sub'
"-": 'sub',
"^": 'pow'
}
PREDEFINED_VARS = {
@ -828,17 +1077,69 @@ PREDEFINED_VARS = {
"page_x": "px",
"page_y": "py",
"tags": "tags",
"width": "w"
"width": "w",
"duration": "du",
"initial_duration": "idu",
}
# Matches if-condition operators and predefined variable names for translation
# into the URL expression syntax; the (?<!\$) lookbehind skips user-defined
# variables (which start with "$").
replaceRE = "((\\|\\||>=|<=|&&|!=|>|=|<|/|-|\\+|\\*|\^)(?=[ _])|(?<!\$)(" + '|'.join(PREDEFINED_VARS.keys()) + "))"
def translate_if(match):
    """
    Translate a matched operator or predefined variable into its URL token.

    :param match: A regex match object whose group(0) is the matched token
    :return: The URL form of the token, or the token itself when unknown
    """
    name = match.group(0)
    # Operators take precedence; fall back to predefined vars, then identity.
    return IF_OPERATORS.get(name, PREDEFINED_VARS.get(name, name))
def process_custom_function(custom_function):
    """
    Serialize the custom_function parameter into "<function_type>:<source>".

    Non-dict values (already-serialized strings, None) pass through unchanged.
    """
    if not isinstance(custom_function, dict):
        return custom_function

    kind = custom_function.get("function_type")
    payload = custom_function.get("source")
    if kind == "remote":
        # Remote sources (URLs) travel base64url-encoded inside the URL.
        payload = base64url_encode(payload)

    return ":".join([kind, payload])
def process_custom_pre_function(custom_function):
    """Serialize custom_pre_function: a serialized custom function prefixed with "pre:"."""
    serialized = process_custom_function(custom_function)
    if not serialized:
        return None
    return "pre:{0}".format(serialized)
def process_fps(fps):
    """
    Serializes fps transformation parameter

    :param fps: A single number, a list of mixed type, a string, including open-ended and closed range values
        Examples: '24-29.97', 24, 24.973, '-24', [24, 29.97]

    :return: string
    """
    # Only list/tuple ranges need serialization; scalars pass through as-is.
    if isinstance(fps, (list, tuple)):
        return "-".join(normalize_expression(value) for value in fps)
    return fps
def process_ki(ki):
    """
    Serializes keyframe_interval parameter

    :param ki: Keyframe interval. Should be either a string or a positive real number.
    :return: string
    :raises ValueError: when ki is neither a string nor a positive number
    """
    # None and pre-formatted strings pass through untouched.
    if ki is None or isinstance(ki, string_types):
        return ki
    if not isinstance(ki, Number):
        raise ValueError("Keyframe interval should be a number or a string")
    if ki <= 0:
        raise ValueError("Keyframe interval should be greater than zero")
    return str(float(ki))
def process_conditional(conditional):
if conditional is None:
@ -846,8 +1147,9 @@ def process_conditional(conditional):
result = normalize_expression(conditional)
return result
def normalize_expression(expression):
if re.match(r'^!.+!$',str(expression)): # quoted string
if re.match(r'^!.+!$', str(expression)): # quoted string
return expression
elif expression:
result = str(expression)
@ -857,6 +1159,7 @@ def normalize_expression(expression):
else:
return expression
def __join_pair(key, value):
if value is None or value == "":
return None
@ -898,15 +1201,134 @@ def base64_encode_url(url):
try:
url = unquote(url)
except:
except Exception:
pass
url = smart_escape(url)
b64 = base64.b64encode(url.encode('utf-8'))
return b64.decode('ascii')
def base64url_encode(data):
    """
    Encode *data* with the URL-safe base64 alphabet.

    :param data: input data (string or bytes)
    :return: Base64 URL-safe encoded string (padding "=" is kept)
    """
    encoded_bytes = base64.urlsafe_b64encode(to_bytes(data))
    return to_string(encoded_bytes)
def encode_unicode_url(url_str):
    """
    Quote and encode possible unicode url string (applicable for python2)

    :param url_str: Url string to encode

    :return: Encoded string
    """
    if not six.PY2:
        # python3 handles unicode URL strings natively.
        return url_str
    return urllib.quote(url_str.encode('utf-8'), ":/?#[]@!$&'()*+,;=")
def __json_serializer(obj):
    """JSON serializer for objects not serializable by default json code"""
    if not isinstance(obj, (datetime, date)):
        raise TypeError("Object of type %s is not JSON serializable" % type(obj))
    return obj.isoformat()
def is_remote_url(file):
    """Basic URL scheme check to define if it's remote URL"""
    if not isinstance(file, string_types):
        return False
    return re.match(REMOTE_URL_RE, file)
def file_io_size(file_io):
    """
    Helper function for getting file-like object size(suitable for both files and streams)

    :param file_io: io.IOBase

    :return: size
    """
    saved_position = file_io.tell()
    file_io.seek(0, os.SEEK_END)
    total_size = file_io.tell()
    # Rewind so the caller can keep reading from where it left off.
    file_io.seek(saved_position, os.SEEK_SET)
    return total_size
def check_property_enabled(f):
    """
    Used as a class method decorator to check whether class is enabled(self.enabled is True)

    :param f: function to call

    :return: None if not enabled, otherwise calls function f
    """
    from functools import wraps

    @wraps(f)  # preserve the wrapped method's name/docstring for introspection
    def wrapper(*args, **kwargs):
        if not args[0].enabled:
            return None
        return f(*args, **kwargs)

    return wrapper
def verify_api_response_signature(public_id, version, signature):
    """
    Verifies the authenticity of an API response signature

    :param public_id: The public id of the asset as returned in the API response
    :param version: The version of the asset as returned in the API response
    :param signature: Actual signature. Can be retrieved from the X-Cld-Signature header

    :return: Boolean result of the validation
    """
    api_secret = cloudinary.config().api_secret
    if not api_secret:
        raise Exception('Api secret key is empty')

    expected = api_sign_request({'public_id': public_id, 'version': version}, api_secret)
    return signature == expected
def verify_notification_signature(body, timestamp, signature, valid_for=7200):
    """
    Verifies the authenticity of a notification signature

    :param body: Json of the request's body
    :param timestamp: Unix timestamp. Can be retrieved from the X-Cld-Timestamp header
    :param signature: Actual signature. Can be retrieved from the X-Cld-Signature header
    :param valid_for: The desired time in seconds for considering the request valid

    :return: Boolean result of the validation
    """
    if not cloudinary.config().api_secret:
        raise Exception('Api secret key is empty')

    # Reject notifications older than the allowed window.
    if timestamp < time.time() - valid_for:
        return False

    if not isinstance(body, str):
        raise ValueError('Body should be type of string')

    payload = '{}{}{}'.format(body, timestamp, cloudinary.config().api_secret)
    return signature == compute_hex_hash(payload)
def get_http_connector(conf, options):
    """
    Used to create http connector, depends on api_proxy configuration parameter

    :param conf: configuration object
    :param options: additional options

    :return: ProxyManager if api_proxy is set, otherwise PoolManager object
    """
    if not conf.api_proxy:
        return PoolManager(**options)
    return ProxyManager(conf.api_proxy, **options)