repo
stringlengths 5
75
| commit
stringlengths 40
40
| message
stringlengths 6
18.2k
| diff
stringlengths 60
262k
|
---|---|---|---|
adieu/django-mediagenerator | e9502dcd52f97ebc157dead13f0849cf59e1b503 | Added handlebars.js filter | diff --git a/mediagenerator/filters/handlebars.py b/mediagenerator/filters/handlebars.py
new file mode 100644
index 0000000..bfb686f
--- /dev/null
+++ b/mediagenerator/filters/handlebars.py
@@ -0,0 +1,97 @@
+from django.conf import settings
+from django.utils.encoding import smart_str
+
+from mediagenerator.generators.bundles.base import Filter
+from mediagenerator.utils import get_media_dirs, find_file
+
+import os, sys
+from hashlib import sha1
+from subprocess import Popen, PIPE
+
+class HandlebarsFilter(Filter):
+ def __init__(self, **kwargs):
+ self.config(kwargs, name=kwargs["name"], path=(), template_name=kwargs.get("template_name"))
+ if isinstance(self.path, basestring):
+ self.path = (self.path,)
+
+ # we need to be able to mutate self.path
+ self.path = list(self.path)
+
+ super(HandlebarsFilter, self).__init__(**kwargs)
+
+ media_dirs = [directory for directory in get_media_dirs()
+ if os.path.exists(directory)]
+ self.path += tuple(media_dirs)
+
+ # search from template directories first
+ from django.template.loaders.app_directories import app_template_dirs
+ self.path = list(app_template_dirs) + self.path
+
+ self._compiled = None
+ self._compiled_hash = None
+ self._dependencies = {}
+
+
+ @classmethod
+ def from_default(cls, name):
+ return {'name': name}
+
+ def get_output(self, variation):
+ self._regenerate(debug=False)
+ yield self._compiled
+
+ def get_dev_output(self, name, variation):
+ self._regenerate(debug=True)
+ return self._compiled
+
+ def get_dev_output_names(self, variation):
+ self._regenerate(debug=True)
+ yield self.name + '.js', self._compiled_hash
+
+
+ def _regenerate(self, debug=False):
+ file_path = self._find_file(self.name)
+ self._compiled = self._compile(file_path, debug=debug)
+ self._compiled_hash = sha1(smart_str(self._compiled)).hexdigest()
+
+ def _compile(self, path, debug=False):
+ # check if already compiled
+ if hasattr(self, "mtime") and self.mtime == os.path.getmtime(path):
+ return self._compiled
+
+ # compile with handlebars
+ try:
+ shell = sys.platform == 'win32'
+
+ relative_path = self._get_relative_path(path)
+
+ cmd = Popen(['handlebars', relative_path],
+ stdin=PIPE, stdout=PIPE, stderr=PIPE,
+ shell=shell, universal_newlines=True,
+ cwd=settings.PROJECT_ROOT)
+ output, error = cmd.communicate()
+
+ self.mtime = os.path.getmtime(path)
+
+ assert cmd.wait() == 0, ('handlebars returned errors:\n%s\n%s' % (error, output))
+ return output.decode('utf-8')
+ except Exception, e:
+ raise ValueError("Failed to run handlebars.js compiler for this "
+ "file. Please confirm that the \"handlebars\" application is "
+ "on your path and that you can run it from your own command "
+ "line.\n"
+ "Error was: %s" % e)
+
+
+ def _find_file(self, name):
+ return find_file(name, media_dirs=self.path)
+
+
+ def _get_relative_path(self, abs_path):
+ """Given an absolute path, return a path relative to the
+ project root.
+ 'subdir/foo'
+
+ """
+ relative_path = os.path.relpath(abs_path, settings.PROJECT_ROOT)
+ return relative_path
diff --git a/mediagenerator/generators/bundles/settings.py b/mediagenerator/generators/bundles/settings.py
index 1b9209b..85a6a07 100644
--- a/mediagenerator/generators/bundles/settings.py
+++ b/mediagenerator/generators/bundles/settings.py
@@ -1,26 +1,27 @@
from django.conf import settings
DEFAULT_MEDIA_FILTERS = getattr(settings, 'DEFAULT_MEDIA_FILTERS', {
'ccss': 'mediagenerator.filters.clever.CleverCSS',
'coffee': 'mediagenerator.filters.coffeescript.CoffeeScript',
+ 'handlebars': 'mediagenerator.filters.handlebars.HandlebarsFilter',
'css': 'mediagenerator.filters.cssurl.CSSURLFileFilter',
'html': 'mediagenerator.filters.template.Template',
'py': 'mediagenerator.filters.pyjs_filter.Pyjs',
'pyva': 'mediagenerator.filters.pyvascript_filter.PyvaScript',
'sass': 'mediagenerator.filters.sass.Sass',
'scss': 'mediagenerator.filters.sass.Sass',
'less': 'mediagenerator.filters.less.Less',
})
ROOT_MEDIA_FILTERS = getattr(settings, 'ROOT_MEDIA_FILTERS', {})
# These are applied in addition to ROOT_MEDIA_FILTERS.
# The separation is done because we don't want users to
# always specify the default filters when they merely want
# to configure YUICompressor or Closure.
BASE_ROOT_MEDIA_FILTERS = getattr(settings, 'BASE_ROOT_MEDIA_FILTERS', {
'*': 'mediagenerator.filters.concat.Concat',
'css': 'mediagenerator.filters.cssurl.CSSURL',
})
MEDIA_BUNDLES = getattr(settings, 'MEDIA_BUNDLES', ())
|
adieu/django-mediagenerator | 495da73f305a2a0e79a28d251b5b93caea06656d | Add UglifyJS as a filter. | diff --git a/mediagenerator/filters/uglifier.py b/mediagenerator/filters/uglifier.py
new file mode 100644
index 0000000..4906f1b
--- /dev/null
+++ b/mediagenerator/filters/uglifier.py
@@ -0,0 +1,33 @@
+from django.conf import settings
+from django.utils.encoding import smart_str
+from mediagenerator.generators.bundles.base import Filter
+
+class Uglifier(Filter):
+ def __init__(self, **kwargs):
+ super(Uglifier, self).__init__(**kwargs)
+ assert self.filetype == 'js', (
+ 'Uglifier only supports compilation to js. '
+ 'The parent filter expects "%s".' % self.filetype)
+
+ def get_output(self, variation):
+ # We import this here, so App Engine Helper users don't get import
+ # errors.
+ from subprocess import Popen, PIPE
+ for input in self.get_input(variation):
+ args = ['uglifyjs']
+ try:
+ args = args + settings.UGLIFIER_OPTIONS
+ except AttributeError:
+ pass
+ try:
+ cmd = Popen(args,
+ stdin=PIPE, stdout=PIPE, stderr=PIPE,
+ universal_newlines=True)
+ output, error = cmd.communicate(smart_str(input))
+ assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
+ yield output.decode('utf-8')
+ except Exception, e:
+ raise ValueError("Failed to run UglifyJs. "
+ "Please make sure you have Node.js and UglifyJS installed "
+ "and that it's in your PATH.\n"
+ "Error was: %s" % e)
|
adieu/django-mediagenerator | f7ecb8fd5cf7dfd2c0ea8f0c6a7c5c5a8dbada5b | don't overdo it | diff --git a/README.rst b/README.rst
index 98e29a2..845a445 100644
--- a/README.rst
+++ b/README.rst
@@ -1,24 +1,24 @@
-Improve your user experience with amazingly fast page loads by combining,
+Improve your user experience with fast page loads by combining,
compressing, and versioning your JavaScript & CSS files and images.
django-mediagenerator_ eliminates unnecessary HTTP requests
and maximizes cache usage.
Supports App Engine, Sass_, HTML5 offline manifests, Jinja2_,
Python/pyjs_, CoffeeScript_, and much more. Visit the
`project site`_ for more information.
Most important changes in version 1.11
=============================================================
* Added LESS support
* Fixed an incompatibility with App Engine 1.6.0 on Python 2.7
See `CHANGELOG.rst`_ for the complete changelog.
.. _django-mediagenerator: http://www.allbuttonspressed.com/projects/django-mediagenerator
.. _project site: django-mediagenerator_
.. _Sass: http://sass-lang.com/
.. _pyjs: http://pyjs.org/
.. _CoffeeScript: http://coffeescript.org/
.. _Jinja2: http://jinja.pocoo.org/
.. _CHANGELOG.rst: https://bitbucket.org/wkornewald/django-mediagenerator/src/tip/CHANGELOG.rst
|
adieu/django-mediagenerator | 67977e2f2b0d5b8281eac40a632a138eddc520a9 | bumped version to 1.11 | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 51a8343..f55316f 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,156 +1,162 @@
Changelog
=============================================================
+Version 1.11
+-------------------------------------------------------------
+
+* Added LESS support
+* Fixed an incompatibility with App Engine 1.6.0 on Python 2.7
+
Version 1.10.4
-------------------------------------------------------------
* Fixed remapping of CSS url()s that contain a "?"
* Fixed serving of unicode content by media middleware
Version 1.10.3
-------------------------------------------------------------
* Fixed lots of unicode issues
Version 1.10.2
-------------------------------------------------------------
**Upgrade notes:** If you've specified a custom ``SASS_FRAMEWORKS`` in your ``settings.py`` you now also have to list ``compass`` and ``blueprint`` in that setting.
* All Compass/Sass frameworks (including ``compass`` and ``blueprint``) now have to be listed explictily in the ``SASS_FRAMEWORKS`` setting.
Version 1.10.1
-------------------------------------------------------------
* Added workaround for Windows bug in Sass 3.1. Backslash characters aren't handled correctly for "-I" import path parameters.
Version 1.10
-------------------------------------------------------------
* Added Compass support to Sass filter. You now have to install both Compass and Sass. Import Sass/Compass frameworks via ``manage.py importsassframeworks``.
* Fixed CoffeeScript support on OSX
* Fixed support for non-ascii chars in input files
* Added "Content-Length" response header for files served in dev mode (needed for Flash). Thanks to "sayane" for the patch.
* Fixed typo which resulted in broken support for ``.html`` assets. Thanks to "pendletongp" for the patch.
* Now showing instructive error message when Sass can't be found
* Use correct output path for ``_generated_media_names.py`` even when ``manage.py generatemedia`` is not started from the project root. Thanks to "pendletongp" for the patch.
* Added support for overriding the ``_generated_media_names`` module's import path and file system location (only needed for non-standard project structures).
Version 1.9.2
-------------------------------------------------------------
* Added missing ``base.manifest`` template and ``base_project`` to zip package
Version 1.9.1
-------------------------------------------------------------
* Fixed relative imports in Sass filter
Version 1.9
-------------------------------------------------------------
* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
* Added caching for CoffeeScript compilation results
* In cache manifests the ``NETWORK`` section now contains "``*``" by default
* By default ``.woff`` files are now copied, too
* Fixed first-time media generation when ``MEDIA_DEV_MODE=False``
* Fixed i18n filter in development mode. Contributed by Simon Payne.
* Fixed support for "/" in bundle names in dev mode (always worked fine in production)
* Changed ``DEV_MEDIA_URL`` fallback from ``STATICFILES_URL`` to ``STATIC_URL`` (has been changed in Django trunk)
Version 1.8
-------------------------------------------------------------
* HTML5 manifest now uses a regex to match included/excluded files
* Added support for scss files
* Fixed Sass ``@import`` tracking for partials
Version 1.7
-------------------------------------------------------------
* Large performance improvements, in particular on App Engine dev_appserver
Version 1.6.1
-------------------------------------------------------------
* Fixed support for Django 1.1 which imports ``mediagenerator.templatetags.media`` as ``django.templatetags.media`` and thus breaks relative imports
Version 1.6
-------------------------------------------------------------
**Upgrade notes:** The installation got simplified. Please remove the media code from your urls.py. The ``MediaMiddleware`` now takes care of everything.
* Added support for CSS data URIs. Doesn't yet generate MHTML for IE6/7 support.
* Added support for pre-bundling i18n JavaScript translations, so you don't need to use Django's slower AJAX view. With this filter translations are part of your generated JS bundle.
* Added support for CleverCSS
* Simplified installation process. The media view got completely replaced by ``MediaMiddleware``.
* Fixed support for output variations (needed by i18n filter to generate the same JS file in different variations for each language)
Version 1.5.1
-------------------------------------------------------------
**Upgrade notes:** There's a conflict with ``STATICFILES_URL`` in Django trunk (1.3). Use ``DEV_MEDIA_URL`` instead from now on.
* ``DEV_MEDIA_URL`` should be used instead of ``MEDIA_URL`` and ``STATICFILES_URL``, though the other two are still valid for backwards-compatibility
Version 1.5
-------------------------------------------------------------
This is another staticfiles-compatibility release which is intended to allow for writing reusable open-source apps.
**Upgrade notes:** The CSS URL rewriting scheme has changed. Previously, ``url()`` statements in CSS files were treated similar to "absolute" URLs where the root is ``STATICFILES_URL`` (or ``MEDIA_URL``). This scheme was used because it was consistent with URLs in Sass. Now URLs are treated as relative to the CSS file. So, if the file ``css/style.css`` wants to link to ``img/icon.png`` the URL now has to be ``url(../img/icon.png)``. Previously it was ``url(img/icon.png)``. One way to upgrade to the staticfiles-compatible scheme is to modify your existing URLs.
If you don't want to change your CSS files there is an alternative, but it's not staticfiles-compatible. Add the following to your settings: ``REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = False``
**Important:** Sass files still use the old scheme (``url(img/icon.png)``) because this is **much** easier to understand and allows for more reusable code, especially when you ``@import`` other Sass modules and those link to images.
* Made CSS URL rewriting system compatible with ``django.contrib.staticfiles``
* Added support for CSS URLs that contain a hash (e.g.: ``url('webfont.svg#webfontmAfNlbV6')``). Thanks to Karl Bowden for the patch!
* Filter backends now have an additional ``self.bundle`` attribute which contains the final bundle name
* Fixed an incompatibility with Django 1.1 and 1.0 (``django.utils.itercompat.product`` isn't available in those releases)
* Fixed ``MediaMiddleware``, so it doesn't cache error responses
Version 1.4
-------------------------------------------------------------
This is a compatibility release which prepares for the new staticfiles feature in Django 1.3.
**Upgrade notes:** Place your app media in a "static" folder instead of a "media" folder. Use ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) instead of ``MEDIA_URL`` from now on.
* App media is now searched in "static" folders instead of "media". For now, you can still use "media" folders, but this might be deprecated in the future (for the sake of having just one standard for reusable apps).
* ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) should be used instead of ``MEDIA_URL`` because the meaning of that variable has changed in Django 1.3.
* ``DEV_MEDIA_URL`` falls back to ``STATICFILES_URL`` and ``GLOBAL_MEDIA_DIRS`` falls back to ``STATICFILES_DIRS`` if undefined (you should still use the former, respectively; this is just for convenience)
Version 1.3.1
-------------------------------------------------------------
* Improved handling of media variations. This also fixes a bug with using CSS media types in production mode
Version 1.3
-------------------------------------------------------------
* Added support for setting media type for CSS. E.g.: ``{% include_media 'bundle.css' media='print' %}``
Version 1.2.1
-------------------------------------------------------------
* Fixed caching problems on runserver when using i18n and ``LocaleMiddleware``
Version 1.2
-------------------------------------------------------------
**Upgrade notes:** Please add ``'mediagenerator.middleware.MediaMiddleware'`` as the **first** middleware in your settings.py.
* Got rid of unnecessary HTTP roundtrips when ``USE_ETAGS = True``
* Added Django template filter (by default only used for .html files), contributed by Matt Bierner
* Added media_url() filter which provides access to generated URLs from JS
* CopyFiles backend can now ignore files matching certain regex patterns
Version 1.1
-------------------------------------------------------------
* Added Closure compiler backend
* Added HTML5 cache manifest file backend
* Fixed Sass support on Linux
* Updated pyjs filter to latest pyjs repo version
* "swf" and "ico" files are now copied, too, by default
diff --git a/README.rst b/README.rst
index 2f0d6ab..98e29a2 100644
--- a/README.rst
+++ b/README.rst
@@ -1,25 +1,24 @@
Improve your user experience with amazingly fast page loads by combining,
compressing, and versioning your JavaScript & CSS files and images.
django-mediagenerator_ eliminates unnecessary HTTP requests
and maximizes cache usage.
Supports App Engine, Sass_, HTML5 offline manifests, Jinja2_,
Python/pyjs_, CoffeeScript_, and much more. Visit the
`project site`_ for more information.
-Most important changes in version 1.10.4
+Most important changes in version 1.11
=============================================================
-* Fixed remapping of CSS url()s that contain a "?"
-* Fixed serving of unicode content by media middleware
-
+* Added LESS support
+* Fixed an incompatibility with App Engine 1.6.0 on Python 2.7
See `CHANGELOG.rst`_ for the complete changelog.
.. _django-mediagenerator: http://www.allbuttonspressed.com/projects/django-mediagenerator
.. _project site: django-mediagenerator_
.. _Sass: http://sass-lang.com/
.. _pyjs: http://pyjs.org/
.. _CoffeeScript: http://coffeescript.org/
.. _Jinja2: http://jinja.pocoo.org/
.. _CHANGELOG.rst: https://bitbucket.org/wkornewald/django-mediagenerator/src/tip/CHANGELOG.rst
diff --git a/setup.py b/setup.py
index ac59b73..15e49ad 100644
--- a/setup.py
+++ b/setup.py
@@ -1,33 +1,33 @@
from setuptools import setup, find_packages
DESCRIPTION = 'Asset manager for Django'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except:
pass
setup(name='django-mediagenerator',
- version='1.10.4',
+ version='1.11',
packages=find_packages(exclude=('tests', 'tests.*',
'base_project', 'base_project.*')),
package_data={'mediagenerator.filters': ['pyjslibs/*.py', '*.rb'],
'mediagenerator': ['templates/mediagenerator/manifest/*']},
author='Waldemar Kornewald',
author_email='[email protected]',
url='http://www.allbuttonspressed.com/projects/django-mediagenerator',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
],
)
|
adieu/django-mediagenerator | b58fb270da1a9fe00d2f4fcee5a17feba1718891 | added lesscss support. thanks a lot to Wilfred Hughes for the patch! | diff --git a/mediagenerator/filters/less.py b/mediagenerator/filters/less.py
new file mode 100644
index 0000000..4c10f3c
--- /dev/null
+++ b/mediagenerator/filters/less.py
@@ -0,0 +1,196 @@
+from django.utils.encoding import smart_str
+from django.conf import settings
+from hashlib import sha1
+from mediagenerator.generators.bundles.base import Filter
+from mediagenerator.utils import find_file, read_text_file, get_media_dirs
+from subprocess import Popen, PIPE
+import os
+import sys
+import re
+import posixpath
+
+
+_RE_FLAGS = re.MULTILINE | re.UNICODE
+multi_line_comment_re = re.compile(r'/\*.*?\*/', _RE_FLAGS | re.DOTALL)
+one_line_comment_re = re.compile(r'//.*', _RE_FLAGS)
+import_re = re.compile(r'''@import\s* # import keyword
+ ["'] # opening quote
+ (.+?) # the module name
+ ["'] # closing quote
+ \s*; # statement terminator
+ ''',
+ _RE_FLAGS | re.VERBOSE)
+
+if not hasattr(os.path, 'relpath'):
+ # backport os.path.relpath from Python 2.6
+ # Copyright (c) 2001-2010 Python Software Foundation; All Rights Reserved
+
+ # Return the longest prefix of all list elements.
+ def commonprefix(m):
+ "Given a list of pathnames, returns the longest common leading component"
+ if not m: return ''
+ s1 = min(m)
+ s2 = max(m)
+ for i, c in enumerate(s1):
+ if c != s2[i]:
+ return s1[:i]
+ return s1
+
+ def relpath(path, start=os.path.curdir):
+ """Return a relative version of a path"""
+
+ if not path:
+ raise ValueError("no path specified")
+
+ start_list = [x for x in os.path.abspath(start).split(os.path.sep) if x]
+ path_list = [x for x in os.path.abspath(path).split(os.path.sep) if x]
+
+ # Work out how much of the filepath is shared by start and path.
+ i = len(commonprefix([start_list, path_list]))
+
+ rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
+ if not rel_list:
+ return os.path.curdir
+ return os.path.join(*rel_list)
+
+ os.path.relpath = relpath
+
+
+class Less(Filter):
+ takes_input = False
+
+ def __init__(self, **kwargs):
+ self.config(kwargs, path=(), main_module=None)
+ if isinstance(self.path, basestring):
+ self.path = (self.path,)
+
+ # we need to be able to mutate self.path,
+ self.path = list(self.path)
+
+ super(Less, self).__init__(**kwargs)
+
+ assert self.filetype == 'css', (
+ 'Less only supports compilation to CSS. '
+ 'The parent filter expects "%s".' % self.filetype)
+ assert self.main_module, \
+ 'You must provide a main module'
+
+ # lessc can't cope with nonexistent directories, so filter them
+ media_dirs = [directory for directory in get_media_dirs()
+ if os.path.exists(directory)]
+ self.path += tuple(media_dirs)
+
+ self._compiled = None
+ self._compiled_hash = None
+ self._dependencies = {}
+
+ @classmethod
+ def from_default(cls, name):
+ return {'main_module': name}
+
+ def get_output(self, variation):
+ self._regenerate(debug=False)
+ yield self._compiled
+
+ def get_dev_output(self, name, variation):
+ assert name == self.main_module + '.css'
+ self._regenerate(debug=True)
+ return self._compiled
+
+ def get_dev_output_names(self, variation):
+ self._regenerate(debug=True)
+ yield self.main_module + '.css', self._compiled_hash
+
+ def _regenerate(self, debug=False):
+ if self._dependencies:
+ for name, mtime in self._dependencies.items():
+ path = self._find_file(name)
+ if not path or os.path.getmtime(path) != mtime:
+ # Just recompile everything
+ self._dependencies = {}
+ break
+ else:
+ # No changes
+ return
+
+ modules = [self.main_module]
+ # get all the transitive dependencies of this module
+ while True:
+ if not modules:
+ break
+
+ module_name = modules.pop()
+ path = self._find_file(module_name)
+ assert path, 'Could not find the Less module %s' % module_name
+ mtime = os.path.getmtime(path)
+ self._dependencies[module_name] = mtime
+
+ source = read_text_file(path)
+ dependencies = self._get_dependencies(source)
+
+ for name in dependencies:
+ # Try relative import, first
+ transformed = posixpath.join(posixpath.dirname(module_name), name)
+ path = self._find_file(transformed)
+ if path:
+ name = transformed
+ else:
+ path = self._find_file(name)
+ assert path, ('The Less module %s could not find the '
+ 'dependency %s' % (module_name, name))
+ if name not in self._dependencies:
+ modules.append(name)
+
+ main_module_path = self._find_file(self.main_module)
+ self._compiled = self._compile(main_module_path, debug=debug)
+ self._compiled_hash = sha1(smart_str(self._compiled)).hexdigest()
+
+ def _compile(self, path, debug=False):
+ try:
+ relative_paths = [self._get_relative_path(directory)
+ for directory in self.path]
+
+ shell = sys.platform == 'win32'
+
+ cmd = Popen(['lessc',
+ '--include-path=%s' % ':'.join(relative_paths),
+ path],
+ stdin=PIPE, stdout=PIPE, stderr=PIPE,
+ shell=shell, universal_newlines=True,
+ cwd=settings.PROJECT_ROOT)
+ output, error = cmd.communicate()
+
+ # some lessc errors output to stdout, so we put both in the assertion message
+ assert cmd.wait() == 0, ('Less command returned bad '
+ 'result:\n%s\n%s' % (error, output))
+ return output.decode('utf-8')
+ except Exception, e:
+ raise ValueError("Failed to run Less compiler for this "
+ "file. Please confirm that the \"lessc\" application is "
+ "on your path and that you can run it from your own command "
+ "line.\n"
+ "Error was: %s" % e)
+
+ def _get_dependencies(self, source):
+ clean_source = multi_line_comment_re.sub('\n', source)
+ clean_source = one_line_comment_re.sub('', clean_source)
+
+ return [name for name in import_re.findall(clean_source)
+ if not name.endswith('.css')]
+
+ def _find_file(self, name):
+ if not name.endswith('.less'):
+ name = name + '.less'
+
+ return find_file(name, media_dirs=self.path)
+
+ def _get_relative_path(self, abs_path):
+ """Given an absolute path, return a path relative to the
+ project root.
+
+ >>> self._get_relative_path('/home/bob/bobs_project/subdir/foo')
+ 'subdir/foo'
+
+ """
+ relative_path = os.path.relpath(abs_path, settings.PROJECT_ROOT)
+ return relative_path
diff --git a/mediagenerator/generators/bundles/settings.py b/mediagenerator/generators/bundles/settings.py
index 679fa0b..1b9209b 100644
--- a/mediagenerator/generators/bundles/settings.py
+++ b/mediagenerator/generators/bundles/settings.py
@@ -1,25 +1,26 @@
from django.conf import settings
DEFAULT_MEDIA_FILTERS = getattr(settings, 'DEFAULT_MEDIA_FILTERS', {
'ccss': 'mediagenerator.filters.clever.CleverCSS',
'coffee': 'mediagenerator.filters.coffeescript.CoffeeScript',
'css': 'mediagenerator.filters.cssurl.CSSURLFileFilter',
'html': 'mediagenerator.filters.template.Template',
'py': 'mediagenerator.filters.pyjs_filter.Pyjs',
'pyva': 'mediagenerator.filters.pyvascript_filter.PyvaScript',
'sass': 'mediagenerator.filters.sass.Sass',
'scss': 'mediagenerator.filters.sass.Sass',
+ 'less': 'mediagenerator.filters.less.Less',
})
ROOT_MEDIA_FILTERS = getattr(settings, 'ROOT_MEDIA_FILTERS', {})
# These are applied in addition to ROOT_MEDIA_FILTERS.
# The separation is done because we don't want users to
# always specify the default filters when they merely want
# to configure YUICompressor or Closure.
BASE_ROOT_MEDIA_FILTERS = getattr(settings, 'BASE_ROOT_MEDIA_FILTERS', {
'*': 'mediagenerator.filters.concat.Concat',
'css': 'mediagenerator.filters.cssurl.CSSURL',
})
MEDIA_BUNDLES = getattr(settings, 'MEDIA_BUNDLES', ())
|
adieu/django-mediagenerator | bdbbce9482d246e9bbe02f4901ade3af65244d71 | fixed __main__ detection on GAE/Python 2.7 | diff --git a/mediagenerator/settings.py b/mediagenerator/settings.py
index d6ffb55..fbdd187 100644
--- a/mediagenerator/settings.py
+++ b/mediagenerator/settings.py
@@ -1,38 +1,40 @@
from django.conf import settings
from django.utils.encoding import force_unicode
import os
-import __main__
+import sys
+
+__main__ = sys.modules.get('__main__')
_map_file_path = '_generated_media_names.py'
_media_dir = '_generated_media'
# __main__ is not guaranteed to have the __file__ attribute
if hasattr(__main__, '__file__'):
_root = os.path.dirname(__main__.__file__)
_map_file_path = os.path.join(_root, _map_file_path)
_media_dir = os.path.join(_root, _media_dir)
GENERATED_MEDIA_DIR = os.path.abspath(
getattr(settings, 'GENERATED_MEDIA_DIR', _media_dir))
GENERATED_MEDIA_NAMES_MODULE = getattr(settings, 'GENERATED_MEDIA_NAMES_MODULE',
'_generated_media_names')
GENERATED_MEDIA_NAMES_FILE = os.path.abspath(
getattr(settings, 'GENERATED_MEDIA_NAMES_FILE', _map_file_path))
DEV_MEDIA_URL = getattr(settings, 'DEV_MEDIA_URL',
getattr(settings, 'STATIC_URL', settings.MEDIA_URL))
PRODUCTION_MEDIA_URL = getattr(settings, 'PRODUCTION_MEDIA_URL', DEV_MEDIA_URL)
MEDIA_GENERATORS = getattr(settings, 'MEDIA_GENERATORS', (
'mediagenerator.generators.copyfiles.CopyFiles',
'mediagenerator.generators.bundles.Bundles',
'mediagenerator.generators.manifest.Manifest',
))
_global_media_dirs = getattr(settings, 'GLOBAL_MEDIA_DIRS',
getattr(settings, 'STATICFILES_DIRS', ()))
GLOBAL_MEDIA_DIRS = [os.path.normcase(os.path.normpath(force_unicode(path)))
for path in _global_media_dirs]
IGNORE_APP_MEDIA_DIRS = getattr(settings, 'IGNORE_APP_MEDIA_DIRS',
('django.contrib.admin',))
MEDIA_DEV_MODE = getattr(settings, 'MEDIA_DEV_MODE', settings.DEBUG)
|
adieu/django-mediagenerator | 85f1a4a4dd8cd9c59e2141da7c6b3576b42a4926 | forgot to mention a change | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 6801909..51a8343 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,155 +1,156 @@
Changelog
=============================================================
Version 1.10.4
-------------------------------------------------------------
+* Fixed remapping of CSS url()s that contain a "?"
* Fixed serving of unicode content by media middleware
Version 1.10.3
-------------------------------------------------------------
* Fixed lots of unicode issues
Version 1.10.2
-------------------------------------------------------------
**Upgrade notes:** If you've specified a custom ``SASS_FRAMEWORKS`` in your ``settings.py`` you now also have to list ``compass`` and ``blueprint`` in that setting.
* All Compass/Sass frameworks (including ``compass`` and ``blueprint``) now have to be listed explictily in the ``SASS_FRAMEWORKS`` setting.
Version 1.10.1
-------------------------------------------------------------
* Added workaround for Windows bug in Sass 3.1. Backslash characters aren't handled correctly for "-I" import path parameters.
Version 1.10
-------------------------------------------------------------
* Added Compass support to Sass filter. You now have to install both Compass and Sass. Import Sass/Compass frameworks via ``manage.py importsassframeworks``.
* Fixed CoffeeScript support on OSX
* Fixed support for non-ascii chars in input files
* Added "Content-Length" response header for files served in dev mode (needed for Flash). Thanks to "sayane" for the patch.
* Fixed typo which resulted in broken support for ``.html`` assets. Thanks to "pendletongp" for the patch.
* Now showing instructive error message when Sass can't be found
* Use correct output path for ``_generated_media_names.py`` even when ``manage.py generatemedia`` is not started from the project root. Thanks to "pendletongp" for the patch.
* Added support for overriding the ``_generated_media_names`` module's import path and file system location (only needed for non-standard project structures).
Version 1.9.2
-------------------------------------------------------------
* Added missing ``base.manifest`` template and ``base_project`` to zip package
Version 1.9.1
-------------------------------------------------------------
* Fixed relative imports in Sass filter
Version 1.9
-------------------------------------------------------------
* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
* Added caching for CoffeeScript compilation results
* In cache manifests the ``NETWORK`` section now contains "``*``" by default
* By default ``.woff`` files are now copied, too
* Fixed first-time media generation when ``MEDIA_DEV_MODE=False``
* Fixed i18n filter in development mode. Contributed by Simon Payne.
* Fixed support for "/" in bundle names in dev mode (always worked fine in production)
* Changed ``DEV_MEDIA_URL`` fallback from ``STATICFILES_URL`` to ``STATIC_URL`` (has been changed in Django trunk)
Version 1.8
-------------------------------------------------------------
* HTML5 manifest now uses a regex to match included/excluded files
* Added support for scss files
* Fixed Sass ``@import`` tracking for partials
Version 1.7
-------------------------------------------------------------
* Large performance improvements, in particular on App Engine dev_appserver
Version 1.6.1
-------------------------------------------------------------
* Fixed support for Django 1.1 which imports ``mediagenerator.templatetags.media`` as ``django.templatetags.media`` and thus breaks relative imports
Version 1.6
-------------------------------------------------------------
**Upgrade notes:** The installation got simplified. Please remove the media code from your urls.py. The ``MediaMiddleware`` now takes care of everything.
* Added support for CSS data URIs. Doesn't yet generate MHTML for IE6/7 support.
* Added support for pre-bundling i18n JavaScript translations, so you don't need to use Django's slower AJAX view. With this filter translations are part of your generated JS bundle.
* Added support for CleverCSS
* Simplified installation process. The media view got completely replaced by ``MediaMiddleware``.
* Fixed support for output variations (needed by i18n filter to generate the same JS file in different variations for each language)
Version 1.5.1
-------------------------------------------------------------
**Upgrade notes:** There's a conflict with ``STATICFILES_URL`` in Django trunk (1.3). Use ``DEV_MEDIA_URL`` instead from now on.
* ``DEV_MEDIA_URL`` should be used instead of ``MEDIA_URL`` and ``STATICFILES_URL``, though the other two are still valid for backwards-compatibility
Version 1.5
-------------------------------------------------------------
This is another staticfiles-compatibility release which is intended to allow for writing reusable open-source apps.
**Upgrade notes:** The CSS URL rewriting scheme has changed. Previously, ``url()`` statements in CSS files were treated similar to "absolute" URLs where the root is ``STATICFILES_URL`` (or ``MEDIA_URL``). This scheme was used because it was consistent with URLs in Sass. Now URLs are treated as relative to the CSS file. So, if the file ``css/style.css`` wants to link to ``img/icon.png`` the URL now has to be ``url(../img/icon.png)``. Previously it was ``url(img/icon.png)``. One way to upgrade to the staticfiles-compatible scheme is to modify your existing URLs.
If you don't want to change your CSS files there is an alternative, but it's not staticfiles-compatible. Add the following to your settings: ``REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = False``
**Important:** Sass files still use the old scheme (``url(img/icon.png)``) because this is **much** easier to understand and allows for more reusable code, especially when you ``@import`` other Sass modules and those link to images.
* Made CSS URL rewriting system compatible with ``django.contrib.staticfiles``
* Added support for CSS URLs that contain a hash (e.g.: ``url('webfont.svg#webfontmAfNlbV6')``). Thanks to Karl Bowden for the patch!
* Filter backends now have an additional ``self.bundle`` attribute which contains the final bundle name
* Fixed an incompatibility with Django 1.1 and 1.0 (``django.utils.itercompat.product`` isn't available in those releases)
* Fixed ``MediaMiddleware``, so it doesn't cache error responses
Version 1.4
-------------------------------------------------------------
This is a compatibility release which prepares for the new staticfiles feature in Django 1.3.
**Upgrade notes:** Place your app media in a "static" folder instead of a "media" folder. Use ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) instead of ``MEDIA_URL`` from now on.
* App media is now searched in "static" folders instead of "media". For now, you can still use "media" folders, but this might be deprecated in the future (for the sake of having just one standard for reusable apps).
* ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) should be used instead of ``MEDIA_URL`` because the meaning of that variable has changed in Django 1.3.
* ``DEV_MEDIA_URL`` falls back to ``STATICFILES_URL`` and ``GLOBAL_MEDIA_DIRS`` falls back to ``STATICFILES_DIRS`` if undefined (you should still use the former, respectively; this is just for convenience)
Version 1.3.1
-------------------------------------------------------------
* Improved handling of media variations. This also fixes a bug with using CSS media types in production mode
Version 1.3
-------------------------------------------------------------
* Added support for setting media type for CSS. E.g.: ``{% include_media 'bundle.css' media='print' %}``
Version 1.2.1
-------------------------------------------------------------
* Fixed caching problems on runserver when using i18n and ``LocaleMiddleware``
Version 1.2
-------------------------------------------------------------
**Upgrade notes:** Please add ``'mediagenerator.middleware.MediaMiddleware'`` as the **first** middleware in your settings.py.
* Got rid of unnecessary HTTP roundtrips when ``USE_ETAGS = True``
* Added Django template filter (by default only used for .html files), contributed by Matt Bierner
* Added media_url() filter which provides access to generated URLs from JS
* CopyFiles backend can now ignore files matching certain regex patterns
Version 1.1
-------------------------------------------------------------
* Added Closure compiler backend
* Added HTML5 cache manifest file backend
* Fixed Sass support on Linux
* Updated pyjs filter to latest pyjs repo version
* "swf" and "ico" files are now copied, too, by default
diff --git a/README.rst b/README.rst
index ae47d80..2f0d6ab 100644
--- a/README.rst
+++ b/README.rst
@@ -1,24 +1,25 @@
Improve your user experience with amazingly fast page loads by combining,
compressing, and versioning your JavaScript & CSS files and images.
django-mediagenerator_ eliminates unnecessary HTTP requests
and maximizes cache usage.
Supports App Engine, Sass_, HTML5 offline manifests, Jinja2_,
Python/pyjs_, CoffeeScript_, and much more. Visit the
`project site`_ for more information.
Most important changes in version 1.10.4
=============================================================
+* Fixed remapping of CSS url()s that contain a "?"
* Fixed serving of unicode content by media middleware
See `CHANGELOG.rst`_ for the complete changelog.
.. _django-mediagenerator: http://www.allbuttonspressed.com/projects/django-mediagenerator
.. _project site: django-mediagenerator_
.. _Sass: http://sass-lang.com/
.. _pyjs: http://pyjs.org/
.. _CoffeeScript: http://coffeescript.org/
.. _Jinja2: http://jinja.pocoo.org/
.. _CHANGELOG.rst: https://bitbucket.org/wkornewald/django-mediagenerator/src/tip/CHANGELOG.rst
|
adieu/django-mediagenerator | 108fa31f52a12cc41b803c2366ea1b7fd616545b | improved an error message and bumped version | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 7098eb5..6801909 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,150 +1,155 @@
Changelog
=============================================================
+Version 1.10.4
+-------------------------------------------------------------
+
+* Fixed serving of unicode content by media middleware
+
Version 1.10.3
-------------------------------------------------------------
* Fixed lots of unicode issues
Version 1.10.2
-------------------------------------------------------------
**Upgrade notes:** If you've specified a custom ``SASS_FRAMEWORKS`` in your ``settings.py`` you now also have to list ``compass`` and ``blueprint`` in that setting.
* All Compass/Sass frameworks (including ``compass`` and ``blueprint``) now have to be listed explicitly in the ``SASS_FRAMEWORKS`` setting.
Version 1.10.1
-------------------------------------------------------------
* Added workaround for Windows bug in Sass 3.1. Backslash characters aren't handled correctly for "-I" import path parameters.
Version 1.10
-------------------------------------------------------------
* Added Compass support to Sass filter. You now have to install both Compass and Sass. Import Sass/Compass frameworks via ``manage.py importsassframeworks``.
* Fixed CoffeeScript support on OSX
* Fixed support for non-ascii chars in input files
* Added "Content-Length" response header for files served in dev mode (needed for Flash). Thanks to "sayane" for the patch.
* Fixed typo which resulted in broken support for ``.html`` assets. Thanks to "pendletongp" for the patch.
* Now showing instructive error message when Sass can't be found
* Use correct output path for ``_generated_media_names.py`` even when ``manage.py generatemedia`` is not started from the project root. Thanks to "pendletongp" for the patch.
* Added support for overriding the ``_generated_media_names`` module's import path and file system location (only needed for non-standard project structures).
Version 1.9.2
-------------------------------------------------------------
* Added missing ``base.manifest`` template and ``base_project`` to zip package
Version 1.9.1
-------------------------------------------------------------
* Fixed relative imports in Sass filter
Version 1.9
-------------------------------------------------------------
* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
* Added caching for CoffeeScript compilation results
* In cache manifests the ``NETWORK`` section now contains "``*``" by default
* By default ``.woff`` files are now copied, too
* Fixed first-time media generation when ``MEDIA_DEV_MODE=False``
* Fixed i18n filter in development mode. Contributed by Simon Payne.
* Fixed support for "/" in bundle names in dev mode (always worked fine in production)
* Changed ``DEV_MEDIA_URL`` fallback from ``STATICFILES_URL`` to ``STATIC_URL`` (has been changed in Django trunk)
Version 1.8
-------------------------------------------------------------
* HTML5 manifest now uses a regex to match included/excluded files
* Added support for scss files
* Fixed Sass ``@import`` tracking for partials
Version 1.7
-------------------------------------------------------------
* Large performance improvements, in particular on App Engine dev_appserver
Version 1.6.1
-------------------------------------------------------------
* Fixed support for Django 1.1 which imports ``mediagenerator.templatetags.media`` as ``django.templatetags.media`` and thus breaks relative imports
Version 1.6
-------------------------------------------------------------
**Upgrade notes:** The installation got simplified. Please remove the media code from your urls.py. The ``MediaMiddleware`` now takes care of everything.
* Added support for CSS data URIs. Doesn't yet generate MHTML for IE6/7 support.
* Added support for pre-bundling i18n JavaScript translations, so you don't need to use Django's slower AJAX view. With this filter translations are part of your generated JS bundle.
* Added support for CleverCSS
* Simplified installation process. The media view got completely replaced by ``MediaMiddleware``.
* Fixed support for output variations (needed by i18n filter to generate the same JS file in different variations for each language)
Version 1.5.1
-------------------------------------------------------------
**Upgrade notes:** There's a conflict with ``STATICFILES_URL`` in Django trunk (1.3). Use ``DEV_MEDIA_URL`` instead from now on.
* ``DEV_MEDIA_URL`` should be used instead of ``MEDIA_URL`` and ``STATICFILES_URL``, though the other two are still valid for backwards-compatibility
Version 1.5
-------------------------------------------------------------
This is another staticfiles-compatibility release which is intended to allow for writing reusable open-source apps.
**Upgrade notes:** The CSS URL rewriting scheme has changed. Previously, ``url()`` statements in CSS files were treated similar to "absolute" URLs where the root is ``STATICFILES_URL`` (or ``MEDIA_URL``). This scheme was used because it was consistent with URLs in Sass. Now URLs are treated as relative to the CSS file. So, if the file ``css/style.css`` wants to link to ``img/icon.png`` the URL now has to be ``url(../img/icon.png)``. Previously it was ``url(img/icon.png)``. One way to upgrade to the staticfiles-compatible scheme is to modify your existing URLs.
If you don't want to change your CSS files there is an alternative, but it's not staticfiles-compatible. Add the following to your settings: ``REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = False``
**Important:** Sass files still use the old scheme (``url(img/icon.png)``) because this is **much** easier to understand and allows for more reusable code, especially when you ``@import`` other Sass modules and those link to images.
* Made CSS URL rewriting system compatible with ``django.contrib.staticfiles``
* Added support for CSS URLs that contain a hash (e.g.: ``url('webfont.svg#webfontmAfNlbV6')``). Thanks to Karl Bowden for the patch!
* Filter backends now have an additional ``self.bundle`` attribute which contains the final bundle name
* Fixed an incompatibility with Django 1.1 and 1.0 (``django.utils.itercompat.product`` isn't available in those releases)
* Fixed ``MediaMiddleware``, so it doesn't cache error responses
Version 1.4
-------------------------------------------------------------
This is a compatibility release which prepares for the new staticfiles feature in Django 1.3.
**Upgrade notes:** Place your app media in a "static" folder instead of a "media" folder. Use ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) instead of ``MEDIA_URL`` from now on.
* App media is now searched in "static" folders instead of "media". For now, you can still use "media" folders, but this might be deprecated in the future (for the sake of having just one standard for reusable apps).
* ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) should be used instead of ``MEDIA_URL`` because the meaning of that variable has changed in Django 1.3.
* ``DEV_MEDIA_URL`` falls back to ``STATICFILES_URL`` and ``GLOBAL_MEDIA_DIRS`` falls back to ``STATICFILES_DIRS`` if undefined (you should still use the former, respectively; this is just for convenience)
Version 1.3.1
-------------------------------------------------------------
* Improved handling of media variations. This also fixes a bug with using CSS media types in production mode
Version 1.3
-------------------------------------------------------------
* Added support for setting media type for CSS. E.g.: ``{% include_media 'bundle.css' media='print' %}``
Version 1.2.1
-------------------------------------------------------------
* Fixed caching problems on runserver when using i18n and ``LocaleMiddleware``
Version 1.2
-------------------------------------------------------------
**Upgrade notes:** Please add ``'mediagenerator.middleware.MediaMiddleware'`` as the **first** middleware in your settings.py.
* Got rid of unnecessary HTTP roundtrips when ``USE_ETAGS = True``
* Added Django template filter (by default only used for .html files), contributed by Matt Bierner
* Added media_url() filter which provides access to generated URLs from JS
* CopyFiles backend can now ignore files matching certain regex patterns
Version 1.1
-------------------------------------------------------------
* Added Closure compiler backend
* Added HTML5 cache manifest file backend
* Fixed Sass support on Linux
* Updated pyjs filter to latest pyjs repo version
* "swf" and "ico" files are now copied, too, by default
diff --git a/README.rst b/README.rst
index 2b9c524..ae47d80 100644
--- a/README.rst
+++ b/README.rst
@@ -1,23 +1,24 @@
Improve your user experience with amazingly fast page loads by combining,
compressing, and versioning your JavaScript & CSS files and images.
django-mediagenerator_ eliminates unnecessary HTTP requests
and maximizes cache usage.
Supports App Engine, Sass_, HTML5 offline manifests, Jinja2_,
Python/pyjs_, CoffeeScript_, and much more. Visit the
`project site`_ for more information.
-Most important changes in version 1.10.3
+Most important changes in version 1.10.4
=============================================================
-* Fixed lots of unicode issues
+* Fixed serving of unicode content by media middleware
+
See `CHANGELOG.rst`_ for the complete changelog.
.. _django-mediagenerator: http://www.allbuttonspressed.com/projects/django-mediagenerator
.. _project site: django-mediagenerator_
.. _Sass: http://sass-lang.com/
.. _pyjs: http://pyjs.org/
.. _CoffeeScript: http://coffeescript.org/
.. _Jinja2: http://jinja.pocoo.org/
.. _CHANGELOG.rst: https://bitbucket.org/wkornewald/django-mediagenerator/src/tip/CHANGELOG.rst
diff --git a/mediagenerator/middleware.py b/mediagenerator/middleware.py
index 40b178f..d8b50c9 100644
--- a/mediagenerator/middleware.py
+++ b/mediagenerator/middleware.py
@@ -1,61 +1,62 @@
from .settings import DEV_MEDIA_URL, MEDIA_DEV_MODE
# Only load other dependencies if they're needed
if MEDIA_DEV_MODE:
from .utils import _refresh_dev_names, _backend_mapping
from django.http import HttpResponse, Http404
from django.utils.cache import patch_cache_control
from django.utils.http import http_date
import time
TEXT_MIME_TYPES = (
'application/x-javascript',
'application/xhtml+xml',
'application/xml',
)
class MediaMiddleware(object):
"""
Middleware for serving and browser-side caching of media files.
This MUST be your *first* entry in MIDDLEWARE_CLASSES. Otherwise, some
other middleware might add ETags or otherwise manipulate the caching
headers which would result in the browser doing unnecessary HTTP
roundtrips for unchanged media.
"""
MAX_AGE = 60 * 60 * 24 * 365
def process_request(self, request):
if not MEDIA_DEV_MODE:
return
# We refresh the dev names only once for the whole request, so all
# media_url() calls are cached.
_refresh_dev_names()
if not request.path.startswith(DEV_MEDIA_URL):
return
filename = request.path[len(DEV_MEDIA_URL):]
try:
backend = _backend_mapping[filename]
except KeyError:
- raise Http404('No such media file "%s"' % filename)
+ raise Http404('The mediagenerator could not find the media file "%s"'
+ % filename)
content, mimetype = backend.get_dev_output(filename)
if not mimetype:
mimetype = 'application/octet-stream'
if isinstance(content, unicode):
content = content.encode('utf-8')
if mimetype.startswith('text/') or mimetype in TEXT_MIME_TYPES:
mimetype += '; charset=utf-8'
response = HttpResponse(content, content_type=mimetype)
response['Content-Length'] = len(content)
# Cache manifest files MUST NEVER be cached or you'll be unable to update
# your cached app!!!
if response['Content-Type'] != 'text/cache-manifest' and \
response.status_code == 200:
patch_cache_control(response, public=True, max_age=self.MAX_AGE)
response['Expires'] = http_date(time.time() + self.MAX_AGE)
return response
diff --git a/setup.py b/setup.py
index e0c7282..ac59b73 100644
--- a/setup.py
+++ b/setup.py
@@ -1,33 +1,33 @@
from setuptools import setup, find_packages
DESCRIPTION = 'Asset manager for Django'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except:
pass
setup(name='django-mediagenerator',
- version='1.10.3',
+ version='1.10.4',
packages=find_packages(exclude=('tests', 'tests.*',
'base_project', 'base_project.*')),
package_data={'mediagenerator.filters': ['pyjslibs/*.py', '*.rb'],
'mediagenerator': ['templates/mediagenerator/manifest/*']},
author='Waldemar Kornewald',
author_email='[email protected]',
url='http://www.allbuttonspressed.com/projects/django-mediagenerator',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
],
)
|
adieu/django-mediagenerator | 8c9932da0083c430adac4ed30ddd4c04d86a1aa9 | added default mimetype | diff --git a/mediagenerator/middleware.py b/mediagenerator/middleware.py
index 71a4496..40b178f 100644
--- a/mediagenerator/middleware.py
+++ b/mediagenerator/middleware.py
@@ -1,59 +1,61 @@
from .settings import DEV_MEDIA_URL, MEDIA_DEV_MODE
# Only load other dependencies if they're needed
if MEDIA_DEV_MODE:
from .utils import _refresh_dev_names, _backend_mapping
from django.http import HttpResponse, Http404
from django.utils.cache import patch_cache_control
from django.utils.http import http_date
import time
TEXT_MIME_TYPES = (
'application/x-javascript',
'application/xhtml+xml',
'application/xml',
)
class MediaMiddleware(object):
"""
Middleware for serving and browser-side caching of media files.
This MUST be your *first* entry in MIDDLEWARE_CLASSES. Otherwise, some
other middleware might add ETags or otherwise manipulate the caching
headers which would result in the browser doing unnecessary HTTP
roundtrips for unchanged media.
"""
MAX_AGE = 60 * 60 * 24 * 365
def process_request(self, request):
if not MEDIA_DEV_MODE:
return
# We refresh the dev names only once for the whole request, so all
# media_url() calls are cached.
_refresh_dev_names()
if not request.path.startswith(DEV_MEDIA_URL):
return
filename = request.path[len(DEV_MEDIA_URL):]
try:
backend = _backend_mapping[filename]
except KeyError:
raise Http404('No such media file "%s"' % filename)
content, mimetype = backend.get_dev_output(filename)
+ if not mimetype:
+ mimetype = 'application/octet-stream'
if isinstance(content, unicode):
content = content.encode('utf-8')
if mimetype.startswith('text/') or mimetype in TEXT_MIME_TYPES:
mimetype += '; charset=utf-8'
response = HttpResponse(content, content_type=mimetype)
response['Content-Length'] = len(content)
# Cache manifest files MUST NEVER be cached or you'll be unable to update
# your cached app!!!
if response['Content-Type'] != 'text/cache-manifest' and \
response.status_code == 200:
patch_cache_control(response, public=True, max_age=self.MAX_AGE)
response['Expires'] = http_date(time.time() + self.MAX_AGE)
return response
|
adieu/django-mediagenerator | cefcf83d8ebaf62142574a8917a2d56e4ffb5ccc | also use utf-8 when serving a few application/ mime types like JS, XML, XHTML | diff --git a/mediagenerator/middleware.py b/mediagenerator/middleware.py
index 9df7252..71a4496 100644
--- a/mediagenerator/middleware.py
+++ b/mediagenerator/middleware.py
@@ -1,53 +1,59 @@
from .settings import DEV_MEDIA_URL, MEDIA_DEV_MODE
# Only load other dependencies if they're needed
if MEDIA_DEV_MODE:
from .utils import _refresh_dev_names, _backend_mapping
from django.http import HttpResponse, Http404
from django.utils.cache import patch_cache_control
from django.utils.http import http_date
import time
+TEXT_MIME_TYPES = (
+ 'application/x-javascript',
+ 'application/xhtml+xml',
+ 'application/xml',
+)
+
class MediaMiddleware(object):
"""
Middleware for serving and browser-side caching of media files.
This MUST be your *first* entry in MIDDLEWARE_CLASSES. Otherwise, some
other middleware might add ETags or otherwise manipulate the caching
headers which would result in the browser doing unnecessary HTTP
roundtrips for unchanged media.
"""
MAX_AGE = 60 * 60 * 24 * 365
def process_request(self, request):
if not MEDIA_DEV_MODE:
return
# We refresh the dev names only once for the whole request, so all
# media_url() calls are cached.
_refresh_dev_names()
if not request.path.startswith(DEV_MEDIA_URL):
return
filename = request.path[len(DEV_MEDIA_URL):]
try:
backend = _backend_mapping[filename]
except KeyError:
raise Http404('No such media file "%s"' % filename)
content, mimetype = backend.get_dev_output(filename)
if isinstance(content, unicode):
content = content.encode('utf-8')
- if mimetype.startswith('text/'):
+ if mimetype.startswith('text/') or mimetype in TEXT_MIME_TYPES:
mimetype += '; charset=utf-8'
response = HttpResponse(content, content_type=mimetype)
response['Content-Length'] = len(content)
# Cache manifest files MUST NEVER be cached or you'll be unable to update
# your cached app!!!
if response['Content-Type'] != 'text/cache-manifest' and \
response.status_code == 200:
patch_cache_control(response, public=True, max_age=self.MAX_AGE)
response['Expires'] = http_date(time.time() + self.MAX_AGE)
return response
|
adieu/django-mediagenerator | 3ff38dcf91d0aaaef6dd1455d9bd029381677647 | fixed handling of query parameters in CSS url()s | diff --git a/mediagenerator/filters/cssurl.py b/mediagenerator/filters/cssurl.py
index fed4f17..d59f45a 100644
--- a/mediagenerator/filters/cssurl.py
+++ b/mediagenerator/filters/cssurl.py
@@ -1,84 +1,98 @@
from base64 import b64encode
from django.conf import settings
from mediagenerator.generators.bundles.base import Filter, FileFilter
from mediagenerator.utils import media_url, prepare_patterns, find_file
from mimetypes import guess_type
import logging
import os
import posixpath
import re
url_re = re.compile(r'url\s*\(["\']?([\w\.][^:]*?)["\']?\)', re.UNICODE)
# Whether to rewrite CSS URLs, at all
REWRITE_CSS_URLS = getattr(settings, 'REWRITE_CSS_URLS', True)
# Whether to rewrite CSS URLs relative to the respective source file
# or whether to use "absolute" URL rewriting (i.e., relative URLs are
# considered absolute with regards to STATICFILES_URL)
REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = getattr(settings,
'REWRITE_CSS_URLS_RELATIVE_TO_SOURCE', True)
GENERATE_DATA_URIS = getattr(settings, 'GENERATE_DATA_URIS', False)
MAX_DATA_URI_FILE_SIZE = getattr(settings, 'MAX_DATA_URI_FILE_SIZE', 12 * 1024)
IGNORE_PATTERN = prepare_patterns(getattr(settings,
'IGNORE_DATA_URI_PATTERNS', (r'.*\.htc',)), 'IGNORE_DATA_URI_PATTERNS')
class URLRewriter(object):
def __init__(self, base_path='./'):
if not base_path:
base_path = './'
self.base_path = base_path
def rewrite_urls(self, content):
if not REWRITE_CSS_URLS:
return content
return url_re.sub(self.fixurls, content)
def fixurls(self, match):
url = match.group(1)
+
hashid = ''
if '#' in url:
url, hashid = url.split('#', 1)
hashid = '#' + hashid
+
+ url_query = None
+ if '?' in url:
+ url, url_query = url.split('?', 1)
+
if ':' not in url and not url.startswith('/'):
rebased_url = posixpath.join(self.base_path, url)
rebased_url = posixpath.normpath(rebased_url)
try:
if GENERATE_DATA_URIS:
path = find_file(rebased_url)
if os.path.getsize(path) <= MAX_DATA_URI_FILE_SIZE and \
not IGNORE_PATTERN.match(rebased_url):
data = b64encode(open(path, 'rb').read())
mime = guess_type(path)[0] or 'application/octet-stream'
return 'url(data:%s;base64,%s)' % (mime, data)
url = media_url(rebased_url)
except:
logging.error('URL not found: %s' % url)
- return 'url(%s%s)' % (url, hashid)
+
+ if url_query is None:
+ url_query = ''
+ elif '?' in url:
+ url_query = '&' + url_query
+ else:
+ url_query = '?' + url_query
+
+ return 'url(%s%s%s)' % (url, url_query, hashid)
class CSSURL(Filter):
"""Rewrites URLs relative to media folder ("absolute" rewriting)."""
def __init__(self, **kwargs):
super(CSSURL, self).__init__(**kwargs)
assert self.filetype == 'css', (
'CSSURL only supports CSS output. '
'The parent filter expects "%s".' % self.filetype)
def get_output(self, variation):
rewriter = URLRewriter()
for input in self.get_input(variation):
yield rewriter.rewrite_urls(input)
def get_dev_output(self, name, variation):
rewriter = URLRewriter()
content = super(CSSURL, self).get_dev_output(name, variation)
return rewriter.rewrite_urls(content)
class CSSURLFileFilter(FileFilter):
"""Rewrites URLs relative to input file's location."""
def get_dev_output(self, name, variation):
content = super(CSSURLFileFilter, self).get_dev_output(name, variation)
if not REWRITE_CSS_URLS_RELATIVE_TO_SOURCE:
return content
rewriter = URLRewriter(posixpath.dirname(name))
return rewriter.rewrite_urls(content)
|
adieu/django-mediagenerator | 315fae185093824014bfe4868f1ac1d3100e5341 | add charset when serving text/* mime types | diff --git a/mediagenerator/middleware.py b/mediagenerator/middleware.py
index 06ec3e9..9df7252 100644
--- a/mediagenerator/middleware.py
+++ b/mediagenerator/middleware.py
@@ -1,51 +1,53 @@
from .settings import DEV_MEDIA_URL, MEDIA_DEV_MODE
# Only load other dependencies if they're needed
if MEDIA_DEV_MODE:
from .utils import _refresh_dev_names, _backend_mapping
from django.http import HttpResponse, Http404
from django.utils.cache import patch_cache_control
from django.utils.http import http_date
import time
class MediaMiddleware(object):
"""
Middleware for serving and browser-side caching of media files.
This MUST be your *first* entry in MIDDLEWARE_CLASSES. Otherwise, some
other middleware might add ETags or otherwise manipulate the caching
headers which would result in the browser doing unnecessary HTTP
roundtrips for unchanged media.
"""
MAX_AGE = 60 * 60 * 24 * 365
def process_request(self, request):
if not MEDIA_DEV_MODE:
return
# We refresh the dev names only once for the whole request, so all
# media_url() calls are cached.
_refresh_dev_names()
if not request.path.startswith(DEV_MEDIA_URL):
return
filename = request.path[len(DEV_MEDIA_URL):]
try:
backend = _backend_mapping[filename]
except KeyError:
raise Http404('No such media file "%s"' % filename)
content, mimetype = backend.get_dev_output(filename)
if isinstance(content, unicode):
content = content.encode('utf-8')
+ if mimetype.startswith('text/'):
+ mimetype += '; charset=utf-8'
response = HttpResponse(content, content_type=mimetype)
response['Content-Length'] = len(content)
# Cache manifest files MUST NEVER be cached or you'll be unable to update
# your cached app!!!
if response['Content-Type'] != 'text/cache-manifest' and \
response.status_code == 200:
patch_cache_control(response, public=True, max_age=self.MAX_AGE)
response['Expires'] = http_date(time.time() + self.MAX_AGE)
return response
|
adieu/django-mediagenerator | 052d9f1ae075940819345ace5df9f3e5c721a7c6 | fixed clevercss import issue | diff --git a/mediagenerator/filters/clevercss.py b/mediagenerator/filters/clever.py
similarity index 100%
rename from mediagenerator/filters/clevercss.py
rename to mediagenerator/filters/clever.py
diff --git a/mediagenerator/generators/bundles/settings.py b/mediagenerator/generators/bundles/settings.py
index eb7741c..679fa0b 100644
--- a/mediagenerator/generators/bundles/settings.py
+++ b/mediagenerator/generators/bundles/settings.py
@@ -1,25 +1,25 @@
from django.conf import settings
DEFAULT_MEDIA_FILTERS = getattr(settings, 'DEFAULT_MEDIA_FILTERS', {
- 'ccss': 'mediagenerator.filters.clevercss.CleverCSS',
+ 'ccss': 'mediagenerator.filters.clever.CleverCSS',
'coffee': 'mediagenerator.filters.coffeescript.CoffeeScript',
'css': 'mediagenerator.filters.cssurl.CSSURLFileFilter',
'html': 'mediagenerator.filters.template.Template',
'py': 'mediagenerator.filters.pyjs_filter.Pyjs',
'pyva': 'mediagenerator.filters.pyvascript_filter.PyvaScript',
'sass': 'mediagenerator.filters.sass.Sass',
'scss': 'mediagenerator.filters.sass.Sass',
})
ROOT_MEDIA_FILTERS = getattr(settings, 'ROOT_MEDIA_FILTERS', {})
# These are applied in addition to ROOT_MEDIA_FILTERS.
# The separation is done because we don't want users to
# always specify the default filters when they merely want
# to configure YUICompressor or Closure.
BASE_ROOT_MEDIA_FILTERS = getattr(settings, 'BASE_ROOT_MEDIA_FILTERS', {
'*': 'mediagenerator.filters.concat.Concat',
'css': 'mediagenerator.filters.cssurl.CSSURL',
})
MEDIA_BUNDLES = getattr(settings, 'MEDIA_BUNDLES', ())
|
adieu/django-mediagenerator | 48c0250d86688fb0968373893f713c46f3af273e | Fixed python 2.5 not having the followlinks option for os.walk with proper version detection. | diff --git a/mediagenerator/filters/pyjs_filter.py b/mediagenerator/filters/pyjs_filter.py
index 8977308..a70ea04 100644
--- a/mediagenerator/filters/pyjs_filter.py
+++ b/mediagenerator/filters/pyjs_filter.py
@@ -1,276 +1,287 @@
from django.utils.encoding import smart_str
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import get_media_dirs, read_text_file
from pyjs.translator import import_compiler, Translator, LIBRARY_PATH
from textwrap import dedent
import os
+import sys
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Register PYVA() function
try:
from pyvascript.grammar import compile
from pyjs.translator import native_js_func
@native_js_func
def PYVA(content, unescape, is_statement, **kwargs):
result = compile(dedent(unescape(content)))
if not is_statement:
return result.strip().rstrip('\r\n\t ;')
return result
except ImportError:
# No PyvaScript installed
pass
_HANDLE_EXCEPTIONS = """
} finally { $pyjs.in_try_except -= 1; }
} catch(err) {
pyjslib['_handle_exception'](err);
}
"""
PYJS_INIT_LIB_PATH = os.path.join(LIBRARY_PATH, 'builtin', 'public', '_pyjs.js')
BUILTIN_PATH = os.path.join(LIBRARY_PATH, 'builtin')
STDLIB_PATH = os.path.join(LIBRARY_PATH, 'lib')
EXTRA_LIBS_PATH = os.path.join(os.path.dirname(__file__), 'pyjslibs')
_LOAD_PYJSLIB = """
$p = $pyjs.loaded_modules["pyjslib"];
$p('pyjslib');
$pyjs.__modules__.pyjslib = $p['pyjslib']
"""
INIT_CODE = """
var $wnd = window;
var $doc = window.document;
var $pyjs = new Object();
var $p = null;
$pyjs.platform = 'safari';
$pyjs.global_namespace = this;
$pyjs.__modules__ = {};
$pyjs.modules_hash = {};
$pyjs.loaded_modules = {};
$pyjs.options = new Object();
$pyjs.options.arg_ignore = true;
$pyjs.options.arg_count = true;
$pyjs.options.arg_is_instance = true;
$pyjs.options.arg_instance_type = false;
$pyjs.options.arg_kwarg_dup = true;
$pyjs.options.arg_kwarg_unexpected_keyword = true;
$pyjs.options.arg_kwarg_multiple_values = true;
$pyjs.options.dynamic_loading = false;
$pyjs.trackstack = [];
$pyjs.track = {module:'__main__', lineno: 1};
$pyjs.trackstack.push($pyjs.track);
$pyjs.__active_exception_stack__ = null;
$pyjs.__last_exception_stack__ = null;
$pyjs.__last_exception__ = null;
$pyjs.in_try_except = 0;
""".lstrip()
class Pyjs(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, exclude_main_libs=False, main_module=None,
debug=None, path=(), only_dependencies=None)
if isinstance(self.path, basestring):
self.path = (self.path,)
self.path += tuple(get_media_dirs())
if self.only_dependencies is None:
self.only_dependencies = bool(self.main_module)
if self.only_dependencies:
self.path += (STDLIB_PATH, BUILTIN_PATH, EXTRA_LIBS_PATH)
super(Pyjs, self).__init__(**kwargs)
assert self.filetype == 'js', (
'Pyjs only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
if self.only_dependencies:
assert self.main_module, \
'You must provide a main module in only_dependencies mode'
self._compiled = {}
self._collected = {}
@classmethod
def from_default(cls, name):
return {'main_module': name.rsplit('.', 1)[0].replace('/', '.')}
def get_output(self, variation):
self._collect_all_modules()
if not self.exclude_main_libs:
yield self._compile_init()
if self.only_dependencies:
self._regenerate(dev_mode=False)
for name in sorted(self._compiled.keys()):
yield self._compiled[name][1]
else:
for name in sorted(self._collected.keys()):
source = read_text_file(self._collected[name])
yield self._compile(name, source, dev_mode=False)[0]
yield self._compile_main(dev_mode=False)
def get_dev_output(self, name, variation):
self._collect_all_modules()
name = name.split('/', 1)[-1]
if name == '._pyjs.js':
return self._compile_init()
elif name == '.main.js':
return self._compile_main(dev_mode=True)
if self.only_dependencies:
self._regenerate(dev_mode=True)
return self._compiled[name][1]
else:
source = read_text_file(self._collected[name])
return self._compile(name, source, dev_mode=True)[0]
def get_dev_output_names(self, variation):
self._collect_all_modules()
if not self.exclude_main_libs:
content = self._compile_init()
hash = sha1(smart_str(content)).hexdigest()
yield '._pyjs.js', hash
if self.only_dependencies:
self._regenerate(dev_mode=True)
for name in sorted(self._compiled.keys()):
yield name, self._compiled[name][2]
else:
for name in sorted(self._collected.keys()):
yield name, None
if self.main_module is not None or not self.exclude_main_libs:
content = self._compile_main(dev_mode=True)
hash = sha1(smart_str(content)).hexdigest()
yield '.main.js', hash
def _regenerate(self, dev_mode=False):
# This function is only called in only_dependencies mode
if self._compiled:
for module_name, (mtime, content, hash) in self._compiled.items():
if module_name not in self._collected or \
not os.path.exists(self._collected[module_name]) or \
os.path.getmtime(self._collected[module_name]) != mtime:
# Just recompile everything
# TODO: track dependencies and changes and recompile only
# what's necessary
self._compiled = {}
break
else:
# No changes
return
modules = [self.main_module, 'pyjslib']
while True:
if not modules:
break
module_name = modules.pop()
path = self._collected[module_name]
mtime = os.path.getmtime(path)
source = read_text_file(path)
try:
content, py_deps, js_deps = self._compile(module_name, source, dev_mode=dev_mode)
except:
self._compiled = {}
raise
hash = sha1(smart_str(content)).hexdigest()
self._compiled[module_name] = (mtime, content, hash)
for name in py_deps:
if name not in self._collected:
if '.' in name and name.rsplit('.', 1)[0] in self._collected:
name = name.rsplit('.', 1)[0]
else:
raise ImportError('The pyjs module %s could not find '
'the dependency %s' % (module_name, name))
if name not in self._compiled:
modules.append(name)
def _compile(self, name, source, dev_mode=False):
if self.debug is None:
debug = dev_mode
else:
debug = self.debug
compiler = import_compiler(False)
tree = compiler.parse(source)
output = StringIO()
translator = Translator(compiler, name, name, source, tree, output,
# Debug options
debug=debug, source_tracking=debug, line_tracking=debug,
store_source=debug,
# Speed and size optimizations
function_argument_checking=debug, attribute_checking=False,
inline_code=False, number_classes=False,
# Sufficient Python conformance
operator_funcs=True, bound_methods=True, descriptors=True,
)
return output.getvalue(), translator.imported_modules, translator.imported_js
def _compile_init(self):
return INIT_CODE + read_text_file(PYJS_INIT_LIB_PATH)
def _compile_main(self, dev_mode=False):
if self.debug is None:
debug = dev_mode
else:
debug = self.debug
content = ''
if not self.exclude_main_libs:
content += _LOAD_PYJSLIB
if self.main_module is not None:
content += '\n\n'
if debug:
content += 'try {\n'
content += ' try {\n'
content += ' $pyjs.in_try_except += 1;\n '
content += 'pyjslib.___import___("%s", null, "__main__");' % self.main_module
if debug:
content += _HANDLE_EXCEPTIONS
return content
def _collect_all_modules(self):
"""Collect modules, so we can handle imports later"""
for pkgroot in self.path:
pkgroot = os.path.abspath(pkgroot)
- for root, dirs, files in os.walk(pkgroot, followlinks=True):
+
+ #python 2.5 does not have the followlinks keyword argument
+ has_followlinks = sys.version_info >= (2, 6)
+ if has_followlinks:
+ allfiles = os.walk(pkgroot, followlinks=True)
+ else:
+ allfiles = os.walk(pkgroot)
+
+ for root, dirs, files in allfiles:
if '__init__.py' in files:
files.remove('__init__.py')
# The root __init__.py is ignored
if root != pkgroot:
files.insert(0, '__init__.py')
elif root != pkgroot:
# Only add valid Python packages
dirs[:] = []
continue
for filename in files:
if not filename.endswith('.py'):
continue
path = os.path.join(root, filename)
+ if not has_followlinks:
+ path = os.path.abspath(path)
module_path = path[len(pkgroot) + len(os.sep):]
if os.path.basename(module_path) == '__init__.py':
module_name = os.path.dirname(module_path)
else:
module_name = module_path[:-3]
assert '.' not in module_name, \
'Invalid module file name: %s' % module_path
module_name = module_name.replace(os.sep, '.')
self._collected.setdefault(module_name, path)
diff --git a/mediagenerator/generators/copyfiles.py b/mediagenerator/generators/copyfiles.py
index fb27a2c..34599a7 100644
--- a/mediagenerator/generators/copyfiles.py
+++ b/mediagenerator/generators/copyfiles.py
@@ -1,44 +1,54 @@
from django.conf import settings
from hashlib import sha1
from mediagenerator.base import Generator
from mediagenerator.utils import get_media_dirs, find_file, prepare_patterns
from mimetypes import guess_type
import os
+import sys
COPY_MEDIA_FILETYPES = getattr(settings, 'COPY_MEDIA_FILETYPES',
('gif', 'jpg', 'jpeg', 'png', 'svg', 'svgz', 'ico', 'swf', 'ttf', 'otf',
'eot', 'woff'))
IGNORE_PATTERN = prepare_patterns(getattr(settings,
'IGNORE_MEDIA_COPY_PATTERNS', ()), 'IGNORE_MEDIA_COPY_PATTERNS')
class CopyFiles(Generator):
def get_dev_output(self, name):
path = find_file(name)
fp = open(path, 'rb')
content = fp.read()
fp.close()
mimetype = guess_type(path)[0]
return content, mimetype
def get_dev_output_names(self):
media_files = {}
for root in get_media_dirs():
self.collect_copyable_files(media_files, root)
for name, source in media_files.items():
fp = open(source, 'rb')
hash = sha1(fp.read()).hexdigest()
fp.close()
yield name, name, hash
def collect_copyable_files(self, media_files, root):
- for root_path, dirs, files in os.walk(root):
+ #python 2.5 does not have the followlinks keyword argument
+ has_followlinks = sys.version_info >= (2, 6)
+ if has_followlinks:
+ allfiles = os.walk(root, followlinks=True)
+ else:
+ allfiles = os.walk(root)
+
+ for root_path, dirs, files in allfiles:
for file in files:
ext = os.path.splitext(file)[1].lstrip('.')
- path = os.path.abspath(os.path.join(root_path, file))
+ path = os.path.join(root_path, file)
+ if not has_followlinks:
+ path = os.path.abspath(path)
media_path = path[len(root) + 1:].replace(os.sep, '/')
if ext in COPY_MEDIA_FILETYPES and \
not IGNORE_PATTERN.match(media_path):
media_files[media_path] = path
|
adieu/django-mediagenerator | 608b1df5f3f6a8b5e6ff5c5825740ce96b4920e3 | Fixed python 2.5 does not having the followlinks option for os.walk. | diff --git a/mediagenerator/generators/copyfiles.py b/mediagenerator/generators/copyfiles.py
index 1e9285f..fb27a2c 100644
--- a/mediagenerator/generators/copyfiles.py
+++ b/mediagenerator/generators/copyfiles.py
@@ -1,43 +1,44 @@
from django.conf import settings
from hashlib import sha1
from mediagenerator.base import Generator
from mediagenerator.utils import get_media_dirs, find_file, prepare_patterns
from mimetypes import guess_type
import os
COPY_MEDIA_FILETYPES = getattr(settings, 'COPY_MEDIA_FILETYPES',
('gif', 'jpg', 'jpeg', 'png', 'svg', 'svgz', 'ico', 'swf', 'ttf', 'otf',
'eot', 'woff'))
IGNORE_PATTERN = prepare_patterns(getattr(settings,
'IGNORE_MEDIA_COPY_PATTERNS', ()), 'IGNORE_MEDIA_COPY_PATTERNS')
+
class CopyFiles(Generator):
def get_dev_output(self, name):
path = find_file(name)
fp = open(path, 'rb')
content = fp.read()
fp.close()
mimetype = guess_type(path)[0]
return content, mimetype
def get_dev_output_names(self):
media_files = {}
for root in get_media_dirs():
self.collect_copyable_files(media_files, root)
for name, source in media_files.items():
fp = open(source, 'rb')
hash = sha1(fp.read()).hexdigest()
fp.close()
yield name, name, hash
def collect_copyable_files(self, media_files, root):
- for root_path, dirs, files in os.walk(root, followlinks=True):
+ for root_path, dirs, files in os.walk(root):
for file in files:
ext = os.path.splitext(file)[1].lstrip('.')
- path = os.path.join(root_path, file)
+ path = os.path.abspath(os.path.join(root_path, file))
media_path = path[len(root) + 1:].replace(os.sep, '/')
if ext in COPY_MEDIA_FILETYPES and \
not IGNORE_PATTERN.match(media_path):
media_files[media_path] = path
|
adieu/django-mediagenerator | 296787e662703ec0c6c54beff7dea06ff2943b2f | Fixed unix issue concerning os.walk and softlinks. | diff --git a/mediagenerator/filters/pyjs_filter.py b/mediagenerator/filters/pyjs_filter.py
index 25d388d..8977308 100644
--- a/mediagenerator/filters/pyjs_filter.py
+++ b/mediagenerator/filters/pyjs_filter.py
@@ -1,276 +1,276 @@
from django.utils.encoding import smart_str
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import get_media_dirs, read_text_file
from pyjs.translator import import_compiler, Translator, LIBRARY_PATH
from textwrap import dedent
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Register PYVA() function
try:
from pyvascript.grammar import compile
from pyjs.translator import native_js_func
@native_js_func
def PYVA(content, unescape, is_statement, **kwargs):
result = compile(dedent(unescape(content)))
if not is_statement:
return result.strip().rstrip('\r\n\t ;')
return result
except ImportError:
# No PyvaScript installed
pass
_HANDLE_EXCEPTIONS = """
} finally { $pyjs.in_try_except -= 1; }
} catch(err) {
pyjslib['_handle_exception'](err);
}
"""
PYJS_INIT_LIB_PATH = os.path.join(LIBRARY_PATH, 'builtin', 'public', '_pyjs.js')
BUILTIN_PATH = os.path.join(LIBRARY_PATH, 'builtin')
STDLIB_PATH = os.path.join(LIBRARY_PATH, 'lib')
EXTRA_LIBS_PATH = os.path.join(os.path.dirname(__file__), 'pyjslibs')
_LOAD_PYJSLIB = """
$p = $pyjs.loaded_modules["pyjslib"];
$p('pyjslib');
$pyjs.__modules__.pyjslib = $p['pyjslib']
"""
INIT_CODE = """
var $wnd = window;
var $doc = window.document;
var $pyjs = new Object();
var $p = null;
$pyjs.platform = 'safari';
$pyjs.global_namespace = this;
$pyjs.__modules__ = {};
$pyjs.modules_hash = {};
$pyjs.loaded_modules = {};
$pyjs.options = new Object();
$pyjs.options.arg_ignore = true;
$pyjs.options.arg_count = true;
$pyjs.options.arg_is_instance = true;
$pyjs.options.arg_instance_type = false;
$pyjs.options.arg_kwarg_dup = true;
$pyjs.options.arg_kwarg_unexpected_keyword = true;
$pyjs.options.arg_kwarg_multiple_values = true;
$pyjs.options.dynamic_loading = false;
$pyjs.trackstack = [];
$pyjs.track = {module:'__main__', lineno: 1};
$pyjs.trackstack.push($pyjs.track);
$pyjs.__active_exception_stack__ = null;
$pyjs.__last_exception_stack__ = null;
$pyjs.__last_exception__ = null;
$pyjs.in_try_except = 0;
""".lstrip()
class Pyjs(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, exclude_main_libs=False, main_module=None,
debug=None, path=(), only_dependencies=None)
if isinstance(self.path, basestring):
self.path = (self.path,)
self.path += tuple(get_media_dirs())
if self.only_dependencies is None:
self.only_dependencies = bool(self.main_module)
if self.only_dependencies:
self.path += (STDLIB_PATH, BUILTIN_PATH, EXTRA_LIBS_PATH)
super(Pyjs, self).__init__(**kwargs)
assert self.filetype == 'js', (
'Pyjs only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
if self.only_dependencies:
assert self.main_module, \
'You must provide a main module in only_dependencies mode'
self._compiled = {}
self._collected = {}
@classmethod
def from_default(cls, name):
return {'main_module': name.rsplit('.', 1)[0].replace('/', '.')}
def get_output(self, variation):
self._collect_all_modules()
if not self.exclude_main_libs:
yield self._compile_init()
if self.only_dependencies:
self._regenerate(dev_mode=False)
for name in sorted(self._compiled.keys()):
yield self._compiled[name][1]
else:
for name in sorted(self._collected.keys()):
source = read_text_file(self._collected[name])
yield self._compile(name, source, dev_mode=False)[0]
yield self._compile_main(dev_mode=False)
def get_dev_output(self, name, variation):
self._collect_all_modules()
name = name.split('/', 1)[-1]
if name == '._pyjs.js':
return self._compile_init()
elif name == '.main.js':
return self._compile_main(dev_mode=True)
if self.only_dependencies:
self._regenerate(dev_mode=True)
return self._compiled[name][1]
else:
source = read_text_file(self._collected[name])
return self._compile(name, source, dev_mode=True)[0]
def get_dev_output_names(self, variation):
self._collect_all_modules()
if not self.exclude_main_libs:
content = self._compile_init()
hash = sha1(smart_str(content)).hexdigest()
yield '._pyjs.js', hash
if self.only_dependencies:
self._regenerate(dev_mode=True)
for name in sorted(self._compiled.keys()):
yield name, self._compiled[name][2]
else:
for name in sorted(self._collected.keys()):
yield name, None
if self.main_module is not None or not self.exclude_main_libs:
content = self._compile_main(dev_mode=True)
hash = sha1(smart_str(content)).hexdigest()
yield '.main.js', hash
def _regenerate(self, dev_mode=False):
# This function is only called in only_dependencies mode
if self._compiled:
for module_name, (mtime, content, hash) in self._compiled.items():
if module_name not in self._collected or \
not os.path.exists(self._collected[module_name]) or \
os.path.getmtime(self._collected[module_name]) != mtime:
# Just recompile everything
# TODO: track dependencies and changes and recompile only
# what's necessary
self._compiled = {}
break
else:
# No changes
return
modules = [self.main_module, 'pyjslib']
while True:
if not modules:
break
module_name = modules.pop()
path = self._collected[module_name]
mtime = os.path.getmtime(path)
source = read_text_file(path)
try:
content, py_deps, js_deps = self._compile(module_name, source, dev_mode=dev_mode)
except:
self._compiled = {}
raise
hash = sha1(smart_str(content)).hexdigest()
self._compiled[module_name] = (mtime, content, hash)
for name in py_deps:
if name not in self._collected:
if '.' in name and name.rsplit('.', 1)[0] in self._collected:
name = name.rsplit('.', 1)[0]
else:
raise ImportError('The pyjs module %s could not find '
'the dependency %s' % (module_name, name))
if name not in self._compiled:
modules.append(name)
def _compile(self, name, source, dev_mode=False):
if self.debug is None:
debug = dev_mode
else:
debug = self.debug
compiler = import_compiler(False)
tree = compiler.parse(source)
output = StringIO()
translator = Translator(compiler, name, name, source, tree, output,
# Debug options
debug=debug, source_tracking=debug, line_tracking=debug,
store_source=debug,
# Speed and size optimizations
function_argument_checking=debug, attribute_checking=False,
inline_code=False, number_classes=False,
# Sufficient Python conformance
operator_funcs=True, bound_methods=True, descriptors=True,
)
return output.getvalue(), translator.imported_modules, translator.imported_js
def _compile_init(self):
return INIT_CODE + read_text_file(PYJS_INIT_LIB_PATH)
def _compile_main(self, dev_mode=False):
if self.debug is None:
debug = dev_mode
else:
debug = self.debug
content = ''
if not self.exclude_main_libs:
content += _LOAD_PYJSLIB
if self.main_module is not None:
content += '\n\n'
if debug:
content += 'try {\n'
content += ' try {\n'
content += ' $pyjs.in_try_except += 1;\n '
content += 'pyjslib.___import___("%s", null, "__main__");' % self.main_module
if debug:
content += _HANDLE_EXCEPTIONS
return content
def _collect_all_modules(self):
"""Collect modules, so we can handle imports later"""
for pkgroot in self.path:
pkgroot = os.path.abspath(pkgroot)
- for root, dirs, files in os.walk(pkgroot):
+ for root, dirs, files in os.walk(pkgroot, followlinks=True):
if '__init__.py' in files:
files.remove('__init__.py')
# The root __init__.py is ignored
if root != pkgroot:
files.insert(0, '__init__.py')
elif root != pkgroot:
# Only add valid Python packages
dirs[:] = []
continue
for filename in files:
if not filename.endswith('.py'):
continue
path = os.path.join(root, filename)
module_path = path[len(pkgroot) + len(os.sep):]
if os.path.basename(module_path) == '__init__.py':
module_name = os.path.dirname(module_path)
else:
module_name = module_path[:-3]
assert '.' not in module_name, \
'Invalid module file name: %s' % module_path
module_name = module_name.replace(os.sep, '.')
self._collected.setdefault(module_name, path)
diff --git a/mediagenerator/generators/copyfiles.py b/mediagenerator/generators/copyfiles.py
index f275767..1e9285f 100644
--- a/mediagenerator/generators/copyfiles.py
+++ b/mediagenerator/generators/copyfiles.py
@@ -1,43 +1,43 @@
from django.conf import settings
from hashlib import sha1
from mediagenerator.base import Generator
from mediagenerator.utils import get_media_dirs, find_file, prepare_patterns
from mimetypes import guess_type
import os
COPY_MEDIA_FILETYPES = getattr(settings, 'COPY_MEDIA_FILETYPES',
('gif', 'jpg', 'jpeg', 'png', 'svg', 'svgz', 'ico', 'swf', 'ttf', 'otf',
'eot', 'woff'))
IGNORE_PATTERN = prepare_patterns(getattr(settings,
'IGNORE_MEDIA_COPY_PATTERNS', ()), 'IGNORE_MEDIA_COPY_PATTERNS')
class CopyFiles(Generator):
def get_dev_output(self, name):
path = find_file(name)
fp = open(path, 'rb')
content = fp.read()
fp.close()
mimetype = guess_type(path)[0]
return content, mimetype
def get_dev_output_names(self):
media_files = {}
for root in get_media_dirs():
self.collect_copyable_files(media_files, root)
for name, source in media_files.items():
fp = open(source, 'rb')
hash = sha1(fp.read()).hexdigest()
fp.close()
yield name, name, hash
def collect_copyable_files(self, media_files, root):
- for root_path, dirs, files in os.walk(root):
+ for root_path, dirs, files in os.walk(root, followlinks=True):
for file in files:
ext = os.path.splitext(file)[1].lstrip('.')
path = os.path.join(root_path, file)
media_path = path[len(root) + 1:].replace(os.sep, '/')
if ext in COPY_MEDIA_FILETYPES and \
not IGNORE_PATTERN.match(media_path):
media_files[media_path] = path
|
adieu/django-mediagenerator | 8cc91c8985b4e49b95012f1bd3fed41eeb97ce03 | fixed pyjs support for main module in subfolder | diff --git a/mediagenerator/filters/pyjs_filter.py b/mediagenerator/filters/pyjs_filter.py
index 5c3a3c1..25d388d 100644
--- a/mediagenerator/filters/pyjs_filter.py
+++ b/mediagenerator/filters/pyjs_filter.py
@@ -1,276 +1,276 @@
from django.utils.encoding import smart_str
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import get_media_dirs, read_text_file
from pyjs.translator import import_compiler, Translator, LIBRARY_PATH
from textwrap import dedent
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Register PYVA() function
try:
from pyvascript.grammar import compile
from pyjs.translator import native_js_func
@native_js_func
def PYVA(content, unescape, is_statement, **kwargs):
result = compile(dedent(unescape(content)))
if not is_statement:
return result.strip().rstrip('\r\n\t ;')
return result
except ImportError:
# No PyvaScript installed
pass
_HANDLE_EXCEPTIONS = """
} finally { $pyjs.in_try_except -= 1; }
} catch(err) {
pyjslib['_handle_exception'](err);
}
"""
PYJS_INIT_LIB_PATH = os.path.join(LIBRARY_PATH, 'builtin', 'public', '_pyjs.js')
BUILTIN_PATH = os.path.join(LIBRARY_PATH, 'builtin')
STDLIB_PATH = os.path.join(LIBRARY_PATH, 'lib')
EXTRA_LIBS_PATH = os.path.join(os.path.dirname(__file__), 'pyjslibs')
_LOAD_PYJSLIB = """
$p = $pyjs.loaded_modules["pyjslib"];
$p('pyjslib');
$pyjs.__modules__.pyjslib = $p['pyjslib']
"""
INIT_CODE = """
var $wnd = window;
var $doc = window.document;
var $pyjs = new Object();
var $p = null;
$pyjs.platform = 'safari';
$pyjs.global_namespace = this;
$pyjs.__modules__ = {};
$pyjs.modules_hash = {};
$pyjs.loaded_modules = {};
$pyjs.options = new Object();
$pyjs.options.arg_ignore = true;
$pyjs.options.arg_count = true;
$pyjs.options.arg_is_instance = true;
$pyjs.options.arg_instance_type = false;
$pyjs.options.arg_kwarg_dup = true;
$pyjs.options.arg_kwarg_unexpected_keyword = true;
$pyjs.options.arg_kwarg_multiple_values = true;
$pyjs.options.dynamic_loading = false;
$pyjs.trackstack = [];
$pyjs.track = {module:'__main__', lineno: 1};
$pyjs.trackstack.push($pyjs.track);
$pyjs.__active_exception_stack__ = null;
$pyjs.__last_exception_stack__ = null;
$pyjs.__last_exception__ = null;
$pyjs.in_try_except = 0;
""".lstrip()
class Pyjs(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, exclude_main_libs=False, main_module=None,
debug=None, path=(), only_dependencies=None)
if isinstance(self.path, basestring):
self.path = (self.path,)
self.path += tuple(get_media_dirs())
if self.only_dependencies is None:
self.only_dependencies = bool(self.main_module)
if self.only_dependencies:
self.path += (STDLIB_PATH, BUILTIN_PATH, EXTRA_LIBS_PATH)
super(Pyjs, self).__init__(**kwargs)
assert self.filetype == 'js', (
'Pyjs only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
if self.only_dependencies:
assert self.main_module, \
'You must provide a main module in only_dependencies mode'
self._compiled = {}
self._collected = {}
@classmethod
def from_default(cls, name):
- return {'main_module': name.rsplit('.', 1)[0]}
+ return {'main_module': name.rsplit('.', 1)[0].replace('/', '.')}
def get_output(self, variation):
self._collect_all_modules()
if not self.exclude_main_libs:
yield self._compile_init()
if self.only_dependencies:
self._regenerate(dev_mode=False)
for name in sorted(self._compiled.keys()):
yield self._compiled[name][1]
else:
for name in sorted(self._collected.keys()):
source = read_text_file(self._collected[name])
yield self._compile(name, source, dev_mode=False)[0]
yield self._compile_main(dev_mode=False)
def get_dev_output(self, name, variation):
self._collect_all_modules()
name = name.split('/', 1)[-1]
if name == '._pyjs.js':
return self._compile_init()
elif name == '.main.js':
return self._compile_main(dev_mode=True)
if self.only_dependencies:
self._regenerate(dev_mode=True)
return self._compiled[name][1]
else:
source = read_text_file(self._collected[name])
return self._compile(name, source, dev_mode=True)[0]
def get_dev_output_names(self, variation):
self._collect_all_modules()
if not self.exclude_main_libs:
content = self._compile_init()
hash = sha1(smart_str(content)).hexdigest()
yield '._pyjs.js', hash
if self.only_dependencies:
self._regenerate(dev_mode=True)
for name in sorted(self._compiled.keys()):
yield name, self._compiled[name][2]
else:
for name in sorted(self._collected.keys()):
yield name, None
if self.main_module is not None or not self.exclude_main_libs:
content = self._compile_main(dev_mode=True)
hash = sha1(smart_str(content)).hexdigest()
yield '.main.js', hash
def _regenerate(self, dev_mode=False):
# This function is only called in only_dependencies mode
if self._compiled:
for module_name, (mtime, content, hash) in self._compiled.items():
if module_name not in self._collected or \
not os.path.exists(self._collected[module_name]) or \
os.path.getmtime(self._collected[module_name]) != mtime:
# Just recompile everything
# TODO: track dependencies and changes and recompile only
# what's necessary
self._compiled = {}
break
else:
# No changes
return
modules = [self.main_module, 'pyjslib']
while True:
if not modules:
break
module_name = modules.pop()
path = self._collected[module_name]
mtime = os.path.getmtime(path)
source = read_text_file(path)
try:
content, py_deps, js_deps = self._compile(module_name, source, dev_mode=dev_mode)
except:
self._compiled = {}
raise
hash = sha1(smart_str(content)).hexdigest()
self._compiled[module_name] = (mtime, content, hash)
for name in py_deps:
if name not in self._collected:
if '.' in name and name.rsplit('.', 1)[0] in self._collected:
name = name.rsplit('.', 1)[0]
else:
raise ImportError('The pyjs module %s could not find '
'the dependency %s' % (module_name, name))
if name not in self._compiled:
modules.append(name)
def _compile(self, name, source, dev_mode=False):
if self.debug is None:
debug = dev_mode
else:
debug = self.debug
compiler = import_compiler(False)
tree = compiler.parse(source)
output = StringIO()
translator = Translator(compiler, name, name, source, tree, output,
# Debug options
debug=debug, source_tracking=debug, line_tracking=debug,
store_source=debug,
# Speed and size optimizations
function_argument_checking=debug, attribute_checking=False,
inline_code=False, number_classes=False,
# Sufficient Python conformance
operator_funcs=True, bound_methods=True, descriptors=True,
)
return output.getvalue(), translator.imported_modules, translator.imported_js
def _compile_init(self):
return INIT_CODE + read_text_file(PYJS_INIT_LIB_PATH)
def _compile_main(self, dev_mode=False):
if self.debug is None:
debug = dev_mode
else:
debug = self.debug
content = ''
if not self.exclude_main_libs:
content += _LOAD_PYJSLIB
if self.main_module is not None:
content += '\n\n'
if debug:
content += 'try {\n'
content += ' try {\n'
content += ' $pyjs.in_try_except += 1;\n '
content += 'pyjslib.___import___("%s", null, "__main__");' % self.main_module
if debug:
content += _HANDLE_EXCEPTIONS
return content
def _collect_all_modules(self):
"""Collect modules, so we can handle imports later"""
for pkgroot in self.path:
pkgroot = os.path.abspath(pkgroot)
for root, dirs, files in os.walk(pkgroot):
if '__init__.py' in files:
files.remove('__init__.py')
# The root __init__.py is ignored
if root != pkgroot:
files.insert(0, '__init__.py')
elif root != pkgroot:
# Only add valid Python packages
dirs[:] = []
continue
for filename in files:
if not filename.endswith('.py'):
continue
path = os.path.join(root, filename)
module_path = path[len(pkgroot) + len(os.sep):]
if os.path.basename(module_path) == '__init__.py':
module_name = os.path.dirname(module_path)
else:
module_name = module_path[:-3]
assert '.' not in module_name, \
'Invalid module file name: %s' % module_path
module_name = module_name.replace(os.sep, '.')
self._collected.setdefault(module_name, path)
|
adieu/django-mediagenerator | 97e6e402aa6e0e0a10353cd1575810e86199dc16 | fixed dev mode file serving of unicode content (content length was incorrect) | diff --git a/mediagenerator/middleware.py b/mediagenerator/middleware.py
index bcb3217..06ec3e9 100644
--- a/mediagenerator/middleware.py
+++ b/mediagenerator/middleware.py
@@ -1,49 +1,51 @@
from .settings import DEV_MEDIA_URL, MEDIA_DEV_MODE
# Only load other dependencies if they're needed
if MEDIA_DEV_MODE:
from .utils import _refresh_dev_names, _backend_mapping
from django.http import HttpResponse, Http404
from django.utils.cache import patch_cache_control
from django.utils.http import http_date
import time
class MediaMiddleware(object):
"""
Middleware for serving and browser-side caching of media files.
This MUST be your *first* entry in MIDDLEWARE_CLASSES. Otherwise, some
other middleware might add ETags or otherwise manipulate the caching
headers which would result in the browser doing unnecessary HTTP
roundtrips for unchanged media.
"""
MAX_AGE = 60 * 60 * 24 * 365
def process_request(self, request):
if not MEDIA_DEV_MODE:
return
# We refresh the dev names only once for the whole request, so all
# media_url() calls are cached.
_refresh_dev_names()
if not request.path.startswith(DEV_MEDIA_URL):
return
filename = request.path[len(DEV_MEDIA_URL):]
try:
backend = _backend_mapping[filename]
except KeyError:
raise Http404('No such media file "%s"' % filename)
content, mimetype = backend.get_dev_output(filename)
+ if isinstance(content, unicode):
+ content = content.encode('utf-8')
response = HttpResponse(content, content_type=mimetype)
response['Content-Length'] = len(content)
# Cache manifest files MUST NEVER be cached or you'll be unable to update
# your cached app!!!
if response['Content-Type'] != 'text/cache-manifest' and \
response.status_code == 200:
patch_cache_control(response, public=True, max_age=self.MAX_AGE)
response['Expires'] = http_date(time.time() + self.MAX_AGE)
return response
|
adieu/django-mediagenerator | 5a945407691a5d39191d6791b77896c868443b22 | bumped version | diff --git a/setup.py b/setup.py
index 8c26c91..e0c7282 100644
--- a/setup.py
+++ b/setup.py
@@ -1,33 +1,33 @@
from setuptools import setup, find_packages
DESCRIPTION = 'Asset manager for Django'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except:
pass
setup(name='django-mediagenerator',
- version='1.10.2',
+ version='1.10.3',
packages=find_packages(exclude=('tests', 'tests.*',
'base_project', 'base_project.*')),
package_data={'mediagenerator.filters': ['pyjslibs/*.py', '*.rb'],
'mediagenerator': ['templates/mediagenerator/manifest/*']},
author='Waldemar Kornewald',
author_email='[email protected]',
url='http://www.allbuttonspressed.com/projects/django-mediagenerator',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
],
)
|
adieu/django-mediagenerator | b0765cb56cef369173980a9d72814b26b594f8b6 | fixed Sass filter on Linux (again :) - Sass on Linux doesn't have the -E parameter | diff --git a/mediagenerator/filters/sass.py b/mediagenerator/filters/sass.py
index 4f15d43..1af870b 100644
--- a/mediagenerator/filters/sass.py
+++ b/mediagenerator/filters/sass.py
@@ -1,153 +1,156 @@
from django.conf import settings
from django.utils.encoding import smart_str
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import get_media_dirs, find_file, read_text_file
from subprocess import Popen, PIPE
import os
import posixpath
import re
import sys
# Emits extra debug info that can be used by the FireSass Firebug plugin
SASS_DEBUG_INFO = getattr(settings, 'SASS_DEBUG_INFO', False)
SASS_FRAMEWORKS = getattr(settings, 'SASS_FRAMEWORKS',
('compass', 'blueprint'))
if isinstance(SASS_FRAMEWORKS, basestring):
SASS_FRAMEWORKS = (SASS_FRAMEWORKS,)
_RE_FLAGS = re.MULTILINE | re.UNICODE
multi_line_comment_re = re.compile(r'/\*.*?\*/', _RE_FLAGS | re.DOTALL)
one_line_comment_re = re.compile(r'//.*', _RE_FLAGS)
import_re = re.compile(r'^@import\s+["\']?(.+?)["\']?\s*;?\s*$', _RE_FLAGS)
class Sass(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, path=(), main_module=None)
if isinstance(self.path, basestring):
self.path = (self.path,)
super(Sass, self).__init__(**kwargs)
assert self.filetype == 'css', (
'Sass only supports compilation to css. '
'The parent filter expects "%s".' % self.filetype)
assert self.main_module, \
'You must provide a main module'
self.path += tuple(get_media_dirs())
self.path_args = []
for path in self.path:
self.path_args.extend(('-I', path.replace('\\', '/')))
self._compiled = None
self._compiled_hash = None
self._dependencies = {}
@classmethod
def from_default(cls, name):
return {'main_module': name}
def get_output(self, variation):
self._regenerate(debug=False)
yield self._compiled
def get_dev_output(self, name, variation):
assert name == self.main_module
self._regenerate(debug=True)
return self._compiled
def get_dev_output_names(self, variation):
self._regenerate(debug=True)
yield self.main_module, self._compiled_hash
def _compile(self, debug=False):
extensions = os.path.join(os.path.dirname(__file__), 'sass_compass.rb')
extensions = extensions.replace('\\', '/')
- run = ['sass', '-E', 'utf-8', '-C', '-t', 'expanded',
+ run = ['sass', '-C', '-t', 'expanded',
'--require', extensions]
for framework in SASS_FRAMEWORKS:
# Some frameworks are loaded by default
if framework in ('blueprint', 'compass'):
continue
run.extend(('--require', framework))
if debug:
run.append('--line-numbers')
if SASS_DEBUG_INFO:
run.append('--debug-info')
run.extend(self.path_args)
shell = sys.platform == 'win32'
try:
cmd = Popen(run, shell=shell, universal_newlines=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
module = self.main_module.rsplit('.', 1)[0]
output, error = cmd.communicate('@import "%s"' % module)
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
- return output.decode('utf-8')
+ output = output.decode('utf-8')
+ if output.startswith('@charset '):
+ output = output.split(';', 1)[1]
+ return output
except Exception, e:
raise ValueError("Failed to execute Sass. Please make sure that "
"you have installed Sass (http://sass-lang.com) and "
"Compass (http://compass-style.org).\n"
"Error was: %s" % e)
def _regenerate(self, debug=False):
if self._dependencies:
for name, mtime in self._dependencies.items():
path = self._find_file(name)
if not path or os.path.getmtime(path) != mtime:
# Just recompile everything
self._dependencies = {}
break
else:
# No changes
return
modules = [self.main_module]
while True:
if not modules:
break
module_name = modules.pop()
path = self._find_file(module_name)
assert path, 'Could not find the Sass module %s' % module_name
mtime = os.path.getmtime(path)
self._dependencies[module_name] = mtime
source = read_text_file(path)
dependencies = self._get_dependencies(source)
for name in dependencies:
# Try relative import, first
transformed = posixpath.join(posixpath.dirname(module_name), name)
path = self._find_file(transformed)
if path:
name = transformed
else:
path = self._find_file(name)
assert path, ('The Sass module %s could not find the '
'dependency %s' % (module_name, name))
if name not in self._dependencies:
modules.append(name)
self._compiled = self._compile(debug=debug)
self._compiled_hash = sha1(smart_str(self._compiled)).hexdigest()
def _get_dependencies(self, source):
clean_source = multi_line_comment_re.sub('\n', source)
clean_source = one_line_comment_re.sub('', clean_source)
return [name for name in import_re.findall(clean_source)
if not name.endswith('.css')]
def _find_file(self, name):
parts = name.rsplit('/', 1)
parts[-1] = '_' + parts[-1]
partial = '/'.join(parts)
if not name.endswith(('.sass', '.scss')):
names = (name + '.sass', name + '.scss', partial + '.sass',
partial + '.scss')
else:
names = (name, partial)
for name in names:
path = find_file(name, media_dirs=self.path)
if path:
return path
diff --git a/mediagenerator/utils.py b/mediagenerator/utils.py
index b319cd4..bdc5297 100644
--- a/mediagenerator/utils.py
+++ b/mediagenerator/utils.py
@@ -1,146 +1,145 @@
from . import settings as media_settings
from .settings import (GLOBAL_MEDIA_DIRS, PRODUCTION_MEDIA_URL,
IGNORE_APP_MEDIA_DIRS, MEDIA_GENERATORS, DEV_MEDIA_URL,
GENERATED_MEDIA_NAMES_MODULE)
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.utils.http import urlquote
import os
import re
try:
NAMES = import_module(GENERATED_MEDIA_NAMES_MODULE).NAMES
except (ImportError, AttributeError):
NAMES = None
_backends_cache = {}
_media_dirs_cache = []
_generators_cache = []
_generated_names = {}
_backend_mapping = {}
def _load_generators():
if not _generators_cache:
for name in MEDIA_GENERATORS:
backend = load_backend(name)()
_generators_cache.append(backend)
return _generators_cache
def _refresh_dev_names():
_generated_names.clear()
_backend_mapping.clear()
for backend in _load_generators():
for key, url, hash in backend.get_dev_output_names():
versioned_url = urlquote(url)
if hash:
versioned_url += '?version=' + hash
_generated_names.setdefault(key, [])
_generated_names[key].append(versioned_url)
_backend_mapping[url] = backend
class _MatchNothing(object):
def match(self, content):
return False
def prepare_patterns(patterns, setting_name):
"""Helper function for patter-matching settings."""
if isinstance(patterns, basestring):
patterns = (patterns,)
if not patterns:
return _MatchNothing()
# First validate each pattern individually
for pattern in patterns:
try:
re.compile(pattern, re.U)
except re.error:
raise ValueError("""Pattern "%s" can't be compiled """
"in %s" % (pattern, setting_name))
# Now return a combined pattern
return re.compile('^(' + ')$|^('.join(patterns) + ')$', re.U)
def get_production_mapping():
if NAMES is None:
raise ImportError('Could not import %s. This '
'file is needed for production mode. Please '
'run manage.py generatemedia to create it.'
% GENERATED_MEDIA_NAMES_MODULE)
return NAMES
def get_media_mapping():
if media_settings.MEDIA_DEV_MODE:
return _generated_names
return get_production_mapping()
def get_media_url_mapping():
if media_settings.MEDIA_DEV_MODE:
base_url = DEV_MEDIA_URL
else:
base_url = PRODUCTION_MEDIA_URL
mapping = {}
for key, value in get_media_mapping().items():
if isinstance(value, basestring):
value = (value,)
mapping[key] = [base_url + url for url in value]
return mapping
def media_urls(key, refresh=False):
if media_settings.MEDIA_DEV_MODE:
if refresh:
_refresh_dev_names()
return [DEV_MEDIA_URL + url for url in _generated_names[key]]
- print repr(get_production_mapping())
return [PRODUCTION_MEDIA_URL + get_production_mapping()[key]]
def media_url(key, refresh=False):
urls = media_urls(key, refresh=refresh)
if len(urls) == 1:
return urls[0]
raise ValueError('media_url() only works with URLs that contain exactly '
'one file. Use media_urls() (or {% include_media %} in templates) instead.')
def get_media_dirs():
if not _media_dirs_cache:
media_dirs = GLOBAL_MEDIA_DIRS[:]
for app in settings.INSTALLED_APPS:
if app in IGNORE_APP_MEDIA_DIRS:
continue
for name in (u'static', u'media'):
app_root = os.path.dirname(import_module(app).__file__)
media_dirs.append(os.path.join(app_root, name))
_media_dirs_cache.extend(media_dirs)
return _media_dirs_cache
def find_file(name, media_dirs=None):
if media_dirs is None:
media_dirs = get_media_dirs()
for root in media_dirs:
path = os.path.normpath(os.path.join(root, name))
if os.path.isfile(path):
return path
def read_text_file(path):
fp = open(path, 'r')
output = fp.read()
fp.close()
return output.decode('utf8')
def load_backend(backend):
if backend not in _backends_cache:
module_name, func_name = backend.rsplit('.', 1)
_backends_cache[backend] = _load_backend(backend)
return _backends_cache[backend]
def _load_backend(path):
module_name, attr_name = path.rsplit('.', 1)
try:
mod = import_module(module_name)
except (ImportError, ValueError), e:
raise ImproperlyConfigured('Error importing backend module %s: "%s"' % (module_name, e))
try:
return getattr(mod, attr_name)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" backend' % (module_name, attr_name))
|
adieu/django-mediagenerator | ef4ea23cf7764d04b82218fd6f768e87bf529b9e | fixed lots of unicode issues | diff --git a/mediagenerator/filters/closure.py b/mediagenerator/filters/closure.py
index 6069543..c06ac09 100644
--- a/mediagenerator/filters/closure.py
+++ b/mediagenerator/filters/closure.py
@@ -1,36 +1,36 @@
from django.conf import settings
from django.utils.encoding import smart_str
from mediagenerator.generators.bundles.base import Filter
COMPILATION_LEVEL = getattr(settings, 'CLOSURE_COMPILATION_LEVEL',
'SIMPLE_OPTIMIZATIONS')
class Closure(Filter):
def __init__(self, **kwargs):
self.config(kwargs, compilation_level=COMPILATION_LEVEL)
super(Closure, self).__init__(**kwargs)
assert self.filetype == 'js', (
'Closure only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
def get_output(self, variation):
# We import this here, so App Engine Helper users don't get import
# errors.
from subprocess import Popen, PIPE
for input in self.get_input(variation):
try:
compressor = settings.CLOSURE_COMPILER_PATH
cmd = Popen(['java', '-jar', compressor,
'--charset', 'utf-8',
'--compilation_level', self.compilation_level],
stdin=PIPE, stdout=PIPE, stderr=PIPE,
universal_newlines=True)
output, error = cmd.communicate(smart_str(input))
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
- yield output
+ yield output.decode('utf-8')
except Exception, e:
raise ValueError("Failed to execute Java VM or Closure. "
"Please make sure that you have installed Java "
"and that it's in your PATH and that you've configured "
"CLOSURE_COMPILER_PATH in your settings correctly.\n"
"Error was: %s" % e)
diff --git a/mediagenerator/filters/coffeescript.py b/mediagenerator/filters/coffeescript.py
index b562535..424be4e 100644
--- a/mediagenerator/filters/coffeescript.py
+++ b/mediagenerator/filters/coffeescript.py
@@ -1,66 +1,64 @@
from django.utils.encoding import smart_str
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
-from mediagenerator.utils import find_file
+from mediagenerator.utils import find_file, read_text_file
from subprocess import Popen, PIPE
import os
import sys
class CoffeeScript(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, module=None)
super(CoffeeScript, self).__init__(**kwargs)
assert self.filetype == 'js', (
'CoffeeScript only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
self._compiled = None
self._compiled_hash = None
self._mtime = None
@classmethod
def from_default(cls, name):
return {'module': name}
def get_output(self, variation):
self._regenerate(debug=False)
yield self._compiled
def get_dev_output(self, name, variation):
assert name == self.module
self._regenerate(debug=True)
return self._compiled
def get_dev_output_names(self, variation):
self._regenerate(debug=True)
yield self.module, self._compiled_hash
def _regenerate(self, debug=False):
path = find_file(self.module)
mtime = os.path.getmtime(path)
if mtime == self._mtime:
return
- fp = open(path, 'r')
- source = fp.read()
- fp.close()
+ source = read_text_file(path)
self._compiled = self._compile(source, debug=debug)
self._compiled_hash = sha1(smart_str(self._compiled)).hexdigest()
self._mtime = mtime
def _compile(self, input, debug=False):
try:
shell = sys.platform == 'win32'
cmd = Popen(['coffee', '--compile', '--print', '--stdio', '--bare'],
stdin=PIPE, stdout=PIPE, stderr=PIPE,
shell=shell, universal_newlines=True)
- output, error = cmd.communicate(input)
+ output, error = cmd.communicate(smart_str(input))
assert cmd.wait() == 0, ('CoffeeScript command returned bad '
'result:\n%s' % error)
- return output
+ return output.decode('utf-8')
except Exception, e:
raise ValueError("Failed to run CoffeeScript compiler for this "
"file. Please confirm that the \"coffee\" application is "
"on your path and that you can run it from your own command "
"line.\n"
"Error was: %s" % e)
diff --git a/mediagenerator/filters/sass.py b/mediagenerator/filters/sass.py
index 5df6fd7..4f15d43 100644
--- a/mediagenerator/filters/sass.py
+++ b/mediagenerator/filters/sass.py
@@ -1,152 +1,153 @@
from django.conf import settings
from django.utils.encoding import smart_str
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import get_media_dirs, find_file, read_text_file
from subprocess import Popen, PIPE
import os
import posixpath
import re
import sys
# Emits extra debug info that can be used by the FireSass Firebug plugin
SASS_DEBUG_INFO = getattr(settings, 'SASS_DEBUG_INFO', False)
SASS_FRAMEWORKS = getattr(settings, 'SASS_FRAMEWORKS',
('compass', 'blueprint'))
if isinstance(SASS_FRAMEWORKS, basestring):
SASS_FRAMEWORKS = (SASS_FRAMEWORKS,)
_RE_FLAGS = re.MULTILINE | re.UNICODE
multi_line_comment_re = re.compile(r'/\*.*?\*/', _RE_FLAGS | re.DOTALL)
one_line_comment_re = re.compile(r'//.*', _RE_FLAGS)
import_re = re.compile(r'^@import\s+["\']?(.+?)["\']?\s*;?\s*$', _RE_FLAGS)
class Sass(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, path=(), main_module=None)
if isinstance(self.path, basestring):
self.path = (self.path,)
super(Sass, self).__init__(**kwargs)
assert self.filetype == 'css', (
'Sass only supports compilation to css. '
'The parent filter expects "%s".' % self.filetype)
assert self.main_module, \
'You must provide a main module'
self.path += tuple(get_media_dirs())
self.path_args = []
for path in self.path:
self.path_args.extend(('-I', path.replace('\\', '/')))
self._compiled = None
self._compiled_hash = None
self._dependencies = {}
@classmethod
def from_default(cls, name):
return {'main_module': name}
def get_output(self, variation):
self._regenerate(debug=False)
yield self._compiled
def get_dev_output(self, name, variation):
assert name == self.main_module
self._regenerate(debug=True)
return self._compiled
def get_dev_output_names(self, variation):
self._regenerate(debug=True)
yield self.main_module, self._compiled_hash
def _compile(self, debug=False):
extensions = os.path.join(os.path.dirname(__file__), 'sass_compass.rb')
extensions = extensions.replace('\\', '/')
- run = ['sass', '-C', '-t', 'expanded', '--require', extensions]
+ run = ['sass', '-E', 'utf-8', '-C', '-t', 'expanded',
+ '--require', extensions]
for framework in SASS_FRAMEWORKS:
# Some frameworks are loaded by default
if framework in ('blueprint', 'compass'):
continue
run.extend(('--require', framework))
if debug:
run.append('--line-numbers')
if SASS_DEBUG_INFO:
run.append('--debug-info')
run.extend(self.path_args)
shell = sys.platform == 'win32'
try:
cmd = Popen(run, shell=shell, universal_newlines=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
module = self.main_module.rsplit('.', 1)[0]
output, error = cmd.communicate('@import "%s"' % module)
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
- return output
+ return output.decode('utf-8')
except Exception, e:
raise ValueError("Failed to execute Sass. Please make sure that "
"you have installed Sass (http://sass-lang.com) and "
"Compass (http://compass-style.org).\n"
"Error was: %s" % e)
def _regenerate(self, debug=False):
if self._dependencies:
for name, mtime in self._dependencies.items():
path = self._find_file(name)
if not path or os.path.getmtime(path) != mtime:
# Just recompile everything
self._dependencies = {}
break
else:
# No changes
return
modules = [self.main_module]
while True:
if not modules:
break
module_name = modules.pop()
path = self._find_file(module_name)
assert path, 'Could not find the Sass module %s' % module_name
mtime = os.path.getmtime(path)
self._dependencies[module_name] = mtime
source = read_text_file(path)
dependencies = self._get_dependencies(source)
for name in dependencies:
# Try relative import, first
transformed = posixpath.join(posixpath.dirname(module_name), name)
path = self._find_file(transformed)
if path:
name = transformed
else:
path = self._find_file(name)
assert path, ('The Sass module %s could not find the '
'dependency %s' % (module_name, name))
if name not in self._dependencies:
modules.append(name)
self._compiled = self._compile(debug=debug)
self._compiled_hash = sha1(smart_str(self._compiled)).hexdigest()
def _get_dependencies(self, source):
clean_source = multi_line_comment_re.sub('\n', source)
clean_source = one_line_comment_re.sub('', clean_source)
return [name for name in import_re.findall(clean_source)
if not name.endswith('.css')]
def _find_file(self, name):
parts = name.rsplit('/', 1)
parts[-1] = '_' + parts[-1]
partial = '/'.join(parts)
if not name.endswith(('.sass', '.scss')):
names = (name + '.sass', name + '.scss', partial + '.sass',
partial + '.scss')
else:
names = (name, partial)
for name in names:
path = find_file(name, media_dirs=self.path)
if path:
return path
diff --git a/mediagenerator/filters/yuicompressor.py b/mediagenerator/filters/yuicompressor.py
index bdb5f22..09ddc79 100644
--- a/mediagenerator/filters/yuicompressor.py
+++ b/mediagenerator/filters/yuicompressor.py
@@ -1,31 +1,31 @@
from django.conf import settings
from django.utils.encoding import smart_str
from mediagenerator.generators.bundles.base import Filter
class YUICompressor(Filter):
def __init__(self, **kwargs):
super(YUICompressor, self).__init__(**kwargs)
assert self.filetype in ('css', 'js'), (
'YUICompressor only supports compilation to css and js. '
'The parent filter expects "%s".' % self.filetype)
def get_output(self, variation):
# We import this here, so App Engine Helper users don't get import
# errors.
from subprocess import Popen, PIPE
for input in self.get_input(variation):
try:
compressor = settings.YUICOMPRESSOR_PATH
cmd = Popen(['java', '-jar', compressor,
'--charset', 'utf-8', '--type', self.filetype],
stdin=PIPE, stdout=PIPE, stderr=PIPE,
universal_newlines=True)
output, error = cmd.communicate(smart_str(input))
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
- yield output
+ yield output.decode('utf-8')
except Exception, e:
raise ValueError("Failed to execute Java VM or yuicompressor. "
"Please make sure that you have installed Java "
"and that it's in your PATH and that you've configured "
"YUICOMPRESSOR_PATH in your settings correctly.\n"
"Error was: %s" % e)
diff --git a/mediagenerator/settings.py b/mediagenerator/settings.py
index 04043ae..d6ffb55 100644
--- a/mediagenerator/settings.py
+++ b/mediagenerator/settings.py
@@ -1,35 +1,38 @@
from django.conf import settings
+from django.utils.encoding import force_unicode
import os
import __main__
_map_file_path = '_generated_media_names.py'
_media_dir = '_generated_media'
# __main__ is not guaranteed to have the __file__ attribute
if hasattr(__main__, '__file__'):
_root = os.path.dirname(__main__.__file__)
_map_file_path = os.path.join(_root, _map_file_path)
_media_dir = os.path.join(_root, _media_dir)
GENERATED_MEDIA_DIR = os.path.abspath(
getattr(settings, 'GENERATED_MEDIA_DIR', _media_dir))
GENERATED_MEDIA_NAMES_MODULE = getattr(settings, 'GENERATED_MEDIA_NAMES_MODULE',
'_generated_media_names')
GENERATED_MEDIA_NAMES_FILE = os.path.abspath(
getattr(settings, 'GENERATED_MEDIA_NAMES_FILE', _map_file_path))
DEV_MEDIA_URL = getattr(settings, 'DEV_MEDIA_URL',
getattr(settings, 'STATIC_URL', settings.MEDIA_URL))
PRODUCTION_MEDIA_URL = getattr(settings, 'PRODUCTION_MEDIA_URL', DEV_MEDIA_URL)
MEDIA_GENERATORS = getattr(settings, 'MEDIA_GENERATORS', (
'mediagenerator.generators.copyfiles.CopyFiles',
'mediagenerator.generators.bundles.Bundles',
'mediagenerator.generators.manifest.Manifest',
))
-GLOBAL_MEDIA_DIRS = getattr(settings, 'GLOBAL_MEDIA_DIRS',
- getattr(settings, 'STATICFILES_DIRS', ()))
+_global_media_dirs = getattr(settings, 'GLOBAL_MEDIA_DIRS',
+ getattr(settings, 'STATICFILES_DIRS', ()))
+GLOBAL_MEDIA_DIRS = [os.path.normcase(os.path.normpath(force_unicode(path)))
+ for path in _global_media_dirs]
IGNORE_APP_MEDIA_DIRS = getattr(settings, 'IGNORE_APP_MEDIA_DIRS',
('django.contrib.admin',))
MEDIA_DEV_MODE = getattr(settings, 'MEDIA_DEV_MODE', settings.DEBUG)
diff --git a/mediagenerator/utils.py b/mediagenerator/utils.py
index 0d1d474..b319cd4 100644
--- a/mediagenerator/utils.py
+++ b/mediagenerator/utils.py
@@ -1,146 +1,146 @@
from . import settings as media_settings
from .settings import (GLOBAL_MEDIA_DIRS, PRODUCTION_MEDIA_URL,
IGNORE_APP_MEDIA_DIRS, MEDIA_GENERATORS, DEV_MEDIA_URL,
GENERATED_MEDIA_NAMES_MODULE)
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.utils.http import urlquote
import os
import re
try:
NAMES = import_module(GENERATED_MEDIA_NAMES_MODULE).NAMES
except (ImportError, AttributeError):
NAMES = None
_backends_cache = {}
_media_dirs_cache = []
_generators_cache = []
_generated_names = {}
_backend_mapping = {}
def _load_generators():
if not _generators_cache:
for name in MEDIA_GENERATORS:
backend = load_backend(name)()
_generators_cache.append(backend)
return _generators_cache
def _refresh_dev_names():
_generated_names.clear()
_backend_mapping.clear()
for backend in _load_generators():
for key, url, hash in backend.get_dev_output_names():
versioned_url = urlquote(url)
if hash:
versioned_url += '?version=' + hash
_generated_names.setdefault(key, [])
_generated_names[key].append(versioned_url)
_backend_mapping[url] = backend
class _MatchNothing(object):
def match(self, content):
return False
def prepare_patterns(patterns, setting_name):
"""Helper function for patter-matching settings."""
if isinstance(patterns, basestring):
patterns = (patterns,)
if not patterns:
return _MatchNothing()
# First validate each pattern individually
for pattern in patterns:
try:
re.compile(pattern, re.U)
except re.error:
raise ValueError("""Pattern "%s" can't be compiled """
"in %s" % (pattern, setting_name))
# Now return a combined pattern
return re.compile('^(' + ')$|^('.join(patterns) + ')$', re.U)
def get_production_mapping():
if NAMES is None:
raise ImportError('Could not import %s. This '
'file is needed for production mode. Please '
'run manage.py generatemedia to create it.'
% GENERATED_MEDIA_NAMES_MODULE)
return NAMES
def get_media_mapping():
if media_settings.MEDIA_DEV_MODE:
return _generated_names
return get_production_mapping()
def get_media_url_mapping():
if media_settings.MEDIA_DEV_MODE:
base_url = DEV_MEDIA_URL
else:
base_url = PRODUCTION_MEDIA_URL
mapping = {}
for key, value in get_media_mapping().items():
if isinstance(value, basestring):
value = (value,)
mapping[key] = [base_url + url for url in value]
return mapping
def media_urls(key, refresh=False):
if media_settings.MEDIA_DEV_MODE:
if refresh:
_refresh_dev_names()
return [DEV_MEDIA_URL + url for url in _generated_names[key]]
+ print repr(get_production_mapping())
return [PRODUCTION_MEDIA_URL + get_production_mapping()[key]]
def media_url(key, refresh=False):
urls = media_urls(key, refresh=refresh)
if len(urls) == 1:
return urls[0]
raise ValueError('media_url() only works with URLs that contain exactly '
'one file. Use media_urls() (or {% include_media %} in templates) instead.')
def get_media_dirs():
if not _media_dirs_cache:
- media_dirs = [os.path.normcase(os.path.normpath(root))
- for root in GLOBAL_MEDIA_DIRS]
+ media_dirs = GLOBAL_MEDIA_DIRS[:]
for app in settings.INSTALLED_APPS:
if app in IGNORE_APP_MEDIA_DIRS:
continue
- for name in ('static', 'media'):
+ for name in (u'static', u'media'):
app_root = os.path.dirname(import_module(app).__file__)
media_dirs.append(os.path.join(app_root, name))
_media_dirs_cache.extend(media_dirs)
return _media_dirs_cache
def find_file(name, media_dirs=None):
if media_dirs is None:
media_dirs = get_media_dirs()
for root in media_dirs:
path = os.path.normpath(os.path.join(root, name))
if os.path.isfile(path):
return path
def read_text_file(path):
fp = open(path, 'r')
output = fp.read()
fp.close()
return output.decode('utf8')
def load_backend(backend):
if backend not in _backends_cache:
module_name, func_name = backend.rsplit('.', 1)
_backends_cache[backend] = _load_backend(backend)
return _backends_cache[backend]
def _load_backend(path):
module_name, attr_name = path.rsplit('.', 1)
try:
mod = import_module(module_name)
except (ImportError, ValueError), e:
raise ImproperlyConfigured('Error importing backend module %s: "%s"' % (module_name, e))
try:
return getattr(mod, attr_name)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" backend' % (module_name, attr_name))
|
adieu/django-mediagenerator | 22ffd169cbc36d8ab7fc4e85212f7516b7edb24a | bumped version | diff --git a/setup.py b/setup.py
index 657e411..8c26c91 100644
--- a/setup.py
+++ b/setup.py
@@ -1,33 +1,33 @@
from setuptools import setup, find_packages
DESCRIPTION = 'Asset manager for Django'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except:
pass
setup(name='django-mediagenerator',
- version='1.10.1',
+ version='1.10.2',
packages=find_packages(exclude=('tests', 'tests.*',
'base_project', 'base_project.*')),
package_data={'mediagenerator.filters': ['pyjslibs/*.py', '*.rb'],
'mediagenerator': ['templates/mediagenerator/manifest/*']},
author='Waldemar Kornewald',
author_email='[email protected]',
url='http://www.allbuttonspressed.com/projects/django-mediagenerator',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
],
)
|
adieu/django-mediagenerator | 226bc484fd10b022a97c17aba45d1dbf2e4cc4af | added link definition | diff --git a/.deps b/.deps
new file mode 100644
index 0000000..e26bd4c
--- /dev/null
+++ b/.deps
@@ -0,0 +1,2 @@
+[links]
+mediagenerator = django-mediagenerator/mediagenerator
|
adieu/django-mediagenerator | 76239672284b79e19ee18ca294dfde01dafc168b | added upgrade notes | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index e460faa..0d6cc99 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,138 +1,145 @@
Changelog
=============================================================
+Version 1.10.2
+-------------------------------------------------------------
+
+**Upgrade notes:** If you've specified a custom ``SASS_FRAMEWORKS`` in your ``settings.py`` you now also have to list ``compass`` and ``blueprint`` in that setting.
+
+* All Compass/Sass frameworks (including ``compass`` and ``blueprint``) now have to be listed explictily in the ``SASS_FRAMEWORKS`` setting.
+
Version 1.10.1
-------------------------------------------------------------
* Added workaround for Windows bug in Sass 3.1. Backslash characters aren't handled correctly for "-I" import path parameters.
Version 1.10
-------------------------------------------------------------
* Added Compass support to Sass filter. You now have to install both Compass and Sass. Import Sass/Compass frameworks via ``manage.py importsassframeworks``.
* Fixed CoffeeScript support on OSX
* Fixed support for non-ascii chars in input files
* Added "Content-Length" response header for files served in dev mode (needed for Flash). Thanks to "sayane" for the patch.
* Fixed typo which resulted in broken support for ``.html`` assets. Thanks to "pendletongp" for the patch.
* Now showing instructive error message when Sass can't be found
* Use correct output path for ``_generated_media_names.py`` even when ``manage.py generatemedia`` is not started from the project root. Thanks to "pendletongp" for the patch.
* Added support for overriding the ``_generated_media_names`` module's import path and file system location (only needed for non-standard project structures).
Version 1.9.2
-------------------------------------------------------------
* Added missing ``base.manifest`` template and ``base_project`` to zip package
Version 1.9.1
-------------------------------------------------------------
* Fixed relative imports in Sass filter
Version 1.9
-------------------------------------------------------------
* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
* Added caching for CoffeeScript compilation results
* In cache manifests the ``NETWORK`` section now contains "``*``" by default
* By default ``.woff`` files are now copied, too
* Fixed first-time media generation when ``MEDIA_DEV_MODE=False``
* Fixed i18n filter in development mode. Contributed by Simon Payne.
* Fixed support for "/" in bundle names in dev mode (always worked fine in production)
* Changed ``DEV_MEDIA_URL`` fallback from ``STATICFILES_URL`` to ``STATIC_URL`` (has been changed in Django trunk)
Version 1.8
-------------------------------------------------------------
* HTML5 manifest now uses a regex to match included/excluded files
* Added support for scss files
* Fixed Sass ``@import`` tracking for partials
Version 1.7
-------------------------------------------------------------
* Large performance improvements, in particular on App Engine dev_appserver
Version 1.6.1
-------------------------------------------------------------
* Fixed support for Django 1.1 which imports ``mediagenerator.templatetags.media`` as ``django.templatetags.media`` and thus breaks relative imports
Version 1.6
-------------------------------------------------------------
**Upgrade notes:** The installation got simplified. Please remove the media code from your urls.py. The ``MediaMiddleware`` now takes care of everything.
* Added support for CSS data URIs. Doesn't yet generate MHTML for IE6/7 support.
* Added support for pre-bundling i18n JavaScript translations, so you don't need to use Django's slower AJAX view. With this filter translations are part of your generated JS bundle.
* Added support for CleverCSS
* Simplified installation process. The media view got completely replaced by ``MediaMiddleware``.
* Fixed support for output variations (needed by i18n filter to generate the same JS file in different variations for each language)
Version 1.5.1
-------------------------------------------------------------
**Upgrade notes:** There's a conflict with ``STATICFILES_URL`` in Django trunk (1.3). Use ``DEV_MEDIA_URL`` instead from now on.
* ``DEV_MEDIA_URL`` should be used instead of ``MEDIA_URL`` and ``STATICFILES_URL``, though the other two are still valid for backwards-compatibility
Version 1.5
-------------------------------------------------------------
This is another staticfiles-compatibility release which is intended to allow for writing reusable open-source apps.
**Upgrade notes:** The CSS URL rewriting scheme has changed. Previously, ``url()`` statements in CSS files were treated similar to "absolute" URLs where the root is ``STATICFILES_URL`` (or ``MEDIA_URL``). This scheme was used because it was consistent with URLs in Sass. Now URLs are treated as relative to the CSS file. So, if the file ``css/style.css`` wants to link to ``img/icon.png`` the URL now has to be ``url(../img/icon.png)``. Previously it was ``url(img/icon.png)``. One way to upgrade to the staticfiles-compatible scheme is to modify your existing URLs.
If you don't want to change your CSS files there is an alternative, but it's not staticfiles-compatible. Add the following to your settings: ``REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = False``
**Important:** Sass files still use the old scheme (``url(img/icon.png)``) because this is **much** easier to understand and allows for more reusable code, especially when you ``@import`` other Sass modules and those link to images.
* Made CSS URL rewriting system compatible with ``django.contrib.staticfiles``
* Added support for CSS URLs that contain a hash (e.g.: ``url('webfont.svg#webfontmAfNlbV6')``). Thanks to Karl Bowden for the patch!
* Filter backends now have an additional ``self.bundle`` attribute which contains the final bundle name
* Fixed an incompatibility with Django 1.1 and 1.0 (``django.utils.itercompat.product`` isn't available in those releases)
* Fixed ``MediaMiddleware``, so it doesn't cache error responses
Version 1.4
-------------------------------------------------------------
This is a compatibility release which prepares for the new staticfiles feature in Django 1.3.
**Upgrade notes:** Place your app media in a "static" folder instead of a "media" folder. Use ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) instead of ``MEDIA_URL`` from now on.
* App media is now searched in "static" folders instead of "media". For now, you can still use "media" folders, but this might be deprecated in the future (for the sake of having just one standard for reusable apps).
* ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) should be used instead of ``MEDIA_URL`` because the meaning of that variable has changed in Django 1.3.
* ``DEV_MEDIA_URL`` falls back to ``STATICFILES_URL`` and ``GLOBAL_MEDIA_DIRS`` falls back to ``STATICFILES_DIRS`` if undefined (you should still use the former, respectively; this is just for convenience)
Version 1.3.1
-------------------------------------------------------------
* Improved handling of media variations. This also fixes a bug with using CSS media types in production mode
Version 1.3
-------------------------------------------------------------
* Added support for setting media type for CSS. E.g.: ``{% include_media 'bundle.css' media='print' %}``
Version 1.2.1
-------------------------------------------------------------
* Fixed caching problems on runserver when using i18n and ``LocaleMiddleware``
Version 1.2
-------------------------------------------------------------
**Upgrade notes:** Please add ``'mediagenerator.middleware.MediaMiddleware'`` as the **first** middleware in your settings.py.
* Got rid of unnecessary HTTP roundtrips when ``USE_ETAGS = True``
* Added Django template filter (by default only used for .html files), contributed by Matt Bierner
* Added media_url() filter which provides access to generated URLs from JS
* CopyFiles backend can now ignore files matching certain regex patterns
Version 1.1
-------------------------------------------------------------
* Added Closure compiler backend
* Added HTML5 cache manifest file backend
* Fixed Sass support on Linux
* Updated pyjs filter to latest pyjs repo version
* "swf" and "ico" files are now copied, too, by default
|
adieu/django-mediagenerator | 7c3999a0b0bbe56f51c30b20d95283ee6a3346cb | from now on all Compass frameworks have to be listed explicitly | diff --git a/mediagenerator/filters/sass.py b/mediagenerator/filters/sass.py
index a6b76c3..5df6fd7 100644
--- a/mediagenerator/filters/sass.py
+++ b/mediagenerator/filters/sass.py
@@ -1,148 +1,152 @@
from django.conf import settings
from django.utils.encoding import smart_str
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import get_media_dirs, find_file, read_text_file
from subprocess import Popen, PIPE
import os
import posixpath
import re
import sys
# Emits extra debug info that can be used by the FireSass Firebug plugin
SASS_DEBUG_INFO = getattr(settings, 'SASS_DEBUG_INFO', False)
-SASS_FRAMEWORKS = getattr(settings, 'SASS_FRAMEWORKS', ())
+SASS_FRAMEWORKS = getattr(settings, 'SASS_FRAMEWORKS',
+ ('compass', 'blueprint'))
if isinstance(SASS_FRAMEWORKS, basestring):
SASS_FRAMEWORKS = (SASS_FRAMEWORKS,)
_RE_FLAGS = re.MULTILINE | re.UNICODE
multi_line_comment_re = re.compile(r'/\*.*?\*/', _RE_FLAGS | re.DOTALL)
one_line_comment_re = re.compile(r'//.*', _RE_FLAGS)
import_re = re.compile(r'^@import\s+["\']?(.+?)["\']?\s*;?\s*$', _RE_FLAGS)
class Sass(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, path=(), main_module=None)
if isinstance(self.path, basestring):
self.path = (self.path,)
super(Sass, self).__init__(**kwargs)
assert self.filetype == 'css', (
'Sass only supports compilation to css. '
'The parent filter expects "%s".' % self.filetype)
assert self.main_module, \
'You must provide a main module'
self.path += tuple(get_media_dirs())
self.path_args = []
for path in self.path:
self.path_args.extend(('-I', path.replace('\\', '/')))
self._compiled = None
self._compiled_hash = None
self._dependencies = {}
@classmethod
def from_default(cls, name):
return {'main_module': name}
def get_output(self, variation):
self._regenerate(debug=False)
yield self._compiled
def get_dev_output(self, name, variation):
assert name == self.main_module
self._regenerate(debug=True)
return self._compiled
def get_dev_output_names(self, variation):
self._regenerate(debug=True)
yield self.main_module, self._compiled_hash
def _compile(self, debug=False):
extensions = os.path.join(os.path.dirname(__file__), 'sass_compass.rb')
extensions = extensions.replace('\\', '/')
run = ['sass', '-C', '-t', 'expanded', '--require', extensions]
for framework in SASS_FRAMEWORKS:
+ # Some frameworks are loaded by default
+ if framework in ('blueprint', 'compass'):
+ continue
run.extend(('--require', framework))
if debug:
run.append('--line-numbers')
if SASS_DEBUG_INFO:
run.append('--debug-info')
run.extend(self.path_args)
shell = sys.platform == 'win32'
try:
cmd = Popen(run, shell=shell, universal_newlines=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
module = self.main_module.rsplit('.', 1)[0]
output, error = cmd.communicate('@import "%s"' % module)
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
return output
except Exception, e:
raise ValueError("Failed to execute Sass. Please make sure that "
"you have installed Sass (http://sass-lang.com) and "
"Compass (http://compass-style.org).\n"
"Error was: %s" % e)
def _regenerate(self, debug=False):
if self._dependencies:
for name, mtime in self._dependencies.items():
path = self._find_file(name)
if not path or os.path.getmtime(path) != mtime:
# Just recompile everything
self._dependencies = {}
break
else:
# No changes
return
modules = [self.main_module]
while True:
if not modules:
break
module_name = modules.pop()
path = self._find_file(module_name)
assert path, 'Could not find the Sass module %s' % module_name
mtime = os.path.getmtime(path)
self._dependencies[module_name] = mtime
source = read_text_file(path)
dependencies = self._get_dependencies(source)
for name in dependencies:
# Try relative import, first
transformed = posixpath.join(posixpath.dirname(module_name), name)
path = self._find_file(transformed)
if path:
name = transformed
else:
path = self._find_file(name)
assert path, ('The Sass module %s could not find the '
'dependency %s' % (module_name, name))
if name not in self._dependencies:
modules.append(name)
self._compiled = self._compile(debug=debug)
self._compiled_hash = sha1(smart_str(self._compiled)).hexdigest()
def _get_dependencies(self, source):
clean_source = multi_line_comment_re.sub('\n', source)
clean_source = one_line_comment_re.sub('', clean_source)
return [name for name in import_re.findall(clean_source)
if not name.endswith('.css')]
def _find_file(self, name):
parts = name.rsplit('/', 1)
parts[-1] = '_' + parts[-1]
partial = '/'.join(parts)
if not name.endswith(('.sass', '.scss')):
names = (name + '.sass', name + '.scss', partial + '.sass',
partial + '.scss')
else:
names = (name, partial)
for name in names:
path = find_file(name, media_dirs=self.path)
if path:
return path
diff --git a/mediagenerator/filters/sass_paths.rb b/mediagenerator/filters/sass_paths.rb
index 6278798..e2a9ad6 100644
--- a/mediagenerator/filters/sass_paths.rb
+++ b/mediagenerator/filters/sass_paths.rb
@@ -1,12 +1,17 @@
require "rubygems"
require "sass"
require "compass"
+DEFAULT_FRAMEWORKS = ["compass", "blueprint"]
+
ARGV.each do |arg|
+ next if arg == "compass"
+ next if arg == "blueprint"
require arg
end
Compass::Frameworks::ALL.each do |framework|
next if framework.name =~ /^_/
+ next if DEFAULT_FRAMEWORKS.include?(framework.name) && !ARGV.include?(framework.name)
print "#{File.expand_path(framework.stylesheets_directory)}\n"
end
|
adieu/django-mediagenerator | 6b046c6b980a37d02246389a91bec41b6efab7da | added support for specifying path of imported-sass-frameworks folder | diff --git a/mediagenerator/management/commands/importsassframeworks.py b/mediagenerator/management/commands/importsassframeworks.py
index 3087ff8..c226173 100644
--- a/mediagenerator/management/commands/importsassframeworks.py
+++ b/mediagenerator/management/commands/importsassframeworks.py
@@ -1,70 +1,73 @@
from ...filters import sass
from ...utils import get_media_dirs
+from django.conf import settings
from django.core.management.base import NoArgsCommand
from subprocess import Popen, PIPE
import os
import shutil
import sys
import __main__
_frameworks_dir = 'imported-sass-frameworks'
if hasattr(__main__, '__file__'):
_root = os.path.dirname(__main__.__file__)
_frameworks_dir = os.path.join(_root, _frameworks_dir)
-FRAMEWORKS_DIR = os.path.normcase(os.path.abspath(_frameworks_dir))
+FRAMEWORKS_DIR = getattr(settings, 'IMPORTED_SASS_FRAMEWORKS_DIR',
+ _frameworks_dir)
+FRAMEWORKS_DIR = os.path.normcase(os.path.abspath(FRAMEWORKS_DIR))
PATHS_SCRIPT = os.path.join(os.path.dirname(sass.__file__), 'sass_paths.rb')
def copy_children(src, dst):
for item in os.listdir(src):
path = os.path.join(src, item)
copy_fs_node(path, dst)
def copy_fs_node(src, dst):
basename = os.path.basename(src)
dst = os.path.join(dst, basename)
if os.path.isfile(src):
shutil.copy(src, dst)
elif os.path.isdir(src):
shutil.copytree(src, dst)
else:
raise ValueError("Don't know how to copy file system node: %s" % src)
class Command(NoArgsCommand):
help = 'Copies Sass/Compass frameworks into the current project.'
requires_model_validation = False
def handle_noargs(self, **options):
if os.path.exists(FRAMEWORKS_DIR):
shutil.rmtree(FRAMEWORKS_DIR)
os.mkdir(FRAMEWORKS_DIR)
for path in self.get_framework_paths():
copy_children(path, FRAMEWORKS_DIR)
if FRAMEWORKS_DIR not in get_media_dirs():
sys.stderr.write('Please add the "%(dir)s" '
'folder to your GLOBAL_MEDIA_DIRS setting '
'like this:\n\n'
'GLOBAL_MEDIA_DIRS = (\n'
' ...\n'
" os.path.join(os.path.dirname(__file__),\n"
" '%(dir)s'),\n"
" ...\n"
")\n" % {'dir': os.path.basename(FRAMEWORKS_DIR)})
def get_framework_paths(self):
run = ['ruby', PATHS_SCRIPT]
run.extend(sass.SASS_FRAMEWORKS)
try:
cmd = Popen(run, universal_newlines=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, error = cmd.communicate()
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
return map(os.path.abspath, filter(None, output.split('\n')))
except Exception, e:
raise ValueError("Failed to execute an internal Ruby script. "
"Please make sure that you have installed Ruby "
"(http://ruby-lang.org), Sass (http://sass-lang.com), and "
"Compass (http://compass-style.org).\n"
"Error was: %s" % e)
|
adieu/django-mediagenerator | 1e0dd8d257573b6986188ef7439ca758131e3845 | changed line ending | diff --git a/.hgeol b/.hgeol
index bad5a49..d5332c3 100644
--- a/.hgeol
+++ b/.hgeol
@@ -1,16 +1,19 @@
[patterns]
+.deps = native
+.hgignore = native
+.hgeol = native
**.txt = native
**.pyva = native
**.py = native
-**.rb = native
+**.ru = native
**.c = native
**.cpp = native
**.cu = native
**.h = native
**.hpp = native
**.tmpl = native
**.html = native
**.htm = native
**.js = native
**.manifest = native
**.yaml = native
diff --git a/mediagenerator/filters/sass_compass.rb b/mediagenerator/filters/sass_compass.rb
index d22534b..23049b6 100644
--- a/mediagenerator/filters/sass_compass.rb
+++ b/mediagenerator/filters/sass_compass.rb
@@ -1,66 +1,66 @@
-require "rubygems"
-require "compass"
-
-module Compass::SassExtensions::Functions::Urls
-
- def stylesheet_url(path, only_path = Sass::Script::Bool.new(false))
- if only_path.to_bool
- Sass::Script::String.new(clean_path(path))
- else
- clean_url(path)
- end
- end
-
- def font_url(path, only_path = Sass::Script::Bool.new(false))
- path = path.value # get to the string value of the literal.
-
- # Short curcuit if they have provided an absolute url.
- if absolute_path?(path)
- return Sass::Script::String.new("url(#{path})")
- end
-
- if only_path.to_bool
- Sass::Script::String.new(clean_path(path))
- else
- clean_url(path)
- end
- end
-
- def image_url(path, only_path = Sass::Script::Bool.new(false))
- print "#{@options}\n"
- path = path.value # get to the string value of the literal.
-
- if absolute_path?(path)
- # Short curcuit if they have provided an absolute url.
- return Sass::Script::String.new("url(#{path})")
- end
-
- if only_path.to_bool
- Sass::Script::String.new(clean_path(path))
- else
- clean_url(path)
- end
- end
-
- private
-
- # Emits a path, taking off any leading "./"
- def clean_path(url)
- url = url.to_s
- url = url[0..1] == "./" ? url[2..-1] : url
- end
-
- # Emits a url, taking off any leading "./"
- def clean_url(url)
- Sass::Script::String.new("url('#{clean_path(url)}')")
- end
-
- def absolute_path?(path)
- path[0..0] == "/" || path[0..3] == "http"
- end
-
-end
-
-module Sass::Script::Functions
- include Compass::SassExtensions::Functions::Urls
-end
+require "rubygems"
+require "compass"
+
+module Compass::SassExtensions::Functions::Urls
+
+ def stylesheet_url(path, only_path = Sass::Script::Bool.new(false))
+ if only_path.to_bool
+ Sass::Script::String.new(clean_path(path))
+ else
+ clean_url(path)
+ end
+ end
+
+ def font_url(path, only_path = Sass::Script::Bool.new(false))
+ path = path.value # get to the string value of the literal.
+
+ # Short curcuit if they have provided an absolute url.
+ if absolute_path?(path)
+ return Sass::Script::String.new("url(#{path})")
+ end
+
+ if only_path.to_bool
+ Sass::Script::String.new(clean_path(path))
+ else
+ clean_url(path)
+ end
+ end
+
+ def image_url(path, only_path = Sass::Script::Bool.new(false))
+ print "#{@options}\n"
+ path = path.value # get to the string value of the literal.
+
+ if absolute_path?(path)
+ # Short curcuit if they have provided an absolute url.
+ return Sass::Script::String.new("url(#{path})")
+ end
+
+ if only_path.to_bool
+ Sass::Script::String.new(clean_path(path))
+ else
+ clean_url(path)
+ end
+ end
+
+ private
+
+ # Emits a path, taking off any leading "./"
+ def clean_path(url)
+ url = url.to_s
+ url = url[0..1] == "./" ? url[2..-1] : url
+ end
+
+ # Emits a url, taking off any leading "./"
+ def clean_url(url)
+ Sass::Script::String.new("url('#{clean_path(url)}')")
+ end
+
+ def absolute_path?(path)
+ path[0..0] == "/" || path[0..3] == "http"
+ end
+
+end
+
+module Sass::Script::Functions
+ include Compass::SassExtensions::Functions::Urls
+end
diff --git a/mediagenerator/filters/sass_paths.rb b/mediagenerator/filters/sass_paths.rb
index 39900e9..6278798 100644
--- a/mediagenerator/filters/sass_paths.rb
+++ b/mediagenerator/filters/sass_paths.rb
@@ -1,12 +1,12 @@
-require "rubygems"
-require "sass"
-require "compass"
-
-ARGV.each do |arg|
- require arg
-end
-
-Compass::Frameworks::ALL.each do |framework|
- next if framework.name =~ /^_/
- print "#{File.expand_path(framework.stylesheets_directory)}\n"
-end
+require "rubygems"
+require "sass"
+require "compass"
+
+ARGV.each do |arg|
+ require arg
+end
+
+Compass::Frameworks::ALL.each do |framework|
+ next if framework.name =~ /^_/
+ print "#{File.expand_path(framework.stylesheets_directory)}\n"
+end
|
adieu/django-mediagenerator | 7c347ae0229baf45db57e7df9ba25183915bad82 | Fixed unicode support for Closure. Patch contributed by Andrew Shearer. Thanks! | diff --git a/mediagenerator/filters/closure.py b/mediagenerator/filters/closure.py
index 2f61864..6069543 100644
--- a/mediagenerator/filters/closure.py
+++ b/mediagenerator/filters/closure.py
@@ -1,35 +1,36 @@
from django.conf import settings
+from django.utils.encoding import smart_str
from mediagenerator.generators.bundles.base import Filter
COMPILATION_LEVEL = getattr(settings, 'CLOSURE_COMPILATION_LEVEL',
'SIMPLE_OPTIMIZATIONS')
class Closure(Filter):
def __init__(self, **kwargs):
self.config(kwargs, compilation_level=COMPILATION_LEVEL)
super(Closure, self).__init__(**kwargs)
assert self.filetype == 'js', (
'Closure only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
def get_output(self, variation):
# We import this here, so App Engine Helper users don't get import
# errors.
from subprocess import Popen, PIPE
for input in self.get_input(variation):
try:
compressor = settings.CLOSURE_COMPILER_PATH
cmd = Popen(['java', '-jar', compressor,
'--charset', 'utf-8',
'--compilation_level', self.compilation_level],
stdin=PIPE, stdout=PIPE, stderr=PIPE,
universal_newlines=True)
- output, error = cmd.communicate(input)
+ output, error = cmd.communicate(smart_str(input))
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
yield output
except Exception, e:
raise ValueError("Failed to execute Java VM or Closure. "
"Please make sure that you have installed Java "
"and that it's in your PATH and that you've configured "
"CLOSURE_COMPILER_PATH in your settings correctly.\n"
"Error was: %s" % e)
|
adieu/django-mediagenerator | fb88d17a923c00d792f44122f53bf4df3022e67d | unicode fix in yuicompressor | diff --git a/mediagenerator/filters/yuicompressor.py b/mediagenerator/filters/yuicompressor.py
index 47f035d..bdb5f22 100644
--- a/mediagenerator/filters/yuicompressor.py
+++ b/mediagenerator/filters/yuicompressor.py
@@ -1,30 +1,31 @@
from django.conf import settings
+from django.utils.encoding import smart_str
from mediagenerator.generators.bundles.base import Filter
class YUICompressor(Filter):
def __init__(self, **kwargs):
super(YUICompressor, self).__init__(**kwargs)
assert self.filetype in ('css', 'js'), (
'YUICompressor only supports compilation to css and js. '
'The parent filter expects "%s".' % self.filetype)
def get_output(self, variation):
# We import this here, so App Engine Helper users don't get import
# errors.
from subprocess import Popen, PIPE
for input in self.get_input(variation):
try:
compressor = settings.YUICOMPRESSOR_PATH
cmd = Popen(['java', '-jar', compressor,
'--charset', 'utf-8', '--type', self.filetype],
stdin=PIPE, stdout=PIPE, stderr=PIPE,
universal_newlines=True)
- output, error = cmd.communicate(input)
+ output, error = cmd.communicate(smart_str(input))
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
yield output
except Exception, e:
raise ValueError("Failed to execute Java VM or yuicompressor. "
"Please make sure that you have installed Java "
"and that it's in your PATH and that you've configured "
"YUICOMPRESSOR_PATH in your settings correctly.\n"
"Error was: %s" % e)
|
adieu/django-mediagenerator | b9795bb75eafe26e252bec42442d3c31c283f35c | fixed line endings | diff --git a/.hgeol b/.hgeol
index b2d9e3b..bad5a49 100644
--- a/.hgeol
+++ b/.hgeol
@@ -1,15 +1,16 @@
[patterns]
**.txt = native
**.pyva = native
**.py = native
+**.rb = native
**.c = native
**.cpp = native
**.cu = native
**.h = native
**.hpp = native
**.tmpl = native
**.html = native
**.htm = native
**.js = native
**.manifest = native
**.yaml = native
diff --git a/base_project/settings.py b/base_project/settings.py
index 162764f..45ab554 100644
--- a/base_project/settings.py
+++ b/base_project/settings.py
@@ -1,77 +1,75 @@
# -*- coding: utf-8 -*-
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
MEDIA_BUNDLES = (
('main.css',
'css/reset.css',
'css/style.css',
'css/icons/icon.css',
),
)
# Get project root folder
_project_root = os.path.dirname(__file__)
# Set global media search paths
GLOBAL_MEDIA_DIRS = (
os.path.join(_project_root, 'static'),
)
# Set media URL (important: don't forget the trailing slash!).
# PRODUCTION_MEDIA_URL is used when running manage.py generatemedia
MEDIA_DEV_MODE = DEBUG
DEV_MEDIA_URL = '/devmedia/'
PRODUCTION_MEDIA_URL = '/media/'
# Configure yuicompressor if available
YUICOMPRESSOR_PATH = os.path.join(
os.path.dirname(_project_root), 'yuicompressor.jar')
if os.path.exists(YUICOMPRESSOR_PATH):
ROOT_MEDIA_FILTERS = {
'js': 'mediagenerator.filters.yuicompressor.YUICompressor',
'css': 'mediagenerator.filters.yuicompressor.YUICompressor',
}
ADMIN_MEDIA_PREFIX = '/media/admin/'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'sqlite.db',
}
}
SECRET_KEY = '=r-$b*8hglm+858&9t043hlm6-&6-3d3vfc4((7yd0dbrakhvi'
SITE_ID = 1
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.contenttypes',
'django.contrib.sites',
'mediagenerator',
)
MIDDLEWARE_CLASSES = (
'mediagenerator.middleware.MediaMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.auth',
'django.core.context_processors.request',
)
USE_I18N = False
-MEDIA_ROOT = os.path.join(_project_root, 'media')
-
TEMPLATE_DIRS = (os.path.join(_project_root, 'templates'),)
ROOT_URLCONF = 'urls'
diff --git a/mediagenerator/filters/sass.py b/mediagenerator/filters/sass.py
index 1854ae9..a6b76c3 100644
--- a/mediagenerator/filters/sass.py
+++ b/mediagenerator/filters/sass.py
@@ -1,146 +1,148 @@
from django.conf import settings
from django.utils.encoding import smart_str
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import get_media_dirs, find_file, read_text_file
from subprocess import Popen, PIPE
import os
import posixpath
import re
import sys
# Emits extra debug info that can be used by the FireSass Firebug plugin
SASS_DEBUG_INFO = getattr(settings, 'SASS_DEBUG_INFO', False)
SASS_FRAMEWORKS = getattr(settings, 'SASS_FRAMEWORKS', ())
+if isinstance(SASS_FRAMEWORKS, basestring):
+ SASS_FRAMEWORKS = (SASS_FRAMEWORKS,)
_RE_FLAGS = re.MULTILINE | re.UNICODE
multi_line_comment_re = re.compile(r'/\*.*?\*/', _RE_FLAGS | re.DOTALL)
one_line_comment_re = re.compile(r'//.*', _RE_FLAGS)
import_re = re.compile(r'^@import\s+["\']?(.+?)["\']?\s*;?\s*$', _RE_FLAGS)
class Sass(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, path=(), main_module=None)
if isinstance(self.path, basestring):
self.path = (self.path,)
super(Sass, self).__init__(**kwargs)
assert self.filetype == 'css', (
'Sass only supports compilation to css. '
'The parent filter expects "%s".' % self.filetype)
assert self.main_module, \
'You must provide a main module'
self.path += tuple(get_media_dirs())
self.path_args = []
for path in self.path:
self.path_args.extend(('-I', path.replace('\\', '/')))
self._compiled = None
self._compiled_hash = None
self._dependencies = {}
@classmethod
def from_default(cls, name):
return {'main_module': name}
def get_output(self, variation):
self._regenerate(debug=False)
yield self._compiled
def get_dev_output(self, name, variation):
assert name == self.main_module
self._regenerate(debug=True)
return self._compiled
def get_dev_output_names(self, variation):
self._regenerate(debug=True)
yield self.main_module, self._compiled_hash
def _compile(self, debug=False):
extensions = os.path.join(os.path.dirname(__file__), 'sass_compass.rb')
extensions = extensions.replace('\\', '/')
run = ['sass', '-C', '-t', 'expanded', '--require', extensions]
for framework in SASS_FRAMEWORKS:
run.extend(('--require', framework))
if debug:
run.append('--line-numbers')
if SASS_DEBUG_INFO:
run.append('--debug-info')
run.extend(self.path_args)
shell = sys.platform == 'win32'
try:
cmd = Popen(run, shell=shell, universal_newlines=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
module = self.main_module.rsplit('.', 1)[0]
output, error = cmd.communicate('@import "%s"' % module)
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
return output
except Exception, e:
raise ValueError("Failed to execute Sass. Please make sure that "
"you have installed Sass (http://sass-lang.com) and "
"Compass (http://compass-style.org).\n"
"Error was: %s" % e)
def _regenerate(self, debug=False):
if self._dependencies:
for name, mtime in self._dependencies.items():
path = self._find_file(name)
if not path or os.path.getmtime(path) != mtime:
# Just recompile everything
self._dependencies = {}
break
else:
# No changes
return
modules = [self.main_module]
while True:
if not modules:
break
module_name = modules.pop()
path = self._find_file(module_name)
assert path, 'Could not find the Sass module %s' % module_name
mtime = os.path.getmtime(path)
self._dependencies[module_name] = mtime
source = read_text_file(path)
dependencies = self._get_dependencies(source)
for name in dependencies:
# Try relative import, first
transformed = posixpath.join(posixpath.dirname(module_name), name)
path = self._find_file(transformed)
if path:
name = transformed
else:
path = self._find_file(name)
assert path, ('The Sass module %s could not find the '
'dependency %s' % (module_name, name))
if name not in self._dependencies:
modules.append(name)
self._compiled = self._compile(debug=debug)
self._compiled_hash = sha1(smart_str(self._compiled)).hexdigest()
def _get_dependencies(self, source):
clean_source = multi_line_comment_re.sub('\n', source)
clean_source = one_line_comment_re.sub('', clean_source)
return [name for name in import_re.findall(clean_source)
if not name.endswith('.css')]
def _find_file(self, name):
parts = name.rsplit('/', 1)
parts[-1] = '_' + parts[-1]
partial = '/'.join(parts)
if not name.endswith(('.sass', '.scss')):
names = (name + '.sass', name + '.scss', partial + '.sass',
partial + '.scss')
else:
names = (name, partial)
for name in names:
path = find_file(name, media_dirs=self.path)
if path:
return path
diff --git a/mediagenerator/filters/sass_compass.rb b/mediagenerator/filters/sass_compass.rb
index 23049b6..d22534b 100644
--- a/mediagenerator/filters/sass_compass.rb
+++ b/mediagenerator/filters/sass_compass.rb
@@ -1,66 +1,66 @@
-require "rubygems"
-require "compass"
-
-module Compass::SassExtensions::Functions::Urls
-
- def stylesheet_url(path, only_path = Sass::Script::Bool.new(false))
- if only_path.to_bool
- Sass::Script::String.new(clean_path(path))
- else
- clean_url(path)
- end
- end
-
- def font_url(path, only_path = Sass::Script::Bool.new(false))
- path = path.value # get to the string value of the literal.
-
- # Short curcuit if they have provided an absolute url.
- if absolute_path?(path)
- return Sass::Script::String.new("url(#{path})")
- end
-
- if only_path.to_bool
- Sass::Script::String.new(clean_path(path))
- else
- clean_url(path)
- end
- end
-
- def image_url(path, only_path = Sass::Script::Bool.new(false))
- print "#{@options}\n"
- path = path.value # get to the string value of the literal.
-
- if absolute_path?(path)
- # Short curcuit if they have provided an absolute url.
- return Sass::Script::String.new("url(#{path})")
- end
-
- if only_path.to_bool
- Sass::Script::String.new(clean_path(path))
- else
- clean_url(path)
- end
- end
-
- private
-
- # Emits a path, taking off any leading "./"
- def clean_path(url)
- url = url.to_s
- url = url[0..1] == "./" ? url[2..-1] : url
- end
-
- # Emits a url, taking off any leading "./"
- def clean_url(url)
- Sass::Script::String.new("url('#{clean_path(url)}')")
- end
-
- def absolute_path?(path)
- path[0..0] == "/" || path[0..3] == "http"
- end
-
-end
-
-module Sass::Script::Functions
- include Compass::SassExtensions::Functions::Urls
-end
+require "rubygems"
+require "compass"
+
+module Compass::SassExtensions::Functions::Urls
+
+ def stylesheet_url(path, only_path = Sass::Script::Bool.new(false))
+ if only_path.to_bool
+ Sass::Script::String.new(clean_path(path))
+ else
+ clean_url(path)
+ end
+ end
+
+ def font_url(path, only_path = Sass::Script::Bool.new(false))
+ path = path.value # get to the string value of the literal.
+
+ # Short curcuit if they have provided an absolute url.
+ if absolute_path?(path)
+ return Sass::Script::String.new("url(#{path})")
+ end
+
+ if only_path.to_bool
+ Sass::Script::String.new(clean_path(path))
+ else
+ clean_url(path)
+ end
+ end
+
+ def image_url(path, only_path = Sass::Script::Bool.new(false))
+ print "#{@options}\n"
+ path = path.value # get to the string value of the literal.
+
+ if absolute_path?(path)
+ # Short curcuit if they have provided an absolute url.
+ return Sass::Script::String.new("url(#{path})")
+ end
+
+ if only_path.to_bool
+ Sass::Script::String.new(clean_path(path))
+ else
+ clean_url(path)
+ end
+ end
+
+ private
+
+ # Emits a path, taking off any leading "./"
+ def clean_path(url)
+ url = url.to_s
+ url = url[0..1] == "./" ? url[2..-1] : url
+ end
+
+ # Emits a url, taking off any leading "./"
+ def clean_url(url)
+ Sass::Script::String.new("url('#{clean_path(url)}')")
+ end
+
+ def absolute_path?(path)
+ path[0..0] == "/" || path[0..3] == "http"
+ end
+
+end
+
+module Sass::Script::Functions
+ include Compass::SassExtensions::Functions::Urls
+end
diff --git a/mediagenerator/filters/sass_paths.rb b/mediagenerator/filters/sass_paths.rb
index 6278798..39900e9 100644
--- a/mediagenerator/filters/sass_paths.rb
+++ b/mediagenerator/filters/sass_paths.rb
@@ -1,12 +1,12 @@
-require "rubygems"
-require "sass"
-require "compass"
-
-ARGV.each do |arg|
- require arg
-end
-
-Compass::Frameworks::ALL.each do |framework|
- next if framework.name =~ /^_/
- print "#{File.expand_path(framework.stylesheets_directory)}\n"
-end
+require "rubygems"
+require "sass"
+require "compass"
+
+ARGV.each do |arg|
+ require arg
+end
+
+Compass::Frameworks::ALL.each do |framework|
+ next if framework.name =~ /^_/
+ print "#{File.expand_path(framework.stylesheets_directory)}\n"
+end
|
adieu/django-mediagenerator | b6699cc7da9e23fd94c6a9b990430242f7df7557 | added a few more fixes for unicode handling | diff --git a/mediagenerator/filters/coffeescript.py b/mediagenerator/filters/coffeescript.py
index c7f60c2..b562535 100644
--- a/mediagenerator/filters/coffeescript.py
+++ b/mediagenerator/filters/coffeescript.py
@@ -1,65 +1,66 @@
+from django.utils.encoding import smart_str
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import find_file
from subprocess import Popen, PIPE
import os
import sys
class CoffeeScript(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, module=None)
super(CoffeeScript, self).__init__(**kwargs)
assert self.filetype == 'js', (
'CoffeeScript only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
self._compiled = None
self._compiled_hash = None
self._mtime = None
@classmethod
def from_default(cls, name):
return {'module': name}
def get_output(self, variation):
self._regenerate(debug=False)
yield self._compiled
def get_dev_output(self, name, variation):
assert name == self.module
self._regenerate(debug=True)
return self._compiled
def get_dev_output_names(self, variation):
self._regenerate(debug=True)
yield self.module, self._compiled_hash
def _regenerate(self, debug=False):
path = find_file(self.module)
mtime = os.path.getmtime(path)
if mtime == self._mtime:
return
fp = open(path, 'r')
source = fp.read()
fp.close()
self._compiled = self._compile(source, debug=debug)
- self._compiled_hash = sha1(self._compiled).hexdigest()
+ self._compiled_hash = sha1(smart_str(self._compiled)).hexdigest()
self._mtime = mtime
def _compile(self, input, debug=False):
try:
shell = sys.platform == 'win32'
cmd = Popen(['coffee', '--compile', '--print', '--stdio', '--bare'],
stdin=PIPE, stdout=PIPE, stderr=PIPE,
shell=shell, universal_newlines=True)
output, error = cmd.communicate(input)
assert cmd.wait() == 0, ('CoffeeScript command returned bad '
'result:\n%s' % error)
return output
except Exception, e:
raise ValueError("Failed to run CoffeeScript compiler for this "
"file. Please confirm that the \"coffee\" application is "
"on your path and that you can run it from your own command "
"line.\n"
"Error was: %s" % e)
diff --git a/mediagenerator/filters/concat.py b/mediagenerator/filters/concat.py
index 8db3944..b116390 100644
--- a/mediagenerator/filters/concat.py
+++ b/mediagenerator/filters/concat.py
@@ -1,31 +1,32 @@
+from django.utils.encoding import smart_str
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
class Concat(Filter):
"""
Simply concatenates multiple files into a single file.
This is also the default root filter.
"""
def __init__(self, **kwargs):
self.config(kwargs, concat_dev_output=False, dev_output_name='concat')
super(Concat, self).__init__(**kwargs)
def get_output(self, variation):
yield '\n\n'.join(input for input in self.get_input(variation))
def get_dev_output(self, name, variation):
if not self.concat_dev_output:
return super(Concat, self).get_dev_output(name, variation)
assert self.dev_output_name == name
names = super(Concat, self).get_dev_output_names(variation)
return '\n\n'.join(super(Concat, self).get_dev_output(name[0], variation)
for name in names)
def get_dev_output_names(self, variation):
if not self.concat_dev_output:
for data in super(Concat, self).get_dev_output_names(variation):
yield data
return
content = self.get_dev_output(self.dev_output_name, variation)
- yield self.dev_output_name, sha1(content).hexdigest()
+ yield self.dev_output_name, sha1(smart_str(content)).hexdigest()
diff --git a/mediagenerator/filters/i18n.py b/mediagenerator/filters/i18n.py
index 86de21d..068913f 100644
--- a/mediagenerator/filters/i18n.py
+++ b/mediagenerator/filters/i18n.py
@@ -1,54 +1,55 @@
from django.conf import settings
from django.http import HttpRequest
+from django.utils.encoding import smart_str
from django.views.i18n import javascript_catalog
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
if settings.USE_I18N:
LANGUAGES = [code for code, _ in settings.LANGUAGES]
else:
LANGUAGES = (settings.LANGUAGE_CODE,)
class I18N(Filter):
takes_input = False
def __init__(self, **kwargs):
super(I18N, self).__init__(**kwargs)
assert self.filetype == 'js', (
'I18N only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
def get_variations(self):
return {'language': LANGUAGES}
def get_output(self, variation):
language = variation['language']
yield self._generate(language)
def get_dev_output(self, name, variation):
language = variation['language']
assert language == name
return self._generate(language)
def get_dev_output_names(self, variation):
language = variation['language']
content = self._generate(language)
- hash = sha1(content).hexdigest()
+ hash = sha1(smart_str(content)).hexdigest()
yield language, hash
def _generate(self, language):
language_bidi = language.split('-')[0] in settings.LANGUAGES_BIDI
request = HttpRequest()
request.GET['language'] = language
# Add some JavaScript data
content = 'var LANGUAGE_CODE = "%s";\n' % language
content += 'var LANGUAGE_BIDI = ' + \
(language_bidi and 'true' or 'false') + ';\n'
content += javascript_catalog(request,
packages=settings.INSTALLED_APPS).content
# The hgettext() function just calls gettext() internally, but
# it won't get indexed by makemessages.
content += '\nwindow.hgettext = function(text) { return gettext(text); };\n'
# Add a similar hngettext() function
content += 'window.hngettext = function(singular, plural, count) { return ngettext(singular, plural, count); };\n'
return content
diff --git a/mediagenerator/filters/media_url.py b/mediagenerator/filters/media_url.py
index 8930d77..1a04eec 100644
--- a/mediagenerator/filters/media_url.py
+++ b/mediagenerator/filters/media_url.py
@@ -1,46 +1,47 @@
+from django.utils.encoding import smart_str
from django.utils.simplejson import dumps
+from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import get_media_url_mapping
-from hashlib import sha1
_CODE = """
_$MEDIA_URLS = %s;
media_urls = function(key) {
var urls = _$MEDIA_URLS[key];
if (!urls)
throw 'Could not resolve media url ' + key;
return urls;
};
media_url = function(key) {
var urls = media_urls(key);
if (urls.length == 1)
return urls[0];
throw 'media_url() only works with keys that point to a single entry (e.g. an image), but not bundles. Use media_urls() instead.';
};
""".lstrip()
class MediaURL(Filter):
takes_input = False
def __init__(self, **kwargs):
super(MediaURL, self).__init__(**kwargs)
assert self.filetype == 'js', (
'MediaURL only supports JS output. '
'The parent filter expects "%s".' % self.filetype)
def get_output(self, variation):
yield self._compile()
def get_dev_output(self, name, variation):
assert name == '.media_url.js'
return self._compile()
def get_dev_output_names(self, variation):
content = self._compile()
- hash = sha1(content).hexdigest()
+ hash = sha1(smart_str(content)).hexdigest()
yield '.media_url.js', hash
def _compile(self):
return _CODE % dumps(get_media_url_mapping())
diff --git a/mediagenerator/filters/pyjs_filter.py b/mediagenerator/filters/pyjs_filter.py
index af8ea70..5c3a3c1 100644
--- a/mediagenerator/filters/pyjs_filter.py
+++ b/mediagenerator/filters/pyjs_filter.py
@@ -1,275 +1,276 @@
+from django.utils.encoding import smart_str
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import get_media_dirs, read_text_file
from pyjs.translator import import_compiler, Translator, LIBRARY_PATH
from textwrap import dedent
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Register PYVA() function
try:
from pyvascript.grammar import compile
from pyjs.translator import native_js_func
@native_js_func
def PYVA(content, unescape, is_statement, **kwargs):
result = compile(dedent(unescape(content)))
if not is_statement:
return result.strip().rstrip('\r\n\t ;')
return result
except ImportError:
# No PyvaScript installed
pass
_HANDLE_EXCEPTIONS = """
} finally { $pyjs.in_try_except -= 1; }
} catch(err) {
pyjslib['_handle_exception'](err);
}
"""
PYJS_INIT_LIB_PATH = os.path.join(LIBRARY_PATH, 'builtin', 'public', '_pyjs.js')
BUILTIN_PATH = os.path.join(LIBRARY_PATH, 'builtin')
STDLIB_PATH = os.path.join(LIBRARY_PATH, 'lib')
EXTRA_LIBS_PATH = os.path.join(os.path.dirname(__file__), 'pyjslibs')
_LOAD_PYJSLIB = """
$p = $pyjs.loaded_modules["pyjslib"];
$p('pyjslib');
$pyjs.__modules__.pyjslib = $p['pyjslib']
"""
INIT_CODE = """
var $wnd = window;
var $doc = window.document;
var $pyjs = new Object();
var $p = null;
$pyjs.platform = 'safari';
$pyjs.global_namespace = this;
$pyjs.__modules__ = {};
$pyjs.modules_hash = {};
$pyjs.loaded_modules = {};
$pyjs.options = new Object();
$pyjs.options.arg_ignore = true;
$pyjs.options.arg_count = true;
$pyjs.options.arg_is_instance = true;
$pyjs.options.arg_instance_type = false;
$pyjs.options.arg_kwarg_dup = true;
$pyjs.options.arg_kwarg_unexpected_keyword = true;
$pyjs.options.arg_kwarg_multiple_values = true;
$pyjs.options.dynamic_loading = false;
$pyjs.trackstack = [];
$pyjs.track = {module:'__main__', lineno: 1};
$pyjs.trackstack.push($pyjs.track);
$pyjs.__active_exception_stack__ = null;
$pyjs.__last_exception_stack__ = null;
$pyjs.__last_exception__ = null;
$pyjs.in_try_except = 0;
""".lstrip()
class Pyjs(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, exclude_main_libs=False, main_module=None,
debug=None, path=(), only_dependencies=None)
if isinstance(self.path, basestring):
self.path = (self.path,)
self.path += tuple(get_media_dirs())
if self.only_dependencies is None:
self.only_dependencies = bool(self.main_module)
if self.only_dependencies:
self.path += (STDLIB_PATH, BUILTIN_PATH, EXTRA_LIBS_PATH)
super(Pyjs, self).__init__(**kwargs)
assert self.filetype == 'js', (
'Pyjs only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
if self.only_dependencies:
assert self.main_module, \
'You must provide a main module in only_dependencies mode'
self._compiled = {}
self._collected = {}
@classmethod
def from_default(cls, name):
return {'main_module': name.rsplit('.', 1)[0]}
def get_output(self, variation):
self._collect_all_modules()
if not self.exclude_main_libs:
yield self._compile_init()
if self.only_dependencies:
self._regenerate(dev_mode=False)
for name in sorted(self._compiled.keys()):
yield self._compiled[name][1]
else:
for name in sorted(self._collected.keys()):
source = read_text_file(self._collected[name])
yield self._compile(name, source, dev_mode=False)[0]
yield self._compile_main(dev_mode=False)
def get_dev_output(self, name, variation):
self._collect_all_modules()
name = name.split('/', 1)[-1]
if name == '._pyjs.js':
return self._compile_init()
elif name == '.main.js':
return self._compile_main(dev_mode=True)
if self.only_dependencies:
self._regenerate(dev_mode=True)
return self._compiled[name][1]
else:
source = read_text_file(self._collected[name])
return self._compile(name, source, dev_mode=True)[0]
def get_dev_output_names(self, variation):
self._collect_all_modules()
if not self.exclude_main_libs:
content = self._compile_init()
- hash = sha1(content).hexdigest()
+ hash = sha1(smart_str(content)).hexdigest()
yield '._pyjs.js', hash
if self.only_dependencies:
self._regenerate(dev_mode=True)
for name in sorted(self._compiled.keys()):
yield name, self._compiled[name][2]
else:
for name in sorted(self._collected.keys()):
yield name, None
if self.main_module is not None or not self.exclude_main_libs:
content = self._compile_main(dev_mode=True)
- hash = sha1(content).hexdigest()
+ hash = sha1(smart_str(content)).hexdigest()
yield '.main.js', hash
def _regenerate(self, dev_mode=False):
# This function is only called in only_dependencies mode
if self._compiled:
for module_name, (mtime, content, hash) in self._compiled.items():
if module_name not in self._collected or \
not os.path.exists(self._collected[module_name]) or \
os.path.getmtime(self._collected[module_name]) != mtime:
# Just recompile everything
# TODO: track dependencies and changes and recompile only
# what's necessary
self._compiled = {}
break
else:
# No changes
return
modules = [self.main_module, 'pyjslib']
while True:
if not modules:
break
module_name = modules.pop()
path = self._collected[module_name]
mtime = os.path.getmtime(path)
source = read_text_file(path)
try:
content, py_deps, js_deps = self._compile(module_name, source, dev_mode=dev_mode)
except:
self._compiled = {}
raise
- hash = sha1(content).hexdigest()
+ hash = sha1(smart_str(content)).hexdigest()
self._compiled[module_name] = (mtime, content, hash)
for name in py_deps:
if name not in self._collected:
if '.' in name and name.rsplit('.', 1)[0] in self._collected:
name = name.rsplit('.', 1)[0]
else:
raise ImportError('The pyjs module %s could not find '
'the dependency %s' % (module_name, name))
if name not in self._compiled:
modules.append(name)
def _compile(self, name, source, dev_mode=False):
if self.debug is None:
debug = dev_mode
else:
debug = self.debug
compiler = import_compiler(False)
tree = compiler.parse(source)
output = StringIO()
translator = Translator(compiler, name, name, source, tree, output,
# Debug options
debug=debug, source_tracking=debug, line_tracking=debug,
store_source=debug,
# Speed and size optimizations
function_argument_checking=debug, attribute_checking=False,
inline_code=False, number_classes=False,
# Sufficient Python conformance
operator_funcs=True, bound_methods=True, descriptors=True,
)
return output.getvalue(), translator.imported_modules, translator.imported_js
def _compile_init(self):
return INIT_CODE + read_text_file(PYJS_INIT_LIB_PATH)
def _compile_main(self, dev_mode=False):
if self.debug is None:
debug = dev_mode
else:
debug = self.debug
content = ''
if not self.exclude_main_libs:
content += _LOAD_PYJSLIB
if self.main_module is not None:
content += '\n\n'
if debug:
content += 'try {\n'
content += ' try {\n'
content += ' $pyjs.in_try_except += 1;\n '
content += 'pyjslib.___import___("%s", null, "__main__");' % self.main_module
if debug:
content += _HANDLE_EXCEPTIONS
return content
def _collect_all_modules(self):
"""Collect modules, so we can handle imports later"""
for pkgroot in self.path:
pkgroot = os.path.abspath(pkgroot)
for root, dirs, files in os.walk(pkgroot):
if '__init__.py' in files:
files.remove('__init__.py')
# The root __init__.py is ignored
if root != pkgroot:
files.insert(0, '__init__.py')
elif root != pkgroot:
# Only add valid Python packages
dirs[:] = []
continue
for filename in files:
if not filename.endswith('.py'):
continue
path = os.path.join(root, filename)
module_path = path[len(pkgroot) + len(os.sep):]
if os.path.basename(module_path) == '__init__.py':
module_name = os.path.dirname(module_path)
else:
module_name = module_path[:-3]
assert '.' not in module_name, \
'Invalid module file name: %s' % module_path
module_name = module_name.replace(os.sep, '.')
self._collected.setdefault(module_name, path)
diff --git a/mediagenerator/filters/sass.py b/mediagenerator/filters/sass.py
index 15757aa..1854ae9 100644
--- a/mediagenerator/filters/sass.py
+++ b/mediagenerator/filters/sass.py
@@ -1,145 +1,146 @@
from django.conf import settings
+from django.utils.encoding import smart_str
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import get_media_dirs, find_file, read_text_file
from subprocess import Popen, PIPE
import os
import posixpath
import re
import sys
# Emits extra debug info that can be used by the FireSass Firebug plugin
SASS_DEBUG_INFO = getattr(settings, 'SASS_DEBUG_INFO', False)
SASS_FRAMEWORKS = getattr(settings, 'SASS_FRAMEWORKS', ())
_RE_FLAGS = re.MULTILINE | re.UNICODE
multi_line_comment_re = re.compile(r'/\*.*?\*/', _RE_FLAGS | re.DOTALL)
one_line_comment_re = re.compile(r'//.*', _RE_FLAGS)
import_re = re.compile(r'^@import\s+["\']?(.+?)["\']?\s*;?\s*$', _RE_FLAGS)
class Sass(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, path=(), main_module=None)
if isinstance(self.path, basestring):
self.path = (self.path,)
super(Sass, self).__init__(**kwargs)
assert self.filetype == 'css', (
'Sass only supports compilation to css. '
'The parent filter expects "%s".' % self.filetype)
assert self.main_module, \
'You must provide a main module'
self.path += tuple(get_media_dirs())
self.path_args = []
for path in self.path:
self.path_args.extend(('-I', path.replace('\\', '/')))
self._compiled = None
self._compiled_hash = None
self._dependencies = {}
@classmethod
def from_default(cls, name):
return {'main_module': name}
def get_output(self, variation):
self._regenerate(debug=False)
yield self._compiled
def get_dev_output(self, name, variation):
assert name == self.main_module
self._regenerate(debug=True)
return self._compiled
def get_dev_output_names(self, variation):
self._regenerate(debug=True)
yield self.main_module, self._compiled_hash
def _compile(self, debug=False):
extensions = os.path.join(os.path.dirname(__file__), 'sass_compass.rb')
extensions = extensions.replace('\\', '/')
run = ['sass', '-C', '-t', 'expanded', '--require', extensions]
for framework in SASS_FRAMEWORKS:
run.extend(('--require', framework))
if debug:
run.append('--line-numbers')
if SASS_DEBUG_INFO:
run.append('--debug-info')
run.extend(self.path_args)
shell = sys.platform == 'win32'
try:
cmd = Popen(run, shell=shell, universal_newlines=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
module = self.main_module.rsplit('.', 1)[0]
output, error = cmd.communicate('@import "%s"' % module)
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
return output
except Exception, e:
raise ValueError("Failed to execute Sass. Please make sure that "
"you have installed Sass (http://sass-lang.com) and "
"Compass (http://compass-style.org).\n"
"Error was: %s" % e)
def _regenerate(self, debug=False):
if self._dependencies:
for name, mtime in self._dependencies.items():
path = self._find_file(name)
if not path or os.path.getmtime(path) != mtime:
# Just recompile everything
self._dependencies = {}
break
else:
# No changes
return
modules = [self.main_module]
while True:
if not modules:
break
module_name = modules.pop()
path = self._find_file(module_name)
assert path, 'Could not find the Sass module %s' % module_name
mtime = os.path.getmtime(path)
self._dependencies[module_name] = mtime
source = read_text_file(path)
dependencies = self._get_dependencies(source)
for name in dependencies:
# Try relative import, first
transformed = posixpath.join(posixpath.dirname(module_name), name)
path = self._find_file(transformed)
if path:
name = transformed
else:
path = self._find_file(name)
assert path, ('The Sass module %s could not find the '
'dependency %s' % (module_name, name))
if name not in self._dependencies:
modules.append(name)
self._compiled = self._compile(debug=debug)
- self._compiled_hash = sha1(self._compiled).hexdigest()
+ self._compiled_hash = sha1(smart_str(self._compiled)).hexdigest()
def _get_dependencies(self, source):
clean_source = multi_line_comment_re.sub('\n', source)
clean_source = one_line_comment_re.sub('', clean_source)
return [name for name in import_re.findall(clean_source)
if not name.endswith('.css')]
def _find_file(self, name):
parts = name.rsplit('/', 1)
parts[-1] = '_' + parts[-1]
partial = '/'.join(parts)
if not name.endswith(('.sass', '.scss')):
names = (name + '.sass', name + '.scss', partial + '.sass',
partial + '.scss')
else:
names = (name, partial)
for name in names:
path = find_file(name, media_dirs=self.path)
if path:
return path
diff --git a/mediagenerator/generators/bundles/base.py b/mediagenerator/generators/bundles/base.py
index 607a995..5bdb0cf 100644
--- a/mediagenerator/generators/bundles/base.py
+++ b/mediagenerator/generators/bundles/base.py
@@ -1,188 +1,188 @@
from .settings import DEFAULT_MEDIA_FILTERS
from django.utils.encoding import smart_str
from hashlib import sha1
from mediagenerator.utils import load_backend, find_file, read_text_file
import os
class Filter(object):
takes_input = True
def __init__(self, **kwargs):
self.file_filter = FileFilter
self.config(kwargs, filetype=None, filter=None,
bundle=None, _from_default=None)
# We assume that if this is e.g. a 'js' backend then all input must
# also be 'js'. Subclasses must override this if they expect a special
# input file type. Also, subclasses have to check if their file type
# is supported.
self.input_filetype = self.filetype
if self.takes_input:
self.config(kwargs, input=())
if not isinstance(self.input, (tuple, list)):
self.input = (self.input,)
self._input_filters = None
assert not kwargs, 'Unknown parameters: %s' % ', '.join(kwargs.keys())
@classmethod
def from_default(cls, name):
return {'input': name}
def should_use_default_filter(self, ext):
return ext != self._from_default
def get_variations(self):
"""
Returns all possible variations that get generated by this filter.
The result must be a dict whose values are tuples.
"""
return {}
def get_output(self, variation):
"""
Yields content for each output item for the given variation.
"""
raise NotImplementedError()
def get_dev_output(self, name, variation):
"""
Returns content for the given file name and variation in development mode.
"""
index, child = name.split('/', 1)
index = int(index)
filter = self.get_input_filters()[index]
return filter.get_dev_output(child, variation)
def get_dev_output_names(self, variation):
"""
Yields file names for the given variation in development mode.
"""
# By default we simply return our input filters' file names
for index, filter in enumerate(self.get_input_filters()):
for name, hash in filter.get_dev_output_names(variation):
yield '%d/%s' % (index, name), hash
def get_input(self, variation):
"""Yields contents for each input item."""
for filter in self.get_input_filters():
for input in filter.get_output(variation):
yield input
def get_input_filters(self):
"""Returns a Filter instance for each input item."""
if not self.takes_input:
raise ValueError("The %s media filter doesn't take any input" %
self.__class__.__name__)
if self._input_filters is not None:
return self._input_filters
self._input_filters = []
for input in self.input:
if isinstance(input, dict):
filter = self.get_filter(input)
else:
filter = self.get_item(input)
self._input_filters.append(filter)
return self._input_filters
def get_filter(self, config):
backend_class = load_backend(config.get('filter'))
return backend_class(filetype=self.input_filetype, bundle=self.bundle,
**config)
def get_item(self, name):
ext = os.path.splitext(name)[1].lstrip('.')
if ext in DEFAULT_MEDIA_FILTERS and self.should_use_default_filter(ext):
backend_class = load_backend(DEFAULT_MEDIA_FILTERS[ext])
else:
backend_class = self.file_filter
config = backend_class.from_default(name)
config.setdefault('filter',
'%s.%s' % (backend_class.__module__, backend_class.__name__))
config.setdefault('filetype', self.input_filetype)
config['bundle'] = self.bundle
# This is added to make really sure we don't instantiate the same
# filter in an endless loop. Normally, the child class should
# take care of this in should_use_default_filter().
config.setdefault('_from_default', ext)
return backend_class(**config)
def _get_variations_with_input(self):
"""Utility function to get variations including input variations"""
variations = self.get_variations()
if not self.takes_input:
return variations
for filter in self.get_input_filters():
subvariations = filter._get_variations_with_input()
for k, v in subvariations.items():
if k in variations and v != variations[k]:
raise ValueError('Conflicting variations for "%s": %r != %r' % (
k, v, variations[k]))
variations.update(subvariations)
return variations
def config(self, init, **defaults):
for key in defaults:
setattr(self, key, init.pop(key, defaults[key]))
class FileFilter(Filter):
"""A filter that just returns the given file."""
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, name=None)
self.mtime = self.hash = None
super(FileFilter, self).__init__(**kwargs)
@classmethod
def from_default(cls, name):
return {'name': name}
def get_output(self, variation):
yield self.get_dev_output(self.name, variation)
def get_dev_output(self, name, variation):
assert name == self.name, (
'''File name "%s" doesn't match the one in GENERATE_MEDIA ("%s")'''
% (name, self.name))
return read_text_file(self._get_path())
def get_dev_output_names(self, variation):
path = self._get_path()
mtime = os.path.getmtime(path)
if mtime != self.mtime:
output = self.get_dev_output(self.name, variation)
- hash = sha1(output).hexdigest()
+ hash = sha1(smart_str(output)).hexdigest()
else:
hash = self.hash
yield self.name, hash
def _get_path(self):
path = find_file(self.name)
assert path, """File name "%s" doesn't exist.""" % self.name
return path
class RawFileFilter(FileFilter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, path=None)
super(RawFileFilter, self).__init__(**kwargs)
def get_dev_output(self, name, variation):
assert name == self.name, (
'''File name "%s" doesn't match the one in GENERATE_MEDIA ("%s")'''
% (name, self.name))
return read_text_file(self.path)
def get_dev_output_names(self, variation):
mtime = os.path.getmtime(self.path)
if mtime != self.mtime:
output = self.get_dev_output(self.name, variation)
hash = sha1(smart_str(output)).hexdigest()
else:
hash = self.hash
yield self.name, hash
|
adieu/django-mediagenerator | 93f65f07730ea0c4216804b36fd40436b98b206c | bumped version | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 9378564..e460faa 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,133 +1,138 @@
Changelog
=============================================================
+Version 1.10.1
+-------------------------------------------------------------
+
+* Added workaround for Windows bug in Sass 3.1. Backslash characters aren't handled correctly for "-I" import path parameters.
+
Version 1.10
-------------------------------------------------------------
* Added Compass support to Sass filter. You now have to install both Compass and Sass. Import Sass/Compass frameworks via ``manage.py importsassframeworks``.
* Fixed CoffeeScript support on OSX
* Fixed support for non-ascii chars in input files
* Added "Content-Length" response header for files served in dev mode (needed for Flash). Thanks to "sayane" for the patch.
* Fixed typo which resulted in broken support for ``.html`` assets. Thanks to "pendletongp" for the patch.
* Now showing instructive error message when Sass can't be found
* Use correct output path for ``_generated_media_names.py`` even when ``manage.py generatemedia`` is not started from the project root. Thanks to "pendletongp" for the patch.
* Added support for overriding the ``_generated_media_names`` module's import path and file system location (only needed for non-standard project structures).
Version 1.9.2
-------------------------------------------------------------
* Added missing ``base.manifest`` template and ``base_project`` to zip package
Version 1.9.1
-------------------------------------------------------------
* Fixed relative imports in Sass filter
Version 1.9
-------------------------------------------------------------
* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
* Added caching for CoffeeScript compilation results
* In cache manifests the ``NETWORK`` section now contains "``*``" by default
* By default ``.woff`` files are now copied, too
* Fixed first-time media generation when ``MEDIA_DEV_MODE=False``
* Fixed i18n filter in development mode. Contributed by Simon Payne.
* Fixed support for "/" in bundle names in dev mode (always worked fine in production)
* Changed ``DEV_MEDIA_URL`` fallback from ``STATICFILES_URL`` to ``STATIC_URL`` (has been changed in Django trunk)
Version 1.8
-------------------------------------------------------------
* HTML5 manifest now uses a regex to match included/excluded files
* Added support for scss files
* Fixed Sass ``@import`` tracking for partials
Version 1.7
-------------------------------------------------------------
* Large performance improvements, in particular on App Engine dev_appserver
Version 1.6.1
-------------------------------------------------------------
* Fixed support for Django 1.1 which imports ``mediagenerator.templatetags.media`` as ``django.templatetags.media`` and thus breaks relative imports
Version 1.6
-------------------------------------------------------------
**Upgrade notes:** The installation got simplified. Please remove the media code from your urls.py. The ``MediaMiddleware`` now takes care of everything.
* Added support for CSS data URIs. Doesn't yet generate MHTML for IE6/7 support.
* Added support for pre-bundling i18n JavaScript translations, so you don't need to use Django's slower AJAX view. With this filter translations are part of your generated JS bundle.
* Added support for CleverCSS
* Simplified installation process. The media view got completely replaced by ``MediaMiddleware``.
* Fixed support for output variations (needed by i18n filter to generate the same JS file in different variations for each language)
Version 1.5.1
-------------------------------------------------------------
**Upgrade notes:** There's a conflict with ``STATICFILES_URL`` in Django trunk (1.3). Use ``DEV_MEDIA_URL`` instead from now on.
* ``DEV_MEDIA_URL`` should be used instead of ``MEDIA_URL`` and ``STATICFILES_URL``, though the other two are still valid for backwards-compatibility
Version 1.5
-------------------------------------------------------------
This is another staticfiles-compatibility release which is intended to allow for writing reusable open-source apps.
**Upgrade notes:** The CSS URL rewriting scheme has changed. Previously, ``url()`` statements in CSS files were treated similar to "absolute" URLs where the root is ``STATICFILES_URL`` (or ``MEDIA_URL``). This scheme was used because it was consistent with URLs in Sass. Now URLs are treated as relative to the CSS file. So, if the file ``css/style.css`` wants to link to ``img/icon.png`` the URL now has to be ``url(../img/icon.png)``. Previously it was ``url(img/icon.png)``. One way to upgrade to the staticfiles-compatible scheme is to modify your existing URLs.
If you don't want to change your CSS files there is an alternative, but it's not staticfiles-compatible. Add the following to your settings: ``REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = False``
**Important:** Sass files still use the old scheme (``url(img/icon.png)``) because this is **much** easier to understand and allows for more reusable code, especially when you ``@import`` other Sass modules and those link to images.
* Made CSS URL rewriting system compatible with ``django.contrib.staticfiles``
* Added support for CSS URLs that contain a hash (e.g.: ``url('webfont.svg#webfontmAfNlbV6')``). Thanks to Karl Bowden for the patch!
* Filter backends now have an additional ``self.bundle`` attribute which contains the final bundle name
* Fixed an incompatibility with Django 1.1 and 1.0 (``django.utils.itercompat.product`` isn't available in those releases)
* Fixed ``MediaMiddleware``, so it doesn't cache error responses
Version 1.4
-------------------------------------------------------------
This is a compatibility release which prepares for the new staticfiles feature in Django 1.3.
**Upgrade notes:** Place your app media in a "static" folder instead of a "media" folder. Use ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) instead of ``MEDIA_URL`` from now on.
* App media is now searched in "static" folders instead of "media". For now, you can still use "media" folders, but this might be deprecated in the future (for the sake of having just one standard for reusable apps).
* ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) should be used instead of ``MEDIA_URL`` because the meaning of that variable has changed in Django 1.3.
* ``DEV_MEDIA_URL`` falls back to ``STATICFILES_URL`` and ``GLOBAL_MEDIA_DIRS`` falls back to ``STATICFILES_DIRS`` if undefined (you should still use the former, respectively; this is just for convenience)
Version 1.3.1
-------------------------------------------------------------
* Improved handling of media variations. This also fixes a bug with using CSS media types in production mode
Version 1.3
-------------------------------------------------------------
* Added support for setting media type for CSS. E.g.: ``{% include_media 'bundle.css' media='print' %}``
Version 1.2.1
-------------------------------------------------------------
* Fixed caching problems on runserver when using i18n and ``LocaleMiddleware``
Version 1.2
-------------------------------------------------------------
**Upgrade notes:** Please add ``'mediagenerator.middleware.MediaMiddleware'`` as the **first** middleware in your settings.py.
* Got rid of unnecessary HTTP roundtrips when ``USE_ETAGS = True``
* Added Django template filter (by default only used for .html files), contributed by Matt Bierner
* Added media_url() filter which provides access to generated URLs from JS
* CopyFiles backend can now ignore files matching certain regex patterns
Version 1.1
-------------------------------------------------------------
* Added Closure compiler backend
* Added HTML5 cache manifest file backend
* Fixed Sass support on Linux
* Updated pyjs filter to latest pyjs repo version
* "swf" and "ico" files are now copied, too, by default
diff --git a/setup.py b/setup.py
index 6032bd6..657e411 100644
--- a/setup.py
+++ b/setup.py
@@ -1,33 +1,33 @@
from setuptools import setup, find_packages
DESCRIPTION = 'Asset manager for Django'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except:
pass
setup(name='django-mediagenerator',
- version='1.10',
+ version='1.10.1',
packages=find_packages(exclude=('tests', 'tests.*',
'base_project', 'base_project.*')),
package_data={'mediagenerator.filters': ['pyjslibs/*.py', '*.rb'],
'mediagenerator': ['templates/mediagenerator/manifest/*']},
author='Waldemar Kornewald',
author_email='[email protected]',
url='http://www.allbuttonspressed.com/projects/django-mediagenerator',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
],
)
|
adieu/django-mediagenerator | 8979ca26565d2fddf3b472f10cf2ed4e15ce7814 | Added workaround for Windows bug in Sass 3.1. Backslash characters aren't handled correctly for "-I" import path parameters. | diff --git a/mediagenerator/filters/sass.py b/mediagenerator/filters/sass.py
index 57ea7b5..15757aa 100644
--- a/mediagenerator/filters/sass.py
+++ b/mediagenerator/filters/sass.py
@@ -1,143 +1,145 @@
from django.conf import settings
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import get_media_dirs, find_file, read_text_file
from subprocess import Popen, PIPE
import os
import posixpath
import re
import sys
# Emits extra debug info that can be used by the FireSass Firebug plugin
SASS_DEBUG_INFO = getattr(settings, 'SASS_DEBUG_INFO', False)
SASS_FRAMEWORKS = getattr(settings, 'SASS_FRAMEWORKS', ())
_RE_FLAGS = re.MULTILINE | re.UNICODE
multi_line_comment_re = re.compile(r'/\*.*?\*/', _RE_FLAGS | re.DOTALL)
one_line_comment_re = re.compile(r'//.*', _RE_FLAGS)
import_re = re.compile(r'^@import\s+["\']?(.+?)["\']?\s*;?\s*$', _RE_FLAGS)
class Sass(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, path=(), main_module=None)
if isinstance(self.path, basestring):
self.path = (self.path,)
super(Sass, self).__init__(**kwargs)
assert self.filetype == 'css', (
'Sass only supports compilation to css. '
'The parent filter expects "%s".' % self.filetype)
assert self.main_module, \
'You must provide a main module'
self.path += tuple(get_media_dirs())
self.path_args = []
for path in self.path:
- self.path_args.extend(('-I', path))
+ self.path_args.extend(('-I', path.replace('\\', '/')))
self._compiled = None
self._compiled_hash = None
self._dependencies = {}
@classmethod
def from_default(cls, name):
return {'main_module': name}
def get_output(self, variation):
self._regenerate(debug=False)
yield self._compiled
def get_dev_output(self, name, variation):
assert name == self.main_module
self._regenerate(debug=True)
return self._compiled
def get_dev_output_names(self, variation):
self._regenerate(debug=True)
yield self.main_module, self._compiled_hash
def _compile(self, debug=False):
extensions = os.path.join(os.path.dirname(__file__), 'sass_compass.rb')
+ extensions = extensions.replace('\\', '/')
run = ['sass', '-C', '-t', 'expanded', '--require', extensions]
for framework in SASS_FRAMEWORKS:
run.extend(('--require', framework))
if debug:
run.append('--line-numbers')
if SASS_DEBUG_INFO:
run.append('--debug-info')
run.extend(self.path_args)
shell = sys.platform == 'win32'
try:
cmd = Popen(run, shell=shell, universal_newlines=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
- output, error = cmd.communicate('@import %s' % self.main_module)
+ module = self.main_module.rsplit('.', 1)[0]
+ output, error = cmd.communicate('@import "%s"' % module)
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
return output
except Exception, e:
raise ValueError("Failed to execute Sass. Please make sure that "
"you have installed Sass (http://sass-lang.com) and "
"Compass (http://compass-style.org).\n"
"Error was: %s" % e)
def _regenerate(self, debug=False):
if self._dependencies:
for name, mtime in self._dependencies.items():
path = self._find_file(name)
if not path or os.path.getmtime(path) != mtime:
# Just recompile everything
self._dependencies = {}
break
else:
# No changes
return
modules = [self.main_module]
while True:
if not modules:
break
module_name = modules.pop()
path = self._find_file(module_name)
assert path, 'Could not find the Sass module %s' % module_name
mtime = os.path.getmtime(path)
self._dependencies[module_name] = mtime
source = read_text_file(path)
dependencies = self._get_dependencies(source)
for name in dependencies:
# Try relative import, first
transformed = posixpath.join(posixpath.dirname(module_name), name)
path = self._find_file(transformed)
if path:
name = transformed
else:
path = self._find_file(name)
assert path, ('The Sass module %s could not find the '
'dependency %s' % (module_name, name))
if name not in self._dependencies:
modules.append(name)
self._compiled = self._compile(debug=debug)
self._compiled_hash = sha1(self._compiled).hexdigest()
def _get_dependencies(self, source):
clean_source = multi_line_comment_re.sub('\n', source)
clean_source = one_line_comment_re.sub('', clean_source)
return [name for name in import_re.findall(clean_source)
if not name.endswith('.css')]
def _find_file(self, name):
parts = name.rsplit('/', 1)
parts[-1] = '_' + parts[-1]
partial = '/'.join(parts)
if not name.endswith(('.sass', '.scss')):
names = (name + '.sass', name + '.scss', partial + '.sass',
partial + '.scss')
else:
names = (name, partial)
for name in names:
path = find_file(name, media_dirs=self.path)
if path:
return path
|
adieu/django-mediagenerator | cb68b4117004e3c88b3b984683081f6f8cf6974b | marked release 1.10 | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 8d19d25..9378564 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,133 +1,133 @@
Changelog
=============================================================
-Version 1.10 (in development)
+Version 1.10
-------------------------------------------------------------
* Added Compass support to Sass filter. You now have to install both Compass and Sass. Import Sass/Compass frameworks via ``manage.py importsassframeworks``.
* Fixed CoffeeScript support on OSX
-* Fixed support for non-ascii chars in the input files
+* Fixed support for non-ascii chars in input files
* Added "Content-Length" response header for files served in dev mode (needed for Flash). Thanks to "sayane" for the patch.
* Fixed typo which resulted in broken support for ``.html`` assets. Thanks to "pendletongp" for the patch.
* Now showing instructive error message when Sass can't be found
* Use correct output path for ``_generated_media_names.py`` even when ``manage.py generatemedia`` is not started from the project root. Thanks to "pendletongp" for the patch.
* Added support for overriding the ``_generated_media_names`` module's import path and file system location (only needed for non-standard project structures).
Version 1.9.2
-------------------------------------------------------------
* Added missing ``base.manifest`` template and ``base_project`` to zip package
Version 1.9.1
-------------------------------------------------------------
* Fixed relative imports in Sass filter
Version 1.9
-------------------------------------------------------------
* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
* Added caching for CoffeeScript compilation results
* In cache manifests the ``NETWORK`` section now contains "``*``" by default
* By default ``.woff`` files are now copied, too
* Fixed first-time media generation when ``MEDIA_DEV_MODE=False``
* Fixed i18n filter in development mode. Contributed by Simon Payne.
* Fixed support for "/" in bundle names in dev mode (always worked fine in production)
* Changed ``DEV_MEDIA_URL`` fallback from ``STATICFILES_URL`` to ``STATIC_URL`` (has been changed in Django trunk)
Version 1.8
-------------------------------------------------------------
* HTML5 manifest now uses a regex to match included/excluded files
* Added support for scss files
* Fixed Sass ``@import`` tracking for partials
Version 1.7
-------------------------------------------------------------
* Large performance improvements, in particular on App Engine dev_appserver
Version 1.6.1
-------------------------------------------------------------
* Fixed support for Django 1.1 which imports ``mediagenerator.templatetags.media`` as ``django.templatetags.media`` and thus breaks relative imports
Version 1.6
-------------------------------------------------------------
**Upgrade notes:** The installation got simplified. Please remove the media code from your urls.py. The ``MediaMiddleware`` now takes care of everything.
* Added support for CSS data URIs. Doesn't yet generate MHTML for IE6/7 support.
* Added support for pre-bundling i18n JavaScript translations, so you don't need to use Django's slower AJAX view. With this filter translations are part of your generated JS bundle.
* Added support for CleverCSS
* Simplified installation process. The media view got completely replaced by ``MediaMiddleware``.
* Fixed support for output variations (needed by i18n filter to generate the same JS file in different variations for each language)
Version 1.5.1
-------------------------------------------------------------
**Upgrade notes:** There's a conflict with ``STATICFILES_URL`` in Django trunk (1.3). Use ``DEV_MEDIA_URL`` instead from now on.
* ``DEV_MEDIA_URL`` should be used instead of ``MEDIA_URL`` and ``STATICFILES_URL``, though the other two are still valid for backwards-compatibility
Version 1.5
-------------------------------------------------------------
This is another staticfiles-compatibility release which is intended to allow for writing reusable open-source apps.
**Upgrade notes:** The CSS URL rewriting scheme has changed. Previously, ``url()`` statements in CSS files were treated similar to "absolute" URLs where the root is ``STATICFILES_URL`` (or ``MEDIA_URL``). This scheme was used because it was consistent with URLs in Sass. Now URLs are treated as relative to the CSS file. So, if the file ``css/style.css`` wants to link to ``img/icon.png`` the URL now has to be ``url(../img/icon.png)``. Previously it was ``url(img/icon.png)``. One way to upgrade to the staticfiles-compatible scheme is to modify your existing URLs.
If you don't want to change your CSS files there is an alternative, but it's not staticfiles-compatible. Add the following to your settings: ``REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = False``
**Important:** Sass files still use the old scheme (``url(img/icon.png)``) because this is **much** easier to understand and allows for more reusable code, especially when you ``@import`` other Sass modules and those link to images.
* Made CSS URL rewriting system compatible with ``django.contrib.staticfiles``
* Added support for CSS URLs that contain a hash (e.g.: ``url('webfont.svg#webfontmAfNlbV6')``). Thanks to Karl Bowden for the patch!
* Filter backends now have an additional ``self.bundle`` attribute which contains the final bundle name
* Fixed an incompatibility with Django 1.1 and 1.0 (``django.utils.itercompat.product`` isn't available in those releases)
* Fixed ``MediaMiddleware``, so it doesn't cache error responses
Version 1.4
-------------------------------------------------------------
This is a compatibility release which prepares for the new staticfiles feature in Django 1.3.
**Upgrade notes:** Place your app media in a "static" folder instead of a "media" folder. Use ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) instead of ``MEDIA_URL`` from now on.
* App media is now searched in "static" folders instead of "media". For now, you can still use "media" folders, but this might be deprecated in the future (for the sake of having just one standard for reusable apps).
* ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) should be used instead of ``MEDIA_URL`` because the meaning of that variable has changed in Django 1.3.
* ``DEV_MEDIA_URL`` falls back to ``STATICFILES_URL`` and ``GLOBAL_MEDIA_DIRS`` falls back to ``STATICFILES_DIRS`` if undefined (you should still use the former, respectively; this is just for convenience)
Version 1.3.1
-------------------------------------------------------------
* Improved handling of media variations. This also fixes a bug with using CSS media types in production mode
Version 1.3
-------------------------------------------------------------
* Added support for setting media type for CSS. E.g.: ``{% include_media 'bundle.css' media='print' %}``
Version 1.2.1
-------------------------------------------------------------
* Fixed caching problems on runserver when using i18n and ``LocaleMiddleware``
Version 1.2
-------------------------------------------------------------
**Upgrade notes:** Please add ``'mediagenerator.middleware.MediaMiddleware'`` as the **first** middleware in your settings.py.
* Got rid of unnecessary HTTP roundtrips when ``USE_ETAGS = True``
* Added Django template filter (by default only used for .html files), contributed by Matt Bierner
* Added media_url() filter which provides access to generated URLs from JS
* CopyFiles backend can now ignore files matching certain regex patterns
Version 1.1
-------------------------------------------------------------
* Added Closure compiler backend
* Added HTML5 cache manifest file backend
* Fixed Sass support on Linux
* Updated pyjs filter to latest pyjs repo version
* "swf" and "ico" files are now copied, too, by default
diff --git a/README.rst b/README.rst
index 1cbcfc7..089a89f 100644
--- a/README.rst
+++ b/README.rst
@@ -1,27 +1,25 @@
Improve your user experience with amazingly fast page loads by combining,
compressing, and versioning your JavaScript & CSS files and images.
django-mediagenerator_ eliminates unnecessary HTTP requests
and maximizes cache usage.
Supports App Engine, Sass_, HTML5 offline manifests, Jinja2_,
Python/pyjs_, CoffeeScript_, and much more. Visit the
`project site`_ for more information.
-Most important changes in version 1.9 - 1.9.2
+Most important changes in version 1.10
=============================================================
-* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
-* In cache manifests the ``NETWORK`` section now contains "``*``" by default
-* Fixed relative imports in Sass filter
-* Fixed i18n filter in development mode. Contributed by Simon Payne.
-* Added missing ``base.manifest`` to zip package
+* Added Compass support to Sass filter. You now have to install both Compass and Sass. Import Sass/Compass frameworks via ``manage.py importsassframeworks``.
+* Fixed CoffeeScript support on OSX
+* Fixed support for non-ascii chars in input files
See `CHANGELOG.rst`_ for the complete changelog.
.. _django-mediagenerator: http://www.allbuttonspressed.com/projects/django-mediagenerator
.. _project site: django-mediagenerator_
.. _Sass: http://sass-lang.com/
.. _pyjs: http://pyjs.org/
.. _CoffeeScript: http://coffeescript.org/
.. _Jinja2: http://jinja.pocoo.org/
.. _CHANGELOG.rst: https://bitbucket.org/wkornewald/django-mediagenerator/src/tip/CHANGELOG.rst
diff --git a/setup.py b/setup.py
index b370b32..6032bd6 100644
--- a/setup.py
+++ b/setup.py
@@ -1,33 +1,33 @@
from setuptools import setup, find_packages
DESCRIPTION = 'Asset manager for Django'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except:
pass
setup(name='django-mediagenerator',
- version='1.9.2',
+ version='1.10',
packages=find_packages(exclude=('tests', 'tests.*',
'base_project', 'base_project.*')),
package_data={'mediagenerator.filters': ['pyjslibs/*.py', '*.rb'],
'mediagenerator': ['templates/mediagenerator/manifest/*']},
author='Waldemar Kornewald',
author_email='[email protected]',
url='http://www.allbuttonspressed.com/projects/django-mediagenerator',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
],
)
|
adieu/django-mediagenerator | 6fe106532b17c25ae5867484251597b8c925a9d8 | added changelog entry for previous commit | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 71f15c2..8d19d25 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,132 +1,133 @@
Changelog
=============================================================
Version 1.10 (in development)
-------------------------------------------------------------
* Added Compass support to Sass filter. You now have to install both Compass and Sass. Import Sass/Compass frameworks via ``manage.py importsassframeworks``.
* Fixed CoffeeScript support on OSX
+* Fixed support for non-ascii chars in the input files
* Added "Content-Length" response header for files served in dev mode (needed for Flash). Thanks to "sayane" for the patch.
* Fixed typo which resulted in broken support for ``.html`` assets. Thanks to "pendletongp" for the patch.
* Now showing instructive error message when Sass can't be found
* Use correct output path for ``_generated_media_names.py`` even when ``manage.py generatemedia`` is not started from the project root. Thanks to "pendletongp" for the patch.
* Added support for overriding the ``_generated_media_names`` module's import path and file system location (only needed for non-standard project structures).
Version 1.9.2
-------------------------------------------------------------
* Added missing ``base.manifest`` template and ``base_project`` to zip package
Version 1.9.1
-------------------------------------------------------------
* Fixed relative imports in Sass filter
Version 1.9
-------------------------------------------------------------
* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
* Added caching for CoffeeScript compilation results
* In cache manifests the ``NETWORK`` section now contains "``*``" by default
* By default ``.woff`` files are now copied, too
* Fixed first-time media generation when ``MEDIA_DEV_MODE=False``
* Fixed i18n filter in development mode. Contributed by Simon Payne.
* Fixed support for "/" in bundle names in dev mode (always worked fine in production)
* Changed ``DEV_MEDIA_URL`` fallback from ``STATICFILES_URL`` to ``STATIC_URL`` (has been changed in Django trunk)
Version 1.8
-------------------------------------------------------------
* HTML5 manifest now uses a regex to match included/excluded files
* Added support for scss files
* Fixed Sass ``@import`` tracking for partials
Version 1.7
-------------------------------------------------------------
* Large performance improvements, in particular on App Engine dev_appserver
Version 1.6.1
-------------------------------------------------------------
* Fixed support for Django 1.1 which imports ``mediagenerator.templatetags.media`` as ``django.templatetags.media`` and thus breaks relative imports
Version 1.6
-------------------------------------------------------------
**Upgrade notes:** The installation got simplified. Please remove the media code from your urls.py. The ``MediaMiddleware`` now takes care of everything.
* Added support for CSS data URIs. Doesn't yet generate MHTML for IE6/7 support.
* Added support for pre-bundling i18n JavaScript translations, so you don't need to use Django's slower AJAX view. With this filter translations are part of your generated JS bundle.
* Added support for CleverCSS
* Simplified installation process. The media view got completely replaced by ``MediaMiddleware``.
* Fixed support for output variations (needed by i18n filter to generate the same JS file in different variations for each language)
Version 1.5.1
-------------------------------------------------------------
**Upgrade notes:** There's a conflict with ``STATICFILES_URL`` in Django trunk (1.3). Use ``DEV_MEDIA_URL`` instead from now on.
* ``DEV_MEDIA_URL`` should be used instead of ``MEDIA_URL`` and ``STATICFILES_URL``, though the other two are still valid for backwards-compatibility
Version 1.5
-------------------------------------------------------------
This is another staticfiles-compatibility release which is intended to allow for writing reusable open-source apps.
**Upgrade notes:** The CSS URL rewriting scheme has changed. Previously, ``url()`` statements in CSS files were treated similar to "absolute" URLs where the root is ``STATICFILES_URL`` (or ``MEDIA_URL``). This scheme was used because it was consistent with URLs in Sass. Now URLs are treated as relative to the CSS file. So, if the file ``css/style.css`` wants to link to ``img/icon.png`` the URL now has to be ``url(../img/icon.png)``. Previously it was ``url(img/icon.png)``. One way to upgrade to the staticfiles-compatible scheme is to modify your existing URLs.
If you don't want to change your CSS files there is an alternative, but it's not staticfiles-compatible. Add the following to your settings: ``REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = False``
**Important:** Sass files still use the old scheme (``url(img/icon.png)``) because this is **much** easier to understand and allows for more reusable code, especially when you ``@import`` other Sass modules and those link to images.
* Made CSS URL rewriting system compatible with ``django.contrib.staticfiles``
* Added support for CSS URLs that contain a hash (e.g.: ``url('webfont.svg#webfontmAfNlbV6')``). Thanks to Karl Bowden for the patch!
* Filter backends now have an additional ``self.bundle`` attribute which contains the final bundle name
* Fixed an incompatibility with Django 1.1 and 1.0 (``django.utils.itercompat.product`` isn't available in those releases)
* Fixed ``MediaMiddleware``, so it doesn't cache error responses
Version 1.4
-------------------------------------------------------------
This is a compatibility release which prepares for the new staticfiles feature in Django 1.3.
**Upgrade notes:** Place your app media in a "static" folder instead of a "media" folder. Use ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) instead of ``MEDIA_URL`` from now on.
* App media is now searched in "static" folders instead of "media". For now, you can still use "media" folders, but this might be deprecated in the future (for the sake of having just one standard for reusable apps).
* ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) should be used instead of ``MEDIA_URL`` because the meaning of that variable has changed in Django 1.3.
* ``DEV_MEDIA_URL`` falls back to ``STATICFILES_URL`` and ``GLOBAL_MEDIA_DIRS`` falls back to ``STATICFILES_DIRS`` if undefined (you should still use the former, respectively; this is just for convenience)
Version 1.3.1
-------------------------------------------------------------
* Improved handling of media variations. This also fixes a bug with using CSS media types in production mode
Version 1.3
-------------------------------------------------------------
* Added support for setting media type for CSS. E.g.: ``{% include_media 'bundle.css' media='print' %}``
Version 1.2.1
-------------------------------------------------------------
* Fixed caching problems on runserver when using i18n and ``LocaleMiddleware``
Version 1.2
-------------------------------------------------------------
**Upgrade notes:** Please add ``'mediagenerator.middleware.MediaMiddleware'`` as the **first** middleware in your settings.py.
* Got rid of unnecessary HTTP roundtrips when ``USE_ETAGS = True``
* Added Django template filter (by default only used for .html files), contributed by Matt Bierner
* Added media_url() filter which provides access to generated URLs from JS
* CopyFiles backend can now ignore files matching certain regex patterns
Version 1.1
-------------------------------------------------------------
* Added Closure compiler backend
* Added HTML5 cache manifest file backend
* Fixed Sass support on Linux
* Updated pyjs filter to latest pyjs repo version
* "swf" and "ico" files are now copied, too, by default
|
adieu/django-mediagenerator | e2f2e74bdb23bbe714b990e447c501054de5db67 | fixed support for non-ascii chars in input files | diff --git a/mediagenerator/api.py b/mediagenerator/api.py
index 103b8cc..1bddf3d 100644
--- a/mediagenerator/api.py
+++ b/mediagenerator/api.py
@@ -1,43 +1,45 @@
from . import settings, utils
from .settings import (GENERATED_MEDIA_DIR, GENERATED_MEDIA_NAMES_FILE,
MEDIA_GENERATORS)
from .utils import load_backend
from django.utils.http import urlquote
import os
import shutil
def generate_media():
if os.path.exists(GENERATED_MEDIA_DIR):
shutil.rmtree(GENERATED_MEDIA_DIR)
# This will make media_url() generate production URLs
was_dev_mode = settings.MEDIA_DEV_MODE
settings.MEDIA_DEV_MODE = False
utils.NAMES = {}
for backend_name in MEDIA_GENERATORS:
backend = load_backend(backend_name)()
for key, url, content in backend.get_output():
version = backend.generate_version(key, url, content)
if version:
base, ext = os.path.splitext(url)
url = '%s-%s%s' % (base, version, ext)
path = os.path.join(GENERATED_MEDIA_DIR, url)
parent = os.path.dirname(path)
if not os.path.exists(parent):
os.makedirs(parent)
fp = open(path, 'wb')
+ if isinstance(content, unicode):
+ content = content.encode('utf8')
fp.write(content)
fp.close()
utils.NAMES[key] = urlquote(url)
settings.MEDIA_DEV_MODE = was_dev_mode
# Generate a module with media file name mappings
fp = open(GENERATED_MEDIA_NAMES_FILE, 'w')
fp.write('NAMES = %r' % utils.NAMES)
fp.close()
diff --git a/mediagenerator/base.py b/mediagenerator/base.py
index 0edf5bb..5da5759 100644
--- a/mediagenerator/base.py
+++ b/mediagenerator/base.py
@@ -1,37 +1,38 @@
+from django.utils.encoding import smart_str
from hashlib import sha1
class Generator(object):
def generate_version(self, key, url, content):
- return sha1(content).hexdigest()
+ return sha1(smart_str(content)).hexdigest()
def get_output(self):
"""
Generates content for production mode.
Yields tuples of the form:
key, url, content
Here, key must be the same as for get_dev_output_names().
"""
for key, url, hash in self.get_dev_output_names():
yield key, url, self.get_dev_output(url)[0]
def get_dev_output(self, name):
"""
Generates content for dev mode.
Yields tuples of the form:
content, mimetype
"""
raise NotImplementedError()
def get_dev_output_names(self):
"""
Generates file names for dev mode.
Yields tuples of the form:
key, url, version_hash
Here, key must be the same as for get_output_names().
"""
raise NotImplementedError()
diff --git a/mediagenerator/filters/pyjs_filter.py b/mediagenerator/filters/pyjs_filter.py
index e6fa4bc..af8ea70 100644
--- a/mediagenerator/filters/pyjs_filter.py
+++ b/mediagenerator/filters/pyjs_filter.py
@@ -1,284 +1,275 @@
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
-from mediagenerator.utils import get_media_dirs
+from mediagenerator.utils import get_media_dirs, read_text_file
from pyjs.translator import import_compiler, Translator, LIBRARY_PATH
from textwrap import dedent
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Register PYVA() function
try:
from pyvascript.grammar import compile
from pyjs.translator import native_js_func
@native_js_func
def PYVA(content, unescape, is_statement, **kwargs):
result = compile(dedent(unescape(content)))
if not is_statement:
return result.strip().rstrip('\r\n\t ;')
return result
except ImportError:
# No PyvaScript installed
pass
_HANDLE_EXCEPTIONS = """
} finally { $pyjs.in_try_except -= 1; }
} catch(err) {
pyjslib['_handle_exception'](err);
}
"""
PYJS_INIT_LIB_PATH = os.path.join(LIBRARY_PATH, 'builtin', 'public', '_pyjs.js')
BUILTIN_PATH = os.path.join(LIBRARY_PATH, 'builtin')
STDLIB_PATH = os.path.join(LIBRARY_PATH, 'lib')
EXTRA_LIBS_PATH = os.path.join(os.path.dirname(__file__), 'pyjslibs')
_LOAD_PYJSLIB = """
$p = $pyjs.loaded_modules["pyjslib"];
$p('pyjslib');
$pyjs.__modules__.pyjslib = $p['pyjslib']
"""
INIT_CODE = """
var $wnd = window;
var $doc = window.document;
var $pyjs = new Object();
var $p = null;
$pyjs.platform = 'safari';
$pyjs.global_namespace = this;
$pyjs.__modules__ = {};
$pyjs.modules_hash = {};
$pyjs.loaded_modules = {};
$pyjs.options = new Object();
$pyjs.options.arg_ignore = true;
$pyjs.options.arg_count = true;
$pyjs.options.arg_is_instance = true;
$pyjs.options.arg_instance_type = false;
$pyjs.options.arg_kwarg_dup = true;
$pyjs.options.arg_kwarg_unexpected_keyword = true;
$pyjs.options.arg_kwarg_multiple_values = true;
$pyjs.options.dynamic_loading = false;
$pyjs.trackstack = [];
$pyjs.track = {module:'__main__', lineno: 1};
$pyjs.trackstack.push($pyjs.track);
$pyjs.__active_exception_stack__ = null;
$pyjs.__last_exception_stack__ = null;
$pyjs.__last_exception__ = null;
$pyjs.in_try_except = 0;
""".lstrip()
class Pyjs(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, exclude_main_libs=False, main_module=None,
debug=None, path=(), only_dependencies=None)
if isinstance(self.path, basestring):
self.path = (self.path,)
self.path += tuple(get_media_dirs())
if self.only_dependencies is None:
self.only_dependencies = bool(self.main_module)
if self.only_dependencies:
self.path += (STDLIB_PATH, BUILTIN_PATH, EXTRA_LIBS_PATH)
super(Pyjs, self).__init__(**kwargs)
assert self.filetype == 'js', (
'Pyjs only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
if self.only_dependencies:
assert self.main_module, \
'You must provide a main module in only_dependencies mode'
self._compiled = {}
self._collected = {}
@classmethod
def from_default(cls, name):
return {'main_module': name.rsplit('.', 1)[0]}
def get_output(self, variation):
self._collect_all_modules()
if not self.exclude_main_libs:
yield self._compile_init()
if self.only_dependencies:
self._regenerate(dev_mode=False)
for name in sorted(self._compiled.keys()):
yield self._compiled[name][1]
else:
for name in sorted(self._collected.keys()):
- fp = open(self._collected[name], 'r')
- output = self._compile(name, fp.read(), dev_mode=False)[0]
- fp.close()
- yield output
+ source = read_text_file(self._collected[name])
+ yield self._compile(name, source, dev_mode=False)[0]
yield self._compile_main(dev_mode=False)
def get_dev_output(self, name, variation):
self._collect_all_modules()
name = name.split('/', 1)[-1]
if name == '._pyjs.js':
return self._compile_init()
elif name == '.main.js':
return self._compile_main(dev_mode=True)
if self.only_dependencies:
self._regenerate(dev_mode=True)
return self._compiled[name][1]
else:
- fp = open(self._collected[name], 'r')
- output = self._compile(name, fp.read(), dev_mode=True)[0]
- fp.close()
- return output
+ source = read_text_file(self._collected[name])
+ return self._compile(name, source, dev_mode=True)[0]
def get_dev_output_names(self, variation):
self._collect_all_modules()
if not self.exclude_main_libs:
content = self._compile_init()
hash = sha1(content).hexdigest()
yield '._pyjs.js', hash
if self.only_dependencies:
self._regenerate(dev_mode=True)
for name in sorted(self._compiled.keys()):
yield name, self._compiled[name][2]
else:
for name in sorted(self._collected.keys()):
yield name, None
if self.main_module is not None or not self.exclude_main_libs:
content = self._compile_main(dev_mode=True)
hash = sha1(content).hexdigest()
yield '.main.js', hash
def _regenerate(self, dev_mode=False):
# This function is only called in only_dependencies mode
if self._compiled:
for module_name, (mtime, content, hash) in self._compiled.items():
if module_name not in self._collected or \
not os.path.exists(self._collected[module_name]) or \
os.path.getmtime(self._collected[module_name]) != mtime:
# Just recompile everything
# TODO: track dependencies and changes and recompile only
# what's necessary
self._compiled = {}
break
else:
# No changes
return
modules = [self.main_module, 'pyjslib']
while True:
if not modules:
break
module_name = modules.pop()
path = self._collected[module_name]
mtime = os.path.getmtime(path)
- fp = open(path, 'r')
- source = fp.read()
- fp.close()
+ source = read_text_file(path)
try:
content, py_deps, js_deps = self._compile(module_name, source, dev_mode=dev_mode)
except:
self._compiled = {}
raise
hash = sha1(content).hexdigest()
self._compiled[module_name] = (mtime, content, hash)
for name in py_deps:
if name not in self._collected:
if '.' in name and name.rsplit('.', 1)[0] in self._collected:
name = name.rsplit('.', 1)[0]
else:
raise ImportError('The pyjs module %s could not find '
'the dependency %s' % (module_name, name))
if name not in self._compiled:
modules.append(name)
def _compile(self, name, source, dev_mode=False):
if self.debug is None:
debug = dev_mode
else:
debug = self.debug
compiler = import_compiler(False)
tree = compiler.parse(source)
output = StringIO()
translator = Translator(compiler, name, name, source, tree, output,
# Debug options
debug=debug, source_tracking=debug, line_tracking=debug,
store_source=debug,
# Speed and size optimizations
function_argument_checking=debug, attribute_checking=False,
inline_code=False, number_classes=False,
# Sufficient Python conformance
operator_funcs=True, bound_methods=True, descriptors=True,
)
return output.getvalue(), translator.imported_modules, translator.imported_js
def _compile_init(self):
- fp = open(PYJS_INIT_LIB_PATH, 'r')
- content = fp.read()
- fp.close()
- return INIT_CODE + content
+ return INIT_CODE + read_text_file(PYJS_INIT_LIB_PATH)
def _compile_main(self, dev_mode=False):
if self.debug is None:
debug = dev_mode
else:
debug = self.debug
content = ''
if not self.exclude_main_libs:
content += _LOAD_PYJSLIB
if self.main_module is not None:
content += '\n\n'
if debug:
content += 'try {\n'
content += ' try {\n'
content += ' $pyjs.in_try_except += 1;\n '
content += 'pyjslib.___import___("%s", null, "__main__");' % self.main_module
if debug:
content += _HANDLE_EXCEPTIONS
return content
def _collect_all_modules(self):
"""Collect modules, so we can handle imports later"""
for pkgroot in self.path:
pkgroot = os.path.abspath(pkgroot)
for root, dirs, files in os.walk(pkgroot):
if '__init__.py' in files:
files.remove('__init__.py')
# The root __init__.py is ignored
if root != pkgroot:
files.insert(0, '__init__.py')
elif root != pkgroot:
# Only add valid Python packages
dirs[:] = []
continue
for filename in files:
if not filename.endswith('.py'):
continue
path = os.path.join(root, filename)
module_path = path[len(pkgroot) + len(os.sep):]
if os.path.basename(module_path) == '__init__.py':
module_name = os.path.dirname(module_path)
else:
module_name = module_path[:-3]
assert '.' not in module_name, \
'Invalid module file name: %s' % module_path
module_name = module_name.replace(os.sep, '.')
self._collected.setdefault(module_name, path)
diff --git a/mediagenerator/filters/sass.py b/mediagenerator/filters/sass.py
index f2c4d25..57ea7b5 100644
--- a/mediagenerator/filters/sass.py
+++ b/mediagenerator/filters/sass.py
@@ -1,146 +1,143 @@
from django.conf import settings
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
-from mediagenerator.utils import get_media_dirs, find_file
+from mediagenerator.utils import get_media_dirs, find_file, read_text_file
from subprocess import Popen, PIPE
import os
import posixpath
import re
import sys
# Emits extra debug info that can be used by the FireSass Firebug plugin
SASS_DEBUG_INFO = getattr(settings, 'SASS_DEBUG_INFO', False)
SASS_FRAMEWORKS = getattr(settings, 'SASS_FRAMEWORKS', ())
_RE_FLAGS = re.MULTILINE | re.UNICODE
multi_line_comment_re = re.compile(r'/\*.*?\*/', _RE_FLAGS | re.DOTALL)
one_line_comment_re = re.compile(r'//.*', _RE_FLAGS)
import_re = re.compile(r'^@import\s+["\']?(.+?)["\']?\s*;?\s*$', _RE_FLAGS)
class Sass(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, path=(), main_module=None)
if isinstance(self.path, basestring):
self.path = (self.path,)
super(Sass, self).__init__(**kwargs)
assert self.filetype == 'css', (
'Sass only supports compilation to css. '
'The parent filter expects "%s".' % self.filetype)
assert self.main_module, \
'You must provide a main module'
self.path += tuple(get_media_dirs())
self.path_args = []
for path in self.path:
self.path_args.extend(('-I', path))
self._compiled = None
self._compiled_hash = None
self._dependencies = {}
@classmethod
def from_default(cls, name):
return {'main_module': name}
def get_output(self, variation):
self._regenerate(debug=False)
yield self._compiled
def get_dev_output(self, name, variation):
assert name == self.main_module
self._regenerate(debug=True)
return self._compiled
def get_dev_output_names(self, variation):
self._regenerate(debug=True)
yield self.main_module, self._compiled_hash
def _compile(self, debug=False):
extensions = os.path.join(os.path.dirname(__file__), 'sass_compass.rb')
run = ['sass', '-C', '-t', 'expanded', '--require', extensions]
for framework in SASS_FRAMEWORKS:
run.extend(('--require', framework))
if debug:
run.append('--line-numbers')
if SASS_DEBUG_INFO:
run.append('--debug-info')
run.extend(self.path_args)
shell = sys.platform == 'win32'
try:
cmd = Popen(run, shell=shell, universal_newlines=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, error = cmd.communicate('@import %s' % self.main_module)
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
return output
except Exception, e:
raise ValueError("Failed to execute Sass. Please make sure that "
"you have installed Sass (http://sass-lang.com) and "
"Compass (http://compass-style.org).\n"
"Error was: %s" % e)
def _regenerate(self, debug=False):
if self._dependencies:
for name, mtime in self._dependencies.items():
path = self._find_file(name)
if not path or os.path.getmtime(path) != mtime:
# Just recompile everything
self._dependencies = {}
break
else:
# No changes
return
modules = [self.main_module]
while True:
if not modules:
break
module_name = modules.pop()
path = self._find_file(module_name)
assert path, 'Could not find the Sass module %s' % module_name
mtime = os.path.getmtime(path)
self._dependencies[module_name] = mtime
- fp = open(path, 'r')
- source = fp.read()
- fp.close()
-
+ source = read_text_file(path)
dependencies = self._get_dependencies(source)
for name in dependencies:
# Try relative import, first
transformed = posixpath.join(posixpath.dirname(module_name), name)
path = self._find_file(transformed)
if path:
name = transformed
else:
path = self._find_file(name)
assert path, ('The Sass module %s could not find the '
'dependency %s' % (module_name, name))
if name not in self._dependencies:
modules.append(name)
self._compiled = self._compile(debug=debug)
self._compiled_hash = sha1(self._compiled).hexdigest()
def _get_dependencies(self, source):
clean_source = multi_line_comment_re.sub('\n', source)
clean_source = one_line_comment_re.sub('', clean_source)
return [name for name in import_re.findall(clean_source)
if not name.endswith('.css')]
def _find_file(self, name):
parts = name.rsplit('/', 1)
parts[-1] = '_' + parts[-1]
partial = '/'.join(parts)
if not name.endswith(('.sass', '.scss')):
names = (name + '.sass', name + '.scss', partial + '.sass',
partial + '.scss')
else:
names = (name, partial)
for name in names:
path = find_file(name, media_dirs=self.path)
if path:
return path
diff --git a/mediagenerator/generators/bundles/base.py b/mediagenerator/generators/bundles/base.py
index fea56f5..607a995 100644
--- a/mediagenerator/generators/bundles/base.py
+++ b/mediagenerator/generators/bundles/base.py
@@ -1,194 +1,188 @@
from .settings import DEFAULT_MEDIA_FILTERS
+from django.utils.encoding import smart_str
from hashlib import sha1
-from mediagenerator.utils import load_backend, find_file
+from mediagenerator.utils import load_backend, find_file, read_text_file
import os
class Filter(object):
takes_input = True
def __init__(self, **kwargs):
self.file_filter = FileFilter
self.config(kwargs, filetype=None, filter=None,
bundle=None, _from_default=None)
# We assume that if this is e.g. a 'js' backend then all input must
# also be 'js'. Subclasses must override this if they expect a special
# input file type. Also, subclasses have to check if their file type
# is supported.
self.input_filetype = self.filetype
if self.takes_input:
self.config(kwargs, input=())
if not isinstance(self.input, (tuple, list)):
self.input = (self.input,)
self._input_filters = None
assert not kwargs, 'Unknown parameters: %s' % ', '.join(kwargs.keys())
@classmethod
def from_default(cls, name):
return {'input': name}
def should_use_default_filter(self, ext):
return ext != self._from_default
def get_variations(self):
"""
Returns all possible variations that get generated by this filter.
The result must be a dict whose values are tuples.
"""
return {}
def get_output(self, variation):
"""
Yields content for each output item for the given variation.
"""
raise NotImplementedError()
def get_dev_output(self, name, variation):
"""
Returns content for the given file name and variation in development mode.
"""
index, child = name.split('/', 1)
index = int(index)
filter = self.get_input_filters()[index]
return filter.get_dev_output(child, variation)
def get_dev_output_names(self, variation):
"""
Yields file names for the given variation in development mode.
"""
# By default we simply return our input filters' file names
for index, filter in enumerate(self.get_input_filters()):
for name, hash in filter.get_dev_output_names(variation):
yield '%d/%s' % (index, name), hash
def get_input(self, variation):
"""Yields contents for each input item."""
for filter in self.get_input_filters():
for input in filter.get_output(variation):
yield input
def get_input_filters(self):
"""Returns a Filter instance for each input item."""
if not self.takes_input:
raise ValueError("The %s media filter doesn't take any input" %
self.__class__.__name__)
if self._input_filters is not None:
return self._input_filters
self._input_filters = []
for input in self.input:
if isinstance(input, dict):
filter = self.get_filter(input)
else:
filter = self.get_item(input)
self._input_filters.append(filter)
return self._input_filters
def get_filter(self, config):
backend_class = load_backend(config.get('filter'))
return backend_class(filetype=self.input_filetype, bundle=self.bundle,
**config)
def get_item(self, name):
ext = os.path.splitext(name)[1].lstrip('.')
if ext in DEFAULT_MEDIA_FILTERS and self.should_use_default_filter(ext):
backend_class = load_backend(DEFAULT_MEDIA_FILTERS[ext])
else:
backend_class = self.file_filter
config = backend_class.from_default(name)
config.setdefault('filter',
'%s.%s' % (backend_class.__module__, backend_class.__name__))
config.setdefault('filetype', self.input_filetype)
config['bundle'] = self.bundle
# This is added to make really sure we don't instantiate the same
# filter in an endless loop. Normally, the child class should
# take care of this in should_use_default_filter().
config.setdefault('_from_default', ext)
return backend_class(**config)
def _get_variations_with_input(self):
"""Utility function to get variations including input variations"""
variations = self.get_variations()
if not self.takes_input:
return variations
for filter in self.get_input_filters():
subvariations = filter._get_variations_with_input()
for k, v in subvariations.items():
if k in variations and v != variations[k]:
raise ValueError('Conflicting variations for "%s": %r != %r' % (
k, v, variations[k]))
variations.update(subvariations)
return variations
def config(self, init, **defaults):
for key in defaults:
setattr(self, key, init.pop(key, defaults[key]))
class FileFilter(Filter):
"""A filter that just returns the given file."""
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, name=None)
self.mtime = self.hash = None
super(FileFilter, self).__init__(**kwargs)
@classmethod
def from_default(cls, name):
return {'name': name}
def get_output(self, variation):
yield self.get_dev_output(self.name, variation)
def get_dev_output(self, name, variation):
assert name == self.name, (
'''File name "%s" doesn't match the one in GENERATE_MEDIA ("%s")'''
% (name, self.name))
- path = self._get_path()
- fp = open(path, 'r')
- output = fp.read()
- fp.close()
- return output
+ return read_text_file(self._get_path())
def get_dev_output_names(self, variation):
path = self._get_path()
mtime = os.path.getmtime(path)
if mtime != self.mtime:
output = self.get_dev_output(self.name, variation)
hash = sha1(output).hexdigest()
else:
hash = self.hash
yield self.name, hash
def _get_path(self):
path = find_file(self.name)
assert path, """File name "%s" doesn't exist.""" % self.name
return path
class RawFileFilter(FileFilter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, path=None)
super(RawFileFilter, self).__init__(**kwargs)
def get_dev_output(self, name, variation):
assert name == self.name, (
'''File name "%s" doesn't match the one in GENERATE_MEDIA ("%s")'''
% (name, self.name))
- fp = open(self.path, 'r')
- output = fp.read()
- fp.close()
- return output
+ return read_text_file(self.path)
def get_dev_output_names(self, variation):
mtime = os.path.getmtime(self.path)
if mtime != self.mtime:
output = self.get_dev_output(self.name, variation)
- hash = sha1(output).hexdigest()
+ hash = sha1(smart_str(output)).hexdigest()
else:
hash = self.hash
yield self.name, hash
diff --git a/mediagenerator/utils.py b/mediagenerator/utils.py
index 9b18ff4..0d1d474 100644
--- a/mediagenerator/utils.py
+++ b/mediagenerator/utils.py
@@ -1,140 +1,146 @@
from . import settings as media_settings
from .settings import (GLOBAL_MEDIA_DIRS, PRODUCTION_MEDIA_URL,
IGNORE_APP_MEDIA_DIRS, MEDIA_GENERATORS, DEV_MEDIA_URL,
GENERATED_MEDIA_NAMES_MODULE)
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.utils.http import urlquote
import os
import re
try:
NAMES = import_module(GENERATED_MEDIA_NAMES_MODULE).NAMES
except (ImportError, AttributeError):
NAMES = None
_backends_cache = {}
_media_dirs_cache = []
_generators_cache = []
_generated_names = {}
_backend_mapping = {}
def _load_generators():
if not _generators_cache:
for name in MEDIA_GENERATORS:
backend = load_backend(name)()
_generators_cache.append(backend)
return _generators_cache
def _refresh_dev_names():
_generated_names.clear()
_backend_mapping.clear()
for backend in _load_generators():
for key, url, hash in backend.get_dev_output_names():
versioned_url = urlquote(url)
if hash:
versioned_url += '?version=' + hash
_generated_names.setdefault(key, [])
_generated_names[key].append(versioned_url)
_backend_mapping[url] = backend
class _MatchNothing(object):
def match(self, content):
return False
def prepare_patterns(patterns, setting_name):
"""Helper function for patter-matching settings."""
if isinstance(patterns, basestring):
patterns = (patterns,)
if not patterns:
return _MatchNothing()
# First validate each pattern individually
for pattern in patterns:
try:
re.compile(pattern, re.U)
except re.error:
raise ValueError("""Pattern "%s" can't be compiled """
"in %s" % (pattern, setting_name))
# Now return a combined pattern
return re.compile('^(' + ')$|^('.join(patterns) + ')$', re.U)
def get_production_mapping():
if NAMES is None:
raise ImportError('Could not import %s. This '
'file is needed for production mode. Please '
'run manage.py generatemedia to create it.'
% GENERATED_MEDIA_NAMES_MODULE)
return NAMES
def get_media_mapping():
if media_settings.MEDIA_DEV_MODE:
return _generated_names
return get_production_mapping()
def get_media_url_mapping():
if media_settings.MEDIA_DEV_MODE:
base_url = DEV_MEDIA_URL
else:
base_url = PRODUCTION_MEDIA_URL
mapping = {}
for key, value in get_media_mapping().items():
if isinstance(value, basestring):
value = (value,)
mapping[key] = [base_url + url for url in value]
return mapping
def media_urls(key, refresh=False):
if media_settings.MEDIA_DEV_MODE:
if refresh:
_refresh_dev_names()
return [DEV_MEDIA_URL + url for url in _generated_names[key]]
return [PRODUCTION_MEDIA_URL + get_production_mapping()[key]]
def media_url(key, refresh=False):
urls = media_urls(key, refresh=refresh)
if len(urls) == 1:
return urls[0]
raise ValueError('media_url() only works with URLs that contain exactly '
'one file. Use media_urls() (or {% include_media %} in templates) instead.')
def get_media_dirs():
if not _media_dirs_cache:
media_dirs = [os.path.normcase(os.path.normpath(root))
for root in GLOBAL_MEDIA_DIRS]
for app in settings.INSTALLED_APPS:
if app in IGNORE_APP_MEDIA_DIRS:
continue
for name in ('static', 'media'):
app_root = os.path.dirname(import_module(app).__file__)
media_dirs.append(os.path.join(app_root, name))
_media_dirs_cache.extend(media_dirs)
return _media_dirs_cache
def find_file(name, media_dirs=None):
if media_dirs is None:
media_dirs = get_media_dirs()
for root in media_dirs:
path = os.path.normpath(os.path.join(root, name))
if os.path.isfile(path):
return path
+def read_text_file(path):
+ fp = open(path, 'r')
+ output = fp.read()
+ fp.close()
+ return output.decode('utf8')
+
def load_backend(backend):
if backend not in _backends_cache:
module_name, func_name = backend.rsplit('.', 1)
_backends_cache[backend] = _load_backend(backend)
return _backends_cache[backend]
def _load_backend(path):
module_name, attr_name = path.rsplit('.', 1)
try:
mod = import_module(module_name)
except (ImportError, ValueError), e:
raise ImproperlyConfigured('Error importing backend module %s: "%s"' % (module_name, e))
try:
return getattr(mod, attr_name)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" backend' % (module_name, attr_name))
|
adieu/django-mediagenerator | 93da92186b86742972c605fd7e203004a2ef7459 | Added require rubygems to the filters sass_compass.rb and sass_paths.rb. This is required for the require statements which follow to work. | diff --git a/mediagenerator/filters/sass_compass.rb b/mediagenerator/filters/sass_compass.rb
index 434e0ca..23049b6 100644
--- a/mediagenerator/filters/sass_compass.rb
+++ b/mediagenerator/filters/sass_compass.rb
@@ -1,65 +1,66 @@
+require "rubygems"
require "compass"
module Compass::SassExtensions::Functions::Urls
def stylesheet_url(path, only_path = Sass::Script::Bool.new(false))
if only_path.to_bool
Sass::Script::String.new(clean_path(path))
else
clean_url(path)
end
end
def font_url(path, only_path = Sass::Script::Bool.new(false))
path = path.value # get to the string value of the literal.
# Short curcuit if they have provided an absolute url.
if absolute_path?(path)
return Sass::Script::String.new("url(#{path})")
end
if only_path.to_bool
Sass::Script::String.new(clean_path(path))
else
clean_url(path)
end
end
def image_url(path, only_path = Sass::Script::Bool.new(false))
print "#{@options}\n"
path = path.value # get to the string value of the literal.
if absolute_path?(path)
# Short curcuit if they have provided an absolute url.
return Sass::Script::String.new("url(#{path})")
end
if only_path.to_bool
Sass::Script::String.new(clean_path(path))
else
clean_url(path)
end
end
private
# Emits a path, taking off any leading "./"
def clean_path(url)
url = url.to_s
url = url[0..1] == "./" ? url[2..-1] : url
end
# Emits a url, taking off any leading "./"
def clean_url(url)
Sass::Script::String.new("url('#{clean_path(url)}')")
end
def absolute_path?(path)
path[0..0] == "/" || path[0..3] == "http"
end
end
module Sass::Script::Functions
include Compass::SassExtensions::Functions::Urls
end
diff --git a/mediagenerator/filters/sass_paths.rb b/mediagenerator/filters/sass_paths.rb
index 9749ffe..6278798 100644
--- a/mediagenerator/filters/sass_paths.rb
+++ b/mediagenerator/filters/sass_paths.rb
@@ -1,11 +1,12 @@
+require "rubygems"
require "sass"
require "compass"
ARGV.each do |arg|
require arg
end
Compass::Frameworks::ALL.each do |framework|
next if framework.name =~ /^_/
print "#{File.expand_path(framework.stylesheets_directory)}\n"
-end
+end
|
adieu/django-mediagenerator | 0b41cedb730a0399383faa0bad4ceb2aace1f2ae | added support for overriding the _generated_media_names.py module's location | diff --git a/mediagenerator/api.py b/mediagenerator/api.py
index bafea1f..103b8cc 100644
--- a/mediagenerator/api.py
+++ b/mediagenerator/api.py
@@ -1,42 +1,43 @@
from . import settings, utils
-from .settings import GENERATED_MEDIA_DIR, GENERATED_MEDIA_MAP_FILE, MEDIA_GENERATORS
+from .settings import (GENERATED_MEDIA_DIR, GENERATED_MEDIA_NAMES_FILE,
+ MEDIA_GENERATORS)
from .utils import load_backend
from django.utils.http import urlquote
import os
import shutil
def generate_media():
if os.path.exists(GENERATED_MEDIA_DIR):
shutil.rmtree(GENERATED_MEDIA_DIR)
# This will make media_url() generate production URLs
was_dev_mode = settings.MEDIA_DEV_MODE
settings.MEDIA_DEV_MODE = False
utils.NAMES = {}
for backend_name in MEDIA_GENERATORS:
backend = load_backend(backend_name)()
for key, url, content in backend.get_output():
version = backend.generate_version(key, url, content)
if version:
base, ext = os.path.splitext(url)
url = '%s-%s%s' % (base, version, ext)
path = os.path.join(GENERATED_MEDIA_DIR, url)
parent = os.path.dirname(path)
if not os.path.exists(parent):
os.makedirs(parent)
fp = open(path, 'wb')
fp.write(content)
fp.close()
utils.NAMES[key] = urlquote(url)
settings.MEDIA_DEV_MODE = was_dev_mode
# Generate a module with media file name mappings
- fp = open(GENERATED_MEDIA_MAP_FILE, 'w')
+ fp = open(GENERATED_MEDIA_NAMES_FILE, 'w')
fp.write('NAMES = %r' % utils.NAMES)
fp.close()
diff --git a/mediagenerator/settings.py b/mediagenerator/settings.py
index b1e6a26..04043ae 100644
--- a/mediagenerator/settings.py
+++ b/mediagenerator/settings.py
@@ -1,32 +1,35 @@
from django.conf import settings
import os
import __main__
_map_file_path = '_generated_media_names.py'
_media_dir = '_generated_media'
# __main__ is not guaranteed to have the __file__ attribute
if hasattr(__main__, '__file__'):
_root = os.path.dirname(__main__.__file__)
_map_file_path = os.path.join(_root, _map_file_path)
_media_dir = os.path.join(_root, _media_dir)
-GENERATED_MEDIA_DIR = os.path.abspath(getattr(settings, 'GENERATED_MEDIA_DIR',
- _media_dir))
-GENERATED_MEDIA_MAP_FILE = os.path.abspath(_map_file_path)
+GENERATED_MEDIA_DIR = os.path.abspath(
+ getattr(settings, 'GENERATED_MEDIA_DIR', _media_dir))
+GENERATED_MEDIA_NAMES_MODULE = getattr(settings, 'GENERATED_MEDIA_NAMES_MODULE',
+ '_generated_media_names')
+GENERATED_MEDIA_NAMES_FILE = os.path.abspath(
+ getattr(settings, 'GENERATED_MEDIA_NAMES_FILE', _map_file_path))
DEV_MEDIA_URL = getattr(settings, 'DEV_MEDIA_URL',
getattr(settings, 'STATIC_URL', settings.MEDIA_URL))
PRODUCTION_MEDIA_URL = getattr(settings, 'PRODUCTION_MEDIA_URL', DEV_MEDIA_URL)
MEDIA_GENERATORS = getattr(settings, 'MEDIA_GENERATORS', (
'mediagenerator.generators.copyfiles.CopyFiles',
'mediagenerator.generators.bundles.Bundles',
'mediagenerator.generators.manifest.Manifest',
))
GLOBAL_MEDIA_DIRS = getattr(settings, 'GLOBAL_MEDIA_DIRS',
getattr(settings, 'STATICFILES_DIRS', ()))
IGNORE_APP_MEDIA_DIRS = getattr(settings, 'IGNORE_APP_MEDIA_DIRS',
('django.contrib.admin',))
MEDIA_DEV_MODE = getattr(settings, 'MEDIA_DEV_MODE', settings.DEBUG)
diff --git a/mediagenerator/utils.py b/mediagenerator/utils.py
index 07cf9c3..4648f73 100644
--- a/mediagenerator/utils.py
+++ b/mediagenerator/utils.py
@@ -1,138 +1,139 @@
from . import settings as media_settings
-from .settings import GLOBAL_MEDIA_DIRS, PRODUCTION_MEDIA_URL, \
- IGNORE_APP_MEDIA_DIRS, MEDIA_GENERATORS, DEV_MEDIA_URL
+from .settings import (GLOBAL_MEDIA_DIRS, PRODUCTION_MEDIA_URL,
+ IGNORE_APP_MEDIA_DIRS, MEDIA_GENERATORS, DEV_MEDIA_URL,
+ GENERATED_MEDIA_NAMES_MODULE)
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.utils.http import urlquote
import os
import re
try:
- from _generated_media_names import NAMES
-except ImportError:
+ NAMES = import_module(GENERATED_MEDIA_NAMES_MODULE).NAMES
+except (ImportError, AttributeError):
NAMES = None
_backends_cache = {}
_media_dirs_cache = []
_generators_cache = []
_generated_names = {}
_backend_mapping = {}
def _load_generators():
if not _generators_cache:
for name in MEDIA_GENERATORS:
backend = load_backend(name)()
_generators_cache.append(backend)
return _generators_cache
def _refresh_dev_names():
_generated_names.clear()
_backend_mapping.clear()
for backend in _load_generators():
for key, url, hash in backend.get_dev_output_names():
versioned_url = urlquote(url)
if hash:
versioned_url += '?version=' + hash
_generated_names.setdefault(key, [])
_generated_names[key].append(versioned_url)
_backend_mapping[url] = backend
class _MatchNothing(object):
def match(self, content):
return False
def prepare_patterns(patterns, setting_name):
"""Helper function for patter-matching settings."""
if isinstance(patterns, basestring):
patterns = (patterns,)
if not patterns:
return _MatchNothing()
# First validate each pattern individually
for pattern in patterns:
try:
re.compile(pattern, re.U)
except re.error:
raise ValueError("""Pattern "%s" can't be compiled """
"in %s" % (pattern, setting_name))
# Now return a combined pattern
return re.compile('^(' + ')$|^('.join(patterns) + ')$', re.U)
def get_production_mapping():
if NAMES is None:
raise ImportError('Could not import _generated_media_names. This '
'file is needed for production mode. Please '
'run manage.py generatemedia to create it.')
return NAMES
def get_media_mapping():
if media_settings.MEDIA_DEV_MODE:
return _generated_names
return get_production_mapping()
def get_media_url_mapping():
if media_settings.MEDIA_DEV_MODE:
base_url = DEV_MEDIA_URL
else:
base_url = PRODUCTION_MEDIA_URL
mapping = {}
for key, value in get_media_mapping().items():
if isinstance(value, basestring):
value = (value,)
mapping[key] = [base_url + url for url in value]
return mapping
def media_urls(key, refresh=False):
if media_settings.MEDIA_DEV_MODE:
if refresh:
_refresh_dev_names()
return [DEV_MEDIA_URL + url for url in _generated_names[key]]
return [PRODUCTION_MEDIA_URL + get_production_mapping()[key]]
def media_url(key, refresh=False):
urls = media_urls(key, refresh=refresh)
if len(urls) == 1:
return urls[0]
raise ValueError('media_url() only works with URLs that contain exactly '
'one file. Use media_urls() (or {% include_media %} in templates) instead.')
def get_media_dirs():
if not _media_dirs_cache:
media_dirs = [os.path.normcase(os.path.normpath(root))
for root in GLOBAL_MEDIA_DIRS]
for app in settings.INSTALLED_APPS:
if app in IGNORE_APP_MEDIA_DIRS:
continue
for name in ('static', 'media'):
app_root = os.path.dirname(import_module(app).__file__)
media_dirs.append(os.path.join(app_root, name))
_media_dirs_cache.extend(media_dirs)
return _media_dirs_cache
def find_file(name, media_dirs=None):
if media_dirs is None:
media_dirs = get_media_dirs()
for root in media_dirs:
path = os.path.normpath(os.path.join(root, name))
if os.path.isfile(path):
return path
def load_backend(backend):
if backend not in _backends_cache:
module_name, func_name = backend.rsplit('.', 1)
_backends_cache[backend] = _load_backend(backend)
return _backends_cache[backend]
def _load_backend(path):
module_name, attr_name = path.rsplit('.', 1)
try:
mod = import_module(module_name)
except (ImportError, ValueError), e:
raise ImproperlyConfigured('Error importing backend module %s: "%s"' % (module_name, e))
try:
return getattr(mod, attr_name)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" backend' % (module_name, attr_name))
|
adieu/django-mediagenerator | 9c1d39e60a65026ed858271195d81f2b889a5946 | added missing files to package | diff --git a/setup.py b/setup.py
index c97bf50..b370b32 100644
--- a/setup.py
+++ b/setup.py
@@ -1,32 +1,33 @@
from setuptools import setup, find_packages
DESCRIPTION = 'Asset manager for Django'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except:
pass
setup(name='django-mediagenerator',
version='1.9.2',
packages=find_packages(exclude=('tests', 'tests.*',
'base_project', 'base_project.*')),
- package_data={'mediagenerator.filters': ['pyjslibs/*.py']},
+ package_data={'mediagenerator.filters': ['pyjslibs/*.py', '*.rb'],
+ 'mediagenerator': ['templates/mediagenerator/manifest/*']},
author='Waldemar Kornewald',
author_email='[email protected]',
url='http://www.allbuttonspressed.com/projects/django-mediagenerator',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
],
)
|
adieu/django-mediagenerator | 2eaf92ea90887bad8aeb9485308572aaf7ddec78 | pep8 cleanup | diff --git a/mediagenerator/contrib/jinja2ext.py b/mediagenerator/contrib/jinja2ext.py
index c1b9ef4..d080b45 100644
--- a/mediagenerator/contrib/jinja2ext.py
+++ b/mediagenerator/contrib/jinja2ext.py
@@ -1,37 +1,35 @@
-from jinja2 import nodes, TemplateAssertionError
+from jinja2 import nodes, TemplateAssertionError, Markup as mark_safe
from jinja2.ext import Extension
-from jinja2 import Markup as mark_safe
-from jinja2.utils import next
from mediagenerator.generators.bundles.utils import _render_include_media
class MediaExtension(Extension):
tags = set(['include_media'])
def __init__(self, environment):
self.environment = environment
def parse(self, parser):
token = parser.stream.next()
args = [parser.parse_expression()]
kwargs = []
while parser.stream.current.type != 'block_end':
if kwargs:
parser.stream.expect('comma')
if parser.stream.skip_if('colon'):
break
name = parser.stream.expect('name')
if name.value in kwargs:
parser.fail('variable %r defined twice.' %
name.value, name.lineno,
exc=TemplateAssertionError)
parser.stream.expect('assign')
key = name.value
value = parser.parse_expression()
kwargs.append(nodes.Keyword(key, value,
lineno=value.lineno))
return nodes.Output([self.call_method('_render', args, kwargs)]).set_lineno(token.lineno)
def _render(self, bundle, **variation):
return mark_safe(_render_include_media(bundle, variation))
diff --git a/mediagenerator/filters/cssurl.py b/mediagenerator/filters/cssurl.py
index 4dac238..fed4f17 100644
--- a/mediagenerator/filters/cssurl.py
+++ b/mediagenerator/filters/cssurl.py
@@ -1,84 +1,84 @@
from base64 import b64encode
from django.conf import settings
from mediagenerator.generators.bundles.base import Filter, FileFilter
from mediagenerator.utils import media_url, prepare_patterns, find_file
from mimetypes import guess_type
import logging
import os
import posixpath
import re
url_re = re.compile(r'url\s*\(["\']?([\w\.][^:]*?)["\']?\)', re.UNICODE)
# Whether to rewrite CSS URLs, at all
REWRITE_CSS_URLS = getattr(settings, 'REWRITE_CSS_URLS', True)
# Whether to rewrite CSS URLs relative to the respective source file
# or whether to use "absolute" URL rewriting (i.e., relative URLs are
# considered absolute with regards to STATICFILES_URL)
REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = getattr(settings,
'REWRITE_CSS_URLS_RELATIVE_TO_SOURCE', True)
GENERATE_DATA_URIS = getattr(settings, 'GENERATE_DATA_URIS', False)
-MAX_DATA_URI_FILE_SIZE = getattr(settings, 'MAX_DATA_URI_FILE_SIZE', 12*1024)
+MAX_DATA_URI_FILE_SIZE = getattr(settings, 'MAX_DATA_URI_FILE_SIZE', 12 * 1024)
IGNORE_PATTERN = prepare_patterns(getattr(settings,
'IGNORE_DATA_URI_PATTERNS', (r'.*\.htc',)), 'IGNORE_DATA_URI_PATTERNS')
class URLRewriter(object):
def __init__(self, base_path='./'):
if not base_path:
base_path = './'
self.base_path = base_path
def rewrite_urls(self, content):
if not REWRITE_CSS_URLS:
return content
return url_re.sub(self.fixurls, content)
def fixurls(self, match):
url = match.group(1)
hashid = ''
if '#' in url:
url, hashid = url.split('#', 1)
hashid = '#' + hashid
if ':' not in url and not url.startswith('/'):
rebased_url = posixpath.join(self.base_path, url)
rebased_url = posixpath.normpath(rebased_url)
try:
if GENERATE_DATA_URIS:
path = find_file(rebased_url)
if os.path.getsize(path) <= MAX_DATA_URI_FILE_SIZE and \
not IGNORE_PATTERN.match(rebased_url):
data = b64encode(open(path, 'rb').read())
mime = guess_type(path)[0] or 'application/octet-stream'
return 'url(data:%s;base64,%s)' % (mime, data)
url = media_url(rebased_url)
except:
logging.error('URL not found: %s' % url)
return 'url(%s%s)' % (url, hashid)
class CSSURL(Filter):
"""Rewrites URLs relative to media folder ("absolute" rewriting)."""
def __init__(self, **kwargs):
super(CSSURL, self).__init__(**kwargs)
assert self.filetype == 'css', (
'CSSURL only supports CSS output. '
'The parent filter expects "%s".' % self.filetype)
def get_output(self, variation):
rewriter = URLRewriter()
for input in self.get_input(variation):
yield rewriter.rewrite_urls(input)
def get_dev_output(self, name, variation):
rewriter = URLRewriter()
content = super(CSSURL, self).get_dev_output(name, variation)
return rewriter.rewrite_urls(content)
class CSSURLFileFilter(FileFilter):
"""Rewrites URLs relative to input file's location."""
def get_dev_output(self, name, variation):
content = super(CSSURLFileFilter, self).get_dev_output(name, variation)
if not REWRITE_CSS_URLS_RELATIVE_TO_SOURCE:
return content
rewriter = URLRewriter(posixpath.dirname(name))
return rewriter.rewrite_urls(content)
diff --git a/mediagenerator/filters/pyjs_filter.py b/mediagenerator/filters/pyjs_filter.py
index be71755..e6fa4bc 100644
--- a/mediagenerator/filters/pyjs_filter.py
+++ b/mediagenerator/filters/pyjs_filter.py
@@ -1,284 +1,284 @@
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
-from mediagenerator.utils import find_file, get_media_dirs
+from mediagenerator.utils import get_media_dirs
from pyjs.translator import import_compiler, Translator, LIBRARY_PATH
from textwrap import dedent
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Register PYVA() function
try:
from pyvascript.grammar import compile
from pyjs.translator import native_js_func
@native_js_func
def PYVA(content, unescape, is_statement, **kwargs):
result = compile(dedent(unescape(content)))
if not is_statement:
return result.strip().rstrip('\r\n\t ;')
return result
except ImportError:
# No PyvaScript installed
pass
_HANDLE_EXCEPTIONS = """
} finally { $pyjs.in_try_except -= 1; }
} catch(err) {
pyjslib['_handle_exception'](err);
}
"""
PYJS_INIT_LIB_PATH = os.path.join(LIBRARY_PATH, 'builtin', 'public', '_pyjs.js')
BUILTIN_PATH = os.path.join(LIBRARY_PATH, 'builtin')
STDLIB_PATH = os.path.join(LIBRARY_PATH, 'lib')
EXTRA_LIBS_PATH = os.path.join(os.path.dirname(__file__), 'pyjslibs')
_LOAD_PYJSLIB = """
$p = $pyjs.loaded_modules["pyjslib"];
$p('pyjslib');
$pyjs.__modules__.pyjslib = $p['pyjslib']
"""
INIT_CODE = """
var $wnd = window;
var $doc = window.document;
var $pyjs = new Object();
var $p = null;
$pyjs.platform = 'safari';
$pyjs.global_namespace = this;
$pyjs.__modules__ = {};
$pyjs.modules_hash = {};
$pyjs.loaded_modules = {};
$pyjs.options = new Object();
$pyjs.options.arg_ignore = true;
$pyjs.options.arg_count = true;
$pyjs.options.arg_is_instance = true;
$pyjs.options.arg_instance_type = false;
$pyjs.options.arg_kwarg_dup = true;
$pyjs.options.arg_kwarg_unexpected_keyword = true;
$pyjs.options.arg_kwarg_multiple_values = true;
$pyjs.options.dynamic_loading = false;
$pyjs.trackstack = [];
$pyjs.track = {module:'__main__', lineno: 1};
$pyjs.trackstack.push($pyjs.track);
$pyjs.__active_exception_stack__ = null;
$pyjs.__last_exception_stack__ = null;
$pyjs.__last_exception__ = null;
$pyjs.in_try_except = 0;
""".lstrip()
class Pyjs(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, exclude_main_libs=False, main_module=None,
debug=None, path=(), only_dependencies=None)
if isinstance(self.path, basestring):
self.path = (self.path,)
self.path += tuple(get_media_dirs())
if self.only_dependencies is None:
self.only_dependencies = bool(self.main_module)
if self.only_dependencies:
self.path += (STDLIB_PATH, BUILTIN_PATH, EXTRA_LIBS_PATH)
super(Pyjs, self).__init__(**kwargs)
assert self.filetype == 'js', (
'Pyjs only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
if self.only_dependencies:
assert self.main_module, \
'You must provide a main module in only_dependencies mode'
self._compiled = {}
self._collected = {}
@classmethod
def from_default(cls, name):
return {'main_module': name.rsplit('.', 1)[0]}
def get_output(self, variation):
self._collect_all_modules()
if not self.exclude_main_libs:
yield self._compile_init()
if self.only_dependencies:
self._regenerate(dev_mode=False)
for name in sorted(self._compiled.keys()):
yield self._compiled[name][1]
else:
for name in sorted(self._collected.keys()):
fp = open(self._collected[name], 'r')
output = self._compile(name, fp.read(), dev_mode=False)[0]
fp.close()
yield output
yield self._compile_main(dev_mode=False)
def get_dev_output(self, name, variation):
self._collect_all_modules()
-
+
name = name.split('/', 1)[-1]
if name == '._pyjs.js':
return self._compile_init()
elif name == '.main.js':
return self._compile_main(dev_mode=True)
if self.only_dependencies:
self._regenerate(dev_mode=True)
return self._compiled[name][1]
else:
fp = open(self._collected[name], 'r')
output = self._compile(name, fp.read(), dev_mode=True)[0]
fp.close()
return output
def get_dev_output_names(self, variation):
self._collect_all_modules()
if not self.exclude_main_libs:
content = self._compile_init()
hash = sha1(content).hexdigest()
yield '._pyjs.js', hash
if self.only_dependencies:
self._regenerate(dev_mode=True)
for name in sorted(self._compiled.keys()):
yield name, self._compiled[name][2]
else:
for name in sorted(self._collected.keys()):
yield name, None
if self.main_module is not None or not self.exclude_main_libs:
content = self._compile_main(dev_mode=True)
hash = sha1(content).hexdigest()
yield '.main.js', hash
def _regenerate(self, dev_mode=False):
# This function is only called in only_dependencies mode
if self._compiled:
for module_name, (mtime, content, hash) in self._compiled.items():
if module_name not in self._collected or \
not os.path.exists(self._collected[module_name]) or \
os.path.getmtime(self._collected[module_name]) != mtime:
# Just recompile everything
# TODO: track dependencies and changes and recompile only
# what's necessary
self._compiled = {}
break
else:
# No changes
return
modules = [self.main_module, 'pyjslib']
while True:
if not modules:
break
module_name = modules.pop()
path = self._collected[module_name]
mtime = os.path.getmtime(path)
fp = open(path, 'r')
source = fp.read()
fp.close()
try:
content, py_deps, js_deps = self._compile(module_name, source, dev_mode=dev_mode)
except:
self._compiled = {}
raise
hash = sha1(content).hexdigest()
self._compiled[module_name] = (mtime, content, hash)
for name in py_deps:
if name not in self._collected:
if '.' in name and name.rsplit('.', 1)[0] in self._collected:
name = name.rsplit('.', 1)[0]
else:
raise ImportError('The pyjs module %s could not find '
'the dependency %s' % (module_name, name))
if name not in self._compiled:
modules.append(name)
def _compile(self, name, source, dev_mode=False):
if self.debug is None:
debug = dev_mode
else:
debug = self.debug
compiler = import_compiler(False)
tree = compiler.parse(source)
output = StringIO()
translator = Translator(compiler, name, name, source, tree, output,
# Debug options
debug=debug, source_tracking=debug, line_tracking=debug,
store_source=debug,
# Speed and size optimizations
function_argument_checking=debug, attribute_checking=False,
inline_code=False, number_classes=False,
# Sufficient Python conformance
operator_funcs=True, bound_methods=True, descriptors=True,
)
return output.getvalue(), translator.imported_modules, translator.imported_js
def _compile_init(self):
fp = open(PYJS_INIT_LIB_PATH, 'r')
content = fp.read()
fp.close()
return INIT_CODE + content
def _compile_main(self, dev_mode=False):
if self.debug is None:
debug = dev_mode
else:
debug = self.debug
content = ''
if not self.exclude_main_libs:
content += _LOAD_PYJSLIB
if self.main_module is not None:
content += '\n\n'
if debug:
content += 'try {\n'
content += ' try {\n'
content += ' $pyjs.in_try_except += 1;\n '
content += 'pyjslib.___import___("%s", null, "__main__");' % self.main_module
if debug:
content += _HANDLE_EXCEPTIONS
return content
def _collect_all_modules(self):
"""Collect modules, so we can handle imports later"""
for pkgroot in self.path:
pkgroot = os.path.abspath(pkgroot)
for root, dirs, files in os.walk(pkgroot):
if '__init__.py' in files:
files.remove('__init__.py')
# The root __init__.py is ignored
if root != pkgroot:
files.insert(0, '__init__.py')
elif root != pkgroot:
# Only add valid Python packages
dirs[:] = []
continue
for filename in files:
if not filename.endswith('.py'):
continue
path = os.path.join(root, filename)
- module_path = path[len(pkgroot)+len(os.sep):]
+ module_path = path[len(pkgroot) + len(os.sep):]
if os.path.basename(module_path) == '__init__.py':
module_name = os.path.dirname(module_path)
else:
module_name = module_path[:-3]
assert '.' not in module_name, \
'Invalid module file name: %s' % module_path
module_name = module_name.replace(os.sep, '.')
self._collected.setdefault(module_name, path)
diff --git a/mediagenerator/generators/bundles/itercompat.py b/mediagenerator/generators/bundles/itercompat.py
index 2315821..4080453 100644
--- a/mediagenerator/generators/bundles/itercompat.py
+++ b/mediagenerator/generators/bundles/itercompat.py
@@ -1,12 +1,12 @@
def product(*args, **kwds):
"""
Taken from http://docs.python.org/library/itertools.html#itertools.product
"""
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
- result = [x+[y] for x in result for y in pool]
+ result = [x + [y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
diff --git a/mediagenerator/generators/copyfiles.py b/mediagenerator/generators/copyfiles.py
index 33eac69..f275767 100644
--- a/mediagenerator/generators/copyfiles.py
+++ b/mediagenerator/generators/copyfiles.py
@@ -1,43 +1,43 @@
from django.conf import settings
from hashlib import sha1
from mediagenerator.base import Generator
from mediagenerator.utils import get_media_dirs, find_file, prepare_patterns
from mimetypes import guess_type
import os
COPY_MEDIA_FILETYPES = getattr(settings, 'COPY_MEDIA_FILETYPES',
('gif', 'jpg', 'jpeg', 'png', 'svg', 'svgz', 'ico', 'swf', 'ttf', 'otf',
'eot', 'woff'))
IGNORE_PATTERN = prepare_patterns(getattr(settings,
'IGNORE_MEDIA_COPY_PATTERNS', ()), 'IGNORE_MEDIA_COPY_PATTERNS')
class CopyFiles(Generator):
def get_dev_output(self, name):
path = find_file(name)
fp = open(path, 'rb')
content = fp.read()
fp.close()
mimetype = guess_type(path)[0]
return content, mimetype
def get_dev_output_names(self):
media_files = {}
for root in get_media_dirs():
self.collect_copyable_files(media_files, root)
for name, source in media_files.items():
fp = open(source, 'rb')
hash = sha1(fp.read()).hexdigest()
fp.close()
yield name, name, hash
def collect_copyable_files(self, media_files, root):
for root_path, dirs, files in os.walk(root):
for file in files:
ext = os.path.splitext(file)[1].lstrip('.')
path = os.path.join(root_path, file)
- media_path = path[len(root)+1:].replace(os.sep, '/')
+ media_path = path[len(root) + 1:].replace(os.sep, '/')
if ext in COPY_MEDIA_FILETYPES and \
not IGNORE_PATTERN.match(media_path):
media_files[media_path] = path
diff --git a/mediagenerator/middleware.py b/mediagenerator/middleware.py
index 22a3b21..bcb3217 100644
--- a/mediagenerator/middleware.py
+++ b/mediagenerator/middleware.py
@@ -1,49 +1,49 @@
from .settings import DEV_MEDIA_URL, MEDIA_DEV_MODE
# Only load other dependencies if they're needed
if MEDIA_DEV_MODE:
from .utils import _refresh_dev_names, _backend_mapping
from django.http import HttpResponse, Http404
from django.utils.cache import patch_cache_control
from django.utils.http import http_date
import time
class MediaMiddleware(object):
"""
Middleware for serving and browser-side caching of media files.
This MUST be your *first* entry in MIDDLEWARE_CLASSES. Otherwise, some
other middleware might add ETags or otherwise manipulate the caching
headers which would result in the browser doing unnecessary HTTP
roundtrips for unchanged media.
"""
- MAX_AGE = 60*60*24*365
+ MAX_AGE = 60 * 60 * 24 * 365
def process_request(self, request):
if not MEDIA_DEV_MODE:
return
# We refresh the dev names only once for the whole request, so all
# media_url() calls are cached.
_refresh_dev_names()
if not request.path.startswith(DEV_MEDIA_URL):
return
filename = request.path[len(DEV_MEDIA_URL):]
try:
backend = _backend_mapping[filename]
except KeyError:
raise Http404('No such media file "%s"' % filename)
content, mimetype = backend.get_dev_output(filename)
response = HttpResponse(content, content_type=mimetype)
response['Content-Length'] = len(content)
# Cache manifest files MUST NEVER be cached or you'll be unable to update
# your cached app!!!
if response['Content-Type'] != 'text/cache-manifest' and \
response.status_code == 200:
patch_cache_control(response, public=True, max_age=self.MAX_AGE)
response['Expires'] = http_date(time.time() + self.MAX_AGE)
return response
diff --git a/mediagenerator/settings.py b/mediagenerator/settings.py
index 2992920..b1e6a26 100644
--- a/mediagenerator/settings.py
+++ b/mediagenerator/settings.py
@@ -1,31 +1,32 @@
from django.conf import settings
import os
import __main__
-default_map_file_path = '_generated_media_names.py'
-default_media_dir = '_generated_media'
-if hasattr(__main__,"__file__"):# __main__ is not guaranteed to have the __file__ attribute
- default_map_file_path = os.path.join(os.path.dirname(__main__.__file__), default_map_file_path)
- default_media_dir = os.path.join(os.path.dirname(__main__.__file__), default_media_dir)
+_map_file_path = '_generated_media_names.py'
+_media_dir = '_generated_media'
+# __main__ is not guaranteed to have the __file__ attribute
+if hasattr(__main__, '__file__'):
+ _root = os.path.dirname(__main__.__file__)
+ _map_file_path = os.path.join(_root, _map_file_path)
+ _media_dir = os.path.join(_root, _media_dir)
+GENERATED_MEDIA_DIR = os.path.abspath(getattr(settings, 'GENERATED_MEDIA_DIR',
+ _media_dir))
+GENERATED_MEDIA_MAP_FILE = os.path.abspath(_map_file_path)
DEV_MEDIA_URL = getattr(settings, 'DEV_MEDIA_URL',
getattr(settings, 'STATIC_URL', settings.MEDIA_URL))
PRODUCTION_MEDIA_URL = getattr(settings, 'PRODUCTION_MEDIA_URL', DEV_MEDIA_URL)
MEDIA_GENERATORS = getattr(settings, 'MEDIA_GENERATORS', (
'mediagenerator.generators.copyfiles.CopyFiles',
'mediagenerator.generators.bundles.Bundles',
'mediagenerator.generators.manifest.Manifest',
))
-GENERATED_MEDIA_DIR = os.path.abspath(default_media_dir)
-
-GENERATED_MEDIA_MAP_FILE = os.path.abspath(default_map_file_path)
-
GLOBAL_MEDIA_DIRS = getattr(settings, 'GLOBAL_MEDIA_DIRS',
getattr(settings, 'STATICFILES_DIRS', ()))
IGNORE_APP_MEDIA_DIRS = getattr(settings, 'IGNORE_APP_MEDIA_DIRS',
('django.contrib.admin',))
-MEDIA_DEV_MODE = getattr(settings, 'MEDIA_DEV_MODE', settings.DEBUG)
\ No newline at end of file
+MEDIA_DEV_MODE = getattr(settings, 'MEDIA_DEV_MODE', settings.DEBUG)
diff --git a/mediagenerator/utils.py b/mediagenerator/utils.py
index 32d10be..07cf9c3 100644
--- a/mediagenerator/utils.py
+++ b/mediagenerator/utils.py
@@ -1,137 +1,138 @@
from . import settings as media_settings
from .settings import GLOBAL_MEDIA_DIRS, PRODUCTION_MEDIA_URL, \
IGNORE_APP_MEDIA_DIRS, MEDIA_GENERATORS, DEV_MEDIA_URL
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.utils.http import urlquote
import os
import re
try:
from _generated_media_names import NAMES
except ImportError:
NAMES = None
_backends_cache = {}
_media_dirs_cache = []
_generators_cache = []
_generated_names = {}
_backend_mapping = {}
def _load_generators():
if not _generators_cache:
for name in MEDIA_GENERATORS:
backend = load_backend(name)()
_generators_cache.append(backend)
return _generators_cache
def _refresh_dev_names():
_generated_names.clear()
_backend_mapping.clear()
for backend in _load_generators():
for key, url, hash in backend.get_dev_output_names():
versioned_url = urlquote(url)
if hash:
versioned_url += '?version=' + hash
_generated_names.setdefault(key, [])
_generated_names[key].append(versioned_url)
_backend_mapping[url] = backend
class _MatchNothing(object):
def match(self, content):
return False
def prepare_patterns(patterns, setting_name):
"""Helper function for patter-matching settings."""
if isinstance(patterns, basestring):
patterns = (patterns,)
if not patterns:
return _MatchNothing()
# First validate each pattern individually
for pattern in patterns:
try:
re.compile(pattern, re.U)
except re.error:
raise ValueError("""Pattern "%s" can't be compiled """
"in %s" % (pattern, setting_name))
# Now return a combined pattern
return re.compile('^(' + ')$|^('.join(patterns) + ')$', re.U)
def get_production_mapping():
if NAMES is None:
raise ImportError('Could not import _generated_media_names. This '
'file is needed for production mode. Please '
'run manage.py generatemedia to create it.')
return NAMES
def get_media_mapping():
if media_settings.MEDIA_DEV_MODE:
return _generated_names
return get_production_mapping()
def get_media_url_mapping():
if media_settings.MEDIA_DEV_MODE:
base_url = DEV_MEDIA_URL
else:
base_url = PRODUCTION_MEDIA_URL
mapping = {}
for key, value in get_media_mapping().items():
if isinstance(value, basestring):
value = (value,)
mapping[key] = [base_url + url for url in value]
return mapping
def media_urls(key, refresh=False):
if media_settings.MEDIA_DEV_MODE:
if refresh:
_refresh_dev_names()
return [DEV_MEDIA_URL + url for url in _generated_names[key]]
return [PRODUCTION_MEDIA_URL + get_production_mapping()[key]]
def media_url(key, refresh=False):
urls = media_urls(key, refresh=refresh)
if len(urls) == 1:
return urls[0]
raise ValueError('media_url() only works with URLs that contain exactly '
'one file. Use media_urls() (or {% include_media %} in templates) instead.')
def get_media_dirs():
if not _media_dirs_cache:
- media_dirs = [os.path.abspath(root) for root in GLOBAL_MEDIA_DIRS]
+ media_dirs = [os.path.normcase(os.path.normpath(root))
+ for root in GLOBAL_MEDIA_DIRS]
for app in settings.INSTALLED_APPS:
if app in IGNORE_APP_MEDIA_DIRS:
continue
for name in ('static', 'media'):
app_root = os.path.dirname(import_module(app).__file__)
media_dirs.append(os.path.join(app_root, name))
_media_dirs_cache.extend(media_dirs)
return _media_dirs_cache
def find_file(name, media_dirs=None):
if media_dirs is None:
media_dirs = get_media_dirs()
for root in media_dirs:
path = os.path.normpath(os.path.join(root, name))
if os.path.isfile(path):
return path
def load_backend(backend):
if backend not in _backends_cache:
module_name, func_name = backend.rsplit('.', 1)
_backends_cache[backend] = _load_backend(backend)
return _backends_cache[backend]
def _load_backend(path):
module_name, attr_name = path.rsplit('.', 1)
try:
mod = import_module(module_name)
except (ImportError, ValueError), e:
raise ImproperlyConfigured('Error importing backend module %s: "%s"' % (module_name, e))
try:
return getattr(mod, attr_name)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" backend' % (module_name, attr_name))
|
adieu/django-mediagenerator | 305d468139e84eb8c66ff638fb784a974659e406 | experimental support for Compass features in Sass | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index c00ebf6..0bed5c9 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,130 +1,131 @@
Changelog
=============================================================
-Version 1.9.3 (in development)
+Version 1.10 (in development)
-------------------------------------------------------------
+* Added Compass support to Sass filter. You now have to install both Compass and Sass. Import Sass/Compass frameworks via ``manage.py importsassframeworks``.
* Fixed CoffeeScript support on OSX
* Added "Content-Length" response header for files served in dev mode (needed for Flash). Thanks to "sayane" for the patch.
-* Fixed typo which resulted in broken support for .html assets. Thanks to "pendletongp" for the patch.
+* Fixed typo which resulted in broken support for ``.html`` assets. Thanks to "pendletongp" for the patch.
* Now showing instructive error message when Sass can't be found
* Use correct output path for ``_generated_media_names.py`` even when ``manage.py generatemedia`` is not started from the project root. Thanks to "pendletongp" for the patch.
Version 1.9.2
-------------------------------------------------------------
* Added missing ``base.manifest`` template and ``base_project`` to zip package
Version 1.9.1
-------------------------------------------------------------
* Fixed relative imports in Sass filter
Version 1.9
-------------------------------------------------------------
* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
* Added caching for CoffeeScript compilation results
* In cache manifests the ``NETWORK`` section now contains "``*``" by default
* By default ``.woff`` files are now copied, too
* Fixed first-time media generation when ``MEDIA_DEV_MODE=False``
* Fixed i18n filter in development mode. Contributed by Simon Payne.
* Fixed support for "/" in bundle names in dev mode (always worked fine in production)
* Changed ``DEV_MEDIA_URL`` fallback from ``STATICFILES_URL`` to ``STATIC_URL`` (has been changed in Django trunk)
Version 1.8
-------------------------------------------------------------
* HTML5 manifest now uses a regex to match included/excluded files
* Added support for scss files
* Fixed Sass ``@import`` tracking for partials
Version 1.7
-------------------------------------------------------------
* Large performance improvements, in particular on App Engine dev_appserver
Version 1.6.1
-------------------------------------------------------------
* Fixed support for Django 1.1 which imports ``mediagenerator.templatetags.media`` as ``django.templatetags.media`` and thus breaks relative imports
Version 1.6
-------------------------------------------------------------
**Upgrade notes:** The installation got simplified. Please remove the media code from your urls.py. The ``MediaMiddleware`` now takes care of everything.
* Added support for CSS data URIs. Doesn't yet generate MHTML for IE6/7 support.
* Added support for pre-bundling i18n JavaScript translations, so you don't need to use Django's slower AJAX view. With this filter translations are part of your generated JS bundle.
* Added support for CleverCSS
* Simplified installation process. The media view got completely replaced by ``MediaMiddleware``.
* Fixed support for output variations (needed by i18n filter to generate the same JS file in different variations for each language)
Version 1.5.1
-------------------------------------------------------------
**Upgrade notes:** There's a conflict with ``STATICFILES_URL`` in Django trunk (1.3). Use ``DEV_MEDIA_URL`` instead from now on.
* ``DEV_MEDIA_URL`` should be used instead of ``MEDIA_URL`` and ``STATICFILES_URL``, though the other two are still valid for backwards-compatibility
Version 1.5
-------------------------------------------------------------
This is another staticfiles-compatibility release which is intended to allow for writing reusable open-source apps.
**Upgrade notes:** The CSS URL rewriting scheme has changed. Previously, ``url()`` statements in CSS files were treated similar to "absolute" URLs where the root is ``STATICFILES_URL`` (or ``MEDIA_URL``). This scheme was used because it was consistent with URLs in Sass. Now URLs are treated as relative to the CSS file. So, if the file ``css/style.css`` wants to link to ``img/icon.png`` the URL now has to be ``url(../img/icon.png)``. Previously it was ``url(img/icon.png)``. One way to upgrade to the staticfiles-compatible scheme is to modify your existing URLs.
If you don't want to change your CSS files there is an alternative, but it's not staticfiles-compatible. Add the following to your settings: ``REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = False``
**Important:** Sass files still use the old scheme (``url(img/icon.png)``) because this is **much** easier to understand and allows for more reusable code, especially when you ``@import`` other Sass modules and those link to images.
* Made CSS URL rewriting system compatible with ``django.contrib.staticfiles``
* Added support for CSS URLs that contain a hash (e.g.: ``url('webfont.svg#webfontmAfNlbV6')``). Thanks to Karl Bowden for the patch!
* Filter backends now have an additional ``self.bundle`` attribute which contains the final bundle name
* Fixed an incompatibility with Django 1.1 and 1.0 (``django.utils.itercompat.product`` isn't available in those releases)
* Fixed ``MediaMiddleware``, so it doesn't cache error responses
Version 1.4
-------------------------------------------------------------
This is a compatibility release which prepares for the new staticfiles feature in Django 1.3.
**Upgrade notes:** Place your app media in a "static" folder instead of a "media" folder. Use ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) instead of ``MEDIA_URL`` from now on.
* App media is now searched in "static" folders instead of "media". For now, you can still use "media" folders, but this might be deprecated in the future (for the sake of having just one standard for reusable apps).
* ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) should be used instead of ``MEDIA_URL`` because the meaning of that variable has changed in Django 1.3.
* ``DEV_MEDIA_URL`` falls back to ``STATICFILES_URL`` and ``GLOBAL_MEDIA_DIRS`` falls back to ``STATICFILES_DIRS`` if undefined (you should still use the former, respectively; this is just for convenience)
Version 1.3.1
-------------------------------------------------------------
* Improved handling of media variations. This also fixes a bug with using CSS media types in production mode
Version 1.3
-------------------------------------------------------------
* Added support for setting media type for CSS. E.g.: ``{% include_media 'bundle.css' media='print' %}``
Version 1.2.1
-------------------------------------------------------------
* Fixed caching problems on runserver when using i18n and ``LocaleMiddleware``
Version 1.2
-------------------------------------------------------------
**Upgrade notes:** Please add ``'mediagenerator.middleware.MediaMiddleware'`` as the **first** middleware in your settings.py.
* Got rid of unnecessary HTTP roundtrips when ``USE_ETAGS = True``
* Added Django template filter (by default only used for .html files), contributed by Matt Bierner
* Added media_url() filter which provides access to generated URLs from JS
* CopyFiles backend can now ignore files matching certain regex patterns
Version 1.1
-------------------------------------------------------------
* Added Closure compiler backend
* Added HTML5 cache manifest file backend
* Fixed Sass support on Linux
* Updated pyjs filter to latest pyjs repo version
* "swf" and "ico" files are now copied, too, by default
diff --git a/MANIFEST.in b/MANIFEST.in
index b98334f..67b09c9 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,8 +1,8 @@
include LICENSE
include CHANGELOG.rst
include README.rst
-recursive-include mediagenerator *.html *.manifest *.gif *.jpg *.jpeg *.png *.js *.css *.sass
+recursive-include mediagenerator *.html *.manifest *.gif *.jpg *.jpeg *.png *.js *.css *.sass *.rb
recursive-include mediagenerator/filters/pyjslibs *.py
recursive-include base_project *.py *.html *.manifest *.gif *.jpg *.jpeg *.png *.js *.css *.sass
prune base_project _generated_media_names.py
prune base_project/_generated_media
diff --git a/mediagenerator/filters/sass.py b/mediagenerator/filters/sass.py
index a82bb12..f2c4d25 100644
--- a/mediagenerator/filters/sass.py
+++ b/mediagenerator/filters/sass.py
@@ -1,141 +1,146 @@
from django.conf import settings
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import get_media_dirs, find_file
from subprocess import Popen, PIPE
import os
import posixpath
import re
import sys
# Emits extra debug info that can be used by the FireSass Firebug plugin
SASS_DEBUG_INFO = getattr(settings, 'SASS_DEBUG_INFO', False)
+SASS_FRAMEWORKS = getattr(settings, 'SASS_FRAMEWORKS', ())
_RE_FLAGS = re.MULTILINE | re.UNICODE
multi_line_comment_re = re.compile(r'/\*.*?\*/', _RE_FLAGS | re.DOTALL)
one_line_comment_re = re.compile(r'//.*', _RE_FLAGS)
import_re = re.compile(r'^@import\s+["\']?(.+?)["\']?\s*;?\s*$', _RE_FLAGS)
class Sass(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, path=(), main_module=None)
if isinstance(self.path, basestring):
self.path = (self.path,)
super(Sass, self).__init__(**kwargs)
assert self.filetype == 'css', (
'Sass only supports compilation to css. '
'The parent filter expects "%s".' % self.filetype)
assert self.main_module, \
'You must provide a main module'
self.path += tuple(get_media_dirs())
self.path_args = []
for path in self.path:
self.path_args.extend(('-I', path))
self._compiled = None
self._compiled_hash = None
self._dependencies = {}
@classmethod
def from_default(cls, name):
return {'main_module': name}
def get_output(self, variation):
self._regenerate(debug=False)
yield self._compiled
def get_dev_output(self, name, variation):
assert name == self.main_module
self._regenerate(debug=True)
return self._compiled
def get_dev_output_names(self, variation):
self._regenerate(debug=True)
yield self.main_module, self._compiled_hash
def _compile(self, debug=False):
- run = ['sass', '-C', '-t', 'expanded']
+ extensions = os.path.join(os.path.dirname(__file__), 'sass_compass.rb')
+ run = ['sass', '-C', '-t', 'expanded', '--require', extensions]
+ for framework in SASS_FRAMEWORKS:
+ run.extend(('--require', framework))
if debug:
run.append('--line-numbers')
if SASS_DEBUG_INFO:
run.append('--debug-info')
run.extend(self.path_args)
shell = sys.platform == 'win32'
try:
cmd = Popen(run, shell=shell, universal_newlines=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, error = cmd.communicate('@import %s' % self.main_module)
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
return output
except Exception, e:
raise ValueError("Failed to execute Sass. Please make sure that "
- "you have installed Sass (http://sass-lang.com).\n"
+ "you have installed Sass (http://sass-lang.com) and "
+ "Compass (http://compass-style.org).\n"
"Error was: %s" % e)
def _regenerate(self, debug=False):
if self._dependencies:
for name, mtime in self._dependencies.items():
path = self._find_file(name)
if not path or os.path.getmtime(path) != mtime:
# Just recompile everything
self._dependencies = {}
break
else:
# No changes
return
modules = [self.main_module]
while True:
if not modules:
break
module_name = modules.pop()
path = self._find_file(module_name)
assert path, 'Could not find the Sass module %s' % module_name
mtime = os.path.getmtime(path)
self._dependencies[module_name] = mtime
fp = open(path, 'r')
source = fp.read()
fp.close()
dependencies = self._get_dependencies(source)
for name in dependencies:
# Try relative import, first
transformed = posixpath.join(posixpath.dirname(module_name), name)
path = self._find_file(transformed)
if path:
name = transformed
else:
path = self._find_file(name)
assert path, ('The Sass module %s could not find the '
'dependency %s' % (module_name, name))
if name not in self._dependencies:
modules.append(name)
self._compiled = self._compile(debug=debug)
self._compiled_hash = sha1(self._compiled).hexdigest()
def _get_dependencies(self, source):
clean_source = multi_line_comment_re.sub('\n', source)
clean_source = one_line_comment_re.sub('', clean_source)
return [name for name in import_re.findall(clean_source)
if not name.endswith('.css')]
def _find_file(self, name):
parts = name.rsplit('/', 1)
parts[-1] = '_' + parts[-1]
partial = '/'.join(parts)
if not name.endswith(('.sass', '.scss')):
names = (name + '.sass', name + '.scss', partial + '.sass',
partial + '.scss')
else:
names = (name, partial)
for name in names:
path = find_file(name, media_dirs=self.path)
if path:
return path
diff --git a/mediagenerator/filters/sass_compass.rb b/mediagenerator/filters/sass_compass.rb
new file mode 100644
index 0000000..434e0ca
--- /dev/null
+++ b/mediagenerator/filters/sass_compass.rb
@@ -0,0 +1,65 @@
+require "compass"
+
+module Compass::SassExtensions::Functions::Urls
+
+ def stylesheet_url(path, only_path = Sass::Script::Bool.new(false))
+ if only_path.to_bool
+ Sass::Script::String.new(clean_path(path))
+ else
+ clean_url(path)
+ end
+ end
+
+ def font_url(path, only_path = Sass::Script::Bool.new(false))
+ path = path.value # get to the string value of the literal.
+
+ # Short curcuit if they have provided an absolute url.
+ if absolute_path?(path)
+ return Sass::Script::String.new("url(#{path})")
+ end
+
+ if only_path.to_bool
+ Sass::Script::String.new(clean_path(path))
+ else
+ clean_url(path)
+ end
+ end
+
+ def image_url(path, only_path = Sass::Script::Bool.new(false))
+ print "#{@options}\n"
+ path = path.value # get to the string value of the literal.
+
+ if absolute_path?(path)
+ # Short curcuit if they have provided an absolute url.
+ return Sass::Script::String.new("url(#{path})")
+ end
+
+ if only_path.to_bool
+ Sass::Script::String.new(clean_path(path))
+ else
+ clean_url(path)
+ end
+ end
+
+ private
+
+ # Emits a path, taking off any leading "./"
+ def clean_path(url)
+ url = url.to_s
+ url = url[0..1] == "./" ? url[2..-1] : url
+ end
+
+ # Emits a url, taking off any leading "./"
+ def clean_url(url)
+ Sass::Script::String.new("url('#{clean_path(url)}')")
+ end
+
+ def absolute_path?(path)
+ path[0..0] == "/" || path[0..3] == "http"
+ end
+
+end
+
+module Sass::Script::Functions
+ include Compass::SassExtensions::Functions::Urls
+end
diff --git a/mediagenerator/filters/sass_paths.rb b/mediagenerator/filters/sass_paths.rb
new file mode 100644
index 0000000..9749ffe
--- /dev/null
+++ b/mediagenerator/filters/sass_paths.rb
@@ -0,0 +1,11 @@
+require "sass"
+require "compass"
+
+ARGV.each do |arg|
+ require arg
+end
+
+Compass::Frameworks::ALL.each do |framework|
+ next if framework.name =~ /^_/
+ print "#{File.expand_path(framework.stylesheets_directory)}\n"
+end
diff --git a/mediagenerator/management/commands/importsassframeworks.py b/mediagenerator/management/commands/importsassframeworks.py
new file mode 100644
index 0000000..3087ff8
--- /dev/null
+++ b/mediagenerator/management/commands/importsassframeworks.py
@@ -0,0 +1,70 @@
+from ...filters import sass
+from ...utils import get_media_dirs
+from django.core.management.base import NoArgsCommand
+from subprocess import Popen, PIPE
+import os
+import shutil
+import sys
+import __main__
+
+_frameworks_dir = 'imported-sass-frameworks'
+if hasattr(__main__, '__file__'):
+ _root = os.path.dirname(__main__.__file__)
+ _frameworks_dir = os.path.join(_root, _frameworks_dir)
+FRAMEWORKS_DIR = os.path.normcase(os.path.abspath(_frameworks_dir))
+
+PATHS_SCRIPT = os.path.join(os.path.dirname(sass.__file__), 'sass_paths.rb')
+
+def copy_children(src, dst):
+ for item in os.listdir(src):
+ path = os.path.join(src, item)
+ copy_fs_node(path, dst)
+
+def copy_fs_node(src, dst):
+ basename = os.path.basename(src)
+ dst = os.path.join(dst, basename)
+ if os.path.isfile(src):
+ shutil.copy(src, dst)
+ elif os.path.isdir(src):
+ shutil.copytree(src, dst)
+ else:
+ raise ValueError("Don't know how to copy file system node: %s" % src)
+
+class Command(NoArgsCommand):
+ help = 'Copies Sass/Compass frameworks into the current project.'
+
+ requires_model_validation = False
+
+ def handle_noargs(self, **options):
+ if os.path.exists(FRAMEWORKS_DIR):
+ shutil.rmtree(FRAMEWORKS_DIR)
+ os.mkdir(FRAMEWORKS_DIR)
+ for path in self.get_framework_paths():
+ copy_children(path, FRAMEWORKS_DIR)
+
+ if FRAMEWORKS_DIR not in get_media_dirs():
+ sys.stderr.write('Please add the "%(dir)s" '
+ 'folder to your GLOBAL_MEDIA_DIRS setting '
+ 'like this:\n\n'
+ 'GLOBAL_MEDIA_DIRS = (\n'
+ ' ...\n'
+ " os.path.join(os.path.dirname(__file__),\n"
+ " '%(dir)s'),\n"
+ " ...\n"
+ ")\n" % {'dir': os.path.basename(FRAMEWORKS_DIR)})
+
+ def get_framework_paths(self):
+ run = ['ruby', PATHS_SCRIPT]
+ run.extend(sass.SASS_FRAMEWORKS)
+ try:
+ cmd = Popen(run, universal_newlines=True,
+ stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ output, error = cmd.communicate()
+ assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
+ return map(os.path.abspath, filter(None, output.split('\n')))
+ except Exception, e:
+ raise ValueError("Failed to execute an internal Ruby script. "
+ "Please make sure that you have installed Ruby "
+ "(http://ruby-lang.org), Sass (http://sass-lang.com), and "
+ "Compass (http://compass-style.org).\n"
+ "Error was: %s" % e)
|
adieu/django-mediagenerator | f6837c2143615e5b3a2d4087a71f804957723f67 | fixed support for latest CoffeeScript version | diff --git a/mediagenerator/filters/coffeescript.py b/mediagenerator/filters/coffeescript.py
index e435552..c7f60c2 100644
--- a/mediagenerator/filters/coffeescript.py
+++ b/mediagenerator/filters/coffeescript.py
@@ -1,65 +1,65 @@
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import find_file
from subprocess import Popen, PIPE
import os
import sys
-class CoffeeScript(Filter):
+class CoffeeScript(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, module=None)
super(CoffeeScript, self).__init__(**kwargs)
assert self.filetype == 'js', (
'CoffeeScript only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
self._compiled = None
self._compiled_hash = None
self._mtime = None
@classmethod
def from_default(cls, name):
return {'module': name}
def get_output(self, variation):
self._regenerate(debug=False)
yield self._compiled
def get_dev_output(self, name, variation):
assert name == self.module
self._regenerate(debug=True)
return self._compiled
def get_dev_output_names(self, variation):
self._regenerate(debug=True)
yield self.module, self._compiled_hash
def _regenerate(self, debug=False):
path = find_file(self.module)
mtime = os.path.getmtime(path)
if mtime == self._mtime:
return
fp = open(path, 'r')
source = fp.read()
fp.close()
self._compiled = self._compile(source, debug=debug)
self._compiled_hash = sha1(self._compiled).hexdigest()
self._mtime = mtime
def _compile(self, input, debug=False):
try:
shell = sys.platform == 'win32'
- cmd = Popen(['coffee', '-c', '-p', '-s', '--no-wrap'],
+ cmd = Popen(['coffee', '--compile', '--print', '--stdio', '--bare'],
stdin=PIPE, stdout=PIPE, stderr=PIPE,
shell=shell, universal_newlines=True)
output, error = cmd.communicate(input)
assert cmd.wait() == 0, ('CoffeeScript command returned bad '
'result:\n%s' % error)
return output
except Exception, e:
raise ValueError("Failed to run CoffeeScript compiler for this "
"file. Please confirm that the \"coffee\" application is "
"on your path and that you can run it from your own command "
"line.\n"
"Error was: %s" % e)
|
adieu/django-mediagenerator | 5375a6d784507766eb866254a3242c37c2d72fa7 | added changelog entry | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 2472412..c00ebf6 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,129 +1,130 @@
Changelog
=============================================================
Version 1.9.3 (in development)
-------------------------------------------------------------
* Fixed CoffeeScript support on OSX
* Added "Content-Length" response header for files served in dev mode (needed for Flash). Thanks to "sayane" for the patch.
* Fixed typo which resulted in broken support for .html assets. Thanks to "pendletongp" for the patch.
* Now showing instructive error message when Sass can't be found
+* Use correct output path for ``_generated_media_names.py`` even when ``manage.py generatemedia`` is not started from the project root. Thanks to "pendletongp" for the patch.
Version 1.9.2
-------------------------------------------------------------
* Added missing ``base.manifest`` template and ``base_project`` to zip package
Version 1.9.1
-------------------------------------------------------------
* Fixed relative imports in Sass filter
Version 1.9
-------------------------------------------------------------
* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
* Added caching for CoffeeScript compilation results
* In cache manifests the ``NETWORK`` section now contains "``*``" by default
* By default ``.woff`` files are now copied, too
* Fixed first-time media generation when ``MEDIA_DEV_MODE=False``
* Fixed i18n filter in development mode. Contributed by Simon Payne.
* Fixed support for "/" in bundle names in dev mode (always worked fine in production)
* Changed ``DEV_MEDIA_URL`` fallback from ``STATICFILES_URL`` to ``STATIC_URL`` (has been changed in Django trunk)
Version 1.8
-------------------------------------------------------------
* HTML5 manifest now uses a regex to match included/excluded files
* Added support for scss files
* Fixed Sass ``@import`` tracking for partials
Version 1.7
-------------------------------------------------------------
* Large performance improvements, in particular on App Engine dev_appserver
Version 1.6.1
-------------------------------------------------------------
* Fixed support for Django 1.1 which imports ``mediagenerator.templatetags.media`` as ``django.templatetags.media`` and thus breaks relative imports
Version 1.6
-------------------------------------------------------------
**Upgrade notes:** The installation got simplified. Please remove the media code from your urls.py. The ``MediaMiddleware`` now takes care of everything.
* Added support for CSS data URIs. Doesn't yet generate MHTML for IE6/7 support.
* Added support for pre-bundling i18n JavaScript translations, so you don't need to use Django's slower AJAX view. With this filter translations are part of your generated JS bundle.
* Added support for CleverCSS
* Simplified installation process. The media view got completely replaced by ``MediaMiddleware``.
* Fixed support for output variations (needed by i18n filter to generate the same JS file in different variations for each language)
Version 1.5.1
-------------------------------------------------------------
**Upgrade notes:** There's a conflict with ``STATICFILES_URL`` in Django trunk (1.3). Use ``DEV_MEDIA_URL`` instead from now on.
* ``DEV_MEDIA_URL`` should be used instead of ``MEDIA_URL`` and ``STATICFILES_URL``, though the other two are still valid for backwards-compatibility
Version 1.5
-------------------------------------------------------------
This is another staticfiles-compatibility release which is intended to allow for writing reusable open-source apps.
**Upgrade notes:** The CSS URL rewriting scheme has changed. Previously, ``url()`` statements in CSS files were treated similar to "absolute" URLs where the root is ``STATICFILES_URL`` (or ``MEDIA_URL``). This scheme was used because it was consistent with URLs in Sass. Now URLs are treated as relative to the CSS file. So, if the file ``css/style.css`` wants to link to ``img/icon.png`` the URL now has to be ``url(../img/icon.png)``. Previously it was ``url(img/icon.png)``. One way to upgrade to the staticfiles-compatible scheme is to modify your existing URLs.
If you don't want to change your CSS files there is an alternative, but it's not staticfiles-compatible. Add the following to your settings: ``REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = False``
**Important:** Sass files still use the old scheme (``url(img/icon.png)``) because this is **much** easier to understand and allows for more reusable code, especially when you ``@import`` other Sass modules and those link to images.
* Made CSS URL rewriting system compatible with ``django.contrib.staticfiles``
* Added support for CSS URLs that contain a hash (e.g.: ``url('webfont.svg#webfontmAfNlbV6')``). Thanks to Karl Bowden for the patch!
* Filter backends now have an additional ``self.bundle`` attribute which contains the final bundle name
* Fixed an incompatibility with Django 1.1 and 1.0 (``django.utils.itercompat.product`` isn't available in those releases)
* Fixed ``MediaMiddleware``, so it doesn't cache error responses
Version 1.4
-------------------------------------------------------------
This is a compatibility release which prepares for the new staticfiles feature in Django 1.3.
**Upgrade notes:** Place your app media in a "static" folder instead of a "media" folder. Use ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) instead of ``MEDIA_URL`` from now on.
* App media is now searched in "static" folders instead of "media". For now, you can still use "media" folders, but this might be deprecated in the future (for the sake of having just one standard for reusable apps).
* ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) should be used instead of ``MEDIA_URL`` because the meaning of that variable has changed in Django 1.3.
* ``DEV_MEDIA_URL`` falls back to ``STATICFILES_URL`` and ``GLOBAL_MEDIA_DIRS`` falls back to ``STATICFILES_DIRS`` if undefined (you should still use the former, respectively; this is just for convenience)
Version 1.3.1
-------------------------------------------------------------
* Improved handling of media variations. This also fixes a bug with using CSS media types in production mode
Version 1.3
-------------------------------------------------------------
* Added support for setting media type for CSS. E.g.: ``{% include_media 'bundle.css' media='print' %}``
Version 1.2.1
-------------------------------------------------------------
* Fixed caching problems on runserver when using i18n and ``LocaleMiddleware``
Version 1.2
-------------------------------------------------------------
**Upgrade notes:** Please add ``'mediagenerator.middleware.MediaMiddleware'`` as the **first** middleware in your settings.py.
* Got rid of unnecessary HTTP roundtrips when ``USE_ETAGS = True``
* Added Django template filter (by default only used for .html files), contributed by Matt Bierner
* Added media_url() filter which provides access to generated URLs from JS
* CopyFiles backend can now ignore files matching certain regex patterns
Version 1.1
-------------------------------------------------------------
* Added Closure compiler backend
* Added HTML5 cache manifest file backend
* Fixed Sass support on Linux
* Updated pyjs filter to latest pyjs repo version
* "swf" and "ico" files are now copied, too, by default
|
adieu/django-mediagenerator | 4df8335811afaa8bb00dc6902ff1bc96cdecb5d5 | reverted the change to utils.py because that's not compatible with App Engine | diff --git a/mediagenerator/utils.py b/mediagenerator/utils.py
index 46b8dce..32d10be 100644
--- a/mediagenerator/utils.py
+++ b/mediagenerator/utils.py
@@ -1,146 +1,137 @@
from . import settings as media_settings
-from .settings import GLOBAL_MEDIA_DIRS, GENERATED_MEDIA_MAP_FILE, PRODUCTION_MEDIA_URL, \
+from .settings import GLOBAL_MEDIA_DIRS, PRODUCTION_MEDIA_URL, \
IGNORE_APP_MEDIA_DIRS, MEDIA_GENERATORS, DEV_MEDIA_URL
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.utils.http import urlquote
import os
import re
-from imp import load_source
+
try:
- load_source('GENERATED_MEDIA_MAP_FILE_MODULE', GENERATED_MEDIA_MAP_FILE)
- from GENERATED_MEDIA_MAP_FILE_MODULE import NAMES
-except IOError:
- NAMES = None
+ from _generated_media_names import NAMES
except ImportError:
NAMES = None
_backends_cache = {}
_media_dirs_cache = []
_generators_cache = []
_generated_names = {}
_backend_mapping = {}
def _load_generators():
if not _generators_cache:
for name in MEDIA_GENERATORS:
backend = load_backend(name)()
_generators_cache.append(backend)
return _generators_cache
def _refresh_dev_names():
_generated_names.clear()
_backend_mapping.clear()
for backend in _load_generators():
for key, url, hash in backend.get_dev_output_names():
versioned_url = urlquote(url)
if hash:
versioned_url += '?version=' + hash
_generated_names.setdefault(key, [])
_generated_names[key].append(versioned_url)
_backend_mapping[url] = backend
class _MatchNothing(object):
def match(self, content):
return False
def prepare_patterns(patterns, setting_name):
"""Helper function for patter-matching settings."""
if isinstance(patterns, basestring):
patterns = (patterns,)
if not patterns:
return _MatchNothing()
# First validate each pattern individually
for pattern in patterns:
try:
re.compile(pattern, re.U)
except re.error:
raise ValueError("""Pattern "%s" can't be compiled """
"in %s" % (pattern, setting_name))
# Now return a combined pattern
return re.compile('^(' + ')$|^('.join(patterns) + ')$', re.U)
def get_production_mapping():
if NAMES is None:
- if os.path.isfile(GENERATED_MEDIA_MAP_FILE):
- raise ImportError('Could not import NAMES from the map file: %s. '
- 'NAMES is needed for production mode. Please '
- 'run manage.py generatemedia to create it.' % GENERATED_MEDIA_MAP_FILE)
- else:
- raise IOError('Could not open the map file with path: %s. This '
+ raise ImportError('Could not import _generated_media_names. This '
'file is needed for production mode. Please '
- 'run manage.py generatemedia to create it. '
- 'If the file exists, you must adjust the permissions.' % GENERATED_MEDIA_MAP_FILE)
+ 'run manage.py generatemedia to create it.')
return NAMES
def get_media_mapping():
if media_settings.MEDIA_DEV_MODE:
return _generated_names
return get_production_mapping()
def get_media_url_mapping():
if media_settings.MEDIA_DEV_MODE:
base_url = DEV_MEDIA_URL
else:
base_url = PRODUCTION_MEDIA_URL
mapping = {}
for key, value in get_media_mapping().items():
if isinstance(value, basestring):
value = (value,)
mapping[key] = [base_url + url for url in value]
return mapping
def media_urls(key, refresh=False):
if media_settings.MEDIA_DEV_MODE:
if refresh:
_refresh_dev_names()
return [DEV_MEDIA_URL + url for url in _generated_names[key]]
return [PRODUCTION_MEDIA_URL + get_production_mapping()[key]]
def media_url(key, refresh=False):
urls = media_urls(key, refresh=refresh)
if len(urls) == 1:
return urls[0]
raise ValueError('media_url() only works with URLs that contain exactly '
'one file. Use media_urls() (or {% include_media %} in templates) instead.')
def get_media_dirs():
if not _media_dirs_cache:
media_dirs = [os.path.abspath(root) for root in GLOBAL_MEDIA_DIRS]
for app in settings.INSTALLED_APPS:
if app in IGNORE_APP_MEDIA_DIRS:
continue
for name in ('static', 'media'):
app_root = os.path.dirname(import_module(app).__file__)
media_dirs.append(os.path.join(app_root, name))
_media_dirs_cache.extend(media_dirs)
return _media_dirs_cache
def find_file(name, media_dirs=None):
if media_dirs is None:
media_dirs = get_media_dirs()
for root in media_dirs:
path = os.path.normpath(os.path.join(root, name))
if os.path.isfile(path):
return path
def load_backend(backend):
if backend not in _backends_cache:
module_name, func_name = backend.rsplit('.', 1)
_backends_cache[backend] = _load_backend(backend)
return _backends_cache[backend]
def _load_backend(path):
module_name, attr_name = path.rsplit('.', 1)
try:
mod = import_module(module_name)
except (ImportError, ValueError), e:
raise ImproperlyConfigured('Error importing backend module %s: "%s"' % (module_name, e))
try:
return getattr(mod, attr_name)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" backend' % (module_name, attr_name))
|
adieu/django-mediagenerator | e136913aa21f0cd7587680aca782bb0a37d86d7f | clean fork for issue #7 | diff --git a/mediagenerator/api.py b/mediagenerator/api.py
index bf6d9ec..bafea1f 100644
--- a/mediagenerator/api.py
+++ b/mediagenerator/api.py
@@ -1,42 +1,42 @@
from . import settings, utils
-from .settings import GENERATED_MEDIA_DIR, MEDIA_GENERATORS
+from .settings import GENERATED_MEDIA_DIR, GENERATED_MEDIA_MAP_FILE, MEDIA_GENERATORS
from .utils import load_backend
from django.utils.http import urlquote
import os
import shutil
def generate_media():
if os.path.exists(GENERATED_MEDIA_DIR):
shutil.rmtree(GENERATED_MEDIA_DIR)
# This will make media_url() generate production URLs
was_dev_mode = settings.MEDIA_DEV_MODE
settings.MEDIA_DEV_MODE = False
utils.NAMES = {}
for backend_name in MEDIA_GENERATORS:
backend = load_backend(backend_name)()
for key, url, content in backend.get_output():
version = backend.generate_version(key, url, content)
if version:
base, ext = os.path.splitext(url)
url = '%s-%s%s' % (base, version, ext)
path = os.path.join(GENERATED_MEDIA_DIR, url)
parent = os.path.dirname(path)
if not os.path.exists(parent):
os.makedirs(parent)
fp = open(path, 'wb')
fp.write(content)
fp.close()
utils.NAMES[key] = urlquote(url)
settings.MEDIA_DEV_MODE = was_dev_mode
# Generate a module with media file name mappings
- fp = open('_generated_media_names.py', 'w')
+ fp = open(GENERATED_MEDIA_MAP_FILE, 'w')
fp.write('NAMES = %r' % utils.NAMES)
fp.close()
diff --git a/mediagenerator/settings.py b/mediagenerator/settings.py
index 96fb276..2992920 100644
--- a/mediagenerator/settings.py
+++ b/mediagenerator/settings.py
@@ -1,23 +1,31 @@
from django.conf import settings
import os
+import __main__
+
+default_map_file_path = '_generated_media_names.py'
+default_media_dir = '_generated_media'
+if hasattr(__main__,"__file__"):# __main__ is not guaranteed to have the __file__ attribute
+ default_map_file_path = os.path.join(os.path.dirname(__main__.__file__), default_map_file_path)
+ default_media_dir = os.path.join(os.path.dirname(__main__.__file__), default_media_dir)
DEV_MEDIA_URL = getattr(settings, 'DEV_MEDIA_URL',
getattr(settings, 'STATIC_URL', settings.MEDIA_URL))
PRODUCTION_MEDIA_URL = getattr(settings, 'PRODUCTION_MEDIA_URL', DEV_MEDIA_URL)
MEDIA_GENERATORS = getattr(settings, 'MEDIA_GENERATORS', (
'mediagenerator.generators.copyfiles.CopyFiles',
'mediagenerator.generators.bundles.Bundles',
'mediagenerator.generators.manifest.Manifest',
))
-GENERATED_MEDIA_DIR = os.path.abspath(
- getattr(settings, 'GENERATED_MEDIA_DIR', '_generated_media'))
+GENERATED_MEDIA_DIR = os.path.abspath(default_media_dir)
+
+GENERATED_MEDIA_MAP_FILE = os.path.abspath(default_map_file_path)
GLOBAL_MEDIA_DIRS = getattr(settings, 'GLOBAL_MEDIA_DIRS',
getattr(settings, 'STATICFILES_DIRS', ()))
IGNORE_APP_MEDIA_DIRS = getattr(settings, 'IGNORE_APP_MEDIA_DIRS',
('django.contrib.admin',))
-MEDIA_DEV_MODE = getattr(settings, 'MEDIA_DEV_MODE', settings.DEBUG)
+MEDIA_DEV_MODE = getattr(settings, 'MEDIA_DEV_MODE', settings.DEBUG)
\ No newline at end of file
diff --git a/mediagenerator/utils.py b/mediagenerator/utils.py
index 32d10be..46b8dce 100644
--- a/mediagenerator/utils.py
+++ b/mediagenerator/utils.py
@@ -1,137 +1,146 @@
from . import settings as media_settings
-from .settings import GLOBAL_MEDIA_DIRS, PRODUCTION_MEDIA_URL, \
+from .settings import GLOBAL_MEDIA_DIRS, GENERATED_MEDIA_MAP_FILE, PRODUCTION_MEDIA_URL, \
IGNORE_APP_MEDIA_DIRS, MEDIA_GENERATORS, DEV_MEDIA_URL
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.utils.http import urlquote
import os
import re
-
+from imp import load_source
try:
- from _generated_media_names import NAMES
+ load_source('GENERATED_MEDIA_MAP_FILE_MODULE', GENERATED_MEDIA_MAP_FILE)
+ from GENERATED_MEDIA_MAP_FILE_MODULE import NAMES
+except IOError:
+ NAMES = None
except ImportError:
NAMES = None
_backends_cache = {}
_media_dirs_cache = []
_generators_cache = []
_generated_names = {}
_backend_mapping = {}
def _load_generators():
if not _generators_cache:
for name in MEDIA_GENERATORS:
backend = load_backend(name)()
_generators_cache.append(backend)
return _generators_cache
def _refresh_dev_names():
_generated_names.clear()
_backend_mapping.clear()
for backend in _load_generators():
for key, url, hash in backend.get_dev_output_names():
versioned_url = urlquote(url)
if hash:
versioned_url += '?version=' + hash
_generated_names.setdefault(key, [])
_generated_names[key].append(versioned_url)
_backend_mapping[url] = backend
class _MatchNothing(object):
def match(self, content):
return False
def prepare_patterns(patterns, setting_name):
"""Helper function for patter-matching settings."""
if isinstance(patterns, basestring):
patterns = (patterns,)
if not patterns:
return _MatchNothing()
# First validate each pattern individually
for pattern in patterns:
try:
re.compile(pattern, re.U)
except re.error:
raise ValueError("""Pattern "%s" can't be compiled """
"in %s" % (pattern, setting_name))
# Now return a combined pattern
return re.compile('^(' + ')$|^('.join(patterns) + ')$', re.U)
def get_production_mapping():
if NAMES is None:
- raise ImportError('Could not import _generated_media_names. This '
+ if os.path.isfile(GENERATED_MEDIA_MAP_FILE):
+ raise ImportError('Could not import NAMES from the map file: %s. '
+ 'NAMES is needed for production mode. Please '
+ 'run manage.py generatemedia to create it.' % GENERATED_MEDIA_MAP_FILE)
+ else:
+ raise IOError('Could not open the map file with path: %s. This '
'file is needed for production mode. Please '
- 'run manage.py generatemedia to create it.')
+ 'run manage.py generatemedia to create it. '
+ 'If the file exists, you must adjust the permissions.' % GENERATED_MEDIA_MAP_FILE)
return NAMES
def get_media_mapping():
if media_settings.MEDIA_DEV_MODE:
return _generated_names
return get_production_mapping()
def get_media_url_mapping():
if media_settings.MEDIA_DEV_MODE:
base_url = DEV_MEDIA_URL
else:
base_url = PRODUCTION_MEDIA_URL
mapping = {}
for key, value in get_media_mapping().items():
if isinstance(value, basestring):
value = (value,)
mapping[key] = [base_url + url for url in value]
return mapping
def media_urls(key, refresh=False):
if media_settings.MEDIA_DEV_MODE:
if refresh:
_refresh_dev_names()
return [DEV_MEDIA_URL + url for url in _generated_names[key]]
return [PRODUCTION_MEDIA_URL + get_production_mapping()[key]]
def media_url(key, refresh=False):
urls = media_urls(key, refresh=refresh)
if len(urls) == 1:
return urls[0]
raise ValueError('media_url() only works with URLs that contain exactly '
'one file. Use media_urls() (or {% include_media %} in templates) instead.')
def get_media_dirs():
if not _media_dirs_cache:
media_dirs = [os.path.abspath(root) for root in GLOBAL_MEDIA_DIRS]
for app in settings.INSTALLED_APPS:
if app in IGNORE_APP_MEDIA_DIRS:
continue
for name in ('static', 'media'):
app_root = os.path.dirname(import_module(app).__file__)
media_dirs.append(os.path.join(app_root, name))
_media_dirs_cache.extend(media_dirs)
return _media_dirs_cache
def find_file(name, media_dirs=None):
if media_dirs is None:
media_dirs = get_media_dirs()
for root in media_dirs:
path = os.path.normpath(os.path.join(root, name))
if os.path.isfile(path):
return path
def load_backend(backend):
if backend not in _backends_cache:
module_name, func_name = backend.rsplit('.', 1)
_backends_cache[backend] = _load_backend(backend)
return _backends_cache[backend]
def _load_backend(path):
module_name, attr_name = path.rsplit('.', 1)
try:
mod = import_module(module_name)
except (ImportError, ValueError), e:
raise ImproperlyConfigured('Error importing backend module %s: "%s"' % (module_name, e))
try:
return getattr(mod, attr_name)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" backend' % (module_name, attr_name))
|
adieu/django-mediagenerator | 7925abe6e52d31b6737003d84e702fe0a477989a | fixed CoffeeScript on OS X and show better error message when Sass can't be found and include images in the source distribution zip file | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 9705c5c..2472412 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,127 +1,129 @@
Changelog
=============================================================
Version 1.9.3 (in development)
-------------------------------------------------------------
-* Added "Content-Length" response header for files served in dev mode. Thanks to "sayane" for the patch.
+* Fixed CoffeeScript support on OSX
+* Added "Content-Length" response header for files served in dev mode (needed for Flash). Thanks to "sayane" for the patch.
* Fixed typo which resulted in broken support for .html assets. Thanks to "pendletongp" for the patch.
+* Now showing instructive error message when Sass can't be found
Version 1.9.2
-------------------------------------------------------------
* Added missing ``base.manifest`` template and ``base_project`` to zip package
Version 1.9.1
-------------------------------------------------------------
* Fixed relative imports in Sass filter
Version 1.9
-------------------------------------------------------------
* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
* Added caching for CoffeeScript compilation results
* In cache manifests the ``NETWORK`` section now contains "``*``" by default
* By default ``.woff`` files are now copied, too
* Fixed first-time media generation when ``MEDIA_DEV_MODE=False``
* Fixed i18n filter in development mode. Contributed by Simon Payne.
* Fixed support for "/" in bundle names in dev mode (always worked fine in production)
* Changed ``DEV_MEDIA_URL`` fallback from ``STATICFILES_URL`` to ``STATIC_URL`` (has been changed in Django trunk)
Version 1.8
-------------------------------------------------------------
* HTML5 manifest now uses a regex to match included/excluded files
* Added support for scss files
* Fixed Sass ``@import`` tracking for partials
Version 1.7
-------------------------------------------------------------
* Large performance improvements, in particular on App Engine dev_appserver
Version 1.6.1
-------------------------------------------------------------
* Fixed support for Django 1.1 which imports ``mediagenerator.templatetags.media`` as ``django.templatetags.media`` and thus breaks relative imports
Version 1.6
-------------------------------------------------------------
**Upgrade notes:** The installation got simplified. Please remove the media code from your urls.py. The ``MediaMiddleware`` now takes care of everything.
* Added support for CSS data URIs. Doesn't yet generate MHTML for IE6/7 support.
* Added support for pre-bundling i18n JavaScript translations, so you don't need to use Django's slower AJAX view. With this filter translations are part of your generated JS bundle.
* Added support for CleverCSS
* Simplified installation process. The media view got completely replaced by ``MediaMiddleware``.
* Fixed support for output variations (needed by i18n filter to generate the same JS file in different variations for each language)
Version 1.5.1
-------------------------------------------------------------
**Upgrade notes:** There's a conflict with ``STATICFILES_URL`` in Django trunk (1.3). Use ``DEV_MEDIA_URL`` instead from now on.
* ``DEV_MEDIA_URL`` should be used instead of ``MEDIA_URL`` and ``STATICFILES_URL``, though the other two are still valid for backwards-compatibility
Version 1.5
-------------------------------------------------------------
This is another staticfiles-compatibility release which is intended to allow for writing reusable open-source apps.
**Upgrade notes:** The CSS URL rewriting scheme has changed. Previously, ``url()`` statements in CSS files were treated similar to "absolute" URLs where the root is ``STATICFILES_URL`` (or ``MEDIA_URL``). This scheme was used because it was consistent with URLs in Sass. Now URLs are treated as relative to the CSS file. So, if the file ``css/style.css`` wants to link to ``img/icon.png`` the URL now has to be ``url(../img/icon.png)``. Previously it was ``url(img/icon.png)``. One way to upgrade to the staticfiles-compatible scheme is to modify your existing URLs.
If you don't want to change your CSS files there is an alternative, but it's not staticfiles-compatible. Add the following to your settings: ``REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = False``
**Important:** Sass files still use the old scheme (``url(img/icon.png)``) because this is **much** easier to understand and allows for more reusable code, especially when you ``@import`` other Sass modules and those link to images.
* Made CSS URL rewriting system compatible with ``django.contrib.staticfiles``
* Added support for CSS URLs that contain a hash (e.g.: ``url('webfont.svg#webfontmAfNlbV6')``). Thanks to Karl Bowden for the patch!
* Filter backends now have an additional ``self.bundle`` attribute which contains the final bundle name
* Fixed an incompatibility with Django 1.1 and 1.0 (``django.utils.itercompat.product`` isn't available in those releases)
* Fixed ``MediaMiddleware``, so it doesn't cache error responses
Version 1.4
-------------------------------------------------------------
This is a compatibility release which prepares for the new staticfiles feature in Django 1.3.
**Upgrade notes:** Place your app media in a "static" folder instead of a "media" folder. Use ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) instead of ``MEDIA_URL`` from now on.
* App media is now searched in "static" folders instead of "media". For now, you can still use "media" folders, but this might be deprecated in the future (for the sake of having just one standard for reusable apps).
* ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) should be used instead of ``MEDIA_URL`` because the meaning of that variable has changed in Django 1.3.
* ``DEV_MEDIA_URL`` falls back to ``STATICFILES_URL`` and ``GLOBAL_MEDIA_DIRS`` falls back to ``STATICFILES_DIRS`` if undefined (you should still use the former, respectively; this is just for convenience)
Version 1.3.1
-------------------------------------------------------------
* Improved handling of media variations. This also fixes a bug with using CSS media types in production mode
Version 1.3
-------------------------------------------------------------
* Added support for setting media type for CSS. E.g.: ``{% include_media 'bundle.css' media='print' %}``
Version 1.2.1
-------------------------------------------------------------
* Fixed caching problems on runserver when using i18n and ``LocaleMiddleware``
Version 1.2
-------------------------------------------------------------
**Upgrade notes:** Please add ``'mediagenerator.middleware.MediaMiddleware'`` as the **first** middleware in your settings.py.
* Got rid of unnecessary HTTP roundtrips when ``USE_ETAGS = True``
* Added Django template filter (by default only used for .html files), contributed by Matt Bierner
* Added media_url() filter which provides access to generated URLs from JS
* CopyFiles backend can now ignore files matching certain regex patterns
Version 1.1
-------------------------------------------------------------
* Added Closure compiler backend
* Added HTML5 cache manifest file backend
* Fixed Sass support on Linux
* Updated pyjs filter to latest pyjs repo version
* "swf" and "ico" files are now copied, too, by default
diff --git a/MANIFEST.in b/MANIFEST.in
index d039416..b98334f 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,8 +1,8 @@
include LICENSE
include CHANGELOG.rst
include README.rst
-recursive-include mediagenerator *.html *.manifest *.js *.css *.sass
+recursive-include mediagenerator *.html *.manifest *.gif *.jpg *.jpeg *.png *.js *.css *.sass
recursive-include mediagenerator/filters/pyjslibs *.py
-recursive-include base_project *.py *.html *.css *.js *.manifest *.sass
+recursive-include base_project *.py *.html *.manifest *.gif *.jpg *.jpeg *.png *.js *.css *.sass
prune base_project _generated_media_names.py
prune base_project/_generated_media
diff --git a/README.rst b/README.rst
index 1d0b09b..1cbcfc7 100644
--- a/README.rst
+++ b/README.rst
@@ -1,27 +1,27 @@
Improve your user experience with amazingly fast page loads by combining,
compressing, and versioning your JavaScript & CSS files and images.
-Eliminate unnecessary HTTP requests and maximize cache usage with
-the django-mediagenerator_ asset manager.
+django-mediagenerator_ eliminates unnecessary HTTP requests
+and maximizes cache usage.
Supports App Engine, Sass_, HTML5 offline manifests, Jinja2_,
Python/pyjs_, CoffeeScript_, and much more. Visit the
`project site`_ for more information.
Most important changes in version 1.9 - 1.9.2
=============================================================
* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
* In cache manifests the ``NETWORK`` section now contains "``*``" by default
* Fixed relative imports in Sass filter
* Fixed i18n filter in development mode. Contributed by Simon Payne.
* Added missing ``base.manifest`` to zip package
See `CHANGELOG.rst`_ for the complete changelog.
.. _django-mediagenerator: http://www.allbuttonspressed.com/projects/django-mediagenerator
.. _project site: django-mediagenerator_
.. _Sass: http://sass-lang.com/
.. _pyjs: http://pyjs.org/
.. _CoffeeScript: http://coffeescript.org/
.. _Jinja2: http://jinja.pocoo.org/
.. _CHANGELOG.rst: https://bitbucket.org/wkornewald/django-mediagenerator/src/tip/CHANGELOG.rst
diff --git a/mediagenerator/filters/coffeescript.py b/mediagenerator/filters/coffeescript.py
index 939c674..e435552 100644
--- a/mediagenerator/filters/coffeescript.py
+++ b/mediagenerator/filters/coffeescript.py
@@ -1,67 +1,65 @@
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import find_file
from subprocess import Popen, PIPE
import os
+import sys
class CoffeeScript(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, module=None)
super(CoffeeScript, self).__init__(**kwargs)
assert self.filetype == 'js', (
'CoffeeScript only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
self._compiled = None
self._compiled_hash = None
self._mtime = None
@classmethod
def from_default(cls, name):
return {'module': name}
def get_output(self, variation):
self._regenerate(debug=False)
yield self._compiled
def get_dev_output(self, name, variation):
assert name == self.module
self._regenerate(debug=True)
return self._compiled
def get_dev_output_names(self, variation):
self._regenerate(debug=True)
yield self.module, self._compiled_hash
def _regenerate(self, debug=False):
path = find_file(self.module)
mtime = os.path.getmtime(path)
if mtime == self._mtime:
return
fp = open(path, 'r')
source = fp.read()
fp.close()
self._compiled = self._compile(source, debug=debug)
self._compiled_hash = sha1(self._compiled).hexdigest()
self._mtime = mtime
def _compile(self, input, debug=False):
try:
- # coffee
- # -s = Read from stdin for the source
- # -c = Compile
- # -p = print the compiled output to stdout
+ shell = sys.platform == 'win32'
cmd = Popen(['coffee', '-c', '-p', '-s', '--no-wrap'],
stdin=PIPE, stdout=PIPE, stderr=PIPE,
- shell=True, universal_newlines=True)
+ shell=shell, universal_newlines=True)
output, error = cmd.communicate(input)
assert cmd.wait() == 0, ('CoffeeScript command returned bad '
'result:\n%s' % error)
return output
except Exception, e:
raise ValueError("Failed to run CoffeeScript compiler for this "
"file. Please confirm that the \"coffee\" application is "
"on your path and that you can run it from your own command "
"line.\n"
"Error was: %s" % e)
diff --git a/mediagenerator/filters/sass.py b/mediagenerator/filters/sass.py
index 064acea..a82bb12 100644
--- a/mediagenerator/filters/sass.py
+++ b/mediagenerator/filters/sass.py
@@ -1,138 +1,141 @@
from django.conf import settings
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import get_media_dirs, find_file
from subprocess import Popen, PIPE
import os
import posixpath
import re
import sys
# Emits extra debug info that can be used by the FireSass Firebug plugin
SASS_DEBUG_INFO = getattr(settings, 'SASS_DEBUG_INFO', False)
_RE_FLAGS = re.MULTILINE | re.UNICODE
multi_line_comment_re = re.compile(r'/\*.*?\*/', _RE_FLAGS | re.DOTALL)
one_line_comment_re = re.compile(r'//.*', _RE_FLAGS)
import_re = re.compile(r'^@import\s+["\']?(.+?)["\']?\s*;?\s*$', _RE_FLAGS)
class Sass(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, path=(), main_module=None)
if isinstance(self.path, basestring):
self.path = (self.path,)
super(Sass, self).__init__(**kwargs)
assert self.filetype == 'css', (
'Sass only supports compilation to css. '
'The parent filter expects "%s".' % self.filetype)
assert self.main_module, \
'You must provide a main module'
self.path += tuple(get_media_dirs())
self.path_args = []
for path in self.path:
self.path_args.extend(('-I', path))
self._compiled = None
self._compiled_hash = None
self._dependencies = {}
@classmethod
def from_default(cls, name):
return {'main_module': name}
def get_output(self, variation):
self._regenerate(debug=False)
yield self._compiled
def get_dev_output(self, name, variation):
assert name == self.main_module
self._regenerate(debug=True)
return self._compiled
def get_dev_output_names(self, variation):
self._regenerate(debug=True)
yield self.main_module, self._compiled_hash
def _compile(self, debug=False):
run = ['sass', '-C', '-t', 'expanded']
if debug:
run.append('--line-numbers')
if SASS_DEBUG_INFO:
run.append('--debug-info')
run.extend(self.path_args)
shell = sys.platform == 'win32'
- cmd = Popen(run, shell=shell, universal_newlines=True,
- stdin=PIPE, stdout=PIPE, stderr=PIPE)
- output, error = cmd.communicate('@import %s' % self.main_module)
- assert cmd.wait() == 0, ('Sass command returned bad result (did you '
- 'install Sass? http://sass-lang.com):\n%s'
- % error)
- return output
+ try:
+ cmd = Popen(run, shell=shell, universal_newlines=True,
+ stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ output, error = cmd.communicate('@import %s' % self.main_module)
+ assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
+ return output
+ except Exception, e:
+ raise ValueError("Failed to execute Sass. Please make sure that "
+ "you have installed Sass (http://sass-lang.com).\n"
+ "Error was: %s" % e)
def _regenerate(self, debug=False):
if self._dependencies:
for name, mtime in self._dependencies.items():
path = self._find_file(name)
if not path or os.path.getmtime(path) != mtime:
# Just recompile everything
self._dependencies = {}
break
else:
# No changes
return
modules = [self.main_module]
while True:
if not modules:
break
module_name = modules.pop()
path = self._find_file(module_name)
assert path, 'Could not find the Sass module %s' % module_name
mtime = os.path.getmtime(path)
self._dependencies[module_name] = mtime
fp = open(path, 'r')
source = fp.read()
fp.close()
dependencies = self._get_dependencies(source)
for name in dependencies:
# Try relative import, first
transformed = posixpath.join(posixpath.dirname(module_name), name)
path = self._find_file(transformed)
if path:
name = transformed
else:
path = self._find_file(name)
assert path, ('The Sass module %s could not find the '
'dependency %s' % (module_name, name))
if name not in self._dependencies:
modules.append(name)
self._compiled = self._compile(debug=debug)
self._compiled_hash = sha1(self._compiled).hexdigest()
def _get_dependencies(self, source):
clean_source = multi_line_comment_re.sub('\n', source)
clean_source = one_line_comment_re.sub('', clean_source)
return [name for name in import_re.findall(clean_source)
if not name.endswith('.css')]
def _find_file(self, name):
parts = name.rsplit('/', 1)
parts[-1] = '_' + parts[-1]
partial = '/'.join(parts)
if not name.endswith(('.sass', '.scss')):
names = (name + '.sass', name + '.scss', partial + '.sass',
partial + '.scss')
else:
names = (name, partial)
for name in names:
path = find_file(name, media_dirs=self.path)
if path:
return path
diff --git a/setup.py b/setup.py
index 6b23460..c97bf50 100644
--- a/setup.py
+++ b/setup.py
@@ -1,32 +1,32 @@
from setuptools import setup, find_packages
-DESCRIPTION = 'Asset manager for Django: Boost your website'
+DESCRIPTION = 'Asset manager for Django'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except:
pass
setup(name='django-mediagenerator',
version='1.9.2',
packages=find_packages(exclude=('tests', 'tests.*',
'base_project', 'base_project.*')),
package_data={'mediagenerator.filters': ['pyjslibs/*.py']},
author='Waldemar Kornewald',
author_email='[email protected]',
url='http://www.allbuttonspressed.com/projects/django-mediagenerator',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
],
)
|
adieu/django-mediagenerator | e21e497554efd131cc449b208ed4e4dd4a35886f | added changelog entries for last two commits | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index bf4019e..9705c5c 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,121 +1,127 @@
Changelog
=============================================================
+Version 1.9.3 (in development)
+-------------------------------------------------------------
+
+* Added "Content-Length" response header for files served in dev mode. Thanks to "sayane" for the patch.
+* Fixed typo which resulted in broken support for .html assets. Thanks to "pendletongp" for the patch.
+
Version 1.9.2
-------------------------------------------------------------
* Added missing ``base.manifest`` template and ``base_project`` to zip package
Version 1.9.1
-------------------------------------------------------------
* Fixed relative imports in Sass filter
Version 1.9
-------------------------------------------------------------
* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
* Added caching for CoffeeScript compilation results
* In cache manifests the ``NETWORK`` section now contains "``*``" by default
* By default ``.woff`` files are now copied, too
* Fixed first-time media generation when ``MEDIA_DEV_MODE=False``
* Fixed i18n filter in development mode. Contributed by Simon Payne.
* Fixed support for "/" in bundle names in dev mode (always worked fine in production)
* Changed ``DEV_MEDIA_URL`` fallback from ``STATICFILES_URL`` to ``STATIC_URL`` (has been changed in Django trunk)
Version 1.8
-------------------------------------------------------------
* HTML5 manifest now uses a regex to match included/excluded files
* Added support for scss files
* Fixed Sass ``@import`` tracking for partials
Version 1.7
-------------------------------------------------------------
* Large performance improvements, in particular on App Engine dev_appserver
Version 1.6.1
-------------------------------------------------------------
* Fixed support for Django 1.1 which imports ``mediagenerator.templatetags.media`` as ``django.templatetags.media`` and thus breaks relative imports
Version 1.6
-------------------------------------------------------------
**Upgrade notes:** The installation got simplified. Please remove the media code from your urls.py. The ``MediaMiddleware`` now takes care of everything.
* Added support for CSS data URIs. Doesn't yet generate MHTML for IE6/7 support.
* Added support for pre-bundling i18n JavaScript translations, so you don't need to use Django's slower AJAX view. With this filter translations are part of your generated JS bundle.
* Added support for CleverCSS
* Simplified installation process. The media view got completely replaced by ``MediaMiddleware``.
* Fixed support for output variations (needed by i18n filter to generate the same JS file in different variations for each language)
Version 1.5.1
-------------------------------------------------------------
**Upgrade notes:** There's a conflict with ``STATICFILES_URL`` in Django trunk (1.3). Use ``DEV_MEDIA_URL`` instead from now on.
* ``DEV_MEDIA_URL`` should be used instead of ``MEDIA_URL`` and ``STATICFILES_URL``, though the other two are still valid for backwards-compatibility
Version 1.5
-------------------------------------------------------------
This is another staticfiles-compatibility release which is intended to allow for writing reusable open-source apps.
**Upgrade notes:** The CSS URL rewriting scheme has changed. Previously, ``url()`` statements in CSS files were treated similar to "absolute" URLs where the root is ``STATICFILES_URL`` (or ``MEDIA_URL``). This scheme was used because it was consistent with URLs in Sass. Now URLs are treated as relative to the CSS file. So, if the file ``css/style.css`` wants to link to ``img/icon.png`` the URL now has to be ``url(../img/icon.png)``. Previously it was ``url(img/icon.png)``. One way to upgrade to the staticfiles-compatible scheme is to modify your existing URLs.
If you don't want to change your CSS files there is an alternative, but it's not staticfiles-compatible. Add the following to your settings: ``REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = False``
**Important:** Sass files still use the old scheme (``url(img/icon.png)``) because this is **much** easier to understand and allows for more reusable code, especially when you ``@import`` other Sass modules and those link to images.
* Made CSS URL rewriting system compatible with ``django.contrib.staticfiles``
* Added support for CSS URLs that contain a hash (e.g.: ``url('webfont.svg#webfontmAfNlbV6')``). Thanks to Karl Bowden for the patch!
* Filter backends now have an additional ``self.bundle`` attribute which contains the final bundle name
* Fixed an incompatibility with Django 1.1 and 1.0 (``django.utils.itercompat.product`` isn't available in those releases)
* Fixed ``MediaMiddleware``, so it doesn't cache error responses
Version 1.4
-------------------------------------------------------------
This is a compatibility release which prepares for the new staticfiles feature in Django 1.3.
**Upgrade notes:** Place your app media in a "static" folder instead of a "media" folder. Use ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) instead of ``MEDIA_URL`` from now on.
* App media is now searched in "static" folders instead of "media". For now, you can still use "media" folders, but this might be deprecated in the future (for the sake of having just one standard for reusable apps).
* ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) should be used instead of ``MEDIA_URL`` because the meaning of that variable has changed in Django 1.3.
* ``DEV_MEDIA_URL`` falls back to ``STATICFILES_URL`` and ``GLOBAL_MEDIA_DIRS`` falls back to ``STATICFILES_DIRS`` if undefined (you should still use the former, respectively; this is just for convenience)
Version 1.3.1
-------------------------------------------------------------
* Improved handling of media variations. This also fixes a bug with using CSS media types in production mode
Version 1.3
-------------------------------------------------------------
* Added support for setting media type for CSS. E.g.: ``{% include_media 'bundle.css' media='print' %}``
Version 1.2.1
-------------------------------------------------------------
* Fixed caching problems on runserver when using i18n and ``LocaleMiddleware``
Version 1.2
-------------------------------------------------------------
**Upgrade notes:** Please add ``'mediagenerator.middleware.MediaMiddleware'`` as the **first** middleware in your settings.py.
* Got rid of unnecessary HTTP roundtrips when ``USE_ETAGS = True``
* Added Django template filter (by default only used for .html files), contributed by Matt Bierner
* Added media_url() filter which provides access to generated URLs from JS
* CopyFiles backend can now ignore files matching certain regex patterns
Version 1.1
-------------------------------------------------------------
* Added Closure compiler backend
* Added HTML5 cache manifest file backend
* Fixed Sass support on Linux
* Updated pyjs filter to latest pyjs repo version
* "swf" and "ico" files are now copied, too, by default
|
adieu/django-mediagenerator | 016f9db52289ed5ae0a6613d1eaf643c0317caef | add "Content-Length" response header when serving files in dev mode. thanks to "sayane" for the patch | diff --git a/mediagenerator/middleware.py b/mediagenerator/middleware.py
index 4d2fdaf..22a3b21 100644
--- a/mediagenerator/middleware.py
+++ b/mediagenerator/middleware.py
@@ -1,48 +1,49 @@
from .settings import DEV_MEDIA_URL, MEDIA_DEV_MODE
# Only load other dependencies if they're needed
if MEDIA_DEV_MODE:
from .utils import _refresh_dev_names, _backend_mapping
from django.http import HttpResponse, Http404
from django.utils.cache import patch_cache_control
from django.utils.http import http_date
import time
class MediaMiddleware(object):
"""
Middleware for serving and browser-side caching of media files.
This MUST be your *first* entry in MIDDLEWARE_CLASSES. Otherwise, some
other middleware might add ETags or otherwise manipulate the caching
headers which would result in the browser doing unnecessary HTTP
roundtrips for unchanged media.
"""
MAX_AGE = 60*60*24*365
def process_request(self, request):
if not MEDIA_DEV_MODE:
return
# We refresh the dev names only once for the whole request, so all
# media_url() calls are cached.
_refresh_dev_names()
if not request.path.startswith(DEV_MEDIA_URL):
return
filename = request.path[len(DEV_MEDIA_URL):]
try:
backend = _backend_mapping[filename]
except KeyError:
raise Http404('No such media file "%s"' % filename)
content, mimetype = backend.get_dev_output(filename)
response = HttpResponse(content, content_type=mimetype)
+ response['Content-Length'] = len(content)
# Cache manifest files MUST NEVER be cached or you'll be unable to update
# your cached app!!!
if response['Content-Type'] != 'text/cache-manifest' and \
response.status_code == 200:
patch_cache_control(response, public=True, max_age=self.MAX_AGE)
response['Expires'] = http_date(time.time() + self.MAX_AGE)
return response
|
adieu/django-mediagenerator | a2c27ac2d4742c97cc1c82775dcd7a80d02d3eed | fixed typo. thanks to "pendletongp" bitbucket user for the bug report | diff --git a/mediagenerator/generators/bundles/settings.py b/mediagenerator/generators/bundles/settings.py
index 0ed8c2f..eb7741c 100644
--- a/mediagenerator/generators/bundles/settings.py
+++ b/mediagenerator/generators/bundles/settings.py
@@ -1,25 +1,25 @@
from django.conf import settings
DEFAULT_MEDIA_FILTERS = getattr(settings, 'DEFAULT_MEDIA_FILTERS', {
'ccss': 'mediagenerator.filters.clevercss.CleverCSS',
'coffee': 'mediagenerator.filters.coffeescript.CoffeeScript',
'css': 'mediagenerator.filters.cssurl.CSSURLFileFilter',
- 'html': 'mediageneraator.filters.template.Template',
+ 'html': 'mediagenerator.filters.template.Template',
'py': 'mediagenerator.filters.pyjs_filter.Pyjs',
'pyva': 'mediagenerator.filters.pyvascript_filter.PyvaScript',
'sass': 'mediagenerator.filters.sass.Sass',
'scss': 'mediagenerator.filters.sass.Sass',
})
ROOT_MEDIA_FILTERS = getattr(settings, 'ROOT_MEDIA_FILTERS', {})
# These are applied in addition to ROOT_MEDIA_FILTERS.
# The separation is done because we don't want users to
# always specify the default filters when they merely want
# to configure YUICompressor or Closure.
BASE_ROOT_MEDIA_FILTERS = getattr(settings, 'BASE_ROOT_MEDIA_FILTERS', {
'*': 'mediagenerator.filters.concat.Concat',
'css': 'mediagenerator.filters.cssurl.CSSURL',
})
MEDIA_BUNDLES = getattr(settings, 'MEDIA_BUNDLES', ())
|
adieu/django-mediagenerator | 17d1f458118d9a82c68cc4015c5adf355ad5e1d6 | added missing files to zip package | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 2807b20..bf4019e 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,116 +1,121 @@
Changelog
=============================================================
+Version 1.9.2
+-------------------------------------------------------------
+
+* Added missing ``base.manifest`` template and ``base_project`` to zip package
+
Version 1.9.1
-------------------------------------------------------------
* Fixed relative imports in Sass filter
Version 1.9
-------------------------------------------------------------
* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
* Added caching for CoffeeScript compilation results
* In cache manifests the ``NETWORK`` section now contains "``*``" by default
* By default ``.woff`` files are now copied, too
* Fixed first-time media generation when ``MEDIA_DEV_MODE=False``
* Fixed i18n filter in development mode. Contributed by Simon Payne.
* Fixed support for "/" in bundle names in dev mode (always worked fine in production)
* Changed ``DEV_MEDIA_URL`` fallback from ``STATICFILES_URL`` to ``STATIC_URL`` (has been changed in Django trunk)
Version 1.8
-------------------------------------------------------------
* HTML5 manifest now uses a regex to match included/excluded files
* Added support for scss files
* Fixed Sass ``@import`` tracking for partials
Version 1.7
-------------------------------------------------------------
* Large performance improvements, in particular on App Engine dev_appserver
Version 1.6.1
-------------------------------------------------------------
* Fixed support for Django 1.1 which imports ``mediagenerator.templatetags.media`` as ``django.templatetags.media`` and thus breaks relative imports
Version 1.6
-------------------------------------------------------------
**Upgrade notes:** The installation got simplified. Please remove the media code from your urls.py. The ``MediaMiddleware`` now takes care of everything.
* Added support for CSS data URIs. Doesn't yet generate MHTML for IE6/7 support.
* Added support for pre-bundling i18n JavaScript translations, so you don't need to use Django's slower AJAX view. With this filter translations are part of your generated JS bundle.
* Added support for CleverCSS
* Simplified installation process. The media view got completely replaced by ``MediaMiddleware``.
* Fixed support for output variations (needed by i18n filter to generate the same JS file in different variations for each language)
Version 1.5.1
-------------------------------------------------------------
**Upgrade notes:** There's a conflict with ``STATICFILES_URL`` in Django trunk (1.3). Use ``DEV_MEDIA_URL`` instead from now on.
* ``DEV_MEDIA_URL`` should be used instead of ``MEDIA_URL`` and ``STATICFILES_URL``, though the other two are still valid for backwards-compatibility
Version 1.5
-------------------------------------------------------------
This is another staticfiles-compatibility release which is intended to allow for writing reusable open-source apps.
**Upgrade notes:** The CSS URL rewriting scheme has changed. Previously, ``url()`` statements in CSS files were treated similar to "absolute" URLs where the root is ``STATICFILES_URL`` (or ``MEDIA_URL``). This scheme was used because it was consistent with URLs in Sass. Now URLs are treated as relative to the CSS file. So, if the file ``css/style.css`` wants to link to ``img/icon.png`` the URL now has to be ``url(../img/icon.png)``. Previously it was ``url(img/icon.png)``. One way to upgrade to the staticfiles-compatible scheme is to modify your existing URLs.
If you don't want to change your CSS files there is an alternative, but it's not staticfiles-compatible. Add the following to your settings: ``REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = False``
**Important:** Sass files still use the old scheme (``url(img/icon.png)``) because this is **much** easier to understand and allows for more reusable code, especially when you ``@import`` other Sass modules and those link to images.
* Made CSS URL rewriting system compatible with ``django.contrib.staticfiles``
* Added support for CSS URLs that contain a hash (e.g.: ``url('webfont.svg#webfontmAfNlbV6')``). Thanks to Karl Bowden for the patch!
* Filter backends now have an additional ``self.bundle`` attribute which contains the final bundle name
* Fixed an incompatibility with Django 1.1 and 1.0 (``django.utils.itercompat.product`` isn't available in those releases)
* Fixed ``MediaMiddleware``, so it doesn't cache error responses
Version 1.4
-------------------------------------------------------------
This is a compatibility release which prepares for the new staticfiles feature in Django 1.3.
**Upgrade notes:** Place your app media in a "static" folder instead of a "media" folder. Use ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) instead of ``MEDIA_URL`` from now on.
* App media is now searched in "static" folders instead of "media". For now, you can still use "media" folders, but this might be deprecated in the future (for the sake of having just one standard for reusable apps).
* ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) should be used instead of ``MEDIA_URL`` because the meaning of that variable has changed in Django 1.3.
* ``DEV_MEDIA_URL`` falls back to ``STATICFILES_URL`` and ``GLOBAL_MEDIA_DIRS`` falls back to ``STATICFILES_DIRS`` if undefined (you should still use the former, respectively; this is just for convenience)
Version 1.3.1
-------------------------------------------------------------
* Improved handling of media variations. This also fixes a bug with using CSS media types in production mode
Version 1.3
-------------------------------------------------------------
* Added support for setting media type for CSS. E.g.: ``{% include_media 'bundle.css' media='print' %}``
Version 1.2.1
-------------------------------------------------------------
* Fixed caching problems on runserver when using i18n and ``LocaleMiddleware``
Version 1.2
-------------------------------------------------------------
**Upgrade notes:** Please add ``'mediagenerator.middleware.MediaMiddleware'`` as the **first** middleware in your settings.py.
* Got rid of unnecessary HTTP roundtrips when ``USE_ETAGS = True``
* Added Django template filter (by default only used for .html files), contributed by Matt Bierner
* Added media_url() filter which provides access to generated URLs from JS
* CopyFiles backend can now ignore files matching certain regex patterns
Version 1.1
-------------------------------------------------------------
* Added Closure compiler backend
* Added HTML5 cache manifest file backend
* Fixed Sass support on Linux
* Updated pyjs filter to latest pyjs repo version
* "swf" and "ico" files are now copied, too, by default
diff --git a/MANIFEST.in b/MANIFEST.in
index 60d3980..d039416 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,6 +1,8 @@
include LICENSE
include CHANGELOG.rst
include README.rst
+recursive-include mediagenerator *.html *.manifest *.js *.css *.sass
recursive-include mediagenerator/filters/pyjslibs *.py
+recursive-include base_project *.py *.html *.css *.js *.manifest *.sass
+prune base_project _generated_media_names.py
prune base_project/_generated_media
-prune base_project/_generated_media_names.py
diff --git a/README.rst b/README.rst
index ab8a06b..1d0b09b 100644
--- a/README.rst
+++ b/README.rst
@@ -1,27 +1,27 @@
-Improve your user experience with amazingly fast page load times by
-combining, compressing, and versioning your JavaScript & CSS files and
-images. Eliminate unnecessary HTTP requests and maximize cache usage for
-instant page loads with django-mediagenerator_, the most complete
-asset manager for Django.
+Improve your user experience with amazingly fast page loads by combining,
+compressing, and versioning your JavaScript & CSS files and images.
+Eliminate unnecessary HTTP requests and maximize cache usage with
+the django-mediagenerator_ asset manager.
Supports App Engine, Sass_, HTML5 offline manifests, Jinja2_,
Python/pyjs_, CoffeeScript_, and much more. Visit the
`project site`_ for more information.
-Most important changes in version 1.9.1 and 1.9
+Most important changes in version 1.9 - 1.9.2
=============================================================
* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
* In cache manifests the ``NETWORK`` section now contains "``*``" by default
* Fixed relative imports in Sass filter
* Fixed i18n filter in development mode. Contributed by Simon Payne.
+* Added missing ``base.manifest`` to zip package
See `CHANGELOG.rst`_ for the complete changelog.
.. _django-mediagenerator: http://www.allbuttonspressed.com/projects/django-mediagenerator
.. _project site: django-mediagenerator_
.. _Sass: http://sass-lang.com/
.. _pyjs: http://pyjs.org/
.. _CoffeeScript: http://coffeescript.org/
.. _Jinja2: http://jinja.pocoo.org/
.. _CHANGELOG.rst: https://bitbucket.org/wkornewald/django-mediagenerator/src/tip/CHANGELOG.rst
diff --git a/setup.py b/setup.py
index d328769..6b23460 100644
--- a/setup.py
+++ b/setup.py
@@ -1,32 +1,32 @@
from setuptools import setup, find_packages
DESCRIPTION = 'Asset manager for Django: Boost your website'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except:
pass
setup(name='django-mediagenerator',
- version='1.9.1',
+ version='1.9.2',
packages=find_packages(exclude=('tests', 'tests.*',
'base_project', 'base_project.*')),
package_data={'mediagenerator.filters': ['pyjslibs/*.py']},
author='Waldemar Kornewald',
author_email='[email protected]',
url='http://www.allbuttonspressed.com/projects/django-mediagenerator',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
],
)
|
adieu/django-mediagenerator | b0741f084651304a4f33a6aa82dd203ae615749a | let's not overdo it :) | diff --git a/setup.py b/setup.py
index 4163a60..d328769 100644
--- a/setup.py
+++ b/setup.py
@@ -1,32 +1,32 @@
from setuptools import setup, find_packages
-DESCRIPTION = 'Asset manager for Django: Boost your website!'
+DESCRIPTION = 'Asset manager for Django: Boost your website'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except:
pass
setup(name='django-mediagenerator',
version='1.9.1',
packages=find_packages(exclude=('tests', 'tests.*',
'base_project', 'base_project.*')),
package_data={'mediagenerator.filters': ['pyjslibs/*.py']},
author='Waldemar Kornewald',
author_email='[email protected]',
url='http://www.allbuttonspressed.com/projects/django-mediagenerator',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
],
)
|
adieu/django-mediagenerator | b34b6911b0803dbc95795ca283ab4050db2e15b4 | improved bla bla :) | diff --git a/README.rst b/README.rst
index 91f8745..ab8a06b 100644
--- a/README.rst
+++ b/README.rst
@@ -1,35 +1,27 @@
-django-mediagenerator_ is an asset manager for Django.
-With django-mediagenerator you can combine and compress your JS
-and CSS files. All files (including images) are versioned, so they
-can be efficiently cached with far-future expires.
+Improve your user experience with amazingly fast page load times by
+combining, compressing, and versioning your JavaScript & CSS files and
+images. Eliminate unnecessary HTTP requests and maximize cache usage for
+instant page loads with django-mediagenerator_, the most complete
+asset manager for Django.
-The media generator works in sandboxed environments like App Engine.
-It supports Sass_, HTML5 offline manifests, Jinja2,
-Python (via pyjs_/Pyjamas), PyvaScript_, and much more. Visit the
+Supports App Engine, Sass_, HTML5 offline manifests, Jinja2_,
+Python/pyjs_, CoffeeScript_, and much more. Visit the
`project site`_ for more information.
-What's new in version 1.9.1
-=============================================================
-
-* Fixed relative imports in Sass filter
-
-What's new in version 1.9
+Most important changes in version 1.9.1 and 1.9
=============================================================
* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
-* Added caching for CoffeeScript compilation results
* In cache manifests the ``NETWORK`` section now contains "``*``" by default
-* By default ``.woff`` files are now copied, too
-* Fixed first-time media generation when ``MEDIA_DEV_MODE=False``
+* Fixed relative imports in Sass filter
* Fixed i18n filter in development mode. Contributed by Simon Payne.
-* Fixed support for "/" in bundle names in dev mode (always worked fine in production)
-* Changed ``DEV_MEDIA_URL`` fallback from ``STATICFILES_URL`` to ``STATIC_URL`` (has been changed in Django trunk)
See `CHANGELOG.rst`_ for the complete changelog.
.. _django-mediagenerator: http://www.allbuttonspressed.com/projects/django-mediagenerator
.. _project site: django-mediagenerator_
.. _Sass: http://sass-lang.com/
.. _pyjs: http://pyjs.org/
-.. _PyvaScript: http://www.allbuttonspressed.com/projects/pyvascript
+.. _CoffeeScript: http://coffeescript.org/
+.. _Jinja2: http://jinja.pocoo.org/
.. _CHANGELOG.rst: https://bitbucket.org/wkornewald/django-mediagenerator/src/tip/CHANGELOG.rst
diff --git a/setup.py b/setup.py
index 2e91425..4163a60 100644
--- a/setup.py
+++ b/setup.py
@@ -1,32 +1,32 @@
from setuptools import setup, find_packages
-DESCRIPTION = 'Total asset management for Django. Combine and compress your JavaScript and CSS.'
+DESCRIPTION = 'Asset manager for Django: Boost your website!'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except:
pass
setup(name='django-mediagenerator',
version='1.9.1',
packages=find_packages(exclude=('tests', 'tests.*',
'base_project', 'base_project.*')),
package_data={'mediagenerator.filters': ['pyjslibs/*.py']},
author='Waldemar Kornewald',
author_email='[email protected]',
url='http://www.allbuttonspressed.com/projects/django-mediagenerator',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
],
)
|
adieu/django-mediagenerator | bbed2c337fda43e0fbfdde7d25689ba702a7db63 | fixed docstring | diff --git a/mediagenerator/generators/bundles/base.py b/mediagenerator/generators/bundles/base.py
index c026a9d..fea56f5 100644
--- a/mediagenerator/generators/bundles/base.py
+++ b/mediagenerator/generators/bundles/base.py
@@ -1,195 +1,194 @@
from .settings import DEFAULT_MEDIA_FILTERS
from hashlib import sha1
from mediagenerator.utils import load_backend, find_file
import os
class Filter(object):
takes_input = True
def __init__(self, **kwargs):
self.file_filter = FileFilter
self.config(kwargs, filetype=None, filter=None,
bundle=None, _from_default=None)
# We assume that if this is e.g. a 'js' backend then all input must
# also be 'js'. Subclasses must override this if they expect a special
# input file type. Also, subclasses have to check if their file type
# is supported.
self.input_filetype = self.filetype
if self.takes_input:
self.config(kwargs, input=())
if not isinstance(self.input, (tuple, list)):
self.input = (self.input,)
self._input_filters = None
assert not kwargs, 'Unknown parameters: %s' % ', '.join(kwargs.keys())
@classmethod
def from_default(cls, name):
return {'input': name}
def should_use_default_filter(self, ext):
return ext != self._from_default
def get_variations(self):
"""
Returns all possible variations that get generated by this filter.
The result must be a dict whose values are tuples.
"""
return {}
def get_output(self, variation):
"""
- Yields file-like objects with content for each output item for the
- given variation.
+ Yields content for each output item for the given variation.
"""
raise NotImplementedError()
def get_dev_output(self, name, variation):
"""
Returns content for the given file name and variation in development mode.
"""
index, child = name.split('/', 1)
index = int(index)
filter = self.get_input_filters()[index]
return filter.get_dev_output(child, variation)
def get_dev_output_names(self, variation):
"""
Yields file names for the given variation in development mode.
"""
# By default we simply return our input filters' file names
for index, filter in enumerate(self.get_input_filters()):
for name, hash in filter.get_dev_output_names(variation):
yield '%d/%s' % (index, name), hash
def get_input(self, variation):
"""Yields contents for each input item."""
for filter in self.get_input_filters():
for input in filter.get_output(variation):
yield input
def get_input_filters(self):
"""Returns a Filter instance for each input item."""
if not self.takes_input:
raise ValueError("The %s media filter doesn't take any input" %
self.__class__.__name__)
if self._input_filters is not None:
return self._input_filters
self._input_filters = []
for input in self.input:
if isinstance(input, dict):
filter = self.get_filter(input)
else:
filter = self.get_item(input)
self._input_filters.append(filter)
return self._input_filters
def get_filter(self, config):
backend_class = load_backend(config.get('filter'))
return backend_class(filetype=self.input_filetype, bundle=self.bundle,
**config)
def get_item(self, name):
ext = os.path.splitext(name)[1].lstrip('.')
if ext in DEFAULT_MEDIA_FILTERS and self.should_use_default_filter(ext):
backend_class = load_backend(DEFAULT_MEDIA_FILTERS[ext])
else:
backend_class = self.file_filter
config = backend_class.from_default(name)
config.setdefault('filter',
'%s.%s' % (backend_class.__module__, backend_class.__name__))
config.setdefault('filetype', self.input_filetype)
config['bundle'] = self.bundle
# This is added to make really sure we don't instantiate the same
# filter in an endless loop. Normally, the child class should
# take care of this in should_use_default_filter().
config.setdefault('_from_default', ext)
return backend_class(**config)
def _get_variations_with_input(self):
"""Utility function to get variations including input variations"""
variations = self.get_variations()
if not self.takes_input:
return variations
for filter in self.get_input_filters():
subvariations = filter._get_variations_with_input()
for k, v in subvariations.items():
if k in variations and v != variations[k]:
raise ValueError('Conflicting variations for "%s": %r != %r' % (
k, v, variations[k]))
variations.update(subvariations)
return variations
def config(self, init, **defaults):
for key in defaults:
setattr(self, key, init.pop(key, defaults[key]))
class FileFilter(Filter):
"""A filter that just returns the given file."""
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, name=None)
self.mtime = self.hash = None
super(FileFilter, self).__init__(**kwargs)
@classmethod
def from_default(cls, name):
return {'name': name}
def get_output(self, variation):
yield self.get_dev_output(self.name, variation)
def get_dev_output(self, name, variation):
assert name == self.name, (
'''File name "%s" doesn't match the one in GENERATE_MEDIA ("%s")'''
% (name, self.name))
path = self._get_path()
fp = open(path, 'r')
output = fp.read()
fp.close()
return output
def get_dev_output_names(self, variation):
path = self._get_path()
mtime = os.path.getmtime(path)
if mtime != self.mtime:
output = self.get_dev_output(self.name, variation)
hash = sha1(output).hexdigest()
else:
hash = self.hash
yield self.name, hash
def _get_path(self):
path = find_file(self.name)
assert path, """File name "%s" doesn't exist.""" % self.name
return path
class RawFileFilter(FileFilter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, path=None)
super(RawFileFilter, self).__init__(**kwargs)
def get_dev_output(self, name, variation):
assert name == self.name, (
'''File name "%s" doesn't match the one in GENERATE_MEDIA ("%s")'''
% (name, self.name))
fp = open(self.path, 'r')
output = fp.read()
fp.close()
return output
def get_dev_output_names(self, variation):
mtime = os.path.getmtime(self.path)
if mtime != self.mtime:
output = self.get_dev_output(self.name, variation)
hash = sha1(output).hexdigest()
else:
hash = self.hash
yield self.name, hash
|
adieu/django-mediagenerator | 19915dd3104339047319f858244f2c603b3958f3 | bumped version | diff --git a/setup.py b/setup.py
index 1a4b2b8..2e91425 100644
--- a/setup.py
+++ b/setup.py
@@ -1,32 +1,32 @@
from setuptools import setup, find_packages
DESCRIPTION = 'Total asset management for Django. Combine and compress your JavaScript and CSS.'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except:
pass
setup(name='django-mediagenerator',
- version='1.9',
+ version='1.9.1',
packages=find_packages(exclude=('tests', 'tests.*',
'base_project', 'base_project.*')),
package_data={'mediagenerator.filters': ['pyjslibs/*.py']},
author='Waldemar Kornewald',
author_email='[email protected]',
url='http://www.allbuttonspressed.com/projects/django-mediagenerator',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
],
)
|
adieu/django-mediagenerator | 26eb0f06501966b726c1a8a9b9c24b78ed7ce105 | fixed relative imports in Sass filter | diff --git a/mediagenerator/filters/sass.py b/mediagenerator/filters/sass.py
index cc81640..064acea 100644
--- a/mediagenerator/filters/sass.py
+++ b/mediagenerator/filters/sass.py
@@ -1,131 +1,138 @@
from django.conf import settings
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import get_media_dirs, find_file
from subprocess import Popen, PIPE
import os
+import posixpath
import re
import sys
# Emits extra debug info that can be used by the FireSass Firebug plugin
SASS_DEBUG_INFO = getattr(settings, 'SASS_DEBUG_INFO', False)
_RE_FLAGS = re.MULTILINE | re.UNICODE
multi_line_comment_re = re.compile(r'/\*.*?\*/', _RE_FLAGS | re.DOTALL)
one_line_comment_re = re.compile(r'//.*', _RE_FLAGS)
import_re = re.compile(r'^@import\s+["\']?(.+?)["\']?\s*;?\s*$', _RE_FLAGS)
class Sass(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, path=(), main_module=None)
if isinstance(self.path, basestring):
self.path = (self.path,)
super(Sass, self).__init__(**kwargs)
assert self.filetype == 'css', (
'Sass only supports compilation to css. '
'The parent filter expects "%s".' % self.filetype)
assert self.main_module, \
'You must provide a main module'
self.path += tuple(get_media_dirs())
self.path_args = []
for path in self.path:
self.path_args.extend(('-I', path))
self._compiled = None
self._compiled_hash = None
self._dependencies = {}
@classmethod
def from_default(cls, name):
return {'main_module': name}
def get_output(self, variation):
self._regenerate(debug=False)
yield self._compiled
def get_dev_output(self, name, variation):
assert name == self.main_module
self._regenerate(debug=True)
return self._compiled
def get_dev_output_names(self, variation):
self._regenerate(debug=True)
yield self.main_module, self._compiled_hash
def _compile(self, debug=False):
run = ['sass', '-C', '-t', 'expanded']
if debug:
run.append('--line-numbers')
if SASS_DEBUG_INFO:
run.append('--debug-info')
run.extend(self.path_args)
shell = sys.platform == 'win32'
cmd = Popen(run, shell=shell, universal_newlines=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, error = cmd.communicate('@import %s' % self.main_module)
assert cmd.wait() == 0, ('Sass command returned bad result (did you '
'install Sass? http://sass-lang.com):\n%s'
% error)
return output
def _regenerate(self, debug=False):
if self._dependencies:
for name, mtime in self._dependencies.items():
path = self._find_file(name)
if not path or os.path.getmtime(path) != mtime:
# Just recompile everything
self._dependencies = {}
break
else:
# No changes
return
modules = [self.main_module]
while True:
if not modules:
break
module_name = modules.pop()
path = self._find_file(module_name)
assert path, 'Could not find the Sass module %s' % module_name
mtime = os.path.getmtime(path)
self._dependencies[module_name] = mtime
fp = open(path, 'r')
source = fp.read()
fp.close()
dependencies = self._get_dependencies(source)
for name in dependencies:
- path = self._find_file(name)
+ # Try relative import, first
+ transformed = posixpath.join(posixpath.dirname(module_name), name)
+ path = self._find_file(transformed)
+ if path:
+ name = transformed
+ else:
+ path = self._find_file(name)
assert path, ('The Sass module %s could not find the '
'dependency %s' % (module_name, name))
if name not in self._dependencies:
modules.append(name)
self._compiled = self._compile(debug=debug)
self._compiled_hash = sha1(self._compiled).hexdigest()
def _get_dependencies(self, source):
clean_source = multi_line_comment_re.sub('\n', source)
clean_source = one_line_comment_re.sub('', clean_source)
return [name for name in import_re.findall(clean_source)
if not name.endswith('.css')]
def _find_file(self, name):
parts = name.rsplit('/', 1)
parts[-1] = '_' + parts[-1]
partial = '/'.join(parts)
if not name.endswith(('.sass', '.scss')):
names = (name + '.sass', name + '.scss', partial + '.sass',
partial + '.scss')
else:
names = (name, partial)
for name in names:
path = find_file(name, media_dirs=self.path)
if path:
return path
|
adieu/django-mediagenerator | 37ed6448bc5f444027cd9019d4d8c6e33efcc271 | marked as release 1.9 | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 1edeb6e..2f25894 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,111 +1,111 @@
Changelog
=============================================================
-Version 1.8.1 (in development)
+Version 1.9
-------------------------------------------------------------
* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
* Added caching for CoffeeScript compilation results
* In cache manifests the ``NETWORK`` section now contains "``*``" by default
* By default ``.woff`` files are now copied, too
* Fixed first-time media generation when ``MEDIA_DEV_MODE=False``
* Fixed i18n filter in development mode. Contributed by Simon Payne.
* Fixed support for "/" in bundle names in dev mode (always worked fine in production)
* Changed ``DEV_MEDIA_URL`` fallback from ``STATICFILES_URL`` to ``STATIC_URL`` (has been changed in Django trunk)
Version 1.8
-------------------------------------------------------------
* HTML5 manifest now uses a regex to match included/excluded files
* Added support for scss files
* Fixed Sass ``@import`` tracking for partials
Version 1.7
-------------------------------------------------------------
* Large performance improvements, in particular on App Engine dev_appserver
Version 1.6.1
-------------------------------------------------------------
* Fixed support for Django 1.1 which imports ``mediagenerator.templatetags.media`` as ``django.templatetags.media`` and thus breaks relative imports
Version 1.6
-------------------------------------------------------------
**Upgrade notes:** The installation got simplified. Please remove the media code from your urls.py. The ``MediaMiddleware`` now takes care of everything.
* Added support for CSS data URIs. Doesn't yet generate MHTML for IE6/7 support.
* Added support for pre-bundling i18n JavaScript translations, so you don't need to use Django's slower AJAX view. With this filter translations are part of your generated JS bundle.
* Added support for CleverCSS
* Simplified installation process. The media view got completely replaced by ``MediaMiddleware``.
* Fixed support for output variations (needed by i18n filter to generate the same JS file in different variations for each language)
Version 1.5.1
-------------------------------------------------------------
**Upgrade notes:** There's a conflict with ``STATICFILES_URL`` in Django trunk (1.3). Use ``DEV_MEDIA_URL`` instead from now on.
* ``DEV_MEDIA_URL`` should be used instead of ``MEDIA_URL`` and ``STATICFILES_URL``, though the other two are still valid for backwards-compatibility
Version 1.5
-------------------------------------------------------------
This is another staticfiles-compatibility release which is intended to allow for writing reusable open-source apps.
**Upgrade notes:** The CSS URL rewriting scheme has changed. Previously, ``url()`` statements in CSS files were treated similar to "absolute" URLs where the root is ``STATICFILES_URL`` (or ``MEDIA_URL``). This scheme was used because it was consistent with URLs in Sass. Now URLs are treated as relative to the CSS file. So, if the file ``css/style.css`` wants to link to ``img/icon.png`` the URL now has to be ``url(../img/icon.png)``. Previously it was ``url(img/icon.png)``. One way to upgrade to the staticfiles-compatible scheme is to modify your existing URLs.
If you don't want to change your CSS files there is an alternative, but it's not staticfiles-compatible. Add the following to your settings: ``REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = False``
**Important:** Sass files still use the old scheme (``url(img/icon.png)``) because this is **much** easier to understand and allows for more reusable code, especially when you ``@import`` other Sass modules and those link to images.
* Made CSS URL rewriting system compatible with ``django.contrib.staticfiles``
* Added support for CSS URLs that contain a hash (e.g.: ``url('webfont.svg#webfontmAfNlbV6')``). Thanks to Karl Bowden for the patch!
* Filter backends now have an additional ``self.bundle`` attribute which contains the final bundle name
* Fixed an incompatibility with Django 1.1 and 1.0 (``django.utils.itercompat.product`` isn't available in those releases)
* Fixed ``MediaMiddleware``, so it doesn't cache error responses
Version 1.4
-------------------------------------------------------------
This is a compatibility release which prepares for the new staticfiles feature in Django 1.3.
**Upgrade notes:** Place your app media in a "static" folder instead of a "media" folder. Use ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) instead of ``MEDIA_URL`` from now on.
* App media is now searched in "static" folders instead of "media". For now, you can still use "media" folders, but this might be deprecated in the future (for the sake of having just one standard for reusable apps).
* ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) should be used instead of ``MEDIA_URL`` because the meaning of that variable has changed in Django 1.3.
* ``DEV_MEDIA_URL`` falls back to ``STATICFILES_URL`` and ``GLOBAL_MEDIA_DIRS`` falls back to ``STATICFILES_DIRS`` if undefined (you should still use the former, respectively; this is just for convenience)
Version 1.3.1
-------------------------------------------------------------
* Improved handling of media variations. This also fixes a bug with using CSS media types in production mode
Version 1.3
-------------------------------------------------------------
* Added support for setting media type for CSS. E.g.: ``{% include_media 'bundle.css' media='print' %}``
Version 1.2.1
-------------------------------------------------------------
* Fixed caching problems on runserver when using i18n and ``LocaleMiddleware``
Version 1.2
-------------------------------------------------------------
**Upgrade notes:** Please add ``'mediagenerator.middleware.MediaMiddleware'`` as the **first** middleware in your settings.py.
* Got rid of unnecessary HTTP roundtrips when ``USE_ETAGS = True``
* Added Django template filter (by default only used for .html files), contributed by Matt Bierner
* Added media_url() filter which provides access to generated URLs from JS
* CopyFiles backend can now ignore files matching certain regex patterns
Version 1.1
-------------------------------------------------------------
* Added Closure compiler backend
* Added HTML5 cache manifest file backend
* Fixed Sass support on Linux
* Updated pyjs filter to latest pyjs repo version
* "swf" and "ico" files are now copied, too, by default
diff --git a/README.rst b/README.rst
index 121d2e0..732393c 100644
--- a/README.rst
+++ b/README.rst
@@ -1,24 +1,30 @@
django-mediagenerator_ is an asset manager for Django.
With django-mediagenerator you can combine and compress your JS
and CSS files. All files (including images) are versioned, so they
can be efficiently cached with far-future expires.
The media generator works in sandboxed environments like App Engine.
It supports Sass_, HTML5 offline manifests, Jinja2,
Python (via pyjs_/Pyjamas), PyvaScript_, and much more. Visit the
`project site`_ for more information.
-What's new in version 1.8
+What's new in version 1.9
=============================================================
-* HTML5 manifest now uses a regex to match included/excluded files
-* Added support for scss files
-* Fixed Sass ``@import`` tracking for partials
+* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
+* Added caching for CoffeeScript compilation results
+* In cache manifests the ``NETWORK`` section now contains "``*``" by default
+* By default ``.woff`` files are now copied, too
+* Fixed first-time media generation when ``MEDIA_DEV_MODE=False``
+* Fixed i18n filter in development mode. Contributed by Simon Payne.
+* Fixed support for "/" in bundle names in dev mode (always worked fine in production)
+* Changed ``DEV_MEDIA_URL`` fallback from ``STATICFILES_URL`` to ``STATIC_URL`` (has been changed in Django trunk)
-See the ``CHANGELOG.rst`` file for the complete changelog.
+See `CHANGELOG.rst`_ for the complete changelog.
.. _django-mediagenerator: http://www.allbuttonspressed.com/projects/django-mediagenerator
.. _project site: django-mediagenerator_
.. _Sass: http://sass-lang.com/
.. _pyjs: http://pyjs.org/
.. _PyvaScript: http://www.allbuttonspressed.com/projects/pyvascript
+.. _CHANGELOG.rst: https://bitbucket.org/wkornewald/django-mediagenerator/src/tip/CHANGELOG.rst
diff --git a/setup.py b/setup.py
index b0fa072..1a4b2b8 100644
--- a/setup.py
+++ b/setup.py
@@ -1,32 +1,32 @@
from setuptools import setup, find_packages
DESCRIPTION = 'Total asset management for Django. Combine and compress your JavaScript and CSS.'
LONG_DESCRIPTION = None
try:
LONG_DESCRIPTION = open('README.rst').read()
except:
pass
setup(name='django-mediagenerator',
- version='1.8',
+ version='1.9',
packages=find_packages(exclude=('tests', 'tests.*',
'base_project', 'base_project.*')),
package_data={'mediagenerator.filters': ['pyjslibs/*.py']},
author='Waldemar Kornewald',
author_email='[email protected]',
url='http://www.allbuttonspressed.com/projects/django-mediagenerator',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
platforms=['any'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
],
)
|
adieu/django-mediagenerator | 7fe0f822d25fa035405fcba6023d998bd26b17a6 | fixed remaining bugs | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index bb42aa3..1edeb6e 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,110 +1,111 @@
Changelog
=============================================================
Version 1.8.1 (in development)
-------------------------------------------------------------
* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
+* Added caching for CoffeeScript compilation results
* In cache manifests the ``NETWORK`` section now contains "``*``" by default
* By default ``.woff`` files are now copied, too
* Fixed first-time media generation when ``MEDIA_DEV_MODE=False``
* Fixed i18n filter in development mode. Contributed by Simon Payne.
* Fixed support for "/" in bundle names in dev mode (always worked fine in production)
* Changed ``DEV_MEDIA_URL`` fallback from ``STATICFILES_URL`` to ``STATIC_URL`` (has been changed in Django trunk)
Version 1.8
-------------------------------------------------------------
* HTML5 manifest now uses a regex to match included/excluded files
* Added support for scss files
* Fixed Sass ``@import`` tracking for partials
Version 1.7
-------------------------------------------------------------
* Large performance improvements, in particular on App Engine dev_appserver
Version 1.6.1
-------------------------------------------------------------
* Fixed support for Django 1.1 which imports ``mediagenerator.templatetags.media`` as ``django.templatetags.media`` and thus breaks relative imports
Version 1.6
-------------------------------------------------------------
**Upgrade notes:** The installation got simplified. Please remove the media code from your urls.py. The ``MediaMiddleware`` now takes care of everything.
* Added support for CSS data URIs. Doesn't yet generate MHTML for IE6/7 support.
* Added support for pre-bundling i18n JavaScript translations, so you don't need to use Django's slower AJAX view. With this filter translations are part of your generated JS bundle.
* Added support for CleverCSS
* Simplified installation process. The media view got completely replaced by ``MediaMiddleware``.
* Fixed support for output variations (needed by i18n filter to generate the same JS file in different variations for each language)
Version 1.5.1
-------------------------------------------------------------
**Upgrade notes:** There's a conflict with ``STATICFILES_URL`` in Django trunk (1.3). Use ``DEV_MEDIA_URL`` instead from now on.
* ``DEV_MEDIA_URL`` should be used instead of ``MEDIA_URL`` and ``STATICFILES_URL``, though the other two are still valid for backwards-compatibility
Version 1.5
-------------------------------------------------------------
This is another staticfiles-compatibility release which is intended to allow for writing reusable open-source apps.
**Upgrade notes:** The CSS URL rewriting scheme has changed. Previously, ``url()`` statements in CSS files were treated similar to "absolute" URLs where the root is ``STATICFILES_URL`` (or ``MEDIA_URL``). This scheme was used because it was consistent with URLs in Sass. Now URLs are treated as relative to the CSS file. So, if the file ``css/style.css`` wants to link to ``img/icon.png`` the URL now has to be ``url(../img/icon.png)``. Previously it was ``url(img/icon.png)``. One way to upgrade to the staticfiles-compatible scheme is to modify your existing URLs.
If you don't want to change your CSS files there is an alternative, but it's not staticfiles-compatible. Add the following to your settings: ``REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = False``
**Important:** Sass files still use the old scheme (``url(img/icon.png)``) because this is **much** easier to understand and allows for more reusable code, especially when you ``@import`` other Sass modules and those link to images.
* Made CSS URL rewriting system compatible with ``django.contrib.staticfiles``
* Added support for CSS URLs that contain a hash (e.g.: ``url('webfont.svg#webfontmAfNlbV6')``). Thanks to Karl Bowden for the patch!
* Filter backends now have an additional ``self.bundle`` attribute which contains the final bundle name
* Fixed an incompatibility with Django 1.1 and 1.0 (``django.utils.itercompat.product`` isn't available in those releases)
* Fixed ``MediaMiddleware``, so it doesn't cache error responses
Version 1.4
-------------------------------------------------------------
This is a compatibility release which prepares for the new staticfiles feature in Django 1.3.
**Upgrade notes:** Place your app media in a "static" folder instead of a "media" folder. Use ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) instead of ``MEDIA_URL`` from now on.
* App media is now searched in "static" folders instead of "media". For now, you can still use "media" folders, but this might be deprecated in the future (for the sake of having just one standard for reusable apps).
* ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) should be used instead of ``MEDIA_URL`` because the meaning of that variable has changed in Django 1.3.
* ``DEV_MEDIA_URL`` falls back to ``STATICFILES_URL`` and ``GLOBAL_MEDIA_DIRS`` falls back to ``STATICFILES_DIRS`` if undefined (you should still use the former, respectively; this is just for convenience)
Version 1.3.1
-------------------------------------------------------------
* Improved handling of media variations. This also fixes a bug with using CSS media types in production mode
Version 1.3
-------------------------------------------------------------
* Added support for setting media type for CSS. E.g.: ``{% include_media 'bundle.css' media='print' %}``
Version 1.2.1
-------------------------------------------------------------
* Fixed caching problems on runserver when using i18n and ``LocaleMiddleware``
Version 1.2
-------------------------------------------------------------
**Upgrade notes:** Please add ``'mediagenerator.middleware.MediaMiddleware'`` as the **first** middleware in your settings.py.
* Got rid of unnecessary HTTP roundtrips when ``USE_ETAGS = True``
* Added Django template filter (by default only used for .html files), contributed by Matt Bierner
* Added media_url() filter which provides access to generated URLs from JS
* CopyFiles backend can now ignore files matching certain regex patterns
Version 1.1
-------------------------------------------------------------
* Added Closure compiler backend
* Added HTML5 cache manifest file backend
* Fixed Sass support on Linux
* Updated pyjs filter to latest pyjs repo version
* "swf" and "ico" files are now copied, too, by default
diff --git a/mediagenerator/filters/coffeescript.py b/mediagenerator/filters/coffeescript.py
index f80c9f2..939c674 100644
--- a/mediagenerator/filters/coffeescript.py
+++ b/mediagenerator/filters/coffeescript.py
@@ -1,67 +1,67 @@
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import find_file
from subprocess import Popen, PIPE
import os
class CoffeeScript(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, module=None)
super(CoffeeScript, self).__init__(**kwargs)
assert self.filetype == 'js', (
'CoffeeScript only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
self._compiled = None
self._compiled_hash = None
self._mtime = None
@classmethod
def from_default(cls, name):
return {'module': name}
def get_output(self, variation):
self._regenerate(debug=False)
yield self._compiled
def get_dev_output(self, name, variation):
- assert name == self.main_module
+ assert name == self.module
self._regenerate(debug=True)
return self._compiled
def get_dev_output_names(self, variation):
self._regenerate(debug=True)
yield self.module, self._compiled_hash
- def _regenerate(debug=False):
- path = find_file(path)
+ def _regenerate(self, debug=False):
+ path = find_file(self.module)
mtime = os.path.getmtime(path)
if mtime == self._mtime:
return
fp = open(path, 'r')
source = fp.read()
fp.close()
self._compiled = self._compile(source, debug=debug)
self._compiled_hash = sha1(self._compiled).hexdigest()
self._mtime = mtime
def _compile(self, input, debug=False):
try:
# coffee
# -s = Read from stdin for the source
# -c = Compile
# -p = print the compiled output to stdout
cmd = Popen(['coffee', '-c', '-p', '-s', '--no-wrap'],
stdin=PIPE, stdout=PIPE, stderr=PIPE,
shell=True, universal_newlines=True)
output, error = cmd.communicate(input)
assert cmd.wait() == 0, ('CoffeeScript command returned bad '
'result:\n%s' % error)
return output
except Exception, e:
raise ValueError("Failed to run CoffeeScript compiler for this "
"file. Please confirm that the \"coffee\" application is "
"on your path and that you can run it from your own command "
"line.\n"
"Error was: %s" % e)
|
adieu/django-mediagenerator | 7494c3af23044e9379f840a2a7f9c70b4e0298ff | made coffeescript filter work on Windows (when using a .bat file) | diff --git a/mediagenerator/filters/coffeescript.py b/mediagenerator/filters/coffeescript.py
index d75be1d..f80c9f2 100644
--- a/mediagenerator/filters/coffeescript.py
+++ b/mediagenerator/filters/coffeescript.py
@@ -1,67 +1,67 @@
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import find_file
from subprocess import Popen, PIPE
import os
class CoffeeScript(Filter):
takes_input = False
def __init__(self, **kwargs):
self.config(kwargs, module=None)
super(CoffeeScript, self).__init__(**kwargs)
assert self.filetype == 'js', (
'CoffeeScript only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
self._compiled = None
self._compiled_hash = None
self._mtime = None
@classmethod
def from_default(cls, name):
return {'module': name}
def get_output(self, variation):
self._regenerate(debug=False)
yield self._compiled
def get_dev_output(self, name, variation):
assert name == self.main_module
self._regenerate(debug=True)
return self._compiled
def get_dev_output_names(self, variation):
self._regenerate(debug=True)
yield self.module, self._compiled_hash
def _regenerate(debug=False):
path = find_file(path)
mtime = os.path.getmtime(path)
if mtime == self._mtime:
return
fp = open(path, 'r')
source = fp.read()
fp.close()
self._compiled = self._compile(source, debug=debug)
self._compiled_hash = sha1(self._compiled).hexdigest()
self._mtime = mtime
def _compile(self, input, debug=False):
try:
# coffee
# -s = Read from stdin for the source
# -c = Compile
# -p = print the compiled output to stdout
cmd = Popen(['coffee', '-c', '-p', '-s', '--no-wrap'],
stdin=PIPE, stdout=PIPE, stderr=PIPE,
- universal_newlines=True)
+ shell=True, universal_newlines=True)
output, error = cmd.communicate(input)
assert cmd.wait() == 0, ('CoffeeScript command returned bad '
'result:\n%s' % error)
return output
except Exception, e:
raise ValueError("Failed to run CoffeeScript compiler for this "
"file. Please confirm that the \"coffee\" application is "
"on your path and that you can run it from your own command "
"line.\n"
"Error was: %s" % e)
|
adieu/django-mediagenerator | 52a42a8b1d87883657a2b5a26ead48c498f3c6f6 | enable caching for CoffeeScript filter | diff --git a/mediagenerator/filters/coffeescript.py b/mediagenerator/filters/coffeescript.py
index 5e12e97..d75be1d 100644
--- a/mediagenerator/filters/coffeescript.py
+++ b/mediagenerator/filters/coffeescript.py
@@ -1,43 +1,67 @@
+from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
+from mediagenerator.utils import find_file
from subprocess import Popen, PIPE
+import os
class CoffeeScript(Filter):
+ takes_input = False
+
def __init__(self, **kwargs):
+ self.config(kwargs, module=None)
super(CoffeeScript, self).__init__(**kwargs)
assert self.filetype == 'js', (
'CoffeeScript only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
- self.input_filetype = 'coffee-script'
+ self._compiled = None
+ self._compiled_hash = None
+ self._mtime = None
+
+ @classmethod
+ def from_default(cls, name):
+ return {'module': name}
+
+ def get_output(self, variation):
+ self._regenerate(debug=False)
+ yield self._compiled
+
+ def get_dev_output(self, name, variation):
+ assert name == self.main_module
+ self._regenerate(debug=True)
+ return self._compiled
+
+ def get_dev_output_names(self, variation):
+ self._regenerate(debug=True)
+ yield self.module, self._compiled_hash
- def compile(self, input):
+ def _regenerate(debug=False):
+ path = find_file(path)
+ mtime = os.path.getmtime(path)
+ if mtime == self._mtime:
+ return
+ fp = open(path, 'r')
+ source = fp.read()
+ fp.close()
+ self._compiled = self._compile(source, debug=debug)
+ self._compiled_hash = sha1(self._compiled).hexdigest()
+ self._mtime = mtime
+
+ def _compile(self, input, debug=False):
try:
# coffee
# -s = Read from stdin for the source
# -c = Compile
# -p = print the compiled output to stdout
cmd = Popen(['coffee', '-c', '-p', '-s', '--no-wrap'],
stdin=PIPE, stdout=PIPE, stderr=PIPE,
universal_newlines=True)
output, error = cmd.communicate(input)
assert cmd.wait() == 0, ('CoffeeScript command returned bad '
'result:\n%s' % error)
return output
except Exception, e:
raise ValueError("Failed to run CoffeeScript compiler for this "
"file. Please confirm that the \"coffee\" application is "
"on your path and that you can run it from your own command "
"line.\n"
"Error was: %s" % e)
-
- def should_use_default_filter(self, ext):
- if ext == 'coffee':
- return False
- return super(CoffeeScript, self).should_use_default_filter(ext)
-
- def get_output(self, variation):
- for input in self.get_input(variation):
- yield self.compile(input)
-
- def get_dev_output(self, name, variation):
- content = super(CoffeeScript, self).get_dev_output(name, variation)
- return self.compile(content)
|
adieu/django-mediagenerator | 3bc8c76228bce37b433baeb7f6aaf389911f1060 | added changelog entry for bug fix | diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 30a4e8f..bb42aa3 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -1,109 +1,110 @@
Changelog
=============================================================
Version 1.8.1 (in development)
-------------------------------------------------------------
* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
* In cache manifests the ``NETWORK`` section now contains "``*``" by default
* By default ``.woff`` files are now copied, too
* Fixed first-time media generation when ``MEDIA_DEV_MODE=False``
* Fixed i18n filter in development mode. Contributed by Simon Payne.
+* Fixed support for "/" in bundle names in dev mode (always worked fine in production)
* Changed ``DEV_MEDIA_URL`` fallback from ``STATICFILES_URL`` to ``STATIC_URL`` (has been changed in Django trunk)
Version 1.8
-------------------------------------------------------------
* HTML5 manifest now uses a regex to match included/excluded files
* Added support for scss files
* Fixed Sass ``@import`` tracking for partials
Version 1.7
-------------------------------------------------------------
* Large performance improvements, in particular on App Engine dev_appserver
Version 1.6.1
-------------------------------------------------------------
* Fixed support for Django 1.1 which imports ``mediagenerator.templatetags.media`` as ``django.templatetags.media`` and thus breaks relative imports
Version 1.6
-------------------------------------------------------------
**Upgrade notes:** The installation got simplified. Please remove the media code from your urls.py. The ``MediaMiddleware`` now takes care of everything.
* Added support for CSS data URIs. Doesn't yet generate MHTML for IE6/7 support.
* Added support for pre-bundling i18n JavaScript translations, so you don't need to use Django's slower AJAX view. With this filter translations are part of your generated JS bundle.
* Added support for CleverCSS
* Simplified installation process. The media view got completely replaced by ``MediaMiddleware``.
* Fixed support for output variations (needed by i18n filter to generate the same JS file in different variations for each language)
Version 1.5.1
-------------------------------------------------------------
**Upgrade notes:** There's a conflict with ``STATICFILES_URL`` in Django trunk (1.3). Use ``DEV_MEDIA_URL`` instead from now on.
* ``DEV_MEDIA_URL`` should be used instead of ``MEDIA_URL`` and ``STATICFILES_URL``, though the other two are still valid for backwards-compatibility
Version 1.5
-------------------------------------------------------------
This is another staticfiles-compatibility release which is intended to allow for writing reusable open-source apps.
**Upgrade notes:** The CSS URL rewriting scheme has changed. Previously, ``url()`` statements in CSS files were treated similar to "absolute" URLs where the root is ``STATICFILES_URL`` (or ``MEDIA_URL``). This scheme was used because it was consistent with URLs in Sass. Now URLs are treated as relative to the CSS file. So, if the file ``css/style.css`` wants to link to ``img/icon.png`` the URL now has to be ``url(../img/icon.png)``. Previously it was ``url(img/icon.png)``. One way to upgrade to the staticfiles-compatible scheme is to modify your existing URLs.
If you don't want to change your CSS files there is an alternative, but it's not staticfiles-compatible. Add the following to your settings: ``REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = False``
**Important:** Sass files still use the old scheme (``url(img/icon.png)``) because this is **much** easier to understand and allows for more reusable code, especially when you ``@import`` other Sass modules and those link to images.
* Made CSS URL rewriting system compatible with ``django.contrib.staticfiles``
* Added support for CSS URLs that contain a hash (e.g.: ``url('webfont.svg#webfontmAfNlbV6')``). Thanks to Karl Bowden for the patch!
* Filter backends now have an additional ``self.bundle`` attribute which contains the final bundle name
* Fixed an incompatibility with Django 1.1 and 1.0 (``django.utils.itercompat.product`` isn't available in those releases)
* Fixed ``MediaMiddleware``, so it doesn't cache error responses
Version 1.4
-------------------------------------------------------------
This is a compatibility release which prepares for the new staticfiles feature in Django 1.3.
**Upgrade notes:** Place your app media in a "static" folder instead of a "media" folder. Use ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) instead of ``MEDIA_URL`` from now on.
* App media is now searched in "static" folders instead of "media". For now, you can still use "media" folders, but this might be deprecated in the future (for the sake of having just one standard for reusable apps).
* ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) should be used instead of ``MEDIA_URL`` because the meaning of that variable has changed in Django 1.3.
* ``DEV_MEDIA_URL`` falls back to ``STATICFILES_URL`` and ``GLOBAL_MEDIA_DIRS`` falls back to ``STATICFILES_DIRS`` if undefined (you should still use the former, respectively; this is just for convenience)
Version 1.3.1
-------------------------------------------------------------
* Improved handling of media variations. This also fixes a bug with using CSS media types in production mode
Version 1.3
-------------------------------------------------------------
* Added support for setting media type for CSS. E.g.: ``{% include_media 'bundle.css' media='print' %}``
Version 1.2.1
-------------------------------------------------------------
* Fixed caching problems on runserver when using i18n and ``LocaleMiddleware``
Version 1.2
-------------------------------------------------------------
**Upgrade notes:** Please add ``'mediagenerator.middleware.MediaMiddleware'`` as the **first** middleware in your settings.py.
* Got rid of unnecessary HTTP roundtrips when ``USE_ETAGS = True``
* Added Django template filter (by default only used for .html files), contributed by Matt Bierner
* Added media_url() filter which provides access to generated URLs from JS
* CopyFiles backend can now ignore files matching certain regex patterns
Version 1.1
-------------------------------------------------------------
* Added Closure compiler backend
* Added HTML5 cache manifest file backend
* Fixed Sass support on Linux
* Updated pyjs filter to latest pyjs repo version
* "swf" and "ico" files are now copied, too, by default
|
adieu/django-mediagenerator | d711d2df5d82c5b7fb76fde6e9f4d0e176b55643 | fixed bug which caused error if "/" was in bundle name. now "|" is used as a separator which is pretty unlikely to appear in file names. also use urlquote URLs | diff --git a/.hgeol b/.hgeol
new file mode 100644
index 0000000..b2d9e3b
--- /dev/null
+++ b/.hgeol
@@ -0,0 +1,15 @@
+[patterns]
+**.txt = native
+**.pyva = native
+**.py = native
+**.c = native
+**.cpp = native
+**.cu = native
+**.h = native
+**.hpp = native
+**.tmpl = native
+**.html = native
+**.htm = native
+**.js = native
+**.manifest = native
+**.yaml = native
diff --git a/.hgignore b/.hgignore
new file mode 100644
index 0000000..097bd67
--- /dev/null
+++ b/.hgignore
@@ -0,0 +1,18 @@
+syntax: glob
+build
+dist
+*.egg-info
+.project
+.pydevproject
+.settings
+*~
+*.orig
+*.pyc
+*.pyo
+*.swp
+*.tmp
+_generated_media*
+desktop.ini
+nbproject
+settings_local.py
+sqlite.db
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
new file mode 100644
index 0000000..30a4e8f
--- /dev/null
+++ b/CHANGELOG.rst
@@ -0,0 +1,109 @@
+Changelog
+=============================================================
+
+Version 1.8.1 (in development)
+-------------------------------------------------------------
+
+* Added CoffeeScript support (use ``.coffee`` extension). Contributed by Andrew Allen.
+* In cache manifests the ``NETWORK`` section now contains "``*``" by default
+* By default ``.woff`` files are now copied, too
+* Fixed first-time media generation when ``MEDIA_DEV_MODE=False``
+* Fixed i18n filter in development mode. Contributed by Simon Payne.
+* Changed ``DEV_MEDIA_URL`` fallback from ``STATICFILES_URL`` to ``STATIC_URL`` (has been changed in Django trunk)
+
+Version 1.8
+-------------------------------------------------------------
+
+* HTML5 manifest now uses a regex to match included/excluded files
+* Added support for scss files
+* Fixed Sass ``@import`` tracking for partials
+
+Version 1.7
+-------------------------------------------------------------
+
+* Large performance improvements, in particular on App Engine dev_appserver
+
+Version 1.6.1
+-------------------------------------------------------------
+
+* Fixed support for Django 1.1 which imports ``mediagenerator.templatetags.media`` as ``django.templatetags.media`` and thus breaks relative imports
+
+Version 1.6
+-------------------------------------------------------------
+
+**Upgrade notes:** The installation got simplified. Please remove the media code from your urls.py. The ``MediaMiddleware`` now takes care of everything.
+
+* Added support for CSS data URIs. Doesn't yet generate MHTML for IE6/7 support.
+* Added support for pre-bundling i18n JavaScript translations, so you don't need to use Django's slower AJAX view. With this filter translations are part of your generated JS bundle.
+* Added support for CleverCSS
+* Simplified installation process. The media view got completely replaced by ``MediaMiddleware``.
+* Fixed support for output variations (needed by i18n filter to generate the same JS file in different variations for each language)
+
+Version 1.5.1
+-------------------------------------------------------------
+
+**Upgrade notes:** There's a conflict with ``STATICFILES_URL`` in Django trunk (1.3). Use ``DEV_MEDIA_URL`` instead from now on.
+
+* ``DEV_MEDIA_URL`` should be used instead of ``MEDIA_URL`` and ``STATICFILES_URL``, though the other two are still valid for backwards-compatibility
+
+Version 1.5
+-------------------------------------------------------------
+
+This is another staticfiles-compatibility release which is intended to allow for writing reusable open-source apps.
+
+**Upgrade notes:** The CSS URL rewriting scheme has changed. Previously, ``url()`` statements in CSS files were treated similar to "absolute" URLs where the root is ``STATICFILES_URL`` (or ``MEDIA_URL``). This scheme was used because it was consistent with URLs in Sass. Now URLs are treated as relative to the CSS file. So, if the file ``css/style.css`` wants to link to ``img/icon.png`` the URL now has to be ``url(../img/icon.png)``. Previously it was ``url(img/icon.png)``. One way to upgrade to the staticfiles-compatible scheme is to modify your existing URLs.
+
+If you don't want to change your CSS files there is an alternative, but it's not staticfiles-compatible. Add the following to your settings: ``REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = False``
+
+**Important:** Sass files still use the old scheme (``url(img/icon.png)``) because this is **much** easier to understand and allows for more reusable code, especially when you ``@import`` other Sass modules and those link to images.
+
+* Made CSS URL rewriting system compatible with ``django.contrib.staticfiles``
+* Added support for CSS URLs that contain a hash (e.g.: ``url('webfont.svg#webfontmAfNlbV6')``). Thanks to Karl Bowden for the patch!
+* Filter backends now have an additional ``self.bundle`` attribute which contains the final bundle name
+* Fixed an incompatibility with Django 1.1 and 1.0 (``django.utils.itercompat.product`` isn't available in those releases)
+* Fixed ``MediaMiddleware``, so it doesn't cache error responses
+
+Version 1.4
+-------------------------------------------------------------
+
+This is a compatibility release which prepares for the new staticfiles feature in Django 1.3.
+
+**Upgrade notes:** Place your app media in a "static" folder instead of a "media" folder. Use ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) instead of ``MEDIA_URL`` from now on.
+
+* App media is now searched in "static" folders instead of "media". For now, you can still use "media" folders, but this might be deprecated in the future (for the sake of having just one standard for reusable apps).
+* ``DEV_MEDIA_URL`` (edit: was ``STATICFILES_URL``) should be used instead of ``MEDIA_URL`` because the meaning of that variable has changed in Django 1.3.
+* ``DEV_MEDIA_URL`` falls back to ``STATICFILES_URL`` and ``GLOBAL_MEDIA_DIRS`` falls back to ``STATICFILES_DIRS`` if undefined (you should still use the former, respectively; this is just for convenience)
+
+Version 1.3.1
+-------------------------------------------------------------
+
+* Improved handling of media variations. This also fixes a bug with using CSS media types in production mode
+
+Version 1.3
+-------------------------------------------------------------
+
+* Added support for setting media type for CSS. E.g.: ``{% include_media 'bundle.css' media='print' %}``
+
+Version 1.2.1
+-------------------------------------------------------------
+
+* Fixed caching problems on runserver when using i18n and ``LocaleMiddleware``
+
+Version 1.2
+-------------------------------------------------------------
+
+**Upgrade notes:** Please add ``'mediagenerator.middleware.MediaMiddleware'`` as the **first** middleware in your settings.py.
+
+* Got rid of unnecessary HTTP roundtrips when ``USE_ETAGS = True``
+* Added Django template filter (by default only used for .html files), contributed by Matt Bierner
+* Added media_url() filter which provides access to generated URLs from JS
+* CopyFiles backend can now ignore files matching certain regex patterns
+
+Version 1.1
+-------------------------------------------------------------
+
+* Added Closure compiler backend
+* Added HTML5 cache manifest file backend
+* Fixed Sass support on Linux
+* Updated pyjs filter to latest pyjs repo version
+* "swf" and "ico" files are now copied, too, by default
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..07bab4d
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) Waldemar Kornewald, Thomas Wanschik, and all contributors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of All Buttons Pressed nor
+ the names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..60d3980
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,6 @@
+include LICENSE
+include CHANGELOG.rst
+include README.rst
+recursive-include mediagenerator/filters/pyjslibs *.py
+prune base_project/_generated_media
+prune base_project/_generated_media_names.py
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..121d2e0
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,24 @@
+django-mediagenerator_ is an asset manager for Django.
+With django-mediagenerator you can combine and compress your JS
+and CSS files. All files (including images) are versioned, so they
+can be efficiently cached with far-future expires.
+
+The media generator works in sandboxed environments like App Engine.
+It supports Sass_, HTML5 offline manifests, Jinja2,
+Python (via pyjs_/Pyjamas), PyvaScript_, and much more. Visit the
+`project site`_ for more information.
+
+What's new in version 1.8
+=============================================================
+
+* HTML5 manifest now uses a regex to match included/excluded files
+* Added support for scss files
+* Fixed Sass ``@import`` tracking for partials
+
+See the ``CHANGELOG.rst`` file for the complete changelog.
+
+.. _django-mediagenerator: http://www.allbuttonspressed.com/projects/django-mediagenerator
+.. _project site: django-mediagenerator_
+.. _Sass: http://sass-lang.com/
+.. _pyjs: http://pyjs.org/
+.. _PyvaScript: http://www.allbuttonspressed.com/projects/pyvascript
diff --git a/base_project/__init__.py b/base_project/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/base_project/manage.py b/base_project/manage.py
new file mode 100644
index 0000000..5e78ea9
--- /dev/null
+++ b/base_project/manage.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+from django.core.management import execute_manager
+try:
+ import settings # Assumed to be in the same directory.
+except ImportError:
+ import sys
+ sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
+ sys.exit(1)
+
+if __name__ == "__main__":
+ execute_manager(settings)
diff --git a/base_project/settings.py b/base_project/settings.py
new file mode 100644
index 0000000..162764f
--- /dev/null
+++ b/base_project/settings.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+import os
+
+DEBUG = True
+TEMPLATE_DEBUG = DEBUG
+
+MEDIA_BUNDLES = (
+ ('main.css',
+ 'css/reset.css',
+ 'css/style.css',
+ 'css/icons/icon.css',
+ ),
+)
+
+# Get project root folder
+_project_root = os.path.dirname(__file__)
+
+# Set global media search paths
+GLOBAL_MEDIA_DIRS = (
+ os.path.join(_project_root, 'static'),
+)
+
+# Set media URL (important: don't forget the trailing slash!).
+# PRODUCTION_MEDIA_URL is used when running manage.py generatemedia
+MEDIA_DEV_MODE = DEBUG
+DEV_MEDIA_URL = '/devmedia/'
+PRODUCTION_MEDIA_URL = '/media/'
+
+# Configure yuicompressor if available
+YUICOMPRESSOR_PATH = os.path.join(
+ os.path.dirname(_project_root), 'yuicompressor.jar')
+if os.path.exists(YUICOMPRESSOR_PATH):
+ ROOT_MEDIA_FILTERS = {
+ 'js': 'mediagenerator.filters.yuicompressor.YUICompressor',
+ 'css': 'mediagenerator.filters.yuicompressor.YUICompressor',
+ }
+
+ADMIN_MEDIA_PREFIX = '/media/admin/'
+
+DATABASES = {
+ 'default': {
+ 'ENGINE': 'django.db.backends.sqlite3',
+ 'NAME': 'sqlite.db',
+ }
+}
+
+SECRET_KEY = '=r-$b*8hglm+858&9t043hlm6-&6-3d3vfc4((7yd0dbrakhvi'
+
+SITE_ID = 1
+
+INSTALLED_APPS = (
+ 'django.contrib.auth',
+ 'django.contrib.sessions',
+ 'django.contrib.contenttypes',
+ 'django.contrib.sites',
+ 'mediagenerator',
+)
+
+MIDDLEWARE_CLASSES = (
+ 'mediagenerator.middleware.MediaMiddleware',
+ 'django.middleware.common.CommonMiddleware',
+ 'django.contrib.sessions.middleware.SessionMiddleware',
+ 'django.contrib.auth.middleware.AuthenticationMiddleware',
+)
+
+TEMPLATE_CONTEXT_PROCESSORS = (
+ 'django.core.context_processors.auth',
+ 'django.core.context_processors.request',
+)
+
+USE_I18N = False
+
+MEDIA_ROOT = os.path.join(_project_root, 'media')
+
+TEMPLATE_DIRS = (os.path.join(_project_root, 'templates'),)
+
+ROOT_URLCONF = 'urls'
diff --git a/base_project/static/css/icons/icon.css b/base_project/static/css/icons/icon.css
new file mode 100644
index 0000000..89424ab
--- /dev/null
+++ b/base_project/static/css/icons/icon.css
@@ -0,0 +1,7 @@
+#django {
+ padding: 32px 0 0 70px;
+ display: inline-block;
+ background: url(../../img/django.png) bottom left no-repeat;
+}
+
+* #django { height: 0; } /* for IE7 */
diff --git a/base_project/static/css/reset.css b/base_project/static/css/reset.css
new file mode 100644
index 0000000..711a97b
--- /dev/null
+++ b/base_project/static/css/reset.css
@@ -0,0 +1,90 @@
+* {
+ margin: 0;
+ padding: 0;
+}
+
+body {
+ font-family: Verdana, Tahoma, Arial, sans-serif;
+ font-size: 12px;
+ line-height: 18px;
+}
+
+img {
+ border: 0;
+}
+
+h1, h2, h3, h4, h5, h6 {
+ margin: 0;
+}
+
+h1 {
+ font-size: 24px;
+ line-height: 36px;
+}
+
+h2 {
+ font-size: 18px;
+ line-height: 36px;
+}
+
+h3 {
+ font-size: 14px;
+}
+
+h4 {
+ font-size: 12px;
+}
+
+h5 {
+ font-size: 10px;
+}
+
+p, form table {
+ margin: 0 0 18px;
+}
+
+ul, ol {
+ margin: 0 0 18px 18px;
+}
+
+form table th {
+ vertical-align: top;
+ padding: 0 5px 0 0;
+ text-align: right;
+}
+
+form table td {
+ vertical-align: top;
+ padding: 0 0 7px;
+ text-align: left;
+}
+
+label {
+ white-space: nowrap;
+ font-weight: bold;
+}
+
+input[type="text"], input[type="password"] {
+ width: 170px;
+ font-size: 12px;
+}
+
+input, textarea {
+ padding: 2px 0 2px 2px;
+}
+
+input[type="submit"], button {
+ padding: 1px;
+}
+
+select {
+ padding: 2px;
+}
+
+span form {
+ display: inline;
+}
+
+blockquote {
+ padding: 0px 30px;
+}
diff --git a/base_project/static/css/style.css b/base_project/static/css/style.css
new file mode 100644
index 0000000..85c20ff
--- /dev/null
+++ b/base_project/static/css/style.css
@@ -0,0 +1,27 @@
+#header {
+ margin-bottom: 20px;
+}
+
+#footer {
+ margin-top: 20px;
+}
+
+.highlight {
+ margin-bottom: 18px;
+}
+
+#header, #footer {
+ font-size: 11px;
+}
+
+body {
+ color: #36393D;
+}
+
+a {
+ color: #1985B5;
+}
+
+a:hover {
+ text-decoration: underline;
+}
diff --git a/base_project/static/img/django.png b/base_project/static/img/django.png
new file mode 100644
index 0000000..aa247c7
Binary files /dev/null and b/base_project/static/img/django.png differ
diff --git a/base_project/templates/404.html b/base_project/templates/404.html
new file mode 100644
index 0000000..281fe9c
--- /dev/null
+++ b/base_project/templates/404.html
@@ -0,0 +1,6 @@
+{% extends 'base.html' %}
+{% block title %}Page not found{% endblock %}
+
+{% block content %}
+The page you requested could not be found.
+{% endblock %}
diff --git a/base_project/templates/500.html b/base_project/templates/500.html
new file mode 100644
index 0000000..ea97b71
--- /dev/null
+++ b/base_project/templates/500.html
@@ -0,0 +1,6 @@
+{% extends 'base.html' %}
+{% block title %}Server error{% endblock %}
+
+{% block content %}
+There was an error while handling your request.
+{% endblock %}
diff --git a/base_project/templates/base.html b/base_project/templates/base.html
new file mode 100644
index 0000000..2676d89
--- /dev/null
+++ b/base_project/templates/base.html
@@ -0,0 +1,71 @@
+<!DOCTYPE html
+ PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+{% load media %}
+<html xmlns="http://www.w3.org/1999/xhtml"
+ dir="{% if LANGUAGE_BIDI %}rtl{% else %}ltr{% endif %}"
+ xml:lang="{% firstof LANGUAGE_CODE 'en' %}"
+ lang="{% firstof LANGUAGE_CODE 'en' %}">
+ <head>
+ <title>{% ifnotequal request.path '/' %}{% block title %}{% endblock %} - {% endifnotequal %}Media generator sample project</title>
+ {% block css %}
+ {% include_media 'main.css' %}
+ {% endblock %}
+
+ {% block preload_js %}
+ {% endblock %}
+
+ {% block extra-head %}{% endblock %}
+ </head>
+
+ <body>
+ <div id="header">
+ <a id="logo" href="/">Media generator base project</a>
+ </div>
+
+ <div class="columns">
+ <div id="content" class="column">
+ {% block content-header %}
+ {% if error %}<div class="error">{{ error }}</div>{% endif %}
+ {% if info %}<div class="info">{{ info }}</div>{% endif %}
+ {% if messages %}
+ {% for message in messages %}
+ <div class="info">{{ message }}</div>
+ {% endfor %}
+ {% endif %}
+ {% endblock %}
+
+ {% block content %}{% endblock %}
+ </div>
+
+ <div id="sidebar" class="column">
+ {% block sidebar %}
+ {% endblock %}
+ </div>
+ </div>
+
+ <div id="footer">
+ <p>
+ Powered by <a href="http://www.allbuttonspressed.com/projects/django-mediagenerator">django-mediagenerator</a>
+ </p>
+ </div>
+
+ {% block js %}
+ {% endblock %}
+
+ {% if google_analytics_id %}
+ <script type="text/javascript">
+ /* <![CDATA[ */
+ var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+ document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+ /* ]]> */
+ </script>
+ <script type="text/javascript">
+ /* <![CDATA[ */try {
+ var pageTracker = _gat._getTracker("{{ google_analytics_id|escapejs }}");
+ pageTracker._trackPageview();
+ } catch(err) {}/* ]]> */</script>
+
+ {% endif %}
+ </body>
+</html>
diff --git a/base_project/templates/home.html b/base_project/templates/home.html
new file mode 100644
index 0000000..0eb52b9
--- /dev/null
+++ b/base_project/templates/home.html
@@ -0,0 +1,31 @@
+{% extends 'base.html' %}
+{% load media %}
+{% block title %}Home{% endblock %}
+
+{% block content %}
+<p>
+Welcome to the media generator base project. This project is primarily intended as a development server for client-side web developers and designers who want to automate the whole media management process:
+</p>
+
+<ul>
+ <li>combining media into a single file</li>
+ <li>compression (e.g. via yuicompressor or closure)</li>
+ <li>sprite generation</li>
+ <li>integrating <a href="http://sass-lang.com/">Sass</a> and other CSS compilers</li>
+ <li>integrating <a href="http://pyjs.org/">pyjs</a> and other JavaScript compilers</li>
+ <li>etc.</li>
+</ul>
+
+<h2>Example: Integrating an image</h2>
+<p>
+Let's see a two examples of how you would add an image to your website.
+</p>
+
+<p>
+This icon is integrated via the CSS background property: <span id="django"></span>
+</p>
+
+<p>
+This icon is integrated via the media_url template tag: <img src="{% media_url 'img/django.png' %}" alt="gtalk" />
+</p>
+{% endblock %}
diff --git a/base_project/urls.py b/base_project/urls.py
new file mode 100644
index 0000000..c5636d2
--- /dev/null
+++ b/base_project/urls.py
@@ -0,0 +1,5 @@
+from django.conf.urls.defaults import *
+
+urlpatterns = patterns('',
+ (r'^$', 'django.views.generic.simple.direct_to_template', {'template': 'home.html'}),
+)
diff --git a/mediagenerator/__init__.py b/mediagenerator/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mediagenerator/api.py b/mediagenerator/api.py
new file mode 100644
index 0000000..bf6d9ec
--- /dev/null
+++ b/mediagenerator/api.py
@@ -0,0 +1,42 @@
+from . import settings, utils
+from .settings import GENERATED_MEDIA_DIR, MEDIA_GENERATORS
+from .utils import load_backend
+from django.utils.http import urlquote
+import os
+import shutil
+
+def generate_media():
+ if os.path.exists(GENERATED_MEDIA_DIR):
+ shutil.rmtree(GENERATED_MEDIA_DIR)
+
+ # This will make media_url() generate production URLs
+ was_dev_mode = settings.MEDIA_DEV_MODE
+ settings.MEDIA_DEV_MODE = False
+
+ utils.NAMES = {}
+
+ for backend_name in MEDIA_GENERATORS:
+ backend = load_backend(backend_name)()
+ for key, url, content in backend.get_output():
+ version = backend.generate_version(key, url, content)
+ if version:
+ base, ext = os.path.splitext(url)
+ url = '%s-%s%s' % (base, version, ext)
+
+ path = os.path.join(GENERATED_MEDIA_DIR, url)
+ parent = os.path.dirname(path)
+ if not os.path.exists(parent):
+ os.makedirs(parent)
+
+ fp = open(path, 'wb')
+ fp.write(content)
+ fp.close()
+
+ utils.NAMES[key] = urlquote(url)
+
+ settings.MEDIA_DEV_MODE = was_dev_mode
+
+ # Generate a module with media file name mappings
+ fp = open('_generated_media_names.py', 'w')
+ fp.write('NAMES = %r' % utils.NAMES)
+ fp.close()
diff --git a/mediagenerator/base.py b/mediagenerator/base.py
new file mode 100644
index 0000000..0edf5bb
--- /dev/null
+++ b/mediagenerator/base.py
@@ -0,0 +1,37 @@
+from hashlib import sha1
+
+class Generator(object):
+ def generate_version(self, key, url, content):
+ return sha1(content).hexdigest()
+
+ def get_output(self):
+ """
+ Generates content for production mode.
+
+ Yields tuples of the form:
+ key, url, content
+
+ Here, key must be the same as for get_dev_output_names().
+ """
+ for key, url, hash in self.get_dev_output_names():
+ yield key, url, self.get_dev_output(url)[0]
+
+ def get_dev_output(self, name):
+ """
+ Generates content for dev mode.
+
+ Yields tuples of the form:
+ content, mimetype
+ """
+ raise NotImplementedError()
+
+ def get_dev_output_names(self):
+ """
+ Generates file names for dev mode.
+
+ Yields tuples of the form:
+ key, url, version_hash
+
+ Here, key must be the same as for get_output_names().
+ """
+ raise NotImplementedError()
diff --git a/mediagenerator/contrib/__init__.py b/mediagenerator/contrib/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mediagenerator/contrib/jinja2ext.py b/mediagenerator/contrib/jinja2ext.py
new file mode 100644
index 0000000..c1b9ef4
--- /dev/null
+++ b/mediagenerator/contrib/jinja2ext.py
@@ -0,0 +1,37 @@
+from jinja2 import nodes, TemplateAssertionError
+from jinja2.ext import Extension
+from jinja2 import Markup as mark_safe
+from jinja2.utils import next
+from mediagenerator.generators.bundles.utils import _render_include_media
+
+class MediaExtension(Extension):
+ tags = set(['include_media'])
+
+ def __init__(self, environment):
+ self.environment = environment
+
+ def parse(self, parser):
+ token = parser.stream.next()
+ args = [parser.parse_expression()]
+ kwargs = []
+ while parser.stream.current.type != 'block_end':
+ if kwargs:
+ parser.stream.expect('comma')
+
+ if parser.stream.skip_if('colon'):
+ break
+
+ name = parser.stream.expect('name')
+ if name.value in kwargs:
+ parser.fail('variable %r defined twice.' %
+ name.value, name.lineno,
+ exc=TemplateAssertionError)
+ parser.stream.expect('assign')
+ key = name.value
+ value = parser.parse_expression()
+ kwargs.append(nodes.Keyword(key, value,
+ lineno=value.lineno))
+ return nodes.Output([self.call_method('_render', args, kwargs)]).set_lineno(token.lineno)
+
+ def _render(self, bundle, **variation):
+ return mark_safe(_render_include_media(bundle, variation))
diff --git a/mediagenerator/contrib/jinja2install.py b/mediagenerator/contrib/jinja2install.py
new file mode 100644
index 0000000..ec87df4
--- /dev/null
+++ b/mediagenerator/contrib/jinja2install.py
@@ -0,0 +1,5 @@
+from jinja2 import Environment
+from mediagenerator.contrib.jinja2ext import MediaExtension
+from mediagenerator.utils import media_url
+env = Environment(extensions=[MediaExtension])
+env.globals['media_url'] = media_url
diff --git a/mediagenerator/filters/__init__.py b/mediagenerator/filters/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mediagenerator/filters/clevercss.py b/mediagenerator/filters/clevercss.py
new file mode 100644
index 0000000..90fb07b
--- /dev/null
+++ b/mediagenerator/filters/clevercss.py
@@ -0,0 +1,23 @@
+from mediagenerator.generators.bundles.base import Filter
+from clevercss import convert
+
+class CleverCSS(Filter):
+ def __init__(self, **kwargs):
+ super(CleverCSS, self).__init__(**kwargs)
+ assert self.filetype == 'css', (
+ 'CleverCSS only supports compilation to css. '
+ 'The parent filter expects "%s".' % self.filetype)
+ self.input_filetype = 'clevercss'
+
+ def should_use_default_filter(self, ext):
+ if ext == 'ccss':
+ return False
+ return super(CleverCSS, self).should_use_default_filter(ext)
+
+ def get_output(self, variation):
+ for input in self.get_input(variation):
+ yield convert(input)
+
+ def get_dev_output(self, name, variation):
+ content = super(CleverCSS, self).get_dev_output(name, variation)
+ return convert(content)
diff --git a/mediagenerator/filters/closure.py b/mediagenerator/filters/closure.py
new file mode 100644
index 0000000..2f61864
--- /dev/null
+++ b/mediagenerator/filters/closure.py
@@ -0,0 +1,35 @@
+from django.conf import settings
+from mediagenerator.generators.bundles.base import Filter
+
+COMPILATION_LEVEL = getattr(settings, 'CLOSURE_COMPILATION_LEVEL',
+ 'SIMPLE_OPTIMIZATIONS')
+
+class Closure(Filter):
+ def __init__(self, **kwargs):
+ self.config(kwargs, compilation_level=COMPILATION_LEVEL)
+ super(Closure, self).__init__(**kwargs)
+ assert self.filetype == 'js', (
+ 'Closure only supports compilation to js. '
+ 'The parent filter expects "%s".' % self.filetype)
+
+ def get_output(self, variation):
+ # We import this here, so App Engine Helper users don't get import
+ # errors.
+ from subprocess import Popen, PIPE
+ for input in self.get_input(variation):
+ try:
+ compressor = settings.CLOSURE_COMPILER_PATH
+ cmd = Popen(['java', '-jar', compressor,
+ '--charset', 'utf-8',
+ '--compilation_level', self.compilation_level],
+ stdin=PIPE, stdout=PIPE, stderr=PIPE,
+ universal_newlines=True)
+ output, error = cmd.communicate(input)
+ assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
+ yield output
+ except Exception, e:
+ raise ValueError("Failed to execute Java VM or Closure. "
+ "Please make sure that you have installed Java "
+ "and that it's in your PATH and that you've configured "
+ "CLOSURE_COMPILER_PATH in your settings correctly.\n"
+ "Error was: %s" % e)
diff --git a/mediagenerator/filters/coffeescript.py b/mediagenerator/filters/coffeescript.py
new file mode 100644
index 0000000..5e12e97
--- /dev/null
+++ b/mediagenerator/filters/coffeescript.py
@@ -0,0 +1,43 @@
+from mediagenerator.generators.bundles.base import Filter
+from subprocess import Popen, PIPE
+
+class CoffeeScript(Filter):
+ def __init__(self, **kwargs):
+ super(CoffeeScript, self).__init__(**kwargs)
+ assert self.filetype == 'js', (
+ 'CoffeeScript only supports compilation to js. '
+ 'The parent filter expects "%s".' % self.filetype)
+ self.input_filetype = 'coffee-script'
+
+ def compile(self, input):
+ try:
+ # coffee
+ # -s = Read from stdin for the source
+ # -c = Compile
+ # -p = print the compiled output to stdout
+ cmd = Popen(['coffee', '-c', '-p', '-s', '--no-wrap'],
+ stdin=PIPE, stdout=PIPE, stderr=PIPE,
+ universal_newlines=True)
+ output, error = cmd.communicate(input)
+ assert cmd.wait() == 0, ('CoffeeScript command returned bad '
+ 'result:\n%s' % error)
+ return output
+ except Exception, e:
+ raise ValueError("Failed to run CoffeeScript compiler for this "
+ "file. Please confirm that the \"coffee\" application is "
+ "on your path and that you can run it from your own command "
+ "line.\n"
+ "Error was: %s" % e)
+
+ def should_use_default_filter(self, ext):
+ if ext == 'coffee':
+ return False
+ return super(CoffeeScript, self).should_use_default_filter(ext)
+
+ def get_output(self, variation):
+ for input in self.get_input(variation):
+ yield self.compile(input)
+
+ def get_dev_output(self, name, variation):
+ content = super(CoffeeScript, self).get_dev_output(name, variation)
+ return self.compile(content)
diff --git a/mediagenerator/filters/concat.py b/mediagenerator/filters/concat.py
new file mode 100644
index 0000000..8db3944
--- /dev/null
+++ b/mediagenerator/filters/concat.py
@@ -0,0 +1,31 @@
+from hashlib import sha1
+from mediagenerator.generators.bundles.base import Filter
+
+class Concat(Filter):
+ """
+ Simply concatenates multiple files into a single file.
+
+ This is also the default root filter.
+ """
+ def __init__(self, **kwargs):
+ self.config(kwargs, concat_dev_output=False, dev_output_name='concat')
+ super(Concat, self).__init__(**kwargs)
+
+ def get_output(self, variation):
+ yield '\n\n'.join(input for input in self.get_input(variation))
+
+ def get_dev_output(self, name, variation):
+ if not self.concat_dev_output:
+ return super(Concat, self).get_dev_output(name, variation)
+ assert self.dev_output_name == name
+ names = super(Concat, self).get_dev_output_names(variation)
+ return '\n\n'.join(super(Concat, self).get_dev_output(name[0], variation)
+ for name in names)
+
+ def get_dev_output_names(self, variation):
+ if not self.concat_dev_output:
+ for data in super(Concat, self).get_dev_output_names(variation):
+ yield data
+ return
+ content = self.get_dev_output(self.dev_output_name, variation)
+ yield self.dev_output_name, sha1(content).hexdigest()
diff --git a/mediagenerator/filters/cssurl.py b/mediagenerator/filters/cssurl.py
new file mode 100644
index 0000000..4dac238
--- /dev/null
+++ b/mediagenerator/filters/cssurl.py
@@ -0,0 +1,84 @@
+from base64 import b64encode
+from django.conf import settings
+from mediagenerator.generators.bundles.base import Filter, FileFilter
+from mediagenerator.utils import media_url, prepare_patterns, find_file
+from mimetypes import guess_type
+import logging
+import os
+import posixpath
+import re
+
+url_re = re.compile(r'url\s*\(["\']?([\w\.][^:]*?)["\']?\)', re.UNICODE)
+
+# Whether to rewrite CSS URLs, at all
+REWRITE_CSS_URLS = getattr(settings, 'REWRITE_CSS_URLS', True)
+# Whether to rewrite CSS URLs relative to the respective source file
+# or whether to use "absolute" URL rewriting (i.e., relative URLs are
+# considered absolute with regards to STATICFILES_URL)
+REWRITE_CSS_URLS_RELATIVE_TO_SOURCE = getattr(settings,
+ 'REWRITE_CSS_URLS_RELATIVE_TO_SOURCE', True)
+
+GENERATE_DATA_URIS = getattr(settings, 'GENERATE_DATA_URIS', False)
+MAX_DATA_URI_FILE_SIZE = getattr(settings, 'MAX_DATA_URI_FILE_SIZE', 12*1024)
+IGNORE_PATTERN = prepare_patterns(getattr(settings,
+ 'IGNORE_DATA_URI_PATTERNS', (r'.*\.htc',)), 'IGNORE_DATA_URI_PATTERNS')
+
+class URLRewriter(object):
+ def __init__(self, base_path='./'):
+ if not base_path:
+ base_path = './'
+ self.base_path = base_path
+
+ def rewrite_urls(self, content):
+ if not REWRITE_CSS_URLS:
+ return content
+ return url_re.sub(self.fixurls, content)
+
+ def fixurls(self, match):
+ url = match.group(1)
+ hashid = ''
+ if '#' in url:
+ url, hashid = url.split('#', 1)
+ hashid = '#' + hashid
+ if ':' not in url and not url.startswith('/'):
+ rebased_url = posixpath.join(self.base_path, url)
+ rebased_url = posixpath.normpath(rebased_url)
+ try:
+ if GENERATE_DATA_URIS:
+ path = find_file(rebased_url)
+ if os.path.getsize(path) <= MAX_DATA_URI_FILE_SIZE and \
+ not IGNORE_PATTERN.match(rebased_url):
+ data = b64encode(open(path, 'rb').read())
+ mime = guess_type(path)[0] or 'application/octet-stream'
+ return 'url(data:%s;base64,%s)' % (mime, data)
+ url = media_url(rebased_url)
+ except:
+ logging.error('URL not found: %s' % url)
+ return 'url(%s%s)' % (url, hashid)
+
+class CSSURL(Filter):
+ """Rewrites URLs relative to media folder ("absolute" rewriting)."""
+ def __init__(self, **kwargs):
+ super(CSSURL, self).__init__(**kwargs)
+ assert self.filetype == 'css', (
+ 'CSSURL only supports CSS output. '
+ 'The parent filter expects "%s".' % self.filetype)
+
+ def get_output(self, variation):
+ rewriter = URLRewriter()
+ for input in self.get_input(variation):
+ yield rewriter.rewrite_urls(input)
+
+ def get_dev_output(self, name, variation):
+ rewriter = URLRewriter()
+ content = super(CSSURL, self).get_dev_output(name, variation)
+ return rewriter.rewrite_urls(content)
+
+class CSSURLFileFilter(FileFilter):
+ """Rewrites URLs relative to input file's location."""
+ def get_dev_output(self, name, variation):
+ content = super(CSSURLFileFilter, self).get_dev_output(name, variation)
+ if not REWRITE_CSS_URLS_RELATIVE_TO_SOURCE:
+ return content
+ rewriter = URLRewriter(posixpath.dirname(name))
+ return rewriter.rewrite_urls(content)
diff --git a/mediagenerator/filters/i18n.py b/mediagenerator/filters/i18n.py
new file mode 100644
index 0000000..86de21d
--- /dev/null
+++ b/mediagenerator/filters/i18n.py
@@ -0,0 +1,54 @@
+from django.conf import settings
+from django.http import HttpRequest
+from django.views.i18n import javascript_catalog
+from hashlib import sha1
+from mediagenerator.generators.bundles.base import Filter
+
+if settings.USE_I18N:
+ LANGUAGES = [code for code, _ in settings.LANGUAGES]
+else:
+ LANGUAGES = (settings.LANGUAGE_CODE,)
+
+class I18N(Filter):
+ takes_input = False
+
+ def __init__(self, **kwargs):
+ super(I18N, self).__init__(**kwargs)
+ assert self.filetype == 'js', (
+ 'I18N only supports compilation to js. '
+ 'The parent filter expects "%s".' % self.filetype)
+
+ def get_variations(self):
+ return {'language': LANGUAGES}
+
+ def get_output(self, variation):
+ language = variation['language']
+ yield self._generate(language)
+
+ def get_dev_output(self, name, variation):
+ language = variation['language']
+ assert language == name
+ return self._generate(language)
+
+ def get_dev_output_names(self, variation):
+ language = variation['language']
+ content = self._generate(language)
+ hash = sha1(content).hexdigest()
+ yield language, hash
+
+ def _generate(self, language):
+ language_bidi = language.split('-')[0] in settings.LANGUAGES_BIDI
+ request = HttpRequest()
+ request.GET['language'] = language
+ # Add some JavaScript data
+ content = 'var LANGUAGE_CODE = "%s";\n' % language
+ content += 'var LANGUAGE_BIDI = ' + \
+ (language_bidi and 'true' or 'false') + ';\n'
+ content += javascript_catalog(request,
+ packages=settings.INSTALLED_APPS).content
+ # The hgettext() function just calls gettext() internally, but
+ # it won't get indexed by makemessages.
+ content += '\nwindow.hgettext = function(text) { return gettext(text); };\n'
+ # Add a similar hngettext() function
+ content += 'window.hngettext = function(singular, plural, count) { return ngettext(singular, plural, count); };\n'
+ return content
diff --git a/mediagenerator/filters/media_url.py b/mediagenerator/filters/media_url.py
new file mode 100644
index 0000000..8930d77
--- /dev/null
+++ b/mediagenerator/filters/media_url.py
@@ -0,0 +1,46 @@
+from django.utils.simplejson import dumps
+from mediagenerator.generators.bundles.base import Filter
+from mediagenerator.utils import get_media_url_mapping
+from hashlib import sha1
+
+_CODE = """
+_$MEDIA_URLS = %s;
+
+media_urls = function(key) {
+ var urls = _$MEDIA_URLS[key];
+ if (!urls)
+ throw 'Could not resolve media url ' + key;
+ return urls;
+};
+
+media_url = function(key) {
+ var urls = media_urls(key);
+ if (urls.length == 1)
+ return urls[0];
+ throw 'media_url() only works with keys that point to a single entry (e.g. an image), but not bundles. Use media_urls() instead.';
+};
+""".lstrip()
+
+class MediaURL(Filter):
+ takes_input = False
+
+ def __init__(self, **kwargs):
+ super(MediaURL, self).__init__(**kwargs)
+ assert self.filetype == 'js', (
+ 'MediaURL only supports JS output. '
+ 'The parent filter expects "%s".' % self.filetype)
+
+ def get_output(self, variation):
+ yield self._compile()
+
+ def get_dev_output(self, name, variation):
+ assert name == '.media_url.js'
+ return self._compile()
+
+ def get_dev_output_names(self, variation):
+ content = self._compile()
+ hash = sha1(content).hexdigest()
+ yield '.media_url.js', hash
+
+ def _compile(self):
+ return _CODE % dumps(get_media_url_mapping())
diff --git a/mediagenerator/filters/pyjs_filter.py b/mediagenerator/filters/pyjs_filter.py
new file mode 100644
index 0000000..be71755
--- /dev/null
+++ b/mediagenerator/filters/pyjs_filter.py
@@ -0,0 +1,284 @@
+from hashlib import sha1
+from mediagenerator.generators.bundles.base import Filter
+from mediagenerator.utils import find_file, get_media_dirs
+from pyjs.translator import import_compiler, Translator, LIBRARY_PATH
+from textwrap import dedent
+import os
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+# Register PYVA() function
+try:
+ from pyvascript.grammar import compile
+ from pyjs.translator import native_js_func
+
+ @native_js_func
+ def PYVA(content, unescape, is_statement, **kwargs):
+ result = compile(dedent(unescape(content)))
+ if not is_statement:
+ return result.strip().rstrip('\r\n\t ;')
+ return result
+except ImportError:
+ # No PyvaScript installed
+ pass
+
+_HANDLE_EXCEPTIONS = """
+ } finally { $pyjs.in_try_except -= 1; }
+} catch(err) {
+ pyjslib['_handle_exception'](err);
+}
+"""
+
+PYJS_INIT_LIB_PATH = os.path.join(LIBRARY_PATH, 'builtin', 'public', '_pyjs.js')
+BUILTIN_PATH = os.path.join(LIBRARY_PATH, 'builtin')
+STDLIB_PATH = os.path.join(LIBRARY_PATH, 'lib')
+EXTRA_LIBS_PATH = os.path.join(os.path.dirname(__file__), 'pyjslibs')
+
+_LOAD_PYJSLIB = """
+
+$p = $pyjs.loaded_modules["pyjslib"];
+$p('pyjslib');
+$pyjs.__modules__.pyjslib = $p['pyjslib']
+"""
+
+INIT_CODE = """
+var $wnd = window;
+var $doc = window.document;
+var $pyjs = new Object();
+var $p = null;
+$pyjs.platform = 'safari';
+$pyjs.global_namespace = this;
+$pyjs.__modules__ = {};
+$pyjs.modules_hash = {};
+$pyjs.loaded_modules = {};
+$pyjs.options = new Object();
+$pyjs.options.arg_ignore = true;
+$pyjs.options.arg_count = true;
+$pyjs.options.arg_is_instance = true;
+$pyjs.options.arg_instance_type = false;
+$pyjs.options.arg_kwarg_dup = true;
+$pyjs.options.arg_kwarg_unexpected_keyword = true;
+$pyjs.options.arg_kwarg_multiple_values = true;
+$pyjs.options.dynamic_loading = false;
+$pyjs.trackstack = [];
+$pyjs.track = {module:'__main__', lineno: 1};
+$pyjs.trackstack.push($pyjs.track);
+$pyjs.__active_exception_stack__ = null;
+$pyjs.__last_exception_stack__ = null;
+$pyjs.__last_exception__ = null;
+$pyjs.in_try_except = 0;
+""".lstrip()
+
+class Pyjs(Filter):
+ takes_input = False
+
+ def __init__(self, **kwargs):
+ self.config(kwargs, exclude_main_libs=False, main_module=None,
+ debug=None, path=(), only_dependencies=None)
+ if isinstance(self.path, basestring):
+ self.path = (self.path,)
+ self.path += tuple(get_media_dirs())
+ if self.only_dependencies is None:
+ self.only_dependencies = bool(self.main_module)
+ if self.only_dependencies:
+ self.path += (STDLIB_PATH, BUILTIN_PATH, EXTRA_LIBS_PATH)
+ super(Pyjs, self).__init__(**kwargs)
+ assert self.filetype == 'js', (
+ 'Pyjs only supports compilation to js. '
+ 'The parent filter expects "%s".' % self.filetype)
+
+ if self.only_dependencies:
+ assert self.main_module, \
+ 'You must provide a main module in only_dependencies mode'
+
+ self._compiled = {}
+ self._collected = {}
+
+ @classmethod
+ def from_default(cls, name):
+ return {'main_module': name.rsplit('.', 1)[0]}
+
+ def get_output(self, variation):
+ self._collect_all_modules()
+
+ if not self.exclude_main_libs:
+ yield self._compile_init()
+
+ if self.only_dependencies:
+ self._regenerate(dev_mode=False)
+ for name in sorted(self._compiled.keys()):
+ yield self._compiled[name][1]
+ else:
+ for name in sorted(self._collected.keys()):
+ fp = open(self._collected[name], 'r')
+ output = self._compile(name, fp.read(), dev_mode=False)[0]
+ fp.close()
+ yield output
+
+ yield self._compile_main(dev_mode=False)
+
+ def get_dev_output(self, name, variation):
+ self._collect_all_modules()
+
+ name = name.split('/', 1)[-1]
+
+ if name == '._pyjs.js':
+ return self._compile_init()
+ elif name == '.main.js':
+ return self._compile_main(dev_mode=True)
+
+ if self.only_dependencies:
+ self._regenerate(dev_mode=True)
+ return self._compiled[name][1]
+ else:
+ fp = open(self._collected[name], 'r')
+ output = self._compile(name, fp.read(), dev_mode=True)[0]
+ fp.close()
+ return output
+
+ def get_dev_output_names(self, variation):
+ self._collect_all_modules()
+
+ if not self.exclude_main_libs:
+ content = self._compile_init()
+ hash = sha1(content).hexdigest()
+ yield '._pyjs.js', hash
+
+ if self.only_dependencies:
+ self._regenerate(dev_mode=True)
+ for name in sorted(self._compiled.keys()):
+ yield name, self._compiled[name][2]
+ else:
+ for name in sorted(self._collected.keys()):
+ yield name, None
+
+ if self.main_module is not None or not self.exclude_main_libs:
+ content = self._compile_main(dev_mode=True)
+ hash = sha1(content).hexdigest()
+ yield '.main.js', hash
+
+ def _regenerate(self, dev_mode=False):
+ # This function is only called in only_dependencies mode
+ if self._compiled:
+ for module_name, (mtime, content, hash) in self._compiled.items():
+ if module_name not in self._collected or \
+ not os.path.exists(self._collected[module_name]) or \
+ os.path.getmtime(self._collected[module_name]) != mtime:
+ # Just recompile everything
+ # TODO: track dependencies and changes and recompile only
+ # what's necessary
+ self._compiled = {}
+ break
+ else:
+ # No changes
+ return
+
+ modules = [self.main_module, 'pyjslib']
+ while True:
+ if not modules:
+ break
+
+ module_name = modules.pop()
+ path = self._collected[module_name]
+ mtime = os.path.getmtime(path)
+
+ fp = open(path, 'r')
+ source = fp.read()
+ fp.close()
+
+ try:
+ content, py_deps, js_deps = self._compile(module_name, source, dev_mode=dev_mode)
+ except:
+ self._compiled = {}
+ raise
+ hash = sha1(content).hexdigest()
+ self._compiled[module_name] = (mtime, content, hash)
+
+ for name in py_deps:
+ if name not in self._collected:
+ if '.' in name and name.rsplit('.', 1)[0] in self._collected:
+ name = name.rsplit('.', 1)[0]
+ else:
+ raise ImportError('The pyjs module %s could not find '
+ 'the dependency %s' % (module_name, name))
+ if name not in self._compiled:
+ modules.append(name)
+
+ def _compile(self, name, source, dev_mode=False):
+ if self.debug is None:
+ debug = dev_mode
+ else:
+ debug = self.debug
+ compiler = import_compiler(False)
+ tree = compiler.parse(source)
+ output = StringIO()
+ translator = Translator(compiler, name, name, source, tree, output,
+ # Debug options
+ debug=debug, source_tracking=debug, line_tracking=debug,
+ store_source=debug,
+ # Speed and size optimizations
+ function_argument_checking=debug, attribute_checking=False,
+ inline_code=False, number_classes=False,
+ # Sufficient Python conformance
+ operator_funcs=True, bound_methods=True, descriptors=True,
+ )
+ return output.getvalue(), translator.imported_modules, translator.imported_js
+
+ def _compile_init(self):
+ fp = open(PYJS_INIT_LIB_PATH, 'r')
+ content = fp.read()
+ fp.close()
+ return INIT_CODE + content
+
+ def _compile_main(self, dev_mode=False):
+ if self.debug is None:
+ debug = dev_mode
+ else:
+ debug = self.debug
+ content = ''
+ if not self.exclude_main_libs:
+ content += _LOAD_PYJSLIB
+ if self.main_module is not None:
+ content += '\n\n'
+ if debug:
+ content += 'try {\n'
+ content += ' try {\n'
+ content += ' $pyjs.in_try_except += 1;\n '
+ content += 'pyjslib.___import___("%s", null, "__main__");' % self.main_module
+ if debug:
+ content += _HANDLE_EXCEPTIONS
+ return content
+
+ def _collect_all_modules(self):
+ """Collect modules, so we can handle imports later"""
+ for pkgroot in self.path:
+ pkgroot = os.path.abspath(pkgroot)
+ for root, dirs, files in os.walk(pkgroot):
+ if '__init__.py' in files:
+ files.remove('__init__.py')
+ # The root __init__.py is ignored
+ if root != pkgroot:
+ files.insert(0, '__init__.py')
+ elif root != pkgroot:
+ # Only add valid Python packages
+ dirs[:] = []
+ continue
+
+ for filename in files:
+ if not filename.endswith('.py'):
+ continue
+
+ path = os.path.join(root, filename)
+ module_path = path[len(pkgroot)+len(os.sep):]
+ if os.path.basename(module_path) == '__init__.py':
+ module_name = os.path.dirname(module_path)
+ else:
+ module_name = module_path[:-3]
+ assert '.' not in module_name, \
+ 'Invalid module file name: %s' % module_path
+ module_name = module_name.replace(os.sep, '.')
+
+ self._collected.setdefault(module_name, path)
diff --git a/mediagenerator/filters/pyjslibs/dynamic.py b/mediagenerator/filters/pyjslibs/dynamic.py
new file mode 100644
index 0000000..b8efb1c
--- /dev/null
+++ b/mediagenerator/filters/pyjslibs/dynamic.py
@@ -0,0 +1,2 @@
+# This is here because pyjslib.py imports the dynamic module.
+# However, we've turned off dynamic module loading, so this module is empty.
diff --git a/mediagenerator/filters/pyjslibs/getopt.py b/mediagenerator/filters/pyjslibs/getopt.py
new file mode 100644
index 0000000..95fd0ab
--- /dev/null
+++ b/mediagenerator/filters/pyjslibs/getopt.py
@@ -0,0 +1 @@
+# Unused dependency of base64 module
diff --git a/mediagenerator/filters/pyvascript_filter.py b/mediagenerator/filters/pyvascript_filter.py
new file mode 100644
index 0000000..1012d50
--- /dev/null
+++ b/mediagenerator/filters/pyvascript_filter.py
@@ -0,0 +1,32 @@
+from mediagenerator.generators.bundles.base import Filter, RawFileFilter
+from pyvascript.grammar import compile
+import os
+import pyvascript
+
+class PyvaScript(Filter):
+ def __init__(self, **kwargs):
+ super(PyvaScript, self).__init__(**kwargs)
+ assert self.filetype == 'js', (
+ 'PyvaScript only supports compilation to js. '
+ 'The parent filter expects "%s".' % self.filetype)
+ self.input_filetype = 'pyvascript'
+
+ def should_use_default_filter(self, ext):
+ if ext == 'pyva':
+ return False
+ return super(PyvaScript, self).should_use_default_filter(ext)
+
+ def get_output(self, variation):
+ for input in self.get_input(variation):
+ yield compile(input)
+
+ def get_dev_output(self, name, variation):
+ content = super(PyvaScript, self).get_dev_output(name, variation)
+ return compile(content)
+
+ def get_item(self, name):
+ if name == '.stdlib.pyva':
+ path = os.path.join(os.path.dirname(pyvascript.__file__),
+ 'stdlib.pyva')
+ return RawFileFilter(name='.stdlib.pyva', path=path)
+ return super(PyvaScript, self).get_item(name)
diff --git a/mediagenerator/filters/sass.py b/mediagenerator/filters/sass.py
new file mode 100644
index 0000000..cc81640
--- /dev/null
+++ b/mediagenerator/filters/sass.py
@@ -0,0 +1,131 @@
+from django.conf import settings
+from hashlib import sha1
+from mediagenerator.generators.bundles.base import Filter
+from mediagenerator.utils import get_media_dirs, find_file
+from subprocess import Popen, PIPE
+import os
+import re
+import sys
+
+# Emits extra debug info that can be used by the FireSass Firebug plugin
+SASS_DEBUG_INFO = getattr(settings, 'SASS_DEBUG_INFO', False)
+
+_RE_FLAGS = re.MULTILINE | re.UNICODE
+multi_line_comment_re = re.compile(r'/\*.*?\*/', _RE_FLAGS | re.DOTALL)
+one_line_comment_re = re.compile(r'//.*', _RE_FLAGS)
+import_re = re.compile(r'^@import\s+["\']?(.+?)["\']?\s*;?\s*$', _RE_FLAGS)
+
+class Sass(Filter):
+ takes_input = False
+
+ def __init__(self, **kwargs):
+ self.config(kwargs, path=(), main_module=None)
+ if isinstance(self.path, basestring):
+ self.path = (self.path,)
+ super(Sass, self).__init__(**kwargs)
+ assert self.filetype == 'css', (
+ 'Sass only supports compilation to css. '
+ 'The parent filter expects "%s".' % self.filetype)
+ assert self.main_module, \
+ 'You must provide a main module'
+
+ self.path += tuple(get_media_dirs())
+ self.path_args = []
+ for path in self.path:
+ self.path_args.extend(('-I', path))
+
+ self._compiled = None
+ self._compiled_hash = None
+ self._dependencies = {}
+
+ @classmethod
+ def from_default(cls, name):
+ return {'main_module': name}
+
+ def get_output(self, variation):
+ self._regenerate(debug=False)
+ yield self._compiled
+
+ def get_dev_output(self, name, variation):
+ assert name == self.main_module
+ self._regenerate(debug=True)
+ return self._compiled
+
+ def get_dev_output_names(self, variation):
+ self._regenerate(debug=True)
+ yield self.main_module, self._compiled_hash
+
+ def _compile(self, debug=False):
+ run = ['sass', '-C', '-t', 'expanded']
+ if debug:
+ run.append('--line-numbers')
+ if SASS_DEBUG_INFO:
+ run.append('--debug-info')
+ run.extend(self.path_args)
+ shell = sys.platform == 'win32'
+ cmd = Popen(run, shell=shell, universal_newlines=True,
+ stdin=PIPE, stdout=PIPE, stderr=PIPE)
+ output, error = cmd.communicate('@import %s' % self.main_module)
+ assert cmd.wait() == 0, ('Sass command returned bad result (did you '
+ 'install Sass? http://sass-lang.com):\n%s'
+ % error)
+ return output
+
+ def _regenerate(self, debug=False):
+ if self._dependencies:
+ for name, mtime in self._dependencies.items():
+ path = self._find_file(name)
+ if not path or os.path.getmtime(path) != mtime:
+ # Just recompile everything
+ self._dependencies = {}
+ break
+ else:
+ # No changes
+ return
+
+ modules = [self.main_module]
+ while True:
+ if not modules:
+ break
+
+ module_name = modules.pop()
+ path = self._find_file(module_name)
+ assert path, 'Could not find the Sass module %s' % module_name
+ mtime = os.path.getmtime(path)
+ self._dependencies[module_name] = mtime
+
+ fp = open(path, 'r')
+ source = fp.read()
+ fp.close()
+
+ dependencies = self._get_dependencies(source)
+
+ for name in dependencies:
+ path = self._find_file(name)
+ assert path, ('The Sass module %s could not find the '
+ 'dependency %s' % (module_name, name))
+ if name not in self._dependencies:
+ modules.append(name)
+
+ self._compiled = self._compile(debug=debug)
+ self._compiled_hash = sha1(self._compiled).hexdigest()
+
+ def _get_dependencies(self, source):
+ clean_source = multi_line_comment_re.sub('\n', source)
+ clean_source = one_line_comment_re.sub('', clean_source)
+ return [name for name in import_re.findall(clean_source)
+ if not name.endswith('.css')]
+
+ def _find_file(self, name):
+ parts = name.rsplit('/', 1)
+ parts[-1] = '_' + parts[-1]
+ partial = '/'.join(parts)
+ if not name.endswith(('.sass', '.scss')):
+ names = (name + '.sass', name + '.scss', partial + '.sass',
+ partial + '.scss')
+ else:
+ names = (name, partial)
+ for name in names:
+ path = find_file(name, media_dirs=self.path)
+ if path:
+ return path
diff --git a/mediagenerator/filters/template.py b/mediagenerator/filters/template.py
new file mode 100644
index 0000000..9f5b090
--- /dev/null
+++ b/mediagenerator/filters/template.py
@@ -0,0 +1,19 @@
+from django.template import Context, Template as DjangoTemplate
+from mediagenerator.generators.bundles.base import Filter
+
+class Template(Filter):
+ def __init__(self, **kwargs):
+ super(Template, self).__init__(**kwargs)
+
+ def get_output(self, variation):
+ for input in self.get_input(variation):
+ yield self._template(input)
+
+ def get_dev_output(self, name, variation):
+ content = super(Template, self).get_dev_output(name, variation)
+ return self._template(content)
+
+ def _template(self, content):
+ context = Context({})
+ context.autoescape = self.filetype == 'html'
+ return DjangoTemplate(content).render(context)
diff --git a/mediagenerator/filters/yuicompressor.py b/mediagenerator/filters/yuicompressor.py
new file mode 100644
index 0000000..47f035d
--- /dev/null
+++ b/mediagenerator/filters/yuicompressor.py
@@ -0,0 +1,30 @@
+from django.conf import settings
+from mediagenerator.generators.bundles.base import Filter
+
+class YUICompressor(Filter):
+ def __init__(self, **kwargs):
+ super(YUICompressor, self).__init__(**kwargs)
+ assert self.filetype in ('css', 'js'), (
+ 'YUICompressor only supports compilation to css and js. '
+ 'The parent filter expects "%s".' % self.filetype)
+
+ def get_output(self, variation):
+ # We import this here, so App Engine Helper users don't get import
+ # errors.
+ from subprocess import Popen, PIPE
+ for input in self.get_input(variation):
+ try:
+ compressor = settings.YUICOMPRESSOR_PATH
+ cmd = Popen(['java', '-jar', compressor,
+ '--charset', 'utf-8', '--type', self.filetype],
+ stdin=PIPE, stdout=PIPE, stderr=PIPE,
+ universal_newlines=True)
+ output, error = cmd.communicate(input)
+ assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
+ yield output
+ except Exception, e:
+ raise ValueError("Failed to execute Java VM or yuicompressor. "
+ "Please make sure that you have installed Java "
+ "and that it's in your PATH and that you've configured "
+ "YUICOMPRESSOR_PATH in your settings correctly.\n"
+ "Error was: %s" % e)
diff --git a/mediagenerator/generators/__init__.py b/mediagenerator/generators/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mediagenerator/generators/bundles/__init__.py b/mediagenerator/generators/bundles/__init__.py
new file mode 100644
index 0000000..f69e44b
--- /dev/null
+++ b/mediagenerator/generators/bundles/__init__.py
@@ -0,0 +1 @@
+from .bundles import Bundles
diff --git a/mediagenerator/generators/bundles/base.py b/mediagenerator/generators/bundles/base.py
new file mode 100644
index 0000000..c026a9d
--- /dev/null
+++ b/mediagenerator/generators/bundles/base.py
@@ -0,0 +1,195 @@
+from .settings import DEFAULT_MEDIA_FILTERS
+from hashlib import sha1
+from mediagenerator.utils import load_backend, find_file
+import os
+
+class Filter(object):
+ takes_input = True
+
+ def __init__(self, **kwargs):
+ self.file_filter = FileFilter
+ self.config(kwargs, filetype=None, filter=None,
+ bundle=None, _from_default=None)
+
+ # We assume that if this is e.g. a 'js' backend then all input must
+ # also be 'js'. Subclasses must override this if they expect a special
+ # input file type. Also, subclasses have to check if their file type
+ # is supported.
+ self.input_filetype = self.filetype
+
+ if self.takes_input:
+ self.config(kwargs, input=())
+ if not isinstance(self.input, (tuple, list)):
+ self.input = (self.input,)
+ self._input_filters = None
+ assert not kwargs, 'Unknown parameters: %s' % ', '.join(kwargs.keys())
+
+ @classmethod
+ def from_default(cls, name):
+ return {'input': name}
+
+ def should_use_default_filter(self, ext):
+ return ext != self._from_default
+
+ def get_variations(self):
+ """
+ Returns all possible variations that get generated by this filter.
+
+ The result must be a dict whose values are tuples.
+ """
+ return {}
+
+ def get_output(self, variation):
+ """
+ Yields file-like objects with content for each output item for the
+ given variation.
+ """
+ raise NotImplementedError()
+
+ def get_dev_output(self, name, variation):
+ """
+ Returns content for the given file name and variation in development mode.
+ """
+ index, child = name.split('/', 1)
+ index = int(index)
+ filter = self.get_input_filters()[index]
+ return filter.get_dev_output(child, variation)
+
+ def get_dev_output_names(self, variation):
+ """
+ Yields file names for the given variation in development mode.
+ """
+ # By default we simply return our input filters' file names
+ for index, filter in enumerate(self.get_input_filters()):
+ for name, hash in filter.get_dev_output_names(variation):
+ yield '%d/%s' % (index, name), hash
+
+ def get_input(self, variation):
+ """Yields contents for each input item."""
+ for filter in self.get_input_filters():
+ for input in filter.get_output(variation):
+ yield input
+
+ def get_input_filters(self):
+ """Returns a Filter instance for each input item."""
+ if not self.takes_input:
+ raise ValueError("The %s media filter doesn't take any input" %
+ self.__class__.__name__)
+ if self._input_filters is not None:
+ return self._input_filters
+ self._input_filters = []
+ for input in self.input:
+ if isinstance(input, dict):
+ filter = self.get_filter(input)
+ else:
+ filter = self.get_item(input)
+ self._input_filters.append(filter)
+ return self._input_filters
+
+ def get_filter(self, config):
+ backend_class = load_backend(config.get('filter'))
+ return backend_class(filetype=self.input_filetype, bundle=self.bundle,
+ **config)
+
+ def get_item(self, name):
+ ext = os.path.splitext(name)[1].lstrip('.')
+ if ext in DEFAULT_MEDIA_FILTERS and self.should_use_default_filter(ext):
+ backend_class = load_backend(DEFAULT_MEDIA_FILTERS[ext])
+ else:
+ backend_class = self.file_filter
+
+ config = backend_class.from_default(name)
+ config.setdefault('filter',
+ '%s.%s' % (backend_class.__module__, backend_class.__name__))
+ config.setdefault('filetype', self.input_filetype)
+ config['bundle'] = self.bundle
+ # This is added to make really sure we don't instantiate the same
+ # filter in an endless loop. Normally, the child class should
+ # take care of this in should_use_default_filter().
+ config.setdefault('_from_default', ext)
+ return backend_class(**config)
+
+ def _get_variations_with_input(self):
+ """Utility function to get variations including input variations"""
+ variations = self.get_variations()
+ if not self.takes_input:
+ return variations
+
+ for filter in self.get_input_filters():
+ subvariations = filter._get_variations_with_input()
+ for k, v in subvariations.items():
+ if k in variations and v != variations[k]:
+ raise ValueError('Conflicting variations for "%s": %r != %r' % (
+ k, v, variations[k]))
+ variations.update(subvariations)
+ return variations
+
+ def config(self, init, **defaults):
+ for key in defaults:
+ setattr(self, key, init.pop(key, defaults[key]))
+
+class FileFilter(Filter):
+ """A filter that just returns the given file."""
+ takes_input = False
+
+ def __init__(self, **kwargs):
+ self.config(kwargs, name=None)
+ self.mtime = self.hash = None
+ super(FileFilter, self).__init__(**kwargs)
+
+ @classmethod
+ def from_default(cls, name):
+ return {'name': name}
+
+ def get_output(self, variation):
+ yield self.get_dev_output(self.name, variation)
+
+ def get_dev_output(self, name, variation):
+ assert name == self.name, (
+ '''File name "%s" doesn't match the one in GENERATE_MEDIA ("%s")'''
+ % (name, self.name))
+ path = self._get_path()
+ fp = open(path, 'r')
+ output = fp.read()
+ fp.close()
+ return output
+
+ def get_dev_output_names(self, variation):
+ path = self._get_path()
+ mtime = os.path.getmtime(path)
+ if mtime != self.mtime:
+ output = self.get_dev_output(self.name, variation)
+ hash = sha1(output).hexdigest()
+ else:
+ hash = self.hash
+ yield self.name, hash
+
+ def _get_path(self):
+ path = find_file(self.name)
+ assert path, """File name "%s" doesn't exist.""" % self.name
+ return path
+
+class RawFileFilter(FileFilter):
+ takes_input = False
+
+ def __init__(self, **kwargs):
+ self.config(kwargs, path=None)
+ super(RawFileFilter, self).__init__(**kwargs)
+
+ def get_dev_output(self, name, variation):
+ assert name == self.name, (
+ '''File name "%s" doesn't match the one in GENERATE_MEDIA ("%s")'''
+ % (name, self.name))
+ fp = open(self.path, 'r')
+ output = fp.read()
+ fp.close()
+ return output
+
+ def get_dev_output_names(self, variation):
+ mtime = os.path.getmtime(self.path)
+ if mtime != self.mtime:
+ output = self.get_dev_output(self.name, variation)
+ hash = sha1(output).hexdigest()
+ else:
+ hash = self.hash
+ yield self.name, hash
diff --git a/mediagenerator/generators/bundles/bundles.py b/mediagenerator/generators/bundles/bundles.py
new file mode 100644
index 0000000..325f943
--- /dev/null
+++ b/mediagenerator/generators/bundles/bundles.py
@@ -0,0 +1,85 @@
+from .settings import MEDIA_BUNDLES
+from .utils import _load_root_filter, _get_key
+try:
+ from itertools import product
+except ImportError:
+ try:
+ from django.utils.itercompat import product
+ except ImportError:
+ # Needed for Django 1.0 and 1.1 support.
+ # TODO/FIXME: Remove this when nobody uses Django 1.0/1.1, anymore.
+ from .itercompat import product
+from mediagenerator.base import Generator
+from mimetypes import guess_type
+import os
+
+class Bundles(Generator):
+ def get_output(self):
+ for items in MEDIA_BUNDLES:
+ bundle = items[0]
+ backend = _load_root_filter(bundle)
+ variations = backend._get_variations_with_input()
+ if not variations:
+ name, content = self.generate_file(backend, bundle, {})
+ yield _get_key(bundle), name, content
+ else:
+ # Generate media files for all variation combinations
+ combinations = product(*(variations[key]
+ for key in sorted(variations.keys())))
+ for combination in combinations:
+ variation_map = zip(sorted(variations.keys()), combination)
+ variation = dict(variation_map)
+ name, content = self.generate_file(backend, bundle,
+ variation, combination)
+
+ key = _get_key(bundle, variation_map)
+ yield key, name, content
+
+ def get_dev_output(self, name):
+ bundle_combination, path = name.split('|', 1)
+ parts = bundle_combination.split('--')
+ bundle = parts[0]
+ combination = parts[1:]
+ root = _load_root_filter(bundle)
+ variations = root._get_variations_with_input()
+ variation = dict(zip(sorted(variations.keys()), combination))
+ content = root.get_dev_output(path, variation)
+ mimetype = guess_type(bundle)[0]
+ return content, mimetype
+
+ def get_dev_output_names(self):
+ for items in MEDIA_BUNDLES:
+ bundle = items[0]
+ backend = _load_root_filter(bundle)
+ variations = backend._get_variations_with_input()
+ if not variations:
+ for name, hash in backend.get_dev_output_names({}):
+ url = '%s|%s' % (bundle, name)
+ yield _get_key(bundle), url, hash
+ else:
+ # Generate media files for all variation combinations
+ combinations = product(*(variations[key]
+ for key in sorted(variations.keys())))
+ for combination in combinations:
+ variation_map = zip(sorted(variations.keys()), combination)
+ variation = dict(variation_map)
+ for name, hash in backend.get_dev_output_names(variation):
+ url = '%s--%s|%s' % (bundle, '--'.join(combination), name)
+ yield _get_key(bundle, variation_map), url, hash
+
+ def generate_file(self, backend, bundle, variation, combination=()):
+ print 'Generating %s with variation %r' % (bundle, variation)
+ output = list(backend.get_output(variation))
+ if len(output) == 0:
+ output = ('',)
+ assert len(output) == 1, \
+ 'Media bundle "%s" would result in multiple output files' % bundle
+ content = output[0]
+
+ combination = '--'.join(combination)
+ if combination:
+ combination = '--' + combination
+
+ base, ext = os.path.splitext(bundle)
+ filename = base + combination + ext
+ return filename, content
diff --git a/mediagenerator/generators/bundles/itercompat.py b/mediagenerator/generators/bundles/itercompat.py
new file mode 100644
index 0000000..2315821
--- /dev/null
+++ b/mediagenerator/generators/bundles/itercompat.py
@@ -0,0 +1,12 @@
+def product(*args, **kwds):
+ """
+ Taken from http://docs.python.org/library/itertools.html#itertools.product
+ """
+ # product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
+ # product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
+ pools = map(tuple, args) * kwds.get('repeat', 1)
+ result = [[]]
+ for pool in pools:
+ result = [x+[y] for x in result for y in pool]
+ for prod in result:
+ yield tuple(prod)
diff --git a/mediagenerator/generators/bundles/settings.py b/mediagenerator/generators/bundles/settings.py
new file mode 100644
index 0000000..0ed8c2f
--- /dev/null
+++ b/mediagenerator/generators/bundles/settings.py
@@ -0,0 +1,25 @@
+from django.conf import settings
+
+DEFAULT_MEDIA_FILTERS = getattr(settings, 'DEFAULT_MEDIA_FILTERS', {
+ 'ccss': 'mediagenerator.filters.clevercss.CleverCSS',
+ 'coffee': 'mediagenerator.filters.coffeescript.CoffeeScript',
+ 'css': 'mediagenerator.filters.cssurl.CSSURLFileFilter',
+ 'html': 'mediageneraator.filters.template.Template',
+ 'py': 'mediagenerator.filters.pyjs_filter.Pyjs',
+ 'pyva': 'mediagenerator.filters.pyvascript_filter.PyvaScript',
+ 'sass': 'mediagenerator.filters.sass.Sass',
+ 'scss': 'mediagenerator.filters.sass.Sass',
+})
+
+ROOT_MEDIA_FILTERS = getattr(settings, 'ROOT_MEDIA_FILTERS', {})
+
+# These are applied in addition to ROOT_MEDIA_FILTERS.
+# The separation is done because we don't want users to
+# always specify the default filters when they merely want
+# to configure YUICompressor or Closure.
+BASE_ROOT_MEDIA_FILTERS = getattr(settings, 'BASE_ROOT_MEDIA_FILTERS', {
+ '*': 'mediagenerator.filters.concat.Concat',
+ 'css': 'mediagenerator.filters.cssurl.CSSURL',
+})
+
+MEDIA_BUNDLES = getattr(settings, 'MEDIA_BUNDLES', ())
diff --git a/mediagenerator/generators/bundles/utils.py b/mediagenerator/generators/bundles/utils.py
new file mode 100644
index 0000000..de7b021
--- /dev/null
+++ b/mediagenerator/generators/bundles/utils.py
@@ -0,0 +1,78 @@
+from .settings import ROOT_MEDIA_FILTERS, MEDIA_BUNDLES, BASE_ROOT_MEDIA_FILTERS
+from mediagenerator.settings import MEDIA_DEV_MODE
+from mediagenerator.utils import load_backend, media_urls
+import os
+
+_cache = {}
+
+def _load_root_filter(bundle):
+ if bundle not in _cache:
+ _cache[bundle] = _load_root_filter_uncached(bundle)
+ return _cache[bundle]
+
+def _get_root_filters_list(filetype):
+ root_filters = ()
+ filetypes = (filetype, '*')
+ for filters_spec in (BASE_ROOT_MEDIA_FILTERS, ROOT_MEDIA_FILTERS):
+ for filetype in filetypes:
+ filters = filters_spec.get(filetype, ())
+ if not isinstance(filters, (tuple, list)):
+ filters = (filters, )
+ root_filters += tuple(filters)
+ return root_filters
+
+def _load_root_filter_uncached(bundle):
+ for items in MEDIA_BUNDLES:
+ if items[0] == bundle:
+ input = items[1:]
+ break
+ else:
+ raise ValueError('Could not find media bundle "%s"' % bundle)
+ filetype = os.path.splitext(bundle)[-1].lstrip('.')
+ root_filters = _get_root_filters_list(filetype)
+ backend_class = load_backend(root_filters[-1])
+ for filter in reversed(root_filters[:-1]):
+ input = [{'filter': filter, 'input': input}]
+
+ return backend_class(filter=root_filters[-1], filetype=filetype,
+ bundle=bundle, input=input)
+
+def _get_key(bundle, variation_map=None):
+ if variation_map:
+ bundle += '?' + '&'.join('='.join(item) for item in variation_map)
+ return bundle
+
+def _render_include_media(bundle, variation):
+ variation = variation.copy()
+ filetype = os.path.splitext(bundle)[-1].lstrip('.')
+
+ # The "media" variation is special and defines CSS media types
+ media_types = None
+ if filetype == 'css':
+ media_types = variation.pop('media', None)
+
+ if MEDIA_DEV_MODE:
+ root = _load_root_filter(bundle)
+ variations = root._get_variations_with_input()
+ variation_map = [(key, variation.pop(key))
+ for key in sorted(variations.keys())]
+ if variation:
+ raise ValueError('Bundle %s does not support the following variation(s): %s'
+ % (bundle, ', '.join(variation.keys())))
+ else:
+ variation_map = tuple((key, variation[key])
+ for key in sorted(variation.keys()))
+
+ urls = media_urls(_get_key(bundle, variation_map))
+
+ if filetype == 'css':
+ if media_types:
+ tag = u'<link rel="stylesheet" type="text/css" href="%%s" media="%s" />' % media_types
+ else:
+ tag = u'<link rel="stylesheet" type="text/css" href="%s" />'
+ elif filetype == 'js':
+ tag = u'<script type="text/javascript" src="%s"></script>'
+ else:
+ raise ValueError("""Don't know how to include file type "%s".""" % filetype)
+
+ return '\n'.join(tag % url for url in urls)
diff --git a/mediagenerator/generators/copyfiles.py b/mediagenerator/generators/copyfiles.py
new file mode 100644
index 0000000..33eac69
--- /dev/null
+++ b/mediagenerator/generators/copyfiles.py
@@ -0,0 +1,43 @@
+from django.conf import settings
+from hashlib import sha1
+from mediagenerator.base import Generator
+from mediagenerator.utils import get_media_dirs, find_file, prepare_patterns
+from mimetypes import guess_type
+import os
+
+COPY_MEDIA_FILETYPES = getattr(settings, 'COPY_MEDIA_FILETYPES',
+ ('gif', 'jpg', 'jpeg', 'png', 'svg', 'svgz', 'ico', 'swf', 'ttf', 'otf',
+ 'eot', 'woff'))
+
+IGNORE_PATTERN = prepare_patterns(getattr(settings,
+ 'IGNORE_MEDIA_COPY_PATTERNS', ()), 'IGNORE_MEDIA_COPY_PATTERNS')
+
+class CopyFiles(Generator):
+ def get_dev_output(self, name):
+ path = find_file(name)
+ fp = open(path, 'rb')
+ content = fp.read()
+ fp.close()
+ mimetype = guess_type(path)[0]
+ return content, mimetype
+
+ def get_dev_output_names(self):
+ media_files = {}
+ for root in get_media_dirs():
+ self.collect_copyable_files(media_files, root)
+
+ for name, source in media_files.items():
+ fp = open(source, 'rb')
+ hash = sha1(fp.read()).hexdigest()
+ fp.close()
+ yield name, name, hash
+
+ def collect_copyable_files(self, media_files, root):
+ for root_path, dirs, files in os.walk(root):
+ for file in files:
+ ext = os.path.splitext(file)[1].lstrip('.')
+ path = os.path.join(root_path, file)
+ media_path = path[len(root)+1:].replace(os.sep, '/')
+ if ext in COPY_MEDIA_FILETYPES and \
+ not IGNORE_PATTERN.match(media_path):
+ media_files[media_path] = path
diff --git a/mediagenerator/generators/manifest.py b/mediagenerator/generators/manifest.py
new file mode 100644
index 0000000..37fabd5
--- /dev/null
+++ b/mediagenerator/generators/manifest.py
@@ -0,0 +1,52 @@
+from django.conf import settings
+from django.template.loader import render_to_string
+from mediagenerator.base import Generator
+from mediagenerator.utils import get_media_mapping, prepare_patterns
+
+OFFLINE_MANIFEST = getattr(settings, 'OFFLINE_MANIFEST', {})
+if isinstance(OFFLINE_MANIFEST, basestring):
+ OFFLINE_MANIFEST = {OFFLINE_MANIFEST: '.*'}
+
+def get_tuple(data, name, default=()):
+ result = data.get(name, default)
+ if isinstance(result, basestring):
+ return (result,)
+ return result
+
+class Manifest(Generator):
+ def generate_version(self, key, url, content):
+ return None
+
+ def get_dev_output(self, name):
+ config = OFFLINE_MANIFEST[name]
+ if isinstance(config, (tuple, list)):
+ config = {'cache': config}
+ elif isinstance(config, basestring):
+ config = {'cache': (config,)}
+
+ cache_pattern = prepare_patterns(get_tuple(config, 'cache', '.*'),
+ 'OFFLINE_MANIFEST[%s]' % name)
+ exclude = prepare_patterns(get_tuple(config, 'exclude'),
+ "OFFLINE_MANIFEST[%s]['exclude']" % name)
+ cache = set()
+ for item in get_media_mapping().keys():
+ if cache_pattern.match(item) and not exclude.match(item):
+ cache.add(item)
+ cache -= set(OFFLINE_MANIFEST.keys())
+
+ network = get_tuple(config, 'network', ('*',))
+ fallback = get_tuple(config, 'fallback')
+
+ template = get_tuple(config, 'template') + (
+ 'mediagenerator/manifest/' + name,
+ 'mediagenerator/manifest/base.manifest'
+ )
+
+ content = render_to_string(template, {
+ 'cache': cache, 'network': network, 'fallback': fallback,
+ })
+ return content, 'text/cache-manifest'
+
+ def get_dev_output_names(self):
+ for name in OFFLINE_MANIFEST:
+ yield name, name, None
diff --git a/mediagenerator/management/__init__.py b/mediagenerator/management/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mediagenerator/management/commands/__init__.py b/mediagenerator/management/commands/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mediagenerator/management/commands/generatemedia.py b/mediagenerator/management/commands/generatemedia.py
new file mode 100644
index 0000000..34aae08
--- /dev/null
+++ b/mediagenerator/management/commands/generatemedia.py
@@ -0,0 +1,10 @@
+from ...api import generate_media
+from django.core.management.base import NoArgsCommand
+
+class Command(NoArgsCommand):
+ help = 'Combines and compresses your media files and saves them in _generated_media.'
+
+ requires_model_validation = False
+
+ def handle_noargs(self, **options):
+ generate_media()
diff --git a/mediagenerator/middleware.py b/mediagenerator/middleware.py
new file mode 100644
index 0000000..4d2fdaf
--- /dev/null
+++ b/mediagenerator/middleware.py
@@ -0,0 +1,48 @@
+from .settings import DEV_MEDIA_URL, MEDIA_DEV_MODE
+# Only load other dependencies if they're needed
+if MEDIA_DEV_MODE:
+ from .utils import _refresh_dev_names, _backend_mapping
+ from django.http import HttpResponse, Http404
+ from django.utils.cache import patch_cache_control
+ from django.utils.http import http_date
+ import time
+
+class MediaMiddleware(object):
+ """
+ Middleware for serving and browser-side caching of media files.
+
+ This MUST be your *first* entry in MIDDLEWARE_CLASSES. Otherwise, some
+ other middleware might add ETags or otherwise manipulate the caching
+ headers which would result in the browser doing unnecessary HTTP
+ roundtrips for unchanged media.
+ """
+
+ MAX_AGE = 60*60*24*365
+
+ def process_request(self, request):
+ if not MEDIA_DEV_MODE:
+ return
+
+ # We refresh the dev names only once for the whole request, so all
+ # media_url() calls are cached.
+ _refresh_dev_names()
+
+ if not request.path.startswith(DEV_MEDIA_URL):
+ return
+
+ filename = request.path[len(DEV_MEDIA_URL):]
+
+ try:
+ backend = _backend_mapping[filename]
+ except KeyError:
+ raise Http404('No such media file "%s"' % filename)
+ content, mimetype = backend.get_dev_output(filename)
+ response = HttpResponse(content, content_type=mimetype)
+
+ # Cache manifest files MUST NEVER be cached or you'll be unable to update
+ # your cached app!!!
+ if response['Content-Type'] != 'text/cache-manifest' and \
+ response.status_code == 200:
+ patch_cache_control(response, public=True, max_age=self.MAX_AGE)
+ response['Expires'] = http_date(time.time() + self.MAX_AGE)
+ return response
diff --git a/mediagenerator/settings.py b/mediagenerator/settings.py
new file mode 100644
index 0000000..96fb276
--- /dev/null
+++ b/mediagenerator/settings.py
@@ -0,0 +1,23 @@
+from django.conf import settings
+import os
+
+DEV_MEDIA_URL = getattr(settings, 'DEV_MEDIA_URL',
+ getattr(settings, 'STATIC_URL', settings.MEDIA_URL))
+PRODUCTION_MEDIA_URL = getattr(settings, 'PRODUCTION_MEDIA_URL', DEV_MEDIA_URL)
+
+MEDIA_GENERATORS = getattr(settings, 'MEDIA_GENERATORS', (
+ 'mediagenerator.generators.copyfiles.CopyFiles',
+ 'mediagenerator.generators.bundles.Bundles',
+ 'mediagenerator.generators.manifest.Manifest',
+))
+
+GENERATED_MEDIA_DIR = os.path.abspath(
+ getattr(settings, 'GENERATED_MEDIA_DIR', '_generated_media'))
+
+GLOBAL_MEDIA_DIRS = getattr(settings, 'GLOBAL_MEDIA_DIRS',
+ getattr(settings, 'STATICFILES_DIRS', ()))
+
+IGNORE_APP_MEDIA_DIRS = getattr(settings, 'IGNORE_APP_MEDIA_DIRS',
+ ('django.contrib.admin',))
+
+MEDIA_DEV_MODE = getattr(settings, 'MEDIA_DEV_MODE', settings.DEBUG)
diff --git a/mediagenerator/templates/mediagenerator/manifest/base.manifest b/mediagenerator/templates/mediagenerator/manifest/base.manifest
new file mode 100644
index 0000000..0e6f578
--- /dev/null
+++ b/mediagenerator/templates/mediagenerator/manifest/base.manifest
@@ -0,0 +1,25 @@
+CACHE MANIFEST
+{% load media %}
+
+CACHE:
+{% block cache %}
+{% for name in cache %}
+{% for url in name|media_urls %}
+{{ url }}
+{% endfor %}
+{% endfor %}
+{% endblock cache %}
+
+NETWORK:
+{% block network %}
+{% for url in network %}
+{{ url }}
+{% endfor %}
+{% endblock %}
+
+FALLBACK:
+{% block fallback %}
+{% for source, target in fallback.items %}
+{{ source }} {{ target }}
+{% endfor %}
+{% endblock %}
diff --git a/mediagenerator/templatetags/__init__.py b/mediagenerator/templatetags/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mediagenerator/templatetags/media.py b/mediagenerator/templatetags/media.py
new file mode 100644
index 0000000..c426fd0
--- /dev/null
+++ b/mediagenerator/templatetags/media.py
@@ -0,0 +1,45 @@
+from django import template
+from mediagenerator.generators.bundles.utils import _render_include_media
+from mediagenerator import utils
+
+register = template.Library()
+
+class MediaNode(template.Node):
+ def __init__(self, bundle, variation):
+ self.bundle = bundle
+ self.variation = variation
+
+ def render(self, context):
+ bundle = template.Variable(self.bundle).resolve(context)
+ variation = {}
+ for key, value in self.variation.items():
+ variation[key] = template.Variable(value).resolve(context)
+
+ return _render_include_media(bundle, variation)
+
[email protected]
+def include_media(parser, token):
+ try:
+ contents = token.split_contents()
+ bundle = contents[1]
+ variation_spec = contents[2:]
+ variation = {}
+ for item in variation_spec:
+ key, value = item.split('=')
+ variation[key] = value
+ except (ValueError, AssertionError, IndexError):
+ raise template.TemplateSyntaxError(
+ '%r could not parse the arguments: the first argument must be the '
+ 'the name of a bundle in the MEDIA_BUNDLES setting, and the '
+ 'following arguments specify the media variation (if you have '
+ 'any) and must be of the form key="value"' % contents[0])
+
+ return MediaNode(bundle, variation)
+
[email protected]_tag
+def media_url(url):
+ return utils.media_url(url)
+
[email protected]
+def media_urls(url):
+ return utils.media_urls(url)
diff --git a/mediagenerator/utils.py b/mediagenerator/utils.py
new file mode 100644
index 0000000..32d10be
--- /dev/null
+++ b/mediagenerator/utils.py
@@ -0,0 +1,137 @@
+from . import settings as media_settings
+from .settings import GLOBAL_MEDIA_DIRS, PRODUCTION_MEDIA_URL, \
+ IGNORE_APP_MEDIA_DIRS, MEDIA_GENERATORS, DEV_MEDIA_URL
+from django.conf import settings
+from django.core.exceptions import ImproperlyConfigured
+from django.utils.importlib import import_module
+from django.utils.http import urlquote
+import os
+import re
+
+try:
+ from _generated_media_names import NAMES
+except ImportError:
+ NAMES = None
+
+_backends_cache = {}
+_media_dirs_cache = []
+
+_generators_cache = []
+_generated_names = {}
+_backend_mapping = {}
+
+def _load_generators():
+ if not _generators_cache:
+ for name in MEDIA_GENERATORS:
+ backend = load_backend(name)()
+ _generators_cache.append(backend)
+ return _generators_cache
+
+def _refresh_dev_names():
+ _generated_names.clear()
+ _backend_mapping.clear()
+ for backend in _load_generators():
+ for key, url, hash in backend.get_dev_output_names():
+ versioned_url = urlquote(url)
+ if hash:
+ versioned_url += '?version=' + hash
+ _generated_names.setdefault(key, [])
+ _generated_names[key].append(versioned_url)
+ _backend_mapping[url] = backend
+
+class _MatchNothing(object):
+ def match(self, content):
+ return False
+
+def prepare_patterns(patterns, setting_name):
+ """Helper function for patter-matching settings."""
+ if isinstance(patterns, basestring):
+ patterns = (patterns,)
+ if not patterns:
+ return _MatchNothing()
+ # First validate each pattern individually
+ for pattern in patterns:
+ try:
+ re.compile(pattern, re.U)
+ except re.error:
+ raise ValueError("""Pattern "%s" can't be compiled """
+ "in %s" % (pattern, setting_name))
+ # Now return a combined pattern
+ return re.compile('^(' + ')$|^('.join(patterns) + ')$', re.U)
+
+def get_production_mapping():
+ if NAMES is None:
+ raise ImportError('Could not import _generated_media_names. This '
+ 'file is needed for production mode. Please '
+ 'run manage.py generatemedia to create it.')
+ return NAMES
+
+def get_media_mapping():
+ if media_settings.MEDIA_DEV_MODE:
+ return _generated_names
+ return get_production_mapping()
+
+def get_media_url_mapping():
+ if media_settings.MEDIA_DEV_MODE:
+ base_url = DEV_MEDIA_URL
+ else:
+ base_url = PRODUCTION_MEDIA_URL
+
+ mapping = {}
+ for key, value in get_media_mapping().items():
+ if isinstance(value, basestring):
+ value = (value,)
+ mapping[key] = [base_url + url for url in value]
+
+ return mapping
+
+def media_urls(key, refresh=False):
+ if media_settings.MEDIA_DEV_MODE:
+ if refresh:
+ _refresh_dev_names()
+ return [DEV_MEDIA_URL + url for url in _generated_names[key]]
+ return [PRODUCTION_MEDIA_URL + get_production_mapping()[key]]
+
+def media_url(key, refresh=False):
+ urls = media_urls(key, refresh=refresh)
+ if len(urls) == 1:
+ return urls[0]
+ raise ValueError('media_url() only works with URLs that contain exactly '
+ 'one file. Use media_urls() (or {% include_media %} in templates) instead.')
+
+def get_media_dirs():
+ if not _media_dirs_cache:
+ media_dirs = [os.path.abspath(root) for root in GLOBAL_MEDIA_DIRS]
+ for app in settings.INSTALLED_APPS:
+ if app in IGNORE_APP_MEDIA_DIRS:
+ continue
+ for name in ('static', 'media'):
+ app_root = os.path.dirname(import_module(app).__file__)
+ media_dirs.append(os.path.join(app_root, name))
+ _media_dirs_cache.extend(media_dirs)
+ return _media_dirs_cache
+
+def find_file(name, media_dirs=None):
+ if media_dirs is None:
+ media_dirs = get_media_dirs()
+ for root in media_dirs:
+ path = os.path.normpath(os.path.join(root, name))
+ if os.path.isfile(path):
+ return path
+
+def load_backend(backend):
+ if backend not in _backends_cache:
+ module_name, func_name = backend.rsplit('.', 1)
+ _backends_cache[backend] = _load_backend(backend)
+ return _backends_cache[backend]
+
+def _load_backend(path):
+ module_name, attr_name = path.rsplit('.', 1)
+ try:
+ mod = import_module(module_name)
+ except (ImportError, ValueError), e:
+ raise ImproperlyConfigured('Error importing backend module %s: "%s"' % (module_name, e))
+ try:
+ return getattr(mod, attr_name)
+ except AttributeError:
+ raise ImproperlyConfigured('Module "%s" does not define a "%s" backend' % (module_name, attr_name))
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..b0fa072
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,32 @@
+from setuptools import setup, find_packages
+
+DESCRIPTION = 'Total asset management for Django. Combine and compress your JavaScript and CSS.'
+LONG_DESCRIPTION = None
+try:
+ LONG_DESCRIPTION = open('README.rst').read()
+except:
+ pass
+
+setup(name='django-mediagenerator',
+ version='1.8',
+ packages=find_packages(exclude=('tests', 'tests.*',
+ 'base_project', 'base_project.*')),
+ package_data={'mediagenerator.filters': ['pyjslibs/*.py']},
+ author='Waldemar Kornewald',
+ author_email='[email protected]',
+ url='http://www.allbuttonspressed.com/projects/django-mediagenerator',
+ description=DESCRIPTION,
+ long_description=LONG_DESCRIPTION,
+ platforms=['any'],
+ classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+ 'Environment :: Web Environment',
+ 'Framework :: Django',
+ 'Intended Audience :: Developers',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python',
+ 'Topic :: Software Development :: Libraries :: Application Frameworks',
+ 'Topic :: Software Development :: Libraries :: Python Modules',
+ 'License :: OSI Approved :: BSD License',
+ ],
+)
|
prashantkg16/paperclip-extended | 919ae567f046dbbda623293efa86f6e8716b7b97 | Now can upload to different s3 buckets to improve download speed by web browsers | diff --git a/README b/README
deleted file mode 100644
index 1d6c56a..0000000
--- a/README
+++ /dev/null
@@ -1,54 +0,0 @@
-=PaperclipExtended
-
-PaperclipExtended extends Paperclip plugin by Jon Yurek and thoughtbot. It adds some possibly useful functionalities that original plugin lacks.
-
-Note that PaperclipExtended plugin is not a replacement for Paperclip. It requires that you have Paperclip plugin already installed.
-
-PaperclipExtended is known to work with Paperclip 2.1.2 (current version at the time of development). Note that extensions provided by PaperclipExtended may be included in Paperclip in future, so this plugin will not be needed anymore.
-
-==Functionalities provided by PaperclipExtended
-
-* Custom commands
- Enables user to define additional options that will be passed to ImageMagick convert command after thumbnail generation by Paperclip.
-
-* File name normalization
- You can use :normalized_basename string in attachment's :path or :url definition, which is later interpolated. It works just like :basename, but it is normalized by substituting unusual characters with underscores.
-
-==Usage
-
-===Custom commands
-
-In your model:
-
- class User < ActiveRecord::Base
- has_attached_file :avatar, :styles => { :medium => "300x300>", :thumb => "100x100>" }, :commands => { :medium => "-background white -gravity center -extent 300x300 +repage" }
- end
-
-The string you pass in commands hash will be attached to convert command after usual thumbnail generation by Paperclip.
-
-The result convert command will be now:
-convert -scale "300x300>" -background white -gravity center -extent 300x300 +repage
-
-Instead of just:
-convert -scale "300x300>"
-
-The commands parameter is optional, also you can define it only for certain styles as above.
-
-Read ImageMagick Command Line Options documentation for information on what can be put in commands parameter (http://www.imagemagick.org/script/command-line-options.php).
-
-===File name normalization
-
-In your model:
-
- class User < ActiveRecord::Base
- has_attached_file :avatar,
- :styles => { :thumb => "80x80#" },
- :path => "#{RAILS_ROOT}/public/avatars/:id/:normalized_basename_:style.:extension",
- :url => "/avatars/:id/:normalized_basename_:style.:extension"
- end
-
-:normalized_basename works just like :basename, the only difference is that file's basename is normalized by substituting unusual chars with underscores. By "unusual characters" are those that are not matched by /[A-Za-z0-9_-]/ regular expression. For example if "my avatar.jpg" file is uploaded it's normalized basename would be "my_avatar.jpg". Such normalization may be useful in situations where you normally would url_encode the filename or something like that.
-
-
-
-Copyright (c) 2008 Michal Szajbe (http://codetunes.com) and netguru (http://netguru.pl), released under the MIT license
diff --git a/README.rdoc b/README.rdoc
index 1d6c56a..648d355 100644
--- a/README.rdoc
+++ b/README.rdoc
@@ -1,54 +1,79 @@
=PaperclipExtended
PaperclipExtended extends Paperclip plugin by Jon Yurek and thoughtbot. It adds some possibly useful functionalities that original plugin lacks.
Note that PaperclipExtended plugin is not a replacement for Paperclip. It requires that you have Paperclip plugin already installed.
PaperclipExtended is known to work with Paperclip 2.1.2 (current version at the time of development). Note that extensions provided by PaperclipExtended may be included in Paperclip in future, so this plugin will not be needed anymore.
==Functionalities provided by PaperclipExtended
* Custom commands
Enables user to define additional options that will be passed to ImageMagick convert command after thumbnail generation by Paperclip.
* File name normalization
You can use :normalized_basename string in attachment's :path or :url definition, which is later interpolated. It works just like :basename, but it is normalized by substituting unusual characters with underscores.
+* Multiple S3 buckets
+ Enables you to upload attachments to different S3 buckets. Modern web browsers are limited to download 2-4 files from one host at the same time, so placing your files in 4 different buckets (hosts) will let browsers download 8-16 files simultaneously.
+
==Usage
===Custom commands
In your model:
class User < ActiveRecord::Base
has_attached_file :avatar, :styles => { :medium => "300x300>", :thumb => "100x100>" }, :commands => { :medium => "-background white -gravity center -extent 300x300 +repage" }
end
The string you pass in commands hash will be attached to convert command after usual thumbnail generation by Paperclip.
The result convert command will be now:
convert -scale "300x300>" -background white -gravity center -extent 300x300 +repage
Instead of just:
convert -scale "300x300>"
The commands parameter is optional, also you can define it only for certain styles as above.
Read ImageMagick Command Line Options documentation for information on what can be put in commands parameter (http://www.imagemagick.org/script/command-line-options.php).
===File name normalization
In your model:
class User < ActiveRecord::Base
has_attached_file :avatar,
:styles => { :thumb => "80x80#" },
:path => "#{RAILS_ROOT}/public/avatars/:id/:normalized_basename_:style.:extension",
:url => "/avatars/:id/:normalized_basename_:style.:extension"
end
:normalized_basename works just like :basename, the only difference is that file's basename is normalized by substituting unusual chars with underscores. By "unusual characters" are those that are not matched by /[A-Za-z0-9_-]/ regular expression. For example if "my avatar.jpg" file is uploaded it's normalized basename would be "my_avatar.jpg". Such normalization may be useful in situations where you normally would url_encode the filename or something like that.
+===Multiple S3 buckets
+
+Modern web browsers are limited to download 2-4 files from one host at the same time, so placing your files in 4 different buckets (hosts) will let browsers download 8-16 files simultaneously.
+
+In your model:
+ class User < ActiveRecord::Base
+ has_attached_file :avatar,
+ :storage => :s3,
+ :s3_credentials => "#{RAILS_ROOT}/config/s3.yml",
+ :path => "avatars/:id/:style_:extension",
+ :bucket => lambda do |attachment|
+ i = attachment.instance.id % 4
+ "bucket_#{i}"
+ end
+ end
+
+This will place each avatar in one of four buckets: bucket_0, bucket_1, bucket_2 or bucket_3. The exact bucket is chosen at runtime and in this case it's based on models id.
+
+Getting attachment's path:
+puts User.find(1).avatar.url(:original)
+# => http://bucket_1.s3.amazonaws.com/avatars/1/original.jpg
+If your attachments are images and you display many of them on the same page, now you should see them loading much faster.
Copyright (c) 2008 Michal Szajbe (http://codetunes.com) and netguru (http://netguru.pl), released under the MIT license
diff --git a/lib/paperclip_extended.rb b/lib/paperclip_extended.rb
index 60897c4..4b133ee 100644
--- a/lib/paperclip_extended.rb
+++ b/lib/paperclip_extended.rb
@@ -1,3 +1,4 @@
require 'commands/attachment'
require 'commands/thumbnail'
-require 'interpolations/attachment'
\ No newline at end of file
+require 'interpolations/attachment'
+require 's3_buckets/storage'
\ No newline at end of file
diff --git a/lib/s3_buckets/storage.rb b/lib/s3_buckets/storage.rb
new file mode 100644
index 0000000..ebcaf40
--- /dev/null
+++ b/lib/s3_buckets/storage.rb
@@ -0,0 +1,28 @@
+module Paperclip
+ module Storage
+ module S3
+ class << self
+ alias_method :original_extended, :extended
+ end
+
+ def self.extended base
+ original_extended(base)
+ base.class.interpolations[:s3_url] = lambda do |attachment, style|
+ "http://#{attachment.bucket_name}.s3.amazonaws.com/#{attachment.path(style).gsub(%r{^/}, "")}"
+ end
+ end
+
+ def s3_bucket
+ @s3_bucket ||= s3.bucket(bucket, true, @s3_permissions)
+ end
+
+ def bucket_name
+ bucket
+ end
+
+ def bucket
+ @bucket.is_a?(Proc) ? @bucket.call(self) : @bucket
+ end
+ end
+ end
+end
\ No newline at end of file
|
prashantkg16/paperclip-extended | 133352aa0bb979b67cfd46a414fdae1956fa73d6 | fixed buggy basename extraction from filename | diff --git a/lib/interpolations/attachment.rb b/lib/interpolations/attachment.rb
index 8bc9c69..7a91a1e 100644
--- a/lib/interpolations/attachment.rb
+++ b/lib/interpolations/attachment.rb
@@ -1,22 +1,19 @@
module Paperclip
class Attachment
class << self
alias_method :original_interpolations, :interpolations
end
def self.interpolations
@interpolations ||= {
:normalized_basename => lambda do |attachment, style|
normalize_basename(attachment.original_filename.gsub(File.extname(attachment.original_filename), ""))
end
}.merge(original_interpolations)
end
def self.normalize_basename filename
- name, ext = File.basename(filename, File.extname(filename)), File.extname(filename)
- normalized = name.gsub(/[^A-Za-z0-9_-]/, '_')
- normalized = 'filename' if normalized.empty?
- normalized + ext
+ filename.gsub(/[^A-Za-z0-9_-]/, '_')
end
end
end
\ No newline at end of file
|
prashantkg16/paperclip-extended | 1bb3dddfba5ff101ce4727831d3d14c98350de12 | :normalized_basename interpolation added | diff --git a/README b/README
index 6d627e6..560b9ab 100644
--- a/README
+++ b/README
@@ -1,29 +1,54 @@
=PaperclipExtended
-PaperclipExtended extends Paperclip plugin by Jon Yurek and thoughtbot. It enables user to define additional options that will be passed to ImageMagick convert command after thumbnail generation by Paperclip.
+PaperclipExtended extends Paperclip plugin by Jon Yurek and thoughtbot. It add some possibly useful functionalities that original plugin lacks.
Note that PaperclipExtended plugin is not a replacement for Paperclip. It requires that you have Paperclip plugin already installed.
PaperclipExtended is known to work with Paperclip 2.1.2 (current version at the time of development). Note that extensions provided by PaperclipExtended may be included in Paperclip in future, so this plugin will not be needed anymore.
+==Functionalities provided by PaperclipExtended
+
+* Custom commands
+ Enables user to define additional options that will be passed to ImageMagick convert command after thumbnail generation by Paperclip.
+
+* File name normalization
+ You can use :normalized_basename string in attachment's :path or :url definition, which is later interpolated. It works just like :basename, but it is normalized by substituting unusual characters with underscores.
+
==Usage
+===Custom commands
+
In your model:
class User < ActiveRecord::Base
has_attached_file :avatar, :styles => { :medium => "300x300>", :thumb => "100x100>" }, :commands => { :medium => "-background white -gravity center -extent 300x300 +repage" }
end
The string you pass in commands hash will be attached to convert command after usual thumbnail generation by Paperclip.
The result convert command will be now:
convert -scale "300x300>" -background white -gravity center -extent 300x300 +repage
Instead of just:
convert -scale "300x300>"
The commands parameter is optional, also you can define it only for certain styles as above.
Read ImageMagick Command Line Options documentation for information on what can be put in commands parameter (http://www.imagemagick.org/script/command-line-options.php).
+===File name normalization
+
+In your model:
+
+ class User < ActiveRecord::Base
+ has_attached_file :avatar,
+ :styles => { :thumb => "80x80#" },
+ :path => "#{RAILS_ROOT}/public/avatars/:id/:normalized_basename_:style.:extension",
+ :url => "/avatars/:id/:normalized_basename_:style.:extension"
+ end
+
+:normalized_basename works just like :basename, the only difference is that file's base name is normalized by substituting unusual chars with underscores. By "unusual characters" are those that are not matched by /[A-Za-z0-9_-]/ regular expression. For example if "my avatar.jpg" file is uploaded it's normalized basename would be "my_avatar.jpg". Such normalization may be useful in situations where you normally would url_encode the filename or something like that.
+
+
+
Copyright (c) 2008 Michal Szajbe (http://codetunes.com) and netguru (http://netguru.pl), released under the MIT license
diff --git a/README.rdoc b/README.rdoc
index 6d627e6..560b9ab 100644
--- a/README.rdoc
+++ b/README.rdoc
@@ -1,29 +1,54 @@
=PaperclipExtended
-PaperclipExtended extends Paperclip plugin by Jon Yurek and thoughtbot. It enables user to define additional options that will be passed to ImageMagick convert command after thumbnail generation by Paperclip.
+PaperclipExtended extends Paperclip plugin by Jon Yurek and thoughtbot. It add some possibly useful functionalities that original plugin lacks.
Note that PaperclipExtended plugin is not a replacement for Paperclip. It requires that you have Paperclip plugin already installed.
PaperclipExtended is known to work with Paperclip 2.1.2 (current version at the time of development). Note that extensions provided by PaperclipExtended may be included in Paperclip in future, so this plugin will not be needed anymore.
+==Functionalities provided by PaperclipExtended
+
+* Custom commands
+ Enables user to define additional options that will be passed to ImageMagick convert command after thumbnail generation by Paperclip.
+
+* File name normalization
+ You can use :normalized_basename string in attachment's :path or :url definition, which is later interpolated. It works just like :basename, but it is normalized by substituting unusual characters with underscores.
+
==Usage
+===Custom commands
+
In your model:
class User < ActiveRecord::Base
has_attached_file :avatar, :styles => { :medium => "300x300>", :thumb => "100x100>" }, :commands => { :medium => "-background white -gravity center -extent 300x300 +repage" }
end
The string you pass in commands hash will be attached to convert command after usual thumbnail generation by Paperclip.
The result convert command will be now:
convert -scale "300x300>" -background white -gravity center -extent 300x300 +repage
Instead of just:
convert -scale "300x300>"
The commands parameter is optional, also you can define it only for certain styles as above.
Read ImageMagick Command Line Options documentation for information on what can be put in commands parameter (http://www.imagemagick.org/script/command-line-options.php).
+===File name normalization
+
+In your model:
+
+ class User < ActiveRecord::Base
+ has_attached_file :avatar,
+ :styles => { :thumb => "80x80#" },
+ :path => "#{RAILS_ROOT}/public/avatars/:id/:normalized_basename_:style.:extension",
+ :url => "/avatars/:id/:normalized_basename_:style.:extension"
+ end
+
+:normalized_basename works just like :basename, the only difference is that file's base name is normalized by substituting unusual chars with underscores. By "unusual characters" are those that are not matched by /[A-Za-z0-9_-]/ regular expression. For example if "my avatar.jpg" file is uploaded it's normalized basename would be "my_avatar.jpg". Such normalization may be useful in situations where you normally would url_encode the filename or something like that.
+
+
+
Copyright (c) 2008 Michal Szajbe (http://codetunes.com) and netguru (http://netguru.pl), released under the MIT license
diff --git a/lib/interpolations/attachment.rb b/lib/interpolations/attachment.rb
new file mode 100644
index 0000000..8bc9c69
--- /dev/null
+++ b/lib/interpolations/attachment.rb
@@ -0,0 +1,22 @@
+module Paperclip
+ class Attachment
+ class << self
+ alias_method :original_interpolations, :interpolations
+ end
+
+ def self.interpolations
+ @interpolations ||= {
+ :normalized_basename => lambda do |attachment, style|
+ normalize_basename(attachment.original_filename.gsub(File.extname(attachment.original_filename), ""))
+ end
+ }.merge(original_interpolations)
+ end
+
+ def self.normalize_basename filename
+ name, ext = File.basename(filename, File.extname(filename)), File.extname(filename)
+ normalized = name.gsub(/[^A-Za-z0-9_-]/, '_')
+ normalized = 'filename' if normalized.empty?
+ normalized + ext
+ end
+ end
+end
\ No newline at end of file
diff --git a/lib/paperclip_extended.rb b/lib/paperclip_extended.rb
index 883804d..60897c4 100644
--- a/lib/paperclip_extended.rb
+++ b/lib/paperclip_extended.rb
@@ -1,2 +1,3 @@
require 'commands/attachment'
-require 'commands/thumbnail'
\ No newline at end of file
+require 'commands/thumbnail'
+require 'interpolations/attachment'
\ No newline at end of file
|
prashantkg16/paperclip-extended | b1f657b173b819c9812d1fa3a880b2297d511f5f | file structure changed | diff --git a/lib/paperclip_extended/attachment.rb b/lib/commands/attachment.rb
similarity index 100%
rename from lib/paperclip_extended/attachment.rb
rename to lib/commands/attachment.rb
diff --git a/lib/paperclip_extended/thumbnail.rb b/lib/commands/thumbnail.rb
similarity index 100%
rename from lib/paperclip_extended/thumbnail.rb
rename to lib/commands/thumbnail.rb
diff --git a/lib/paperclip_extended.rb b/lib/paperclip_extended.rb
index 3fbb085..883804d 100644
--- a/lib/paperclip_extended.rb
+++ b/lib/paperclip_extended.rb
@@ -1,2 +1,2 @@
-require 'paperclip_extended/attachment'
-require 'paperclip_extended/thumbnail'
\ No newline at end of file
+require 'commands/attachment'
+require 'commands/thumbnail'
\ No newline at end of file
|
prashantkg16/paperclip-extended | 8fe9f7ce0958c88e9e48baa688168d1b4b35ca42 | copyrights update | diff --git a/README.rdoc b/README.rdoc
index c19858d..6d627e6 100644
--- a/README.rdoc
+++ b/README.rdoc
@@ -1,29 +1,29 @@
=PaperclipExtended
PaperclipExtended extends Paperclip plugin by Jon Yurek and thoughtbot. It enables user to define additional options that will be passed to ImageMagick convert command after thumbnail generation by Paperclip.
Note that PaperclipExtended plugin is not a replacement for Paperclip. It requires that you have Paperclip plugin already installed.
PaperclipExtended is known to work with Paperclip 2.1.2 (current version at the time of development). Note that extensions provided by PaperclipExtended may be included in Paperclip in future, so this plugin will not be needed anymore.
==Usage
In your model:
class User < ActiveRecord::Base
has_attached_file :avatar, :styles => { :medium => "300x300>", :thumb => "100x100>" }, :commands => { :medium => "-background white -gravity center -extent 300x300 +repage" }
end
The string you pass in commands hash will be attached to convert command after usual thumbnail generation by Paperclip.
The result convert command will be now:
convert -scale "300x300>" -background white -gravity center -extent 300x300 +repage
Instead of just:
convert -scale "300x300>"
The commands parameter is optional, also you can define it only for certain styles as above.
Read ImageMagick Command Line Options documentation for information on what can be put in commands parameter (http://www.imagemagick.org/script/command-line-options.php).
-Copyright (c) 2008 Michal Szajbe (http://codetunes.com), released under the MIT license
+Copyright (c) 2008 Michal Szajbe (http://codetunes.com) and netguru (http://netguru.pl), released under the MIT license
|
prashantkg16/paperclip-extended | 7b17fe01a9148d83d208689aaddb5e44a5bf5c9a | copyrights update | diff --git a/README b/README
index c19858d..6d627e6 100644
--- a/README
+++ b/README
@@ -1,29 +1,29 @@
=PaperclipExtended
PaperclipExtended extends Paperclip plugin by Jon Yurek and thoughtbot. It enables user to define additional options that will be passed to ImageMagick convert command after thumbnail generation by Paperclip.
Note that PaperclipExtended plugin is not a replacement for Paperclip. It requires that you have Paperclip plugin already installed.
PaperclipExtended is known to work with Paperclip 2.1.2 (current version at the time of development). Note that extensions provided by PaperclipExtended may be included in Paperclip in future, so this plugin will not be needed anymore.
==Usage
In your model:
class User < ActiveRecord::Base
has_attached_file :avatar, :styles => { :medium => "300x300>", :thumb => "100x100>" }, :commands => { :medium => "-background white -gravity center -extent 300x300 +repage" }
end
The string you pass in commands hash will be attached to convert command after usual thumbnail generation by Paperclip.
The result convert command will be now:
convert -scale "300x300>" -background white -gravity center -extent 300x300 +repage
Instead of just:
convert -scale "300x300>"
The commands parameter is optional, also you can define it only for certain styles as above.
Read ImageMagick Command Line Options documentation for information on what can be put in commands parameter (http://www.imagemagick.org/script/command-line-options.php).
-Copyright (c) 2008 Michal Szajbe (http://codetunes.com), released under the MIT license
+Copyright (c) 2008 Michal Szajbe (http://codetunes.com) and netguru (http://netguru.pl), released under the MIT license
|
JonGretar/gitfile-templates | 9ad275d05523a3a202dc278b4152f616da53b521 | Added ignore of Mnesia.* for Erlang projects. | diff --git a/Erlang/.gitignore b/Erlang/.gitignore
index ba16851..fced35d 100644
--- a/Erlang/.gitignore
+++ b/Erlang/.gitignore
@@ -1,4 +1,5 @@
.DS_Store
*.beam
*.dump
log/*
+Mnesia.*
\ No newline at end of file
|
JonGretar/gitfile-templates | b9d073d75eccda0b6888ead8b7d1b90c98c842f9 | Initial Import | diff --git a/Erlang/.gitignore b/Erlang/.gitignore
new file mode 100644
index 0000000..ba16851
--- /dev/null
+++ b/Erlang/.gitignore
@@ -0,0 +1,4 @@
+.DS_Store
+*.beam
+*.dump
+log/*
diff --git a/README.markdown b/README.markdown
new file mode 100644
index 0000000..080ef93
--- /dev/null
+++ b/README.markdown
@@ -0,0 +1,3 @@
+# Git Templates
+
+Just a collection of git config files for new projects.
diff --git a/Xcode/.gitattributes b/Xcode/.gitattributes
new file mode 100644
index 0000000..fdbd29f
--- /dev/null
+++ b/Xcode/.gitattributes
@@ -0,0 +1 @@
+*.pbxproj -crlf -diff -merge
diff --git a/Xcode/.gitignore b/Xcode/.gitignore
new file mode 100644
index 0000000..7f70da8
--- /dev/null
+++ b/Xcode/.gitignore
@@ -0,0 +1,7 @@
+.DS_Store
+*.pbxuser
+*.perspectivev3
+*.tm_build_errors
+*.mode1v3
+build/*
+
|
jaz303/exp-cpp | ea385c12b8a33aab565549e2843008f2e0e72851 | foreach | diff --git a/Rakefile b/Rakefile
index 7612e51..437d432 100644
--- a/Rakefile
+++ b/Rakefile
@@ -1,18 +1,18 @@
task :clean do
sh "rm -f *.o"
end
rule '.o' => '.cpp' do |t|
sh "g++ -I/opt/local/include #{t.source} -c -o #{t.name}"
end
-%w(copy_constructors references auto_ptr boost_ptr).each do |exe|
+%w(copy_constructors references auto_ptr boost_ptr boost_foreach).each do |exe|
file exe => "#{exe}.o" do
sh "g++ -I/opt/local/include -o #{exe} #{exe}.o"
end
task :all => exe
task :clean do
sh "rm -f #{exe}"
end
end
diff --git a/boost_foreach.cpp b/boost_foreach.cpp
new file mode 100644
index 0000000..4e1f978
--- /dev/null
+++ b/boost_foreach.cpp
@@ -0,0 +1,22 @@
+#include <string>
+#include <iostream>
+#include <boost/foreach.hpp>
+
+#define foreach BOOST_FOREACH
+#define reverse_foreach BOOST_REVERSE_FOREACH
+
+int main(int argc, char* argv[]) {
+
+ // how the hell does this work?
+ int a[3] = {1,2,3};
+ foreach(int i, a) {
+ std::cout << i << "\n";
+ }
+
+ std::string my_str = "abc";
+ foreach(char c, my_str) {
+ std::cout << c << "\n";
+ }
+
+ return 0;
+}
\ No newline at end of file
|
jaz303/exp-cpp | 6c9c431fa9707658ece759844ef0a3602e6718b4 | more pointer voodoo | diff --git a/boost_ptr.cpp b/boost_ptr.cpp
index 18ad466..b662ab6 100644
--- a/boost_ptr.cpp
+++ b/boost_ptr.cpp
@@ -1,62 +1,86 @@
#include <iostream>
#include <boost/scoped_ptr.hpp>
#include <boost/scoped_array.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/shared_array.hpp>
#include <boost/weak_ptr.hpp>
#include <boost/intrusive_ptr.hpp>
class Foo {
public:
~Foo() { std::cout << "byebye\n"; }
void greet() { std::cout << "hello!\n"; }
};
void scoped_ptr() {
// The primary reason to use scoped_ptr rather than auto_ptr is
// to let readers of your code know that you intend "resource
// acquisition is initialization" to be applied only for the
// current scope, and have no intent to transfer ownership.
//
// A secondary reason to use scoped_ptr is to prevent a later
// maintenance programmer from adding a function that transfers
// ownership by returning the auto_ptr, because the maintenance
// programmer saw auto_ptr, and assumed ownership could safely
// be transferred.
std::cout << "\nscoped ptr\n";
boost::scoped_ptr<Foo> a(new Foo());
boost::scoped_ptr<Foo> b(new Foo());
b.reset();
boost::scoped_ptr<Foo> c;
// c = a; - won't compile as assignment is private
a->greet();
}
void scoped_array() {
std::cout << "\nscoped array\n";
boost::scoped_array<Foo> d(new Foo[10]);
}
void shared_ptr() {
std::cout << "\nshared ptr\n";
boost::shared_ptr<Foo> e(new Foo);
boost::shared_ptr<Foo> f(e);
+ std::cout << e.use_count() << "\n";
+ std::cout << f.use_count() << "\n";
+
+ f.reset();
+
+ std::cout << e.use_count() << "\n";
+
if (e == f) {
std::cout << "equal!\n";
}
}
+void weak_ptr() {
+ std::cout << "\nweak ptr\n";
+
+ boost::shared_ptr<int> a(new int);
+ boost::weak_ptr<int> b(a);
+
+ *a = 10;
+ std::cout << b.expired() << "\n";
+ std::cout << *b.lock() << "\n";
+
+ a.reset();
+
+ std::cout << b.expired() << "\n";
+
+}
+
int main(int argc, char* argv[]) {
scoped_ptr();
scoped_array();
shared_ptr();
+ weak_ptr();
return 0;
}
|
jaz303/exp-cpp | 9cc5bcccafdb76dd80c9e2103ddf675c8c7cb866 | messing with smart pointers | diff --git a/Rakefile b/Rakefile
index 41cecdb..7612e51 100644
--- a/Rakefile
+++ b/Rakefile
@@ -1,18 +1,18 @@
task :clean do
sh "rm -f *.o"
end
rule '.o' => '.cpp' do |t|
- sh "g++ #{t.source} -c -o #{t.name}"
+ sh "g++ -I/opt/local/include #{t.source} -c -o #{t.name}"
end
-%w(copy_constructors references).each do |exe|
+%w(copy_constructors references auto_ptr boost_ptr).each do |exe|
file exe => "#{exe}.o" do
- sh "g++ -o #{exe} #{exe}.o"
+ sh "g++ -I/opt/local/include -o #{exe} #{exe}.o"
end
task :all => exe
task :clean do
sh "rm -f #{exe}"
end
end
diff --git a/auto_ptr.cpp b/auto_ptr.cpp
new file mode 100644
index 0000000..d59050e
--- /dev/null
+++ b/auto_ptr.cpp
@@ -0,0 +1,23 @@
+#include <memory>
+#include <iostream>
+
+class Foo {
+public:
+ ~Foo() { std::cout << "go bye bye\n"; }
+ void greet() { std::cout << "hello!\n"; }
+};
+
+int main(int argc, char* argv[]) {
+
+ std::auto_ptr<Foo> a(new Foo());
+ std::auto_ptr<Foo> b;
+
+ b = a;
+
+ b->greet();
+
+ std::cout << a.get() << "\n"; // NULL
+ std::cout << b.get() << "\n"; // not null
+
+ return 0;
+}
\ No newline at end of file
diff --git a/boost_ptr.cpp b/boost_ptr.cpp
new file mode 100644
index 0000000..18ad466
--- /dev/null
+++ b/boost_ptr.cpp
@@ -0,0 +1,62 @@
+#include <iostream>
+#include <boost/scoped_ptr.hpp>
+#include <boost/scoped_array.hpp>
+#include <boost/shared_ptr.hpp>
+#include <boost/shared_array.hpp>
+#include <boost/weak_ptr.hpp>
+#include <boost/intrusive_ptr.hpp>
+
+class Foo {
+public:
+ ~Foo() { std::cout << "byebye\n"; }
+ void greet() { std::cout << "hello!\n"; }
+};
+
+void scoped_ptr() {
+ // The primary reason to use scoped_ptr rather than auto_ptr is
+ // to let readers of your code know that you intend "resource
+ // acquisition is initialization" to be applied only for the
+ // current scope, and have no intent to transfer ownership.
+ //
+ // A secondary reason to use scoped_ptr is to prevent a later
+ // maintenance programmer from adding a function that transfers
+ // ownership by returning the auto_ptr, because the maintenance
+ // programmer saw auto_ptr, and assumed ownership could safely
+ // be transferred.
+
+ std::cout << "\nscoped ptr\n";
+
+ boost::scoped_ptr<Foo> a(new Foo());
+ boost::scoped_ptr<Foo> b(new Foo());
+ b.reset();
+
+ boost::scoped_ptr<Foo> c;
+ // c = a; - won't compile as assignment is private
+
+ a->greet();
+}
+
+void scoped_array() {
+ std::cout << "\nscoped array\n";
+
+ boost::scoped_array<Foo> d(new Foo[10]);
+}
+
+void shared_ptr() {
+ std::cout << "\nshared ptr\n";
+
+ boost::shared_ptr<Foo> e(new Foo);
+ boost::shared_ptr<Foo> f(e);
+
+ if (e == f) {
+ std::cout << "equal!\n";
+ }
+}
+
+int main(int argc, char* argv[]) {
+ scoped_ptr();
+ scoped_array();
+ shared_ptr();
+
+ return 0;
+}
|
jaz303/exp-cpp | 23a8e2f18e262fa51a9e709365c5606ba7ea114e | initial revision | diff --git a/Rakefile b/Rakefile
new file mode 100644
index 0000000..41cecdb
--- /dev/null
+++ b/Rakefile
@@ -0,0 +1,18 @@
+
+task :clean do
+ sh "rm -f *.o"
+end
+
+rule '.o' => '.cpp' do |t|
+ sh "g++ #{t.source} -c -o #{t.name}"
+end
+
+%w(copy_constructors references).each do |exe|
+ file exe => "#{exe}.o" do
+ sh "g++ -o #{exe} #{exe}.o"
+ end
+ task :all => exe
+ task :clean do
+ sh "rm -f #{exe}"
+ end
+end
diff --git a/copy_constructors.cpp b/copy_constructors.cpp
new file mode 100644
index 0000000..08a6c91
--- /dev/null
+++ b/copy_constructors.cpp
@@ -0,0 +1,51 @@
+#include <iostream>
+
+using namespace std;
+
+class A {
+public:
+ A() {}
+ A(const A& a) { cout << "copy A" << endl; }
+ virtual A* clone() { return new A(*this); }
+};
+
+class B : public A {
+public:
+ B() {}
+ B(const B& b) : A(b) { cout << "copy B" << endl; }
+ B* clone() { return new B(*this); }
+};
+
+void foo(A& ref) {
+ cout << "enter foo, expecting both A & B's copy constructors to be called:" << endl;
+ A* o1 = ref.clone();
+ cout << "exit foo" << endl;
+}
+
+void bar(A& ref) {
+ cout << "enter bar, expecting only A's copy constructor to be called" << endl;
+ A other = ref;
+ cout << "exit bar" << endl;
+}
+
+void baz(A val) {
+ cout << "enter baz, expecting only A's copy constructor to be called" << endl;
+ A other = val;
+ cout << "exit baz" << endl;
+}
+
+int main() {
+
+ B bleem;
+
+ foo(bleem);
+ cout << endl;
+
+ bar(bleem);
+ cout << endl;
+
+ baz(bleem);
+
+ return 0;
+
+}
\ No newline at end of file
diff --git a/references.cpp b/references.cpp
new file mode 100644
index 0000000..b510078
--- /dev/null
+++ b/references.cpp
@@ -0,0 +1,40 @@
+#include <iostream>
+
+using namespace std;
+
+class A {
+public:
+ A() { cout << "constructing A" << endl; }
+ A(const A& a) { cout << "copying A, shouldn't see me" << endl; }
+ virtual ~A() {}
+ virtual void foo() { cout << "A speaks!" << endl; }
+};
+
+class B : public A {
+public:
+ B() { cout << "constructing B" << endl; }
+ B(const B& b) : A(b) { cout << "copying B, shouldn't see me" << endl; }
+ void foo() { cout << "B speaks!" << endl; }
+};
+
+class C : public B {
+public:
+ C() { cout << "constructing C" << endl; }
+ C(const C& c) : B(c) { cout << "copying C, shouldn't see me" << endl; }
+ void foo() { cout << "C speaks!" << endl; }
+};
+
+void foo(A &arg) {
+
+ arg.foo();
+
+ A &other = arg;
+ other.foo();
+
+}
+
+int main(int argc, char* argv[]) {
+ A* thing = new C();
+ foo(*thing);
+ delete thing;
+}
|
vim-scripts/java_apidoc.vim | c15a4a138a092e87c3836709e1d165a2680f24bc | Version 3.1.1 | diff --git a/ftplugin/java_apidoc.vim b/ftplugin/java_apidoc.vim
index 0868288..bf3b3b4 100644
--- a/ftplugin/java_apidoc.vim
+++ b/ftplugin/java_apidoc.vim
@@ -1,164 +1,164 @@
-" java_apidoc.vim v3.0 by Paul Etchells <[email protected]>
+" java_apidoc.vim v3.1.1 by Paul Etchells <[email protected]>
" based on work by Darren Greaves <[email protected]> - Thanx for
" giving me the idea and a good chunk of the code!
"
" DESCRIPTION
" Opens a browser showing the Javadoc generated API for an imported package,
" or for a class name under the cursor. Uses the index files generated by
" javadoc to quickly search for the html page, and a Vim buffer to store them
-" whilst its searching.
+" whilst it's searching.
"
" Tested and working on GVim >= 6.1 on Linux.
"
" INSTALL
" 1) Put this file in ~/.vim/ftplugin (or wherever Vim looks for plugins).
"
" CONFIGURATION - BROWSER
" Uncomment for whichever browser you want to use on your system.
"-----------------------------------------------------------------------------
let browser="dillo"
"let browser="xterm -e lynx"
"let browser="opera"
" Windows users....
"let browser="explorer"
"-----------------------------------------------------------------------------
"
" CONFIGURATION - JAVA API PATH
" Set the javadoc_path variable to a comma separated list of paths to
" the tops of the Javadoc trees. Change these to match your api locations.
"-----------------------------------------------------------------------------
let javadoc_path="/home/etch/www/dox/java/j2se1.4.2/api,/opt/j2sdkee1.3.1/doc/api,/home/etch/www/dox/java/java-gnome-2.6.0.1/doc-core/api"
" Windows users....
"let javadoc_path="C:\\j2se1.4.2\\doc\\api"
"-----------------------------------------------------------------------------
"
" CONFIGURATION - KEY ASSIGNMENT
"-----------------------------------------------------------------------------
nmap <F2> :call OpenJavadoc()<CR><CR>
"-----------------------------------------------------------------------------
"
" Avoid loading this function twice
if exists("loaded_java_apidoc") || &cp
finish
endif
let loaded_java_apidoc = 1
if has("win32")
let s:slash = '\'
else
let s:slash = '/'
endif
function! OpenJavadoc()
let line = getline(".")
let bufhidden = getbufvar(bufnr("%"), "&hidden")
let regex = '^import\s\+\(\S\+\);$'
let l = matchstr(line, regex)
let file = substitute(l, regex, '\1', '')
let null = ''
let s:found = 0
let classname = expand("<cword>")
call setbufvar(bufnr("%"), "&hidden", 1)
let file = substitute(file, '\.', s:slash, 'g')
let javapath = g:javadoc_path
let regex = "^[^,]*"
let trimmed_classname = substitute(classname, '\W', '', 'g')
if (strlen(trimmed_classname) < 1)
echo "Class name is too short"
return
endif
while (strlen(javapath))
let path = s:java_apidoc_getFirstPathElement(javapath, regex)
let javapath = s:java_apidoc_removeFirstPathElement(javapath, regex)
let lfile = path . s:slash . file . ".html"
if ((match(lfile, "\*\.html$") != -1) && has("gui_running"))
let lfile = substitute(lfile, "\*\.html$", "", "")
if (isdirectory(expand(lfile)))
if has("win32")
let lfile = substitute(lfile, '/', '\', 'g')
let null = system(g:browser.' '.lfile)
else
let null = system(g:browser.' '.lfile.' &')
endif
let s:found = s:found + 1
endif
elseif (filereadable(expand(lfile)))
if has("win32")
let lfile = substitute(lfile, '/', '\', 'g')
let null = system(g:browser.' '.lfile)
else
let null = system(g:browser.' '.lfile.' &')
endif
let s:found = s:found + 1
break
endif
endwhile
if (s:found == 0)
" Couldn't find the file directly, so search the allclasses
" files for a match, which incidentally, is the target name.
" Loop through the path elements
let javapath = g:javadoc_path
while (strlen(javapath))
let path = s:java_apidoc_getFirstPathElement(javapath, regex)
let allclasses = path.s:slash.'allclasses-noframe.html'
if (filereadable(allclasses))
call s:java_apidoc_search(allclasses, path, classname)
else
let allclasses = path.s:slash.'allclasses-frame.html'
if (filereadable(allclasses))
call s:java_apidoc_search(allclasses, path, classname)
endif
endif
let javapath = s:java_apidoc_removeFirstPathElement(javapath, regex)
endwhile
endif
call setbufvar(bufnr("%"), "&hidden", bufhidden)
endfunction
function! s:java_apidoc_search(allclasses, path, classname)
let curr_buf = bufnr('%')
let html_buf = 0
execute(':view +0 '.a:allclasses)
call s:java_apidoc_searchHtml(a:path, a:classname)
let html_buf = bufnr('%')
execute(':b '.curr_buf)
execute(':bd '.html_buf)
endfunction
function! s:java_apidoc_searchHtml(path, classname)
let lineno = search('<a href="[a-z/]*/'.a:classname.'\.html\c', 'W')
while (lineno > 0)
let fpath = substitute(getline("."), '<A HREF="\([^ ]*\)".*$', '\1', 'i')
if has("win32")
let fpath = substitute(fpath, '/', '\', 'g')
let null = system(g:browser.' '.a:path.s:slash.fpath)
else
let null = system(g:browser.' '.a:path.s:slash.fpath.' &')
endif
let s:found = s:found + 1
let lineno = search('<a href="[a-z/]*/'.a:classname.'\.html\c', 'W')
endwhile
endfunction
" Return everything up to the first regex in a path
function! s:java_apidoc_getFirstPathElement(path, regex)
let lpath = matchstr(a:path, a:regex)
return lpath
endfunction
" Remove everything up to the first "," in a path
function! s:java_apidoc_removeFirstPathElement(path, regex)
let lpath = a:path
let lregex = a:regex
let lpath = substitute(lpath, lregex, "", "")
let lpath = substitute(lpath, "^,", "", "")
return lpath
endfunction
|
vim-scripts/java_apidoc.vim | 2e9721eacb76a6270e8330fdcdbe2b974bb680ee | Version 3.1: Improved Win32 compatability. Thanks to W. H. Jou for the patch. | diff --git a/ftplugin/java_apidoc.vim b/ftplugin/java_apidoc.vim
index 4c22300..0868288 100644
--- a/ftplugin/java_apidoc.vim
+++ b/ftplugin/java_apidoc.vim
@@ -1,154 +1,164 @@
" java_apidoc.vim v3.0 by Paul Etchells <[email protected]>
" based on work by Darren Greaves <[email protected]> - Thanx for
" giving me the idea and a good chunk of the code!
"
" DESCRIPTION
" Opens a browser showing the Javadoc generated API for an imported package,
" or for a class name under the cursor. Uses the index files generated by
" javadoc to quickly search for the html page, and a Vim buffer to store them
" whilst its searching.
"
" Tested and working on GVim >= 6.1 on Linux.
"
" INSTALL
" 1) Put this file in ~/.vim/ftplugin (or wherever Vim looks for plugins).
"
" CONFIGURATION - BROWSER
" Uncomment for whichever browser you want to use on your system.
"-----------------------------------------------------------------------------
let browser="dillo"
"let browser="xterm -e lynx"
"let browser="opera"
" Windows users....
"let browser="explorer"
"-----------------------------------------------------------------------------
"
" CONFIGURATION - JAVA API PATH
" Set the javadoc_path variable to a comma separated list of paths to
" the tops of the Javadoc trees. Change these to match your api locations.
"-----------------------------------------------------------------------------
-let javadoc_path="/home/etch/www/dox/java/j2se1.4.2/api,/opt/j2sdkee1.3.1/doc/api,/home/etch/www/dox/java/java-gnome-0.8.3/doc-core/api"
+let javadoc_path="/home/etch/www/dox/java/j2se1.4.2/api,/opt/j2sdkee1.3.1/doc/api,/home/etch/www/dox/java/java-gnome-2.6.0.1/doc-core/api"
" Windows users....
"let javadoc_path="C:\\j2se1.4.2\\doc\\api"
"-----------------------------------------------------------------------------
"
" CONFIGURATION - KEY ASSIGNMENT
"-----------------------------------------------------------------------------
nmap <F2> :call OpenJavadoc()<CR><CR>
"-----------------------------------------------------------------------------
"
" Avoid loading this function twice
if exists("loaded_java_apidoc") || &cp
finish
endif
let loaded_java_apidoc = 1
if has("win32")
let s:slash = '\'
else
let s:slash = '/'
endif
function! OpenJavadoc()
let line = getline(".")
let bufhidden = getbufvar(bufnr("%"), "&hidden")
let regex = '^import\s\+\(\S\+\);$'
let l = matchstr(line, regex)
let file = substitute(l, regex, '\1', '')
let null = ''
let s:found = 0
let classname = expand("<cword>")
call setbufvar(bufnr("%"), "&hidden", 1)
let file = substitute(file, '\.', s:slash, 'g')
let javapath = g:javadoc_path
let regex = "^[^,]*"
let trimmed_classname = substitute(classname, '\W', '', 'g')
if (strlen(trimmed_classname) < 1)
echo "Class name is too short"
return
endif
while (strlen(javapath))
let path = s:java_apidoc_getFirstPathElement(javapath, regex)
let javapath = s:java_apidoc_removeFirstPathElement(javapath, regex)
let lfile = path . s:slash . file . ".html"
if ((match(lfile, "\*\.html$") != -1) && has("gui_running"))
let lfile = substitute(lfile, "\*\.html$", "", "")
if (isdirectory(expand(lfile)))
- let null = system(g:browser.' '.lfile.' &')
+ if has("win32")
+ let lfile = substitute(lfile, '/', '\', 'g')
+ let null = system(g:browser.' '.lfile)
+ else
+ let null = system(g:browser.' '.lfile.' &')
+ endif
let s:found = s:found + 1
endif
elseif (filereadable(expand(lfile)))
- let null = system(g:browser.' '.lfile.' &')
+ if has("win32")
+ let lfile = substitute(lfile, '/', '\', 'g')
+ let null = system(g:browser.' '.lfile)
+ else
+ let null = system(g:browser.' '.lfile.' &')
+ endif
let s:found = s:found + 1
break
endif
endwhile
if (s:found == 0)
" Couldn't find the file directly, so search the allclasses
" files for a match, which incidentally, is the target name.
" Loop through the path elements
let javapath = g:javadoc_path
while (strlen(javapath))
let path = s:java_apidoc_getFirstPathElement(javapath, regex)
let allclasses = path.s:slash.'allclasses-noframe.html'
if (filereadable(allclasses))
call s:java_apidoc_search(allclasses, path, classname)
else
let allclasses = path.s:slash.'allclasses-frame.html'
if (filereadable(allclasses))
call s:java_apidoc_search(allclasses, path, classname)
endif
endif
let javapath = s:java_apidoc_removeFirstPathElement(javapath, regex)
endwhile
endif
call setbufvar(bufnr("%"), "&hidden", bufhidden)
endfunction
function! s:java_apidoc_search(allclasses, path, classname)
let curr_buf = bufnr('%')
let html_buf = 0
execute(':view +0 '.a:allclasses)
call s:java_apidoc_searchHtml(a:path, a:classname)
let html_buf = bufnr('%')
execute(':b '.curr_buf)
execute(':bd '.html_buf)
endfunction
function! s:java_apidoc_searchHtml(path, classname)
let lineno = search('<a href="[a-z/]*/'.a:classname.'\.html\c', 'W')
while (lineno > 0)
let fpath = substitute(getline("."), '<A HREF="\([^ ]*\)".*$', '\1', 'i')
if has("win32")
let fpath = substitute(fpath, '/', '\', 'g')
let null = system(g:browser.' '.a:path.s:slash.fpath)
else
let null = system(g:browser.' '.a:path.s:slash.fpath.' &')
endif
let s:found = s:found + 1
let lineno = search('<a href="[a-z/]*/'.a:classname.'\.html\c', 'W')
endwhile
endfunction
" Return everything up to the first regex in a path
function! s:java_apidoc_getFirstPathElement(path, regex)
let lpath = matchstr(a:path, a:regex)
return lpath
endfunction
" Remove everything up to the first "," in a path
function! s:java_apidoc_removeFirstPathElement(path, regex)
let lpath = a:path
let lregex = a:regex
let lpath = substitute(lpath, lregex, "", "")
let lpath = substitute(lpath, "^,", "", "")
return lpath
endfunction
|
vim-scripts/java_apidoc.vim | 96bd735cabc0d800fc180a0ab2c2b1ca0516978c | Version 3.0: Now same script works on 'nix and win32. | diff --git a/ftplugin/java_apidoc.vim b/ftplugin/java_apidoc.vim
index 6d633e8..4c22300 100644
--- a/ftplugin/java_apidoc.vim
+++ b/ftplugin/java_apidoc.vim
@@ -1,142 +1,154 @@
-" java_apidoc.vim v1.2 by Paul Etchells <[email protected]>
+" java_apidoc.vim v3.0 by Paul Etchells <[email protected]>
" based on work by Darren Greaves <[email protected]> - Thanx for
" giving me the idea and a good chunk of the code!
"
" DESCRIPTION
" Opens a browser showing the Javadoc generated API for an imported package,
" or for a class name under the cursor. Uses the index files generated by
" javadoc to quickly search for the html page, and a Vim buffer to store them
" whilst its searching.
"
" Tested and working on GVim >= 6.1 on Linux.
"
" INSTALL
" 1) Put this file in ~/.vim/ftplugin (or wherever Vim looks for plugins).
"
" CONFIGURATION - BROWSER
" Uncomment for whichever browser you want to use on your system.
"-----------------------------------------------------------------------------
-"let browser="dillo"
+let browser="dillo"
"let browser="xterm -e lynx"
"let browser="opera"
-let browser="htmlview"
+" Windows users....
+"let browser="explorer"
"-----------------------------------------------------------------------------
"
" CONFIGURATION - JAVA API PATH
" Set the javadoc_path variable to a comma separated list of paths to
" the tops of the Javadoc trees. Change these to match your api locations.
"-----------------------------------------------------------------------------
let javadoc_path="/home/etch/www/dox/java/j2se1.4.2/api,/opt/j2sdkee1.3.1/doc/api,/home/etch/www/dox/java/java-gnome-0.8.3/doc-core/api"
+" Windows users....
+"let javadoc_path="C:\\j2se1.4.2\\doc\\api"
"-----------------------------------------------------------------------------
"
" CONFIGURATION - KEY ASSIGNMENT
"-----------------------------------------------------------------------------
nmap <F2> :call OpenJavadoc()<CR><CR>
"-----------------------------------------------------------------------------
"
" Avoid loading this function twice
if exists("loaded_java_apidoc") || &cp
- finish
+ finish
endif
let loaded_java_apidoc = 1
-function! OpenJavadoc()
- let line = getline(".")
- let bufhidden = getbufvar(bufnr("%"), "&hidden")
- let regex = '^import\s\+\(\S\+\);$'
- let l = matchstr(line, regex)
- let file = substitute(l, regex, '\1', '')
- let null = ''
- let s:found = 0
- let classname = expand("<cword>")
- call setbufvar(bufnr("%"), "&hidden", 1)
+if has("win32")
+ let s:slash = '\'
+else
+ let s:slash = '/'
+endif
- let file = substitute(file, '\.', '/', 'g')
+function! OpenJavadoc()
+ let line = getline(".")
+ let bufhidden = getbufvar(bufnr("%"), "&hidden")
+ let regex = '^import\s\+\(\S\+\);$'
+ let l = matchstr(line, regex)
+ let file = substitute(l, regex, '\1', '')
+ let null = ''
+ let s:found = 0
+ let classname = expand("<cword>")
+ call setbufvar(bufnr("%"), "&hidden", 1)
- let javapath = g:javadoc_path
- let regex = "^[^,]*"
+ let file = substitute(file, '\.', s:slash, 'g')
- let trimmed_classname = substitute(classname, '\W', '', 'g')
- if (strlen(trimmed_classname) < 1)
- echo "Class name is too short"
- return
- endif
- while (strlen(javapath))
- let path = s:java_apidoc_getFirstPathElement(javapath, regex)
+ let javapath = g:javadoc_path
+ let regex = "^[^,]*"
- let javapath = s:java_apidoc_removeFirstPathElement(javapath, regex)
- let lfile = path . "/" . file . ".html"
+ let trimmed_classname = substitute(classname, '\W', '', 'g')
+ if (strlen(trimmed_classname) < 1)
+ echo "Class name is too short"
+ return
+ endif
+ while (strlen(javapath))
+ let path = s:java_apidoc_getFirstPathElement(javapath, regex)
+ let javapath = s:java_apidoc_removeFirstPathElement(javapath, regex)
+ let lfile = path . s:slash . file . ".html"
+ if ((match(lfile, "\*\.html$") != -1) && has("gui_running"))
+ let lfile = substitute(lfile, "\*\.html$", "", "")
+ if (isdirectory(expand(lfile)))
+ let null = system(g:browser.' '.lfile.' &')
+ let s:found = s:found + 1
+ endif
+ elseif (filereadable(expand(lfile)))
+ let null = system(g:browser.' '.lfile.' &')
+ let s:found = s:found + 1
+ break
+ endif
+ endwhile
- if ((match(lfile, "\*\.html$") != -1) && has("gui_running"))
- let lfile = substitute(lfile, "\*\.html$", "", "")
- if (isdirectory(expand(lfile)))
- let null = system(g:browser.' '.lfile.' &')
- let s:found = s:found + 1
- endif
- elseif (filereadable(expand(lfile)))
- let null = system(g:browser.' '.lfile.' &')
- let s:found = s:found + 1
- break
- endif
- endwhile
+ if (s:found == 0)
+ " Couldn't find the file directly, so search the allclasses
+ " files for a match, which incidentally, is the target name.
- if (s:found == 0)
- " Couldn't find the file directly, so search the allclasses
- " files for a match, which incidentally, is the target name.
-
- " Loop through the path elements
- let javapath = g:javadoc_path
- while (strlen(javapath))
- let path = s:java_apidoc_getFirstPathElement(javapath, regex)
- let allclasses = path.'/allclasses-noframe.html'
- if (filereadable(allclasses))
- call s:java_apidoc_search(allclasses, path, classname)
- else
- let allclasses = path.'/allclasses-frame.html'
- if (filereadable(allclasses))
- call s:java_apidoc_search(allclasses, path, classname)
- endif
- endif
- let javapath = s:java_apidoc_removeFirstPathElement(javapath, regex)
- endwhile
- endif
+ " Loop through the path elements
+ let javapath = g:javadoc_path
+ while (strlen(javapath))
+ let path = s:java_apidoc_getFirstPathElement(javapath, regex)
+ let allclasses = path.s:slash.'allclasses-noframe.html'
+ if (filereadable(allclasses))
+ call s:java_apidoc_search(allclasses, path, classname)
+ else
+ let allclasses = path.s:slash.'allclasses-frame.html'
+ if (filereadable(allclasses))
+ call s:java_apidoc_search(allclasses, path, classname)
+ endif
+ endif
+ let javapath = s:java_apidoc_removeFirstPathElement(javapath, regex)
+ endwhile
+ endif
- call setbufvar(bufnr("%"), "&hidden", bufhidden)
+ call setbufvar(bufnr("%"), "&hidden", bufhidden)
endfunction
function! s:java_apidoc_search(allclasses, path, classname)
- let curr_buf = bufnr('%')
- let html_buf = 0
- execute(':view +0 '.a:allclasses)
- call s:java_apidoc_searchHtml(a:path, a:classname)
- let html_buf = bufnr('%')
- execute(':b '.curr_buf)
- execute(':bd '.html_buf)
+ let curr_buf = bufnr('%')
+ let html_buf = 0
+ execute(':view +0 '.a:allclasses)
+ call s:java_apidoc_searchHtml(a:path, a:classname)
+ let html_buf = bufnr('%')
+ execute(':b '.curr_buf)
+ execute(':bd '.html_buf)
endfunction
function! s:java_apidoc_searchHtml(path, classname)
let lineno = search('<a href="[a-z/]*/'.a:classname.'\.html\c', 'W')
while (lineno > 0)
let fpath = substitute(getline("."), '<A HREF="\([^ ]*\)".*$', '\1', 'i')
- let null = system(g:browser.' '.a:path.'/'.fpath.' &')
+ if has("win32")
+ let fpath = substitute(fpath, '/', '\', 'g')
+ let null = system(g:browser.' '.a:path.s:slash.fpath)
+ else
+ let null = system(g:browser.' '.a:path.s:slash.fpath.' &')
+ endif
let s:found = s:found + 1
let lineno = search('<a href="[a-z/]*/'.a:classname.'\.html\c', 'W')
endwhile
endfunction
" Return everything up to the first regex in a path
function! s:java_apidoc_getFirstPathElement(path, regex)
- let lpath = matchstr(a:path, a:regex)
- return lpath
+ let lpath = matchstr(a:path, a:regex)
+ return lpath
endfunction
" Remove everything up to the first "," in a path
function! s:java_apidoc_removeFirstPathElement(path, regex)
- let lpath = a:path
- let lregex = a:regex
- let lpath = substitute(lpath, lregex, "", "")
- let lpath = substitute(lpath, "^,", "", "")
- return lpath
+ let lpath = a:path
+ let lregex = a:regex
+ let lpath = substitute(lpath, lregex, "", "")
+ let lpath = substitute(lpath, "^,", "", "")
+ return lpath
endfunction
|
vim-scripts/java_apidoc.vim | b2d1873428a9bb3349364b6df78b294af4a3315b | Version 1.2 | diff --git a/ftplugin/java_apidoc.vim b/ftplugin/java_apidoc.vim
index 8fb7b8b..6d633e8 100644
--- a/ftplugin/java_apidoc.vim
+++ b/ftplugin/java_apidoc.vim
@@ -1,145 +1,142 @@
-" java_apidoc.vim v1.0 by Paul Etchells <[email protected]>
+" java_apidoc.vim v1.2 by Paul Etchells <[email protected]>
" based on work by Darren Greaves <[email protected]> - Thanx for
" giving me the idea and a good chunk of the code!
"
" DESCRIPTION
-" Opens a browser showing the Javadoc for the imported package on the same
-" line as the cursor, or for the class name under the cursor.
+" Opens a browser showing the Javadoc generated API for an imported package,
+" or for a class name under the cursor. Uses the index files generated by
+" javadoc to quickly search for the html page, and a Vim buffer to store them
+" whilst its searching.
"
-" USE
-" Default install is activated with the <F2> key and destroys the j register.
-" Tested and working on GVim 6.1 on Linux.
+" Tested and working on GVim >= 6.1 on Linux.
"
" INSTALL
" 1) Put this file in ~/.vim/ftplugin (or wherever Vim looks for plugins).
-" 2) (gVIM) Add the following lines to ~/.vimrc
-"-----------------------------------------------------------------------------
-" au FileType java let browser="xterm --geometry 100x40 -e lynx"
-" au FileType java let java_api_path="/home/etch/Dox/java-1.4-api,/home/etch/Dox/ejb"
-" au FileType java nmap <F2> viw"jy:call OpenJavadoc("j")
-"-----------------------------------------------------------------------------
-"
-" (Note: is CTRL-V CTRL-R, is CTRL-V CTRL-M)
"
" CONFIGURATION - BROWSER
-" If you don't want to use xterm and lynx to show the help, you can change
-" 'browser="..."' to whatever browser is on your system.
-" e.g. To browse with Opera
+" Uncomment for whichever browser you want to use on your system.
"-----------------------------------------------------------------------------
-" au FileType java let browser="opera"
+"let browser="dillo"
+"let browser="xterm -e lynx"
+"let browser="opera"
+let browser="htmlview"
"-----------------------------------------------------------------------------
"
-" If you try this with Netscape it may complain about a 'lock' file, since it
-" tries to start a new instance of the browser for each page found. Of course,
-" you can just say 'OK' at the dialogue, but it gets irritating. Mozilla tries
-" to start a new instance with a different profile, so neither of these
-" browsers are usable with this macro :o(
-"
" CONFIGURATION - JAVA API PATH
-" Just set the java_api_path variable to a comma separated list of paths to
-" the tops of the Javadoc trees.
+" Set the javadoc_path variable to a comma separated list of paths to
+" the tops of the Javadoc trees. Change these to match your api locations.
+"-----------------------------------------------------------------------------
+let javadoc_path="/home/etch/www/dox/java/j2se1.4.2/api,/opt/j2sdkee1.3.1/doc/api,/home/etch/www/dox/java/java-gnome-0.8.3/doc-core/api"
+"-----------------------------------------------------------------------------
"
-" CONFIGURATION - KEY AND REGISTER ASSIGNMENT
-" The third line uses the <F2> key to start the macro, and the j register to
-" act as a temporary visual buffer for getting the word under the cursor into
-" the function call. Change these if you use them for something else.
-"
-" CAVEAT
-" It can be pretty slow when looking for a class name and your java_api_path
-" contains a lot of files.
+" CONFIGURATION - KEY ASSIGNMENT
+"-----------------------------------------------------------------------------
+nmap <F2> :call OpenJavadoc()<CR><CR>
+"-----------------------------------------------------------------------------
+"
+" Avoid loading this function twice
+if exists("loaded_java_apidoc") || &cp
+ finish
+endif
-" Set this to 0 if you really want all the class_use files as well.
-let s:skip_class_use = 1
+let loaded_java_apidoc = 1
-function! OpenJavadoc(classname)
- let line = getline(".")
- let regex = '^import\s\+\(\S\+\);$'
- let l = matchstr(line, regex)
- let file = substitute(l, regex, '\1', '')
- let null = ''
- let s:found = 0
+function! OpenJavadoc()
+ let line = getline(".")
+ let bufhidden = getbufvar(bufnr("%"), "&hidden")
+ let regex = '^import\s\+\(\S\+\);$'
+ let l = matchstr(line, regex)
+ let file = substitute(l, regex, '\1', '')
+ let null = ''
+ let s:found = 0
+ let classname = expand("<cword>")
+ call setbufvar(bufnr("%"), "&hidden", 1)
- let file = substitute(file, '\.', '/', 'g')
+ let file = substitute(file, '\.', '/', 'g')
- let javapath = g:javadoc_path
- let regex = "^[^,]*"
- let trimmed_classname = substitute(a:classname, '\W', '', 'g')
- if (strlen(trimmed_classname) < 1)
- echo "Class name is too short"
- return
- endif
- while (strlen(javapath))
- let path = GetFirstPathElement(javapath, regex)
- let javapath = RemoveFirstPathElement(javapath, regex)
- let lfile = path . "/" . file . ".html"
- if ((match(lfile, "\*\.html$") != -1) && has("gui_running"))
- let lfile = substitute(lfile, "\*\.html$", "", "")
- if (isdirectory(expand(lfile)))
- let null = system(g:browser.' '.lfile.' &')
- let s:found = s:found + 1
- endif
- elseif (filereadable(expand(lfile)))
- let null = system(g:browser.' '.lfile.' &')
- let s:found = s:found + 1
- break
- endif
- endwhile
+ let javapath = g:javadoc_path
+ let regex = "^[^,]*"
- if (s:found == 0)
- " Couldn't find the file directly, so do the equivalent of a system find
- " on each path element and sub-directory.
+ let trimmed_classname = substitute(classname, '\W', '', 'g')
+ if (strlen(trimmed_classname) < 1)
+ echo "Class name is too short"
+ return
+ endif
+ while (strlen(javapath))
+ let path = s:java_apidoc_getFirstPathElement(javapath, regex)
- " Loop through the given path elements
- let javapath = g:javadoc_path
- while (strlen(javapath))
- let path = GetFirstPathElement(javapath, regex)
- call FindTarget(path, a:classname.".html")
- let javapath = RemoveFirstPathElement(javapath, regex)
- endwhile
- endif
+ let javapath = s:java_apidoc_removeFirstPathElement(javapath, regex)
+ let lfile = path . "/" . file . ".html"
- if (s:found == 1)
- echo "Found 1 page"
- else
- echo "Found ".s:found." pages"
- endif
+ if ((match(lfile, "\*\.html$") != -1) && has("gui_running"))
+ let lfile = substitute(lfile, "\*\.html$", "", "")
+ if (isdirectory(expand(lfile)))
+ let null = system(g:browser.' '.lfile.' &')
+ let s:found = s:found + 1
+ endif
+ elseif (filereadable(expand(lfile)))
+ let null = system(g:browser.' '.lfile.' &')
+ let s:found = s:found + 1
+ break
+ endif
+ endwhile
+
+ if (s:found == 0)
+ " Couldn't find the file directly, so search the allclasses
+ " files for a match, which incidentally, is the target name.
+
+ " Loop through the path elements
+ let javapath = g:javadoc_path
+ while (strlen(javapath))
+ let path = s:java_apidoc_getFirstPathElement(javapath, regex)
+ let allclasses = path.'/allclasses-noframe.html'
+ if (filereadable(allclasses))
+ call s:java_apidoc_search(allclasses, path, classname)
+ else
+ let allclasses = path.'/allclasses-frame.html'
+ if (filereadable(allclasses))
+ call s:java_apidoc_search(allclasses, path, classname)
+ endif
+ endif
+ let javapath = s:java_apidoc_removeFirstPathElement(javapath, regex)
+ endwhile
+ endif
+
+ call setbufvar(bufnr("%"), "&hidden", bufhidden)
endfunction
+function! s:java_apidoc_search(allclasses, path, classname)
+ let curr_buf = bufnr('%')
+ let html_buf = 0
+ execute(':view +0 '.a:allclasses)
+ call s:java_apidoc_searchHtml(a:path, a:classname)
+ let html_buf = bufnr('%')
+ execute(':b '.curr_buf)
+ execute(':bd '.html_buf)
+endfunction
-" Get every file within the path and see if it looks like the target.
-" If a directory is found then this function is called recursively.
-function! FindTarget(path, target)
- let findlist = substitute(glob(a:path."/*").",", "\n", ",", "g")
- let null = ''
- while (strlen(findlist))
- let fpath = GetFirstPathElement(findlist, "[^,]*")
- if (match(fpath, "/class-use/") > -1 && s:skip_class_use)
- break
- endif
- let findlist = substitute(findlist, "[^,]*,", "", "")
- if (isdirectory(fpath))
- call FindTarget(fpath, a:target)
- else
- if (match(fpath, '/'.a:target) > -1)
- let null = system(g:browser.' '.fpath.' &')
- let s:found = s:found + 1
- endif
- endif
+function! s:java_apidoc_searchHtml(path, classname)
+ let lineno = search('<a href="[a-z/]*/'.a:classname.'\.html\c', 'W')
+ while (lineno > 0)
+ let fpath = substitute(getline("."), '<A HREF="\([^ ]*\)".*$', '\1', 'i')
+ let null = system(g:browser.' '.a:path.'/'.fpath.' &')
+ let s:found = s:found + 1
+ let lineno = search('<a href="[a-z/]*/'.a:classname.'\.html\c', 'W')
endwhile
endfunction
" Return everything up to the first regex in a path
-function! GetFirstPathElement(path, regex)
- let lpath = matchstr(a:path, a:regex)
- return lpath
+function! s:java_apidoc_getFirstPathElement(path, regex)
+ let lpath = matchstr(a:path, a:regex)
+ return lpath
endfunction
" Remove everything up to the first "," in a path
-function! RemoveFirstPathElement(path, regex)
- let lpath = a:path
- let lregex = a:regex
- let lpath = substitute(lpath, lregex, "", "")
- let lpath = substitute(lpath, "^,", "", "")
- return lpath
+function! s:java_apidoc_removeFirstPathElement(path, regex)
+ let lpath = a:path
+ let lregex = a:regex
+ let lpath = substitute(lpath, lregex, "", "")
+ let lpath = substitute(lpath, "^,", "", "")
+ return lpath
endfunction
|
vim-scripts/java_apidoc.vim | 2d8432fd5c602b78a312074e4ca5afdf200ed6e6 | Version 1.1 | diff --git a/ftplugin/java_apidoc.vim b/ftplugin/java_apidoc.vim
index 362063f..8fb7b8b 100644
--- a/ftplugin/java_apidoc.vim
+++ b/ftplugin/java_apidoc.vim
@@ -1,164 +1,145 @@
" java_apidoc.vim v1.0 by Paul Etchells <[email protected]>
" based on work by Darren Greaves <[email protected]> - Thanx for
" giving me the idea and a good chunk of the code!
"
" DESCRIPTION
" Opens a browser showing the Javadoc for the imported package on the same
" line as the cursor, or for the class name under the cursor.
"
" USE
" Default install is activated with the <F2> key and destroys the j register.
" Tested and working on GVim 6.1 on Linux.
"
" INSTALL
" 1) Put this file in ~/.vim/ftplugin (or wherever Vim looks for plugins).
" 2) (gVIM) Add the following lines to ~/.vimrc
"-----------------------------------------------------------------------------
" au FileType java let browser="xterm --geometry 100x40 -e lynx"
-" au FileType java let javadoc_path="/home/etch/Dox/java-1.4-api,/home/etch/Dox/ejb"
-" au FileType java nmap <F2> viw"jy:call OpenJavadoc("j")
+" au FileType java let java_api_path="/home/etch/Dox/java-1.4-api,/home/etch/Dox/ejb"
+" au FileType java nmap <F2> viw"jy:call OpenJavadoc("j")
"-----------------------------------------------------------------------------
"
-" (Note: is CTRL-V CTRL-R,
is CTRL-V CTRL-M)
+" (Note: is CTRL-V CTRL-R, is CTRL-V CTRL-M)
"
" CONFIGURATION - BROWSER
" If you don't want to use xterm and lynx to show the help, you can change
" 'browser="..."' to whatever browser is on your system.
" e.g. To browse with Opera
"-----------------------------------------------------------------------------
" au FileType java let browser="opera"
"-----------------------------------------------------------------------------
"
" If you try this with Netscape it may complain about a 'lock' file, since it
" tries to start a new instance of the browser for each page found. Of course,
" you can just say 'OK' at the dialogue, but it gets irritating. Mozilla tries
" to start a new instance with a different profile, so neither of these
" browsers are usable with this macro :o(
"
" CONFIGURATION - JAVA API PATH
-" Just set the javadoc_path variable to a comma separated list of paths to
+" Just set the java_api_path variable to a comma separated list of paths to
" the tops of the Javadoc trees.
"
" CONFIGURATION - KEY AND REGISTER ASSIGNMENT
" The third line uses the <F2> key to start the macro, and the j register to
" act as a temporary visual buffer for getting the word under the cursor into
" the function call. Change these if you use them for something else.
"
" CAVEAT
-" It can be pretty slow when looking for a class name and your javadoc_path
+" It can be pretty slow when looking for a class name and your java_api_path
" contains a lot of files.
-"amended to work for Win environment"
+" Set this to 0 if you really want all the class_use files as well.
+let s:skip_class_use = 1
+
function! OpenJavadoc(classname)
- call Debug("classname = " . a:classname)
- let line = getline(".")
- call Debug ("line = " . line)
- let regex = '^import\s\+\(\S\+\);$'
- call Debug ("regex = " . regex)
- let l = matchstr(line, regex)
- call Debug ("l = " . l)
- let file = substitute(l, regex, '\1', '')
- call Debug ("file = " . file)
- let null = ''
+ let line = getline(".")
+ let regex = '^import\s\+\(\S\+\);$'
+ let l = matchstr(line, regex)
+ let file = substitute(l, regex, '\1', '')
+ let null = ''
+ let s:found = 0
+
+ let file = substitute(file, '\.', '/', 'g')
+
+ let javapath = g:javadoc_path
+ let regex = "^[^,]*"
+ let trimmed_classname = substitute(a:classname, '\W', '', 'g')
+ if (strlen(trimmed_classname) < 1)
+ echo "Class name is too short"
+ return
+ endif
+ while (strlen(javapath))
+ let path = GetFirstPathElement(javapath, regex)
+ let javapath = RemoveFirstPathElement(javapath, regex)
+ let lfile = path . "/" . file . ".html"
+ if ((match(lfile, "\*\.html$") != -1) && has("gui_running"))
+ let lfile = substitute(lfile, "\*\.html$", "", "")
+ if (isdirectory(expand(lfile)))
+ let null = system(g:browser.' '.lfile.' &')
+ let s:found = s:found + 1
+ endif
+ elseif (filereadable(expand(lfile)))
+ let null = system(g:browser.' '.lfile.' &')
+ let s:found = s:found + 1
+ break
+ endif
+ endwhile
- let file = substitute(file, '\.', '/', 'g')
- call Debug ("file = " . file)
- let javapath = g:javadoc_path
- let regex = "^[^,]*"
-
- call Debug ("javapath = " . javapath)
-
- if strlen(file) > 0
- while (strlen(javapath))
- let path = GetFirstPathElement(javapath, regex)
- call Debug ("path = " . path)
- let javapath = RemoveFirstPathElement(javapath, regex)
- call Debug ("javapath = " . javapath)
- let lfile = path . "/" . file . ".html"
- call Debug ("lfile = " . lfile)
+ if (s:found == 0)
+ " Couldn't find the file directly, so do the equivalent of a system find
+ " on each path element and sub-directory.
- if ((match(lfile, "\*\.html$") != -1) && has("gui_running"))
- let lfile = substitute(lfile, "\*\.html$", "", "")
- call Debug ("lfile = " . lfile)
- if (isdirectory(expand(lfile)))
- let null = system(g:browser.' '.lfile.' &')
- endif
- elseif (filereadable(expand(lfile)))
- call Debug ("lfile = " . lfile)
- let null = system('"'.g:browser.'" '.lfile)
- let null = 'found file already'
- break
- endif
- endwhile
- else
- call Debug("file = ''. skipping to look for classname.html")
- endif
+ " Loop through the given path elements
+ let javapath = g:javadoc_path
+ while (strlen(javapath))
+ let path = GetFirstPathElement(javapath, regex)
+ call FindTarget(path, a:classname.".html")
+ let javapath = RemoveFirstPathElement(javapath, regex)
+ endwhile
+ endif
- call Debug("null = " . null)
- call Debug("strlen(null) = " . strlen(null) )
- if (strlen(null) == 0)
- call Debug("looking for classname.html")
- " Couldn't find the file directly, so do the equivalent of a system find
- " on each path element and sub-directory.
-
- " Loop through the given path elements
- let javapath = g:javadoc_path
- call Debug("javapath = " . javapath)
- while (strlen(javapath))
- let path = GetFirstPathElement(javapath, regex)
- call Debug("path = " . path)
- call FindTarget(path, a:classname.".html")
- let javapath = RemoveFirstPathElement(javapath, regex)
- call Debug("javapath = " . javapath )
- endwhile
- endif
-" let null = system('"'.g:browser.'" '.'C:/j2sdk1.4.0_01/docs/api/java/lang/System.html' )
- call Debug ("Done")
+ if (s:found == 1)
+ echo "Found 1 page"
+ else
+ echo "Found ".s:found." pages"
+ endif
- return file
endfunction
" Get every file within the path and see if it looks like the target.
" If a directory is found then this function is called recursively.
function! FindTarget(path, target)
-" call Debug("FindTarget+")
- call Debug("looking for " . a:target . " in " . a:path)
- let findlist = substitute(glob(a:path."/*").",", "\n", ",", "g")
- call Debug("findlist = " . findlist )
- let null = ''
- while (strlen(findlist))
- let fpath = GetFirstPathElement(findlist, "[^,]*")
-" call Debug("fpath = " . fpath )
- let findlist = substitute(findlist, "[^,]*,", "", "")
-" call Debug("findlist = " . findlist )
- if (isdirectory(fpath))
- call FindTarget(fpath, a:target)
- else
- if (match(fpath, '\\'.a:target) > -1)
- let null = system('"'.g:browser.'" '.fpath)
- break
- endif
- endif
- endwhile
-" call Debug("FindTarget-")
+ let findlist = substitute(glob(a:path."/*").",", "\n", ",", "g")
+ let null = ''
+ while (strlen(findlist))
+ let fpath = GetFirstPathElement(findlist, "[^,]*")
+ if (match(fpath, "/class-use/") > -1 && s:skip_class_use)
+ break
+ endif
+ let findlist = substitute(findlist, "[^,]*,", "", "")
+ if (isdirectory(fpath))
+ call FindTarget(fpath, a:target)
+ else
+ if (match(fpath, '/'.a:target) > -1)
+ let null = system(g:browser.' '.fpath.' &')
+ let s:found = s:found + 1
+ endif
+ endif
+ endwhile
endfunction
" Return everything up to the first regex in a path
function! GetFirstPathElement(path, regex)
-" call Debug("GetFirstPathElement+")
-" call Debug("a:path = " . a:path)
-" call Debug("a:regex = " . a:regex)
-
- let lpath = matchstr(a:path, a:regex)
- return lpath
+ let lpath = matchstr(a:path, a:regex)
+ return lpath
endfunction
" Remove everything up to the first "," in a path
function! RemoveFirstPathElement(path, regex)
- let lpath = a:path
- let lregex = a:regex
- let lpath = substitute(lpath, lregex, "", "")
- let lpath = substitute(lpath, "^,", "", "")
- return lpath
+ let lpath = a:path
+ let lregex = a:regex
+ let lpath = substitute(lpath, lregex, "", "")
+ let lpath = substitute(lpath, "^,", "", "")
+ return lpath
endfunction
|
vim-scripts/java_apidoc.vim | e32d6203ef89b7d6900b814770b3972c889e66dc | Version 2.0 | diff --git a/ftplugin/java_apidoc.vim b/ftplugin/java_apidoc.vim
index 7272f4a..362063f 100644
--- a/ftplugin/java_apidoc.vim
+++ b/ftplugin/java_apidoc.vim
@@ -1,128 +1,164 @@
" java_apidoc.vim v1.0 by Paul Etchells <[email protected]>
" based on work by Darren Greaves <[email protected]> - Thanx for
" giving me the idea and a good chunk of the code!
"
" DESCRIPTION
" Opens a browser showing the Javadoc for the imported package on the same
" line as the cursor, or for the class name under the cursor.
"
" USE
" Default install is activated with the <F2> key and destroys the j register.
" Tested and working on GVim 6.1 on Linux.
"
" INSTALL
" 1) Put this file in ~/.vim/ftplugin (or wherever Vim looks for plugins).
" 2) (gVIM) Add the following lines to ~/.vimrc
"-----------------------------------------------------------------------------
" au FileType java let browser="xterm --geometry 100x40 -e lynx"
" au FileType java let javadoc_path="/home/etch/Dox/java-1.4-api,/home/etch/Dox/ejb"
" au FileType java nmap <F2> viw"jy:call OpenJavadoc("j")
"-----------------------------------------------------------------------------
"
" (Note: is CTRL-V CTRL-R,
is CTRL-V CTRL-M)
"
" CONFIGURATION - BROWSER
" If you don't want to use xterm and lynx to show the help, you can change
" 'browser="..."' to whatever browser is on your system.
" e.g. To browse with Opera
"-----------------------------------------------------------------------------
" au FileType java let browser="opera"
"-----------------------------------------------------------------------------
"
" If you try this with Netscape it may complain about a 'lock' file, since it
" tries to start a new instance of the browser for each page found. Of course,
" you can just say 'OK' at the dialogue, but it gets irritating. Mozilla tries
" to start a new instance with a different profile, so neither of these
" browsers are usable with this macro :o(
"
" CONFIGURATION - JAVA API PATH
" Just set the javadoc_path variable to a comma separated list of paths to
" the tops of the Javadoc trees.
"
" CONFIGURATION - KEY AND REGISTER ASSIGNMENT
" The third line uses the <F2> key to start the macro, and the j register to
" act as a temporary visual buffer for getting the word under the cursor into
" the function call. Change these if you use them for something else.
"
" CAVEAT
" It can be pretty slow when looking for a class name and your javadoc_path
" contains a lot of files.
+"amended to work for Win environment"
function! OpenJavadoc(classname)
+ call Debug("classname = " . a:classname)
let line = getline(".")
+ call Debug ("line = " . line)
let regex = '^import\s\+\(\S\+\);$'
+ call Debug ("regex = " . regex)
let l = matchstr(line, regex)
+ call Debug ("l = " . l)
let file = substitute(l, regex, '\1', '')
+ call Debug ("file = " . file)
let null = ''
let file = substitute(file, '\.', '/', 'g')
-
+ call Debug ("file = " . file)
let javapath = g:javadoc_path
let regex = "^[^,]*"
+
+ call Debug ("javapath = " . javapath)
+
+ if strlen(file) > 0
+ while (strlen(javapath))
+ let path = GetFirstPathElement(javapath, regex)
+ call Debug ("path = " . path)
+ let javapath = RemoveFirstPathElement(javapath, regex)
+ call Debug ("javapath = " . javapath)
+ let lfile = path . "/" . file . ".html"
+ call Debug ("lfile = " . lfile)
- while (strlen(javapath))
- let path = GetFirstPathElement(javapath, regex)
-
- let javapath = RemoveFirstPathElement(javapath, regex)
- let lfile = path . "/" . file . ".html"
-
- if ((match(lfile, "\*\.html$") != -1) && has("gui_running"))
- let lfile = substitute(lfile, "\*\.html$", "", "")
- if (isdirectory(expand(lfile)))
- let null = system(g:browser.' '.lfile.' &')
+ if ((match(lfile, "\*\.html$") != -1) && has("gui_running"))
+ let lfile = substitute(lfile, "\*\.html$", "", "")
+ call Debug ("lfile = " . lfile)
+ if (isdirectory(expand(lfile)))
+ let null = system(g:browser.' '.lfile.' &')
+ endif
+ elseif (filereadable(expand(lfile)))
+ call Debug ("lfile = " . lfile)
+ let null = system('"'.g:browser.'" '.lfile)
+ let null = 'found file already'
+ break
endif
- elseif (filereadable(expand(lfile)))
- let null = system(g:browser.' '.lfile.' &')
- break
- endif
- endwhile
+ endwhile
+ else
+ call Debug("file = ''. skipping to look for classname.html")
+ endif
+ call Debug("null = " . null)
+ call Debug("strlen(null) = " . strlen(null) )
if (strlen(null) == 0)
+ call Debug("looking for classname.html")
" Couldn't find the file directly, so do the equivalent of a system find
" on each path element and sub-directory.
" Loop through the given path elements
let javapath = g:javadoc_path
+ call Debug("javapath = " . javapath)
while (strlen(javapath))
let path = GetFirstPathElement(javapath, regex)
+ call Debug("path = " . path)
call FindTarget(path, a:classname.".html")
let javapath = RemoveFirstPathElement(javapath, regex)
+ call Debug("javapath = " . javapath )
endwhile
endif
+" let null = system('"'.g:browser.'" '.'C:/j2sdk1.4.0_01/docs/api/java/lang/System.html' )
+ call Debug ("Done")
return file
endfunction
" Get every file within the path and see if it looks like the target.
" If a directory is found then this function is called recursively.
function! FindTarget(path, target)
+" call Debug("FindTarget+")
+ call Debug("looking for " . a:target . " in " . a:path)
let findlist = substitute(glob(a:path."/*").",", "\n", ",", "g")
+ call Debug("findlist = " . findlist )
let null = ''
while (strlen(findlist))
let fpath = GetFirstPathElement(findlist, "[^,]*")
+" call Debug("fpath = " . fpath )
let findlist = substitute(findlist, "[^,]*,", "", "")
+" call Debug("findlist = " . findlist )
if (isdirectory(fpath))
- call FindTarget(fpath, a:target)
+ call FindTarget(fpath, a:target)
else
- if (match(fpath, '/'.a:target) > -1)
- let null = system(g:browser.' '.fpath.' &')
+ if (match(fpath, '\\'.a:target) > -1)
+ let null = system('"'.g:browser.'" '.fpath)
+ break
endif
endif
endwhile
+" call Debug("FindTarget-")
endfunction
" Return everything up to the first regex in a path
function! GetFirstPathElement(path, regex)
+" call Debug("GetFirstPathElement+")
+" call Debug("a:path = " . a:path)
+" call Debug("a:regex = " . a:regex)
+
let lpath = matchstr(a:path, a:regex)
return lpath
endfunction
" Remove everything up to the first "," in a path
function! RemoveFirstPathElement(path, regex)
let lpath = a:path
let lregex = a:regex
let lpath = substitute(lpath, lregex, "", "")
let lpath = substitute(lpath, "^,", "", "")
return lpath
endfunction
|
vim-scripts/java_apidoc.vim | fe46c0d5fa361be1e30de48788377ad5e6270935 | Version 1.0.1 | diff --git a/ftplugin/java_apidoc.vim b/ftplugin/java_apidoc.vim
index b03e8dd..7272f4a 100644
--- a/ftplugin/java_apidoc.vim
+++ b/ftplugin/java_apidoc.vim
@@ -1,128 +1,128 @@
" java_apidoc.vim v1.0 by Paul Etchells <[email protected]>
" based on work by Darren Greaves <[email protected]> - Thanx for
" giving me the idea and a good chunk of the code!
"
" DESCRIPTION
" Opens a browser showing the Javadoc for the imported package on the same
" line as the cursor, or for the class name under the cursor.
"
" USE
" Default install is activated with the <F2> key and destroys the j register.
" Tested and working on GVim 6.1 on Linux.
"
" INSTALL
" 1) Put this file in ~/.vim/ftplugin (or wherever Vim looks for plugins).
" 2) (gVIM) Add the following lines to ~/.vimrc
"-----------------------------------------------------------------------------
" au FileType java let browser="xterm --geometry 100x40 -e lynx"
-" au FileType java let java_api_path="/home/etch/Dox/java-1.4-api,/home/etch/Dox/ejb"
+" au FileType java let javadoc_path="/home/etch/Dox/java-1.4-api,/home/etch/Dox/ejb"
" au FileType java nmap <F2> viw"jy:call OpenJavadoc("j")
"-----------------------------------------------------------------------------
"
" (Note: is CTRL-V CTRL-R,
is CTRL-V CTRL-M)
"
" CONFIGURATION - BROWSER
" If you don't want to use xterm and lynx to show the help, you can change
" 'browser="..."' to whatever browser is on your system.
" e.g. To browse with Opera
"-----------------------------------------------------------------------------
" au FileType java let browser="opera"
"-----------------------------------------------------------------------------
"
" If you try this with Netscape it may complain about a 'lock' file, since it
" tries to start a new instance of the browser for each page found. Of course,
" you can just say 'OK' at the dialogue, but it gets irritating. Mozilla tries
" to start a new instance with a different profile, so neither of these
" browsers are usable with this macro :o(
"
" CONFIGURATION - JAVA API PATH
-" Just set the java_api_path variable to a comma separated list of paths to
+" Just set the javadoc_path variable to a comma separated list of paths to
" the tops of the Javadoc trees.
"
" CONFIGURATION - KEY AND REGISTER ASSIGNMENT
" The third line uses the <F2> key to start the macro, and the j register to
" act as a temporary visual buffer for getting the word under the cursor into
" the function call. Change these if you use them for something else.
"
" CAVEAT
-" It can be pretty slow when looking for a class name and your java_api_path
+" It can be pretty slow when looking for a class name and your javadoc_path
" contains a lot of files.
function! OpenJavadoc(classname)
let line = getline(".")
let regex = '^import\s\+\(\S\+\);$'
let l = matchstr(line, regex)
let file = substitute(l, regex, '\1', '')
let null = ''
let file = substitute(file, '\.', '/', 'g')
let javapath = g:javadoc_path
let regex = "^[^,]*"
while (strlen(javapath))
let path = GetFirstPathElement(javapath, regex)
let javapath = RemoveFirstPathElement(javapath, regex)
let lfile = path . "/" . file . ".html"
if ((match(lfile, "\*\.html$") != -1) && has("gui_running"))
let lfile = substitute(lfile, "\*\.html$", "", "")
if (isdirectory(expand(lfile)))
let null = system(g:browser.' '.lfile.' &')
endif
elseif (filereadable(expand(lfile)))
let null = system(g:browser.' '.lfile.' &')
break
endif
endwhile
if (strlen(null) == 0)
" Couldn't find the file directly, so do the equivalent of a system find
" on each path element and sub-directory.
" Loop through the given path elements
let javapath = g:javadoc_path
while (strlen(javapath))
let path = GetFirstPathElement(javapath, regex)
call FindTarget(path, a:classname.".html")
let javapath = RemoveFirstPathElement(javapath, regex)
endwhile
endif
return file
endfunction
" Get every file within the path and see if it looks like the target.
" If a directory is found then this function is called recursively.
function! FindTarget(path, target)
let findlist = substitute(glob(a:path."/*").",", "\n", ",", "g")
let null = ''
while (strlen(findlist))
let fpath = GetFirstPathElement(findlist, "[^,]*")
let findlist = substitute(findlist, "[^,]*,", "", "")
if (isdirectory(fpath))
call FindTarget(fpath, a:target)
else
if (match(fpath, '/'.a:target) > -1)
let null = system(g:browser.' '.fpath.' &')
endif
endif
endwhile
endfunction
" Return everything up to the first regex in a path
function! GetFirstPathElement(path, regex)
let lpath = matchstr(a:path, a:regex)
return lpath
endfunction
" Remove everything up to the first "," in a path
function! RemoveFirstPathElement(path, regex)
let lpath = a:path
let lregex = a:regex
let lpath = substitute(lpath, lregex, "", "")
let lpath = substitute(lpath, "^,", "", "")
return lpath
endfunction
|
vim-scripts/java_apidoc.vim | 3f7ecb79827c21d2f1f71d585a2f296e66ae27ec | Version 1.0: Initial upload | diff --git a/README b/README
new file mode 100644
index 0000000..8961f4d
--- /dev/null
+++ b/README
@@ -0,0 +1,3 @@
+This is a mirror of http://www.vim.org/scripts/script.php?script_id=358
+
+When viewing/editing a Java source file, put the cursor on an 'import...' line, or on a class name such as 'String', and then press F2. A browser opens showing the API (generated JavaDoc HTML pages) for the imported package or class. The mechanism can be extended to include your own generated JavaDocs.
diff --git a/ftplugin/java_apidoc.vim b/ftplugin/java_apidoc.vim
new file mode 100644
index 0000000..b03e8dd
--- /dev/null
+++ b/ftplugin/java_apidoc.vim
@@ -0,0 +1,128 @@
+" java_apidoc.vim v1.0 by Paul Etchells <[email protected]>
+" based on work by Darren Greaves <[email protected]> - Thanx for
+" giving me the idea and a good chunk of the code!
+"
+" DESCRIPTION
+" Opens a browser showing the Javadoc for the imported package on the same
+" line as the cursor, or for the class name under the cursor.
+"
+" USE
+" Default install is activated with the <F2> key and destroys the j register.
+" Tested and working on GVim 6.1 on Linux.
+"
+" INSTALL
+" 1) Put this file in ~/.vim/ftplugin (or wherever Vim looks for plugins).
+" 2) (gVIM) Add the following lines to ~/.vimrc
+"-----------------------------------------------------------------------------
+" au FileType java let browser="xterm --geometry 100x40 -e lynx"
+" au FileType java let java_api_path="/home/etch/Dox/java-1.4-api,/home/etch/Dox/ejb"
+" au FileType java nmap <F2> viw"jy:call OpenJavadoc("j")
+"-----------------------------------------------------------------------------
+"
+" (Note: is CTRL-V CTRL-R,
is CTRL-V CTRL-M)
+"
+" CONFIGURATION - BROWSER
+" If you don't want to use xterm and lynx to show the help, you can change
+" 'browser="..."' to whatever browser is on your system.
+" e.g. To browse with Opera
+"-----------------------------------------------------------------------------
+" au FileType java let browser="opera"
+"-----------------------------------------------------------------------------
+"
+" If you try this with Netscape it may complain about a 'lock' file, since it
+" tries to start a new instance of the browser for each page found. Of course,
+" you can just say 'OK' at the dialogue, but it gets irritating. Mozilla tries
+" to start a new instance with a different profile, so neither of these
+" browsers are usable with this macro :o(
+"
+" CONFIGURATION - JAVA API PATH
+" Just set the java_api_path variable to a comma separated list of paths to
+" the tops of the Javadoc trees.
+"
+" CONFIGURATION - KEY AND REGISTER ASSIGNMENT
+" The third line uses the <F2> key to start the macro, and the j register to
+" act as a temporary visual buffer for getting the word under the cursor into
+" the function call. Change these if you use them for something else.
+"
+" CAVEAT
+" It can be pretty slow when looking for a class name and your java_api_path
+" contains a lot of files.
+
+function! OpenJavadoc(classname)
+ let line = getline(".")
+ let regex = '^import\s\+\(\S\+\);$'
+ let l = matchstr(line, regex)
+ let file = substitute(l, regex, '\1', '')
+ let null = ''
+
+ let file = substitute(file, '\.', '/', 'g')
+
+ let javapath = g:javadoc_path
+ let regex = "^[^,]*"
+
+ while (strlen(javapath))
+ let path = GetFirstPathElement(javapath, regex)
+
+ let javapath = RemoveFirstPathElement(javapath, regex)
+ let lfile = path . "/" . file . ".html"
+
+ if ((match(lfile, "\*\.html$") != -1) && has("gui_running"))
+ let lfile = substitute(lfile, "\*\.html$", "", "")
+ if (isdirectory(expand(lfile)))
+ let null = system(g:browser.' '.lfile.' &')
+ endif
+ elseif (filereadable(expand(lfile)))
+ let null = system(g:browser.' '.lfile.' &')
+ break
+ endif
+ endwhile
+
+ if (strlen(null) == 0)
+ " Couldn't find the file directly, so do the equivalent of a system find
+ " on each path element and sub-directory.
+
+ " Loop through the given path elements
+ let javapath = g:javadoc_path
+ while (strlen(javapath))
+ let path = GetFirstPathElement(javapath, regex)
+ call FindTarget(path, a:classname.".html")
+ let javapath = RemoveFirstPathElement(javapath, regex)
+ endwhile
+ endif
+
+ return file
+endfunction
+
+
+" Get every file within the path and see if it looks like the target.
+" If a directory is found then this function is called recursively.
+function! FindTarget(path, target)
+ let findlist = substitute(glob(a:path."/*").",", "\n", ",", "g")
+ let null = ''
+ while (strlen(findlist))
+ let fpath = GetFirstPathElement(findlist, "[^,]*")
+ let findlist = substitute(findlist, "[^,]*,", "", "")
+ if (isdirectory(fpath))
+ call FindTarget(fpath, a:target)
+ else
+ if (match(fpath, '/'.a:target) > -1)
+ let null = system(g:browser.' '.fpath.' &')
+ endif
+ endif
+ endwhile
+endfunction
+
+" Return everything up to the first regex in a path
+function! GetFirstPathElement(path, regex)
+ let lpath = matchstr(a:path, a:regex)
+ return lpath
+endfunction
+
+" Remove everything up to the first "," in a path
+function! RemoveFirstPathElement(path, regex)
+ let lpath = a:path
+ let lregex = a:regex
+ let lpath = substitute(lpath, lregex, "", "")
+ let lpath = substitute(lpath, "^,", "", "")
+ return lpath
+endfunction
|
m00natic/ir-lm | 0419c9febcafe982fbd600e740e0f4c28a03cb8b | GPL-ed. Some global variables become customizable. | diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..5cdc649
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,360 @@
+GNU GENERAL PUBLIC LICENSE
+
+Version 3, 29 June 2007
+
+Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+
+Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
+
+Preamble
+
+The GNU General Public License is a free, copyleft license for software and other kinds of works.
+
+The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your
+freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our
+software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.
+
+When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.
+
+To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software,
+or if you modify it: responsibilities to respect the freedom of others.
+
+For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or
+can get the source code. And you must show them these terms so they know their rights.
+
+Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it.
+
+For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked
+as changed, so that their problems will not be attributed erroneously to authors of previous versions.
+
+Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future
+versions of the GPL, as needed to protect the freedom of users.
+
+Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish
+to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free.
+
+The precise terms and conditions for copying, distribution and modification follow.
+
+TERMS AND CONDITIONS
+
+0. Definitions.
+
+“This License” refers to version 3 of the GNU General Public License.
+
+“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.
+
+“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations.
+
+To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version”
+of the earlier work or a work “based on” the earlier work.
+
+A “covered work” means either the unmodified Program or a work based on the Program.
+
+To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer
+or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well.
+
+To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying.
+
+An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion.
+
+1. Source Code.
+
+The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work.
+
+A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major
+Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce
+the work, or an object code interpreter used to run it.
+
+The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts
+to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities
+but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work.
+
+The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.
+
+The Corresponding Source for a work in source code form is that same work.
+
+2. Basic Permissions.
+
+All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of
+having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you
+do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies
+of your copyrighted material outside their relationship with you.
+
+Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.
+
+3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such measures.
+
+When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect
+to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid
+circumvention of technological measures.
+
+4. Conveying Verbatim Copies.
+
+You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep
+intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a
+copy of this License along with the Program.
+
+You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.
+
+5. Conveying Modified Source Versions.
+
+You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these
+conditions:
+
+ * a) The work must carry prominent notices stating that you modified it, and giving a relevant date.
+ * b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact
+   all notices”.
+ * c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional
+ terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such
+ permission if you have separately received it.
+ * d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work
+ need not make them do so.
+
+A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.
+
+6. Conveying Non-Source Forms.
+
+You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of
+these ways:
+
+ * a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used
+ for software interchange.
+ * b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you
+ offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is
+ covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2)
+ access to copy the Corresponding Source from a network server at no charge.
+ * c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you
+ received the object code with such an offer, in accord with subsection 6b.
+ * d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on
+ a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding
+ Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.
+ * e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.
+
+A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for
+incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user,
+“normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is
+expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant
+mode of use of the product.
+
+“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User
+Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with
+solely because modification has been made.
+
+If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of
+the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be
+accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the
+work has been installed in ROM).
+
+The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the
+recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network
+or violates the rules and protocols for communication across the network.
+
+Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in
+source code form), and must require no special password or key for unpacking, reading or copying.
+
+7. Additional Terms.
+
+“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used
+separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions.
+
+When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.
+
+Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with
+terms:
+
+ * a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or
+ * b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or
+ * c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or
+ * d) Limiting the use for publicity purposes of names of licensors or authors of the material; or
+ * e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or
+ * f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on those licensors and authors.
+
+All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.
+
+If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to
+find the applicable terms.
+
+Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.
+
+8. Termination.
+
+You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third paragraph of section 11).
+
+However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally
+terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.
+
+Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received
+notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.
+
+Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not
+permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.
+
+9. Acceptance Not Required for Having Copies.
+
+You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to
+receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not
+accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.
+
+10. Automatic Licensing of Downstream Recipients.
+
+Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for
+enforcing compliance by third parties with this License.
+
+An âentity transactionâ is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give
+under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.
+
+You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering
+for sale, or importing the Program or any portion of it.
+
+11. Patents.
+
+A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”.
+
+A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by
+this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes
+of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.
+
+Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant
+not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.
+
+If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License,
+through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit
+of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying”
+means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more
+identifiable patents in that country that you have reason to believe are valid.
+
+If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving
+the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work
+and works based on it.
+
+A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make
+payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a
+discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or
+compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.
+
+Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.
+
+12. No Surrender of Others' Freedom.
+
+If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot
+convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree
+to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain
+entirely from conveying the Program.
+
+13. Use with the GNU Affero General Public License.
+
+Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public
+License, section 13, concerning interaction through a network will apply to the combination as such.
+
+14. Revised Versions of this License.
+
+The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ
+in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option
+of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General
+Public License, you may choose any version ever published by the Free Software Foundation.
+
+If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to
+choose that version for the Program.
+
+Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later
+version.
+
+15. Disclaimer of Warranty.
+
+THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT
+WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND
+PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+16. Limitation of Liability.
+
+IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR
+LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+17. Interpretation of Sections 15 and 16.
+
+If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.
+
+END OF TERMS AND CONDITIONS
+
+How to Apply These Terms to Your New Programs
+
+If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under
+these terms.
+
+To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least
+the âcopyrightâ line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an
+“about box”.
+
+You should also get your employer (if you work as a programmer) or school, if any, to sign a “copyright disclaimer” for the program, if necessary. For more information on this, and how to apply and follow
+the GNU GPL, see <http://www.gnu.org/licenses/>.
+
+The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary
+applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read <http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/ir-lm.el b/ir-lm.el
index 2161f0c..af28aa7 100644
--- a/ir-lm.el
+++ b/ir-lm.el
@@ -1,978 +1,1016 @@
;;; ir-lm.el --- Basic Mixed Language Model for Information Retrieval
;by Andrey Kotlarski [email protected]
;;; Commentary:
;;; History:
;; 5.VIII.2009 - Version 1.8
;; Abstracting away file-postings
;; structure
;; 31.VII.2009 - Version 1.7
;; Generating word processing function
;; on the fly, thus optimizing
;; depending on whether stop words or
;; stemmer are loaded
;; 18.VII.2009 - Version 1.6
;; highlighting of search words
;; minor bugfixes;
;; 15.VII.2009 - Version 1.5
;; bulgarian stemmer added
;; stop-word and stemmer files are now stored in separate directories
;; which are recursively processed
;; added stemming parameter
;; many corrections in merging
;; 14.VII.2009 - Version 1.4
;; correctly merge postings and info
;; on load or index (no duplicates,
;; no loading of older than index files)
;; added globs for filtering file types
;; 13.VII.2009 - Version 1.3
;; remembering encoding for individual files
;; prune non-existing files on load
;; 12.VII.2009 - Version 1.2
;; new command `ir-lm' giving a unified
;; interface of files and commands
;; command to change lambda
;; full cleaning of data
;; minor bugfixes
;; 10.VII.2009 - Version 1.1
;; added minumim possible score for query
;; so that irrelevant results are discarded
;; a bit of code refactoring and cleaning
;; 09.VII.2009 - Version 1.0
;;; Code:
(defconst *ir-dir*
- (if (or (eq system-type 'windows-nt) (eq system-type 'ms-dos))
- "C:/ir/"
- "~/.ir/")
+ (eval-when-compile
+ (if (or (eq system-type 'windows-nt) (eq system-type 'ms-dos))
+ "C:/ir/"
+ "~/.ir/"))
"Directory for auxiliary files.")
;; *ir-hashes* structure is ((file-path encoding time (point-in-file total-words-in-paragraph
;; distinct-words-in-paragraph hash-of-word-counts) ...) ...)
(defvar *ir-hashes* nil "List of postings grouped in files.")
(defvar *ir-global-hash* nil "Global hash table of words and their count.")
(defvar *ir-total-count* 0 "Count of all words in index.")
(defvar *ir-words-count* 0 "Count of all distinct words in index.")
(defvar *ir-word-cache* nil "Cache of raw word -> transformation.")
(defvar *ir-stop* nil "Hash table of stop words.")
(defvar *ir-stem* nil "Hash table of stemmer.")
-(defvar *ir-lm-lambda* 0.5 "Parameter in the mixed language model.")
-(defvar *ir-max-results* 30 "Maximum number of search results.")
-(defvar *ir-stem-level* 1 "Stemming level.")
-(defvar *ir-lm-min-words* 20 "Minimal number of words in paragraph.")
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Customization
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;;;###autoload
+(defgroup ir-lm nil
+ "Basic mixed language model for information retrieval mode."
+ :group 'applications)
+
+;;;###autoload
+(defcustom ir-lm-lambda 0.5
+ "Lambda parameter in the mixed language model."
+ :group 'ir-lm
+ :type 'number)
+
+;;;###autoload
+(defcustom ir-lm-max-results 30
+ "Maximum number of search results."
+ :group 'ir-lm
+ :type 'integer)
+
+;;;###autoload
+(defcustom ir-lm-stem-level 1
+ "Stemming level."
+ :group 'ir-lm
+ :type 'integer)
+
+;;;###autoload
+(defcustom ir-lm-min-words 20
+ "Minimal number of words in paragraph."
+ :group 'ir-lm
+ :type 'integer)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; *ir-hashes* selectors
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defmacro make-getter (getter-name)
"Create a macro for writing getters with name MAKE- GETTER-NAME -GETTER and argument GETTER-NAME."
(let ((getter-name-str (symbol-name getter-name)))
`(defmacro ,(intern (concat "make-" getter-name-str "-getter"))
(name &rest body)
,(concat "Create a selector for `*ir-hashes*' named "
getter-name-str "- NAME and BODY.
This selector has one argument with structure as `*ir-hashes*'
named `" getter-name-str "'.
Do not use symbol `bla-arg' in the body.")
(let ((bla-arg ',getter-name))
`(defun ,(intern (concat ,getter-name-str "-"
(symbol-name name)))
(,bla-arg)
,@body)))))
;; getters for file structures
(make-getter ir-file)
(make-ir-file-getter name (car ir-file))
(make-ir-file-getter encoding (cadr ir-file))
(make-ir-file-getter time (car (cddr ir-file)))
(make-ir-file-getter paragraphs (cdr (cddr ir-file)))
;; getters for paragraph structures
(make-getter ir-paragraph)
(make-ir-paragraph-getter point (car ir-paragraph))
(make-ir-paragraph-getter total-words (cadr ir-paragraph))
(make-ir-paragraph-getter distinct-words (car (cddr ir-paragraph)))
(make-ir-paragraph-getter hash (cadr (cddr ir-paragraph)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; visualisation and set-er commands
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun make-link (text cmd &optional file point underline-p encoding query)
"Return a TEXT propertized as a link that invokes CMD when clicked.
FILE is to be opened and cursor moved to position POINT.
UNDERLINE-P determines wether text should be underlined.
If ENCODING is nil, use default encoding when loading result file.
QUERY is list of search terms."
(let ((map (make-sparse-keymap)))
(define-key map [mouse-1] cmd)
(define-key map (kbd "RET") cmd)
(propertize text
'keymap map
'face (when underline-p
'((:foreground "green") (:underline t)))
'mouse-face 'highlight
'rear-nonsticky t
'read-only t
'file file
'point point
'encoding encoding
'query query)))
(defun ir-file-words (paragraphs)
"Get total count of words for file by summing count in its PARAGRAPHS."
(apply '+ (mapcar (lambda (sexp)
(ir-paragraph-total-words sexp))
paragraphs)))
(defun ir-list-index ()
"List all files currently in index."
(dolist (file *ir-hashes*)
(let ((file-path (ir-file-name file)))
(when (file-exists-p file-path)
(insert "\n" (make-link file-path 'ir-lm-jump-to-result
file-path 1 nil
(ir-file-encoding file))
(format " [%d]" (ir-file-words (ir-file-paragraphs
file))))))))
(defun ir-refresh-view ()
"Refresh file names in current index."
(ignore-errors
(with-current-buffer "*Information retrieval*"
(goto-char (point-min))
(forward-line 14)
(setq inhibit-read-only t)
(let ((start (point)))
(forward-line 5)
(delete-region start (line-end-position)))
(insert
- "maximum results = " (format "%d\n" *ir-max-results*)
+ "maximum results = " (format "%d\n" ir-lm-max-results)
"minimum number of words in paragraph = "
- (format "%d\n" *ir-lm-min-words*)
- "lambda = " (format "%f\n" *ir-lm-lambda*)
- "stemming level = " (format "%d\n" *ir-stem-level*)
+ (format "%d\n" ir-lm-min-words)
+ "lambda = " (format "%f\n" ir-lm-lambda)
+ "stemming level = " (format "%d\n" ir-lm-stem-level)
"total words in texts = " (format "%d\n" *ir-total-count*)
"words in index = " (format "%d" *ir-words-count*))
(forward-line 2)
(delete-region (point) (point-max))
(ir-list-index)
(setq inhibit-read-only nil)
(set-buffer-modified-p nil)
(goto-char (point-min))
(forward-line 3))))
-(defun ir-lm-change-lambda (new)
+(defun ir-lm-change-lambda (symbol new)
"Set NEW value of the `lambda' parameter."
(interactive
- (list (read-number "New value for lambda (0 < lambda < 1) = ")))
+ (list 'ir-lm-lambda
+ (read-number "New value for lambda (0 < lambda < 1) = ")))
(if (or (<= new 0) (>= new 1))
(message "Incorrect value for lambda.")
- (setq *ir-lm-lambda* new)
+ (set-default symbol new)
(ir-refresh-view)))
-(defun ir-change-stem-level (new)
+(defun ir-lm-change-stem-level (symbol new)
"Set NEW value of the stemming parameter."
(interactive
- (list (read-number "New level for stemming (> 0) = ")))
+ (list 'ir-lm-stem-level
+ (read-number "New level for stemming (> 0) = ")))
(if (< new 1)
(message "Incorrect value for stemming.")
- (setq *ir-stem-level* new)
+ (set-default symbol new)
(ir-refresh-view)
(ir-load-auxiliary t)))
-(defun ir-lm-change-max-results (new)
+(defun ir-lm-change-max-results (symbol new)
"Set NEW value for maximum number of search results."
(interactive
- (list (read-number "Maximum number of search results = ")))
- (setq *ir-max-results* new)
+ (list 'ir-lm-max-results
+ (read-number "Maximum number of search results = ")))
+ (set-default symbol new)
(ir-refresh-view))
-(defun ir-lm-change-min-words (new)
+(defun ir-lm-change-min-words (symbol new)
"Set NEW minimum number of words for paragraph."
(interactive
- (list (read-number "Minumun number of words in paragraph = ")))
- (setq *ir-lm-min-words* new)
+ (list 'ir-lm-min-words
+ (read-number "Minumun number of words in paragraph = ")))
+ (set-default symbol new)
(ir-refresh-view))
(defun ir-clear (&optional all)
"Clear global hashes and reset global variables.
If ALL is non-nil - ask to clear words' cache as well."
(interactive
(list t))
(setq *ir-hashes* nil
*ir-global-hash* nil
*ir-total-count* 0
*ir-words-count* 0)
(when all
(and (or *ir-word-cache* *ir-stem* *ir-stop*)
(y-or-n-p "Clear auxiliary caches as well? ")
(setq *ir-stop* nil
*ir-stem* nil
*ir-word-cache* nil))
(message "Index cleared.")
(ir-refresh-view))
(garbage-collect))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; utilities
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun find-fn (fn lst)
"Return first item that satisfies FN in LST. Nil if no such."
(catch 'out
(dolist (item lst)
(when (funcall fn item)
(throw 'out item)))))
(defun delete-fn (lst fn)
"Destructively delete first element of LST for which FN is non-nil."
(if (funcall fn (car lst))
(cdr lst)
(let ((prev lst)
(curr (cdr lst)))
(catch 'out
(while curr
(if (not (funcall fn (car curr)))
(setq prev curr
curr (cdr curr))
(setcdr prev (cdr curr))
(throw 'out nil))))
lst)))
(defun get-next-word ()
"Get next word \(including hyphens and carrige return\) after position."
(when (forward-word)
(let ((word (current-word t t)))
(while (equal (char-to-string (following-char)) "-")
(when (forward-word)
(setq word (concat word (if (equal (char-to-string
(following-char)) "\n")
""
"-")
(current-word t t)))))
word)))
(defmacro dowords (vars &rest body)
"Bind VARS to consecutive words and execute BODY."
(if (listp vars)
`(let ,(mapcar (lambda (var)
`(,var (get-next-word)))
vars)
(while ,(car vars)
,@body
(setq ,@(apply 'nconc
(mapcar (lambda (var)
`(,var (get-next-word)))
vars)))))
`(let ((,vars (get-next-word)))
(while ,vars
,@body
(setq ,vars (get-next-word))))))
(defun replace-regex-str (word regex str)
"In WORD replace REGEX with STR."
(mapconcat 'identity (split-string word regex) str))
(defun glob-to-regex (glob)
"Turn a GLOB to a reg-exp."
(replace-regex-str
(replace-regex-str (replace-regex-str glob "\\." "\\.")
"?" ".")
"\\*" ".*"))
(defun filter-name (file-name patterns)
"Check whether FILE-NAME is fully matched by any of the PATTERNS."
(when patterns
(let ((match (string-match (car patterns) file-name)))
(or (and match (= 0 match))
(filter-name file-name (cdr patterns))))))
(defun maprdir (fn dir &optional file-types subdir-p)
"Apply FN over all files in DIR and its subdirectories.
FILE-TYPES determines file name patterns for calling FN upon.
Default is all files. If SUBDIR-P is nil,
we are in the top level directory, otherwize we are lower.
This is used when recursing, when calling, must be nil."
(or subdir-p ;executed only once, in top directory
(setq file-types (mapcar 'glob-to-regex
(split-string (or file-types "*")
nil t))))
(dolist (file (directory-files dir))
(let ((file-full (concat dir file)))
(or (equal "." file)
(equal ".." file)
(if (file-directory-p file-full)
(maprdir fn (concat file-full "/") file-types t)
(when (filter-name file file-types)
(funcall fn file-full)))))))
(defun inc-hash-value (key h-table &optional value)
"Increment value for KEY in H-TABLE with VALUE.
If VALUE is nil, use 1.
If KEY doesn't exist, set initial value to VALUE.
If end value of KEY is <=0, remove key.
Return new val if key is added/changed, nil if key is removed."
(let ((end-val (+ (or value 1) (gethash key h-table 0))))
(if (> end-val 0)
(puthash key end-val h-table)
(remhash key h-table))))
(defun hash-to-assoc (h-table)
"Turn a H-TABLE to assoc-list."
(let ((a-list nil))
(maphash (lambda (key val)
(push (cons key val) a-list))
h-table)
a-list))
(defun ir-pair-to-global-hash (key value)
"Add KEY VALUE to `*ir-global-hash*' and adjust global count of words."
(or (gethash key *ir-global-hash* nil)
(setq *ir-words-count* (1+ *ir-words-count*)))
(inc-hash-value key *ir-global-hash* value))
(defun ir-assoc-to-hash (a-list &optional size use-global-hash-p
parent-hash-p)
"Turn A-LIST to a hash-table with size SIZE.
If USE-GLOBAL-HASH-P, add to `*ir-global-hash*', return nil.
If PARENT-HASH-P, create new hash and add both to it
and `*ir-global-hash*', adjusting global counts,
return the newly created one."
(if (not use-global-hash-p)
(let ((h-table (make-hash-table :test 'equal :size size)))
(if parent-hash-p
(dolist (cell a-list h-table)
(let ((key (car cell))
(val (cdr cell)))
(ir-pair-to-global-hash key val)
(inc-hash-value key h-table val)))
(dolist (cell a-list h-table)
(inc-hash-value (car cell) h-table (cdr cell)))))
(or *ir-global-hash* ;else use global, return nil
(setq *ir-global-hash* (make-hash-table
:test 'equal :size size)))
(if parent-hash-p
(dolist (cell a-list)
(ir-pair-to-global-hash (car cell) (cdr cell)))
(dolist (cell a-list)
(inc-hash-value (car cell) *ir-global-hash* (cdr cell))))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Word processing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun bg-stem (word)
"Return stemmed version of WORD."
(if (string-match "\\(.*?[аÑоÑеийÑÑ]\\)\\(.*\\)" word)
(let ((prefix (match-string-no-properties 1 word))
(suffix (match-string-no-properties 2 word)))
(if (and prefix suffix)
(catch 'out
(dotimes (i (length suffix) word)
(let ((stem-suf (gethash (substring suffix i)
*ir-stem* nil)))
(when stem-suf
(throw 'out (concat prefix (substring suffix 0 i)
stem-suf))))))
word))
word))
(defun ir-process-new-word (word)
"Return processed WORD. This is default implementation.
After loading stop-words and stemmers will be fset."
word)
(defmacro ir-build-word-processor (&optional stop-p stem-p)
"Build optimized word processing function.
STOP-P determines whether stop words should be checked.
STEM-P determines whether stemming should be applied."
`(lambda (word)
,(if stop-p
`(if (gethash word *ir-stop* nil)
"" ;stop words are marked as ""
,(if stem-p
'(bg-stem word)
'word))
(if stem-p
'(bg-stem word)
'word))))
(defun ir-get-word-processor (stop-p stem-p)
"Return optimized word processing function.
STOP-P determines whether stop words should be checked.
STEM-P determines whether stemming should be applied."
(cond
((and stop-p stem-p) (ir-build-word-processor t t))
(stop-p (ir-build-word-processor t))
(stem-p (ir-build-word-processor nil t))
(t (ir-build-word-processor))))
(defun ir-process-word (word)
"Return hashed processed value for WORD.
If no such is found, process and cache."
(let ((hash-check (gethash word *ir-word-cache* nil)))
(or hash-check
(setq hash-check (puthash word (ir-process-new-word word)
*ir-word-cache*)))
(unless (equal "" hash-check) hash-check))) ;if not a stop word
(defun ir-load-stop-words (file)
"Load stop-words from FILE to the global hash *ir-stop*."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(dowords word (puthash word "1" *ir-stop*))))
;; (defun ir-load-stemmer-bg (file) ;freezes compilation
;; "Load stem entries from FILE to the global hash *ir-stem*."
;; (with-temp-buffer
;; (insert-file-contents file)
;; (goto-char (point-min))
;; (dowords (w1 w2 w3) ;does not byte compile!
;; (when w3
;; (setq w3 (car (read-from-string w3)))
;; (and (numberp w3)
-;; (>= w3 *ir-stem-level*)
+;; (>= w3 ir-lm-stem-level)
;; (puthash w1 w2 *ir-stem*))))))
(defun ir-load-stemmer-bg (file)
"Load stem entries from FILE to the global hash *ir-stem*."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(dowords w1
(let ((w2 (get-next-word))
(w3 (get-next-word)))
(when w3
(setq w3 (car (read-from-string w3)))
(and (numberp w3)
- (>= w3 *ir-stem-level*)
+ (>= w3 ir-lm-stem-level)
(puthash w1 w2 *ir-stem*)))))))
(defun ir-load-auxiliary (&optional force)
"Load auxiliary files to hashes if not already done.
When FORCE is non-nil, re-fill."
(message "Loading auxiliary hashes...")
(let ((stop-dir (concat *ir-dir* "stop-words/")))
(when (and (file-exists-p stop-dir)
(or force (null *ir-stop*)))
(setq *ir-stop* (make-hash-table :test 'equal :size 1000))
(maprdir 'ir-load-stop-words stop-dir)))
(let ((stem-dir-bg (concat *ir-dir* "stem-rules/bg/")))
(when (and (file-exists-p stem-dir-bg)
(or force (null *ir-stem*)))
(setq *ir-stem* (make-hash-table :test 'equal :size 130514))
(maprdir 'ir-load-stemmer-bg stem-dir-bg)))
(fset 'ir-process-new-word (ir-get-word-processor *ir-stop*
*ir-stem*))
(message "Auxiliary hashes loaded."))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; File processing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defmacro assess-paragraph ()
"Remove a bit of boiler-plate from `ir-lm-extract-words'."
- '(if (>= paragraph-total-count *ir-lm-min-words*)
+ '(if (>= paragraph-total-count ir-lm-min-words)
(push (list paragraph-start paragraph-total-count
paragraph-words-count paragraph)
acc)
(setq *ir-total-count* ;if paragraph is too short, discard
(- *ir-total-count* paragraph-total-count))
(maphash (lambda (wrd cnt) ;and remove word counts
(or (inc-hash-value wrd *ir-global-hash* (- cnt))
(setq *ir-words-count* (1- *ir-words-count*))))
paragraph)))
(defun ir-lm-extract-words (full-file-name &optional encoding)
"Process paragraphs of current buffer holding FULL-FILE-NAME.
Save ENCODING for further operations."
(let ((prev (point-min))
(acc (list (current-time) encoding full-file-name)))
(let ((paragraph-start prev)
(paragraph-total-count 0)
(paragraph-words-count 0)
(paragraph (make-hash-table :test 'equal)))
(goto-char prev)
(dowords word
(setq word (ir-process-word (downcase word)))
(let ((curr (line-beginning-position)))
(when (string-match "\n.*\n" ;detect just ended paragraph
(buffer-substring-no-properties
prev curr))
(assess-paragraph)
(setq paragraph (make-hash-table :test 'equal)
paragraph-total-count 0
paragraph-words-count 0
paragraph-start curr))
(when word
(setq paragraph-total-count
(1+ paragraph-total-count)
*ir-total-count* (1+ *ir-total-count*))
(when (= 1 (inc-hash-value word paragraph)) ;new paragraph word
(setq paragraph-words-count
(1+ paragraph-words-count)))
(when (= 1 (inc-hash-value word *ir-global-hash*)) ;new global word
(setq *ir-words-count* (1+ *ir-words-count*))))
(setq prev curr)))
(kill-buffer (current-buffer))
(assess-paragraph))
(when acc (push (nreverse acc) *ir-hashes*))))
(defun ir-remove-post (post &optional save-globals-p)
"Subtract from global words hash key-values corresponding in POST.
SAVE-GLOBALS-P determines whether global indexes shouldn't be touched."
(setq *ir-total-count* (- *ir-total-count*
(ir-paragraph-total-words post)))
(if save-globals-p
(maphash (lambda (key val)
(inc-hash-value key *ir-global-hash* (- val)))
(ir-paragraph-hash post))
(maphash (lambda (key val)
(or (inc-hash-value key *ir-global-hash* (- val))
(setq *ir-words-count* (1- *ir-words-count*))))
(ir-paragraph-hash post))))
(defun ir-remove-postings (file &optional save-globals-p)
"Clean all info for FILE in hashes.
SAVE-GLOBALS-P determines whether global indexes shouldn't be touched."
(let ((file-posts (ir-file-paragraphs
(find-fn (lambda (post)
(equal file (ir-file-name post)))
*ir-hashes*))))
(dolist (post file-posts)
(ir-remove-post post save-globals-p))
(setq *ir-hashes* (delete-fn *ir-hashes*
(lambda (file-post)
(equal file (ir-file-name
file-post)))))))
(defun ir-lm-process-paragraphs (file &optional encoding)
"Load FILE to temp buffer and process its words.
If ENCODING is nil, use default encoding when loading FILE."
(ir-remove-postings file)
(with-temp-buffer
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(ir-lm-extract-words file encoding)))
(defun print-posting (lst)
"Get printed representation for posting for paragraph LST."
(princ "\n" (current-buffer))
(prin1 (nconc (list (ir-file-name lst) (ir-file-encoding lst)
(ir-file-time lst))
(mapcar (lambda (sublst)
(nconc
(list (ir-paragraph-point sublst)
(ir-paragraph-total-words sublst)
(ir-paragraph-distinct-words sublst))
(hash-to-assoc (ir-paragraph-hash
sublst))))
(ir-file-paragraphs lst)))
(current-buffer)))
(defun ir-lm-write-index (file)
"Write current index info to FILE."
(interactive
(list (read-file-name "Index file: " nil ".irlm" nil ".irlm")))
(message "Writing...")
(with-temp-file file
(prin1 (nconc (list *ir-total-count* *ir-words-count*) ;firstly write the global hash
(hash-to-assoc *ir-global-hash*))
(current-buffer))
(mapc 'print-posting *ir-hashes*)) ;write all postings
(message "Index written."))
+;;;###autoload
(defun ir-lm-index (dir &optional file-types encoding append-p)
"Recursivelly process directory DIR and index all files.
FILE-TYPES determines file name patterns for indexing.
If ENCODING is nil, use default \(utf-8\) encoding for files.
If APPEND-P is non-nil, merge to the current index."
(interactive
(list
(read-directory-name "Top directory: " nil default-directory t)
(read-string "File names to be indexed: " "*.txt" nil "*.txt")
(unless (y-or-n-p "Use default encoding? ")
(read-coding-system "Choose encoding: " 'cp1251))
(when *ir-global-hash*
(y-or-n-p "Add to existing configuration? "))))
(or *ir-global-hash*
(setq append-p nil))
(or *ir-word-cache*
(setq *ir-word-cache* (make-hash-table :test 'equal)))
(unless append-p
(ir-clear)
(setq *ir-global-hash* (make-hash-table :test 'equal)))
(ir-load-auxiliary)
(message "Indexing...")
(maprdir (lambda (file)
(ir-lm-process-paragraphs file encoding))
dir file-types)
(message "Files successfully indexed.")
(ir-refresh-view))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Load existing index
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-get-file-posting (post &optional inc-globals-p)
"Convert file saved POST info to actually used structures.
INC-GLOBALS-P determines whether global word counts should be adjusted."
(nconc (list (ir-file-name post) (ir-file-encoding post)
(ir-file-time post))
(mapcar (lambda (subpost)
(let ((total-words (ir-paragraph-total-words
subpost))
(index-words (ir-paragraph-distinct-words
subpost)))
(when inc-globals-p
(setq *ir-total-count*
(+ *ir-total-count* total-words)))
(list (ir-paragraph-point subpost)
total-words index-words
(ir-assoc-to-hash (cdr (cddr subpost))
index-words nil
inc-globals-p))))
(ir-file-paragraphs post))))
(defun ir-lm-load-file-posting (post &optional inc-globals-p)
"Get file saved POST. If newer posting already exists, discard.
INC-GLOBALS-P determines whether global word counts should be adjusted."
(let* ((file-path (ir-file-name post))
(existing-file-time
(ir-file-time (find-fn (lambda (post)
(equal file-path
(ir-file-name post)))
*ir-hashes*))))
(if existing-file-time ;check if file is already in index
(if (file-exists-p file-path)
(when (time-less-p existing-file-time
(ir-file-time post)) ;if post is newer
(ir-remove-postings file-path (not inc-globals-p)) ;remove old posting from *ir-hashes*
(ir-lm-get-file-posting post inc-globals-p))
;;discard posting and remove existing from *ir-hashes*
(ir-remove-postings file-path (not inc-globals-p)) ;housekeeping
nil)
(when (file-exists-p file-path) ;load only existing files
(ir-lm-get-file-posting post inc-globals-p)))))
(defun ir-lm-load-index-from-file (file)
"Load existing index from FILE."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(let ((dont-inc-globals-p (null *ir-global-hash*)))
(when dont-inc-globals-p ;need global hash from file only if current is cleared
(let ((global-hash (read-from-whole-string
(buffer-substring-no-properties
(line-beginning-position)
(line-end-position)))))
(setq *ir-total-count* (car global-hash)
*ir-words-count* (cadr global-hash))
(ir-assoc-to-hash (cddr global-hash) *ir-words-count* t)))
(let ((point-max (point-max)))
(while (and (= 0 (forward-line 1))
(< (point) point-max))
(let ((file-sexp (ir-lm-load-file-posting
(read-from-whole-string
(buffer-substring-no-properties
(line-beginning-position)
(line-end-position)))
(not dont-inc-globals-p))))
(when file-sexp (push file-sexp *ir-hashes*))))))
(kill-buffer (current-buffer))))
+;;;###autoload
(defun ir-lm-load-index (file &optional append-p)
"Load existing index FILE.
If APPEND-P is non-nil, keep previous index loaded as well."
(interactive
(list (read-file-name "Index file: " nil ".irlm" nil ".irlm")
(when *ir-global-hash*
(y-or-n-p
"Add to existing configuration or overwrite? "))))
(when (file-exists-p file)
(or (and *ir-global-hash* append-p)
(ir-clear))
(ir-load-auxiliary)
(message "Loading...")
(ir-lm-load-index-from-file file)
(message "Index loaded.")
(ir-refresh-view)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Scoring
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-posting-score (hash base words &optional lambda)
"Get score from paragraph represented as HASH.
BASE is the total number of words in the paragraph.
WORDS is list of words in query.
LAMBDA is LM parameter between 0 and 1."
(or lambda
(setq lambda 0.5))
(let ((result
(apply '*
(mapcar (lambda (word)
(let ((global-count
(gethash word *ir-global-hash* 0)))
(if (> global-count 0)
(+ (* lambda
(/ (float (gethash word hash 0))
base))
(* (- 1 lambda)
(/ (float global-count)
*ir-total-count*)))
1)))
words))))
(if (= result 1) 0 result)))
(defun ir-lm-posting-min-score (words &optional lambda)
"Get minimum score possible for a paragraph.
WORDS is list of words in query.
LAMBDA is LM parameter between 0 and 1."
(or lambda (setq lambda 0.5))
(apply '* (mapcar (lambda (word)
(let ((global-count
(gethash word *ir-global-hash* 0)))
(if (> global-count 0)
(* (- 1 lambda)
(/ (float global-count)
*ir-total-count*))
1)))
words)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Search
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-insert-post (new best cnt)
"Insert NEW post based on score into BEST array with CNT elements."
(let ((new-val (aref new 0))
(place (1+ cnt)))
(when (> new-val 0)
(while (and (>= place 1)
(> new-val (aref (aref best (1- place)) 0)))
(setq place (1- place)))
(while (> cnt place)
(aset best cnt (aref best (1- cnt)))
(setq cnt (1- cnt)))
(when (>= cnt place) (aset best place new))))
best)
(defun ir-lm-get-best-scores (query cnt)
"For QUERY which is list of search terms find best CNT results.
Return vector of vectors with info for best paragraphs."
(let ((best (make-vector cnt [0 "" -1 nil nil]))
- (min-score (ir-lm-posting-min-score query *ir-lm-lambda*)))
+ (min-score (ir-lm-posting-min-score query ir-lm-lambda)))
(dolist (file *ir-hashes*)
(let ((file-path (ir-file-name file)))
(when (file-exists-p file-path)
(dolist (post (ir-file-paragraphs file))
(let ((score
(ir-lm-posting-score (ir-paragraph-hash post)
(ir-paragraph-total-words
post)
- query *ir-lm-lambda*)))
+ query ir-lm-lambda)))
(when (> score min-score)
(setq best (ir-lm-insert-post
(vector score file-path
(ir-paragraph-point post)
(ir-file-encoding file))
best (1- cnt)))))))))
best))
(defun highlight-search (pos query)
"Highlight words from POS on to the end of paragraph corresponding to QUERY."
(catch 'out
(let ((prev pos))
(dowords word
(let ((curr (point)))
(when (string-match "\n.*\n" ;detect just ended paragraph
(buffer-substring-no-properties
prev curr))
(throw 'out nil))
(when (member (ir-process-word (downcase word))
query)
(delete-char (- (length word)))
(insert
(propertize word
'face '((:foreground "green")))))
(setq prev curr))))))
(defun ir-lm-jump-to-result (file pos &optional encoding query)
"Open FILE and go to particular position POS.
If ENCODING is nil, use default encoding when loading result file.
QUERY is list of current search terms."
(interactive
(let ((point (point)))
(list (get-text-property point 'file)
(get-text-property point 'point)
(get-text-property point 'encoding)
(get-text-property point 'query))))
(let ((jump-buffer (generate-new-buffer
(car (nreverse (split-string file "/"))))))
(set-buffer jump-buffer)
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(goto-char pos)
(when query ;highlight search terms
(highlight-search pos query)
(goto-char pos))
(switch-to-buffer jump-buffer)))
(defun ir-lm-insert-results (best query)
"Insert in current buffer BEST results.
QUERY is list of current search terms."
(catch 'end-results
(mapc (lambda (post)
(let ((score (aref post 0))
(file (aref post 1))
(marker (aref post 2))
(encoding (aref post 3))
(preview ""))
(if (<= score 0)
(throw 'end-results nil) ;premature end of meaningful results
(insert "\n")
(insert (make-link (car (nreverse
(split-string file "/")))
'ir-lm-jump-to-result file marker t
encoding query))
(insert (format " [%f]" (* score 1000000)))
(when (number-or-marker-p marker)
(with-temp-buffer
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(goto-char marker)
(setq preview
(buffer-substring-no-properties marker
(line-end-position)))
(kill-buffer (current-buffer)))
(insert "\n")
(insert (make-link preview 'ir-lm-jump-to-result
file marker nil encoding
query))))))
best)))
(defun ir-lm-search (query-str &optional cnt)
"For QUERY-STR find best CNT results."
(interactive
(list (read-string "Search for: " nil t) nil))
- (or cnt (setq cnt *ir-max-results*))
+ (or cnt (setq cnt ir-lm-max-results))
(if (null *ir-global-hash*)
(message "No index loaded.")
(or *ir-word-cache*
(setq *ir-word-cache* (make-hash-table :test 'equal)))
(let ((results (generate-new-buffer "*Search results*")))
(set-buffer results)
(local-set-key (kbd "<M-down>")
(lambda () (interactive) (forward-line 2)))
(local-set-key (kbd "<M-up>")
(lambda () (interactive) (forward-line -2)))
- (local-set-key (kbd "q") (lambda () (interactive) (kill-buffer)))
+ (local-set-key "q" (lambda () (interactive) (kill-buffer)))
(switch-to-buffer results)
(insert "Results for: " query-str)
(let ((query (delete nil
(mapcar (lambda (word)
(ir-process-word
(downcase word)))
(split-string query-str)))))
(ir-lm-insert-results (ir-lm-get-best-scores query cnt)
query))
(setq buffer-read-only t)
(goto-char (point-min))
(forward-line))
(ignore-errors (kill-buffer "*Quail Completions*"))
(message (concat "Results for: " query-str))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Visualisation
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-set-keys ()
"Set key bindings in the IR buffer."
- (local-set-key (kbd "i") 'ir-lm-index)
- (local-set-key (kbd "l") 'ir-lm-load-index)
- (local-set-key (kbd "w") 'ir-lm-write-index)
- (local-set-key (kbd "f") 'ir-lm-search)
- (local-set-key (kbd "c") 'ir-clear)
- (local-set-key (kbd "m") 'ir-lm-change-max-results)
- (local-set-key (kbd "p") 'ir-lm-change-min-words)
- (local-set-key (kbd "b") 'ir-lm-change-lambda)
- (local-set-key (kbd "s") 'ir-change-stem-level)
- (local-set-key (kbd "q") (lambda () (interactive) (kill-buffer)))
- (local-set-key (kbd "r") (lambda () (interactive) (ir-refresh-view))))
-
+ (local-set-key "i" 'ir-lm-index)
+ (local-set-key "l" 'ir-lm-load-index)
+ (local-set-key "w" 'ir-lm-write-index)
+ (local-set-key "f" 'ir-lm-search)
+ (local-set-key "c" 'ir-clear)
+ (local-set-key "m" 'ir-lm-change-max-results)
+ (local-set-key "p" 'ir-lm-change-min-words)
+ (local-set-key "b" 'ir-lm-change-lambda)
+ (local-set-key "s" 'ir-lm-change-stem-level)
+ (local-set-key "q" (lambda () (interactive) (kill-buffer)))
+ (local-set-key "r" (lambda () (interactive) (ir-refresh-view))))
+
+;;;###autoload
(defun ir-lm ()
"Create buffer with information and shortcuts."
(interactive)
(let ((ir-buffer (get-buffer-create "*Information retrieval*")))
(set-buffer ir-buffer)
(switch-to-buffer ir-buffer)
(insert
(propertize "Information Retrieval - Basic Mixed Language Model"
'face '((:foreground "green") (:underline t)))
"\n\nOptions:\n"
(make-link "i -> index new directory"
'ir-lm-index)
"\n"
(make-link "l -> load existing index from file"
'ir-lm-load-index)
"\n"
(make-link "w -> write current index\(es\) to file"
'ir-lm-write-index)
"\n"
(make-link "f -> search in current loaded index\(es\)"
'ir-lm-search)
"\n"
(make-link "c -> clear current index\(es\)"
'ir-clear)
"\n"
(make-link "m -> change maximum search results"
'ir-lm-change-max-results)
"\n"
(make-link "p -> change minimum number of words in paragraph"
'ir-lm-change-min-words)
"\n"
(make-link "b -> change lambda"
'ir-lm-change-lambda)
"\n"
(make-link "s -> change stemming level"
- 'ir-change-stem-level)
+ 'ir-lm-change-stem-level)
"\n"
(make-link "q -> quit \(without clearing\)"
(lambda () (interactive) (kill-buffer)))
"\n\n"
- "maximum results = " (format "%d\n" *ir-max-results*)
+ "maximum results = " (format "%d\n" ir-lm-max-results)
"minimum number of words in paragraph = "
- (format "%d\n" *ir-lm-min-words*)
- "lambda = " (format "%f\n" *ir-lm-lambda*)
- "stemming level = " (format "%d\n" *ir-stem-level*)
+ (format "%d\n" ir-lm-min-words)
+ "lambda = " (format "%f\n" ir-lm-lambda)
+ "stemming level = " (format "%d\n" ir-lm-stem-level)
"total words in texts = " (format "%d\n" *ir-total-count*)
"words in index = " (format "%d\n" *ir-words-count*)
"Currently indexed files [total words]:\n")
(ir-lm-set-keys)
(ir-list-index)
(setq buffer-read-only t)
(set-buffer-modified-p nil)
(goto-char (point-min))
(forward-line 3)))
(provide 'ir-lm)
;;; ir-lm.el ends here
|
m00natic/ir-lm | 51e68b1e21f643c57eb9c5b0124ece169c0256d7 | removed dependancy on cl | diff --git a/ir-lm.el b/ir-lm.el
index 9543101..2161f0c 100644
--- a/ir-lm.el
+++ b/ir-lm.el
@@ -1,981 +1,978 @@
;;; ir-lm.el --- Basic Mixed Language Model for Information Retrieval
;by Andrey Kotlarski [email protected]
;;; Commentary:
;;; History:
;; 5.VIII.2009 - Version 1.8
- ; Abstracting away file-postings
- ; structure
+;; Abstracting away file-postings
+;; structure
;; 31.VII.2009 - Version 1.7
- ; Generating word processing function
- ; on the fly, thus optimizing
- ; depending on whether stop words or
- ; stemmer are loaded
+;; Generating word processing function
+;; on the fly, thus optimizing
+;; depending on whether stop words or
+;; stemmer are loaded
;; 18.VII.2009 - Version 1.6
- ; highlighting of search words
- ; minor bugfixes
+;; highlighting of search words
+;; minor bugfixes;
;; 15.VII.2009 - Version 1.5
- ; bulgarian stemmer added
- ; stop-word and stemmer files
- ; are now stored in separate directories
- ; which are recursively processed
- ; added stemming parameter
- ; many corrections in merging
+;; bulgarian stemmer added
+;; stop-word and stemmer files are now stored in separate directories
+;; which are recursively processed
+;; added stemming parameter
+;; many corrections in merging
;; 14.VII.2009 - Version 1.4
- ; correctly merge postings and info
- ; on load or index (no duplicates,
- ; no loading of older than index files)
- ; added globs for filtering file types
+;; correctly merge postings and info
+;; on load or index (no duplicates,
+;; no loading of older than index files)
+;; added globs for filtering file types
;; 13.VII.2009 - Version 1.3
- ; remembering encoding for individual files
- ; prune non-existing files on load
+;; remembering encoding for individual files
+;; prune non-existing files on load
;; 12.VII.2009 - Version 1.2
- ; new command `ir-lm' giving a unified
- ; interface of files and commands
- ; command to change lambda
- ; full cleaning of data
- ; minor bugfixes
+;; new command `ir-lm' giving a unified
+;; interface of files and commands
+;; command to change lambda
+;; full cleaning of data
+;; minor bugfixes
;; 10.VII.2009 - Version 1.1
- ; added minumim possible score for query
- ; so that irrelevant results are discarded
- ; a bit of code refactoring and cleaning
+;; added minumim possible score for query
+;; so that irrelevant results are discarded
+;; a bit of code refactoring and cleaning
;; 09.VII.2009 - Version 1.0
;;; Code:
(defconst *ir-dir*
- (if (or (eq system-type 'windows-nt)
- (eq system-type 'ms-dos))
+ (if (or (eq system-type 'windows-nt) (eq system-type 'ms-dos))
"C:/ir/"
"~/.ir/")
"Directory for auxiliary files.")
;; *ir-hashes* structure is ((file-path encoding time (point-in-file total-words-in-paragraph
;; distinct-words-in-paragraph hash-of-word-counts) ...) ...)
(defvar *ir-hashes* nil "List of postings grouped in files.")
(defvar *ir-global-hash* nil "Global hash table of words and their count.")
(defvar *ir-total-count* 0 "Count of all words in index.")
(defvar *ir-words-count* 0 "Count of all distinct words in index.")
(defvar *ir-word-cache* nil "Cache of raw word -> transformation.")
(defvar *ir-stop* nil "Hash table of stop words.")
(defvar *ir-stem* nil "Hash table of stemmer.")
(defvar *ir-lm-lambda* 0.5 "Parameter in the mixed language model.")
(defvar *ir-max-results* 30 "Maximum number of search results.")
(defvar *ir-stem-level* 1 "Stemming level.")
(defvar *ir-lm-min-words* 20 "Minimal number of words in paragraph.")
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; *ir-hashes* selectors
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defmacro make-getter (getter-name)
"Create a macro for writing getters with name MAKE- GETTER-NAME -GETTER and argument GETTER-NAME."
(let ((getter-name-str (symbol-name getter-name)))
`(defmacro ,(intern (concat "make-" getter-name-str "-getter"))
(name &rest body)
,(concat "Create a selector for `*ir-hashes*' named "
getter-name-str "- NAME and BODY.
This selector has one argument with structure as `*ir-hashes*'
named `" getter-name-str "'.
Do not use symbol `bla-arg' in the body.")
(let ((bla-arg ',getter-name))
`(defun ,(intern (concat ,getter-name-str "-"
(symbol-name name)))
(,bla-arg)
,@body)))))
;; getters for file structures
(make-getter ir-file)
(make-ir-file-getter name (car ir-file))
(make-ir-file-getter encoding (cadr ir-file))
-(make-ir-file-getter time (caddr ir-file))
-(make-ir-file-getter paragraphs (cdddr ir-file))
+(make-ir-file-getter time (car (cddr ir-file)))
+(make-ir-file-getter paragraphs (cdr (cddr ir-file)))
;; getters for paragraph structures
(make-getter ir-paragraph)
(make-ir-paragraph-getter point (car ir-paragraph))
(make-ir-paragraph-getter total-words (cadr ir-paragraph))
-(make-ir-paragraph-getter distinct-words (caddr ir-paragraph))
-(make-ir-paragraph-getter hash (cadddr ir-paragraph))
+(make-ir-paragraph-getter distinct-words (car (cddr ir-paragraph)))
+(make-ir-paragraph-getter hash (cadr (cddr ir-paragraph)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; visualisation and set-er commands
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun make-link (text cmd &optional file point underline-p encoding query)
"Return a TEXT propertized as a link that invokes CMD when clicked.
FILE is to be opened and cursor moved to position POINT.
UNDERLINE-P determines wether text should be underlined.
If ENCODING is nil, use default encoding when loading result file.
QUERY is list of search terms."
(let ((map (make-sparse-keymap)))
(define-key map [mouse-1] cmd)
(define-key map (kbd "RET") cmd)
(propertize text
'keymap map
'face (when underline-p
'((:foreground "green") (:underline t)))
'mouse-face 'highlight
'rear-nonsticky t
'read-only t
'file file
'point point
'encoding encoding
'query query)))
(defun ir-file-words (paragraphs)
"Get total count of words for file by summing count in its PARAGRAPHS."
(apply '+ (mapcar (lambda (sexp)
(ir-paragraph-total-words sexp))
paragraphs)))
(defun ir-list-index ()
"List all files currently in index."
(dolist (file *ir-hashes*)
(let ((file-path (ir-file-name file)))
(when (file-exists-p file-path)
(insert "\n" (make-link file-path 'ir-lm-jump-to-result
file-path 1 nil
(ir-file-encoding file))
(format " [%d]" (ir-file-words (ir-file-paragraphs
file))))))))
(defun ir-refresh-view ()
"Refresh file names in current index."
(ignore-errors
(with-current-buffer "*Information retrieval*"
(goto-char (point-min))
(forward-line 14)
(setq inhibit-read-only t)
(let ((start (point)))
(forward-line 5)
(delete-region start (line-end-position)))
(insert
"maximum results = " (format "%d\n" *ir-max-results*)
"minimum number of words in paragraph = "
(format "%d\n" *ir-lm-min-words*)
"lambda = " (format "%f\n" *ir-lm-lambda*)
"stemming level = " (format "%d\n" *ir-stem-level*)
"total words in texts = " (format "%d\n" *ir-total-count*)
"words in index = " (format "%d" *ir-words-count*))
(forward-line 2)
(delete-region (point) (point-max))
(ir-list-index)
(setq inhibit-read-only nil)
(set-buffer-modified-p nil)
(goto-char (point-min))
(forward-line 3))))
(defun ir-lm-change-lambda (new)
"Set NEW value of the `lambda' parameter."
(interactive
(list (read-number "New value for lambda (0 < lambda < 1) = ")))
(if (or (<= new 0) (>= new 1))
(message "Incorrect value for lambda.")
(setq *ir-lm-lambda* new)
(ir-refresh-view)))
(defun ir-change-stem-level (new)
"Set NEW value of the stemming parameter."
(interactive
(list (read-number "New level for stemming (> 0) = ")))
(if (< new 1)
(message "Incorrect value for stemming.")
(setq *ir-stem-level* new)
(ir-refresh-view)
(ir-load-auxiliary t)))
(defun ir-lm-change-max-results (new)
"Set NEW value for maximum number of search results."
(interactive
(list (read-number "Maximum number of search results = ")))
(setq *ir-max-results* new)
(ir-refresh-view))
(defun ir-lm-change-min-words (new)
"Set NEW minimum number of words for paragraph."
(interactive
(list (read-number "Minumun number of words in paragraph = ")))
(setq *ir-lm-min-words* new)
(ir-refresh-view))
(defun ir-clear (&optional all)
"Clear global hashes and reset global variables.
If ALL is non-nil - ask to clear words' cache as well."
(interactive
(list t))
(setq *ir-hashes* nil
*ir-global-hash* nil
*ir-total-count* 0
*ir-words-count* 0)
(when all
(and (or *ir-word-cache* *ir-stem* *ir-stop*)
(y-or-n-p "Clear auxiliary caches as well? ")
(setq *ir-stop* nil
*ir-stem* nil
*ir-word-cache* nil))
(message "Index cleared.")
(ir-refresh-view))
(garbage-collect))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; utilities
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun find-fn (fn lst)
"Return first item that satisfies FN in LST. Nil if no such."
(catch 'out
(dolist (item lst)
(when (funcall fn item)
(throw 'out item)))))
(defun delete-fn (lst fn)
"Destructively delete first element of LST for which FN is non-nil."
(if (funcall fn (car lst))
(cdr lst)
(let ((prev lst)
(curr (cdr lst)))
(catch 'out
(while curr
(if (not (funcall fn (car curr)))
(setq prev curr
curr (cdr curr))
(setcdr prev (cdr curr))
(throw 'out nil))))
lst)))
(defun get-next-word ()
"Get next word \(including hyphens and carrige return\) after position."
(when (forward-word)
(let ((word (current-word t t)))
(while (equal (char-to-string (following-char)) "-")
(when (forward-word)
(setq word (concat word (if (equal (char-to-string
(following-char)) "\n")
""
"-")
(current-word t t)))))
word)))
(defmacro dowords (vars &rest body)
"Bind VARS to consecutive words and execute BODY."
(if (listp vars)
`(let ,(mapcar (lambda (var)
`(,var (get-next-word)))
vars)
(while ,(car vars)
,@body
(setq ,@(apply 'nconc
(mapcar (lambda (var)
`(,var (get-next-word)))
vars)))))
`(let ((,vars (get-next-word)))
(while ,vars
,@body
(setq ,vars (get-next-word))))))
(defun replace-regex-str (word regex str)
"In WORD replace REGEX with STR."
(mapconcat 'identity (split-string word regex) str))
(defun glob-to-regex (glob)
"Turn a GLOB to a reg-exp."
(replace-regex-str
(replace-regex-str (replace-regex-str glob "\\." "\\.")
"?" ".")
"\\*" ".*"))
(defun filter-name (file-name patterns)
"Check whether FILE-NAME is fully matched by any of the PATTERNS."
(when patterns
(let ((match (string-match (car patterns) file-name)))
(or (and match (= 0 match))
(filter-name file-name (cdr patterns))))))
(defun maprdir (fn dir &optional file-types subdir-p)
"Apply FN over all files in DIR and its subdirectories.
FILE-TYPES determines file name patterns for calling FN upon.
Default is all files. If SUBDIR-P is nil,
we are in the top level directory, otherwize we are lower.
This is used when recursing, when calling, must be nil."
(or subdir-p ;executed only once, in top directory
(setq file-types (mapcar 'glob-to-regex
(split-string (or file-types "*")
nil t))))
(dolist (file (directory-files dir))
(let ((file-full (concat dir file)))
(or (equal "." file)
(equal ".." file)
(if (file-directory-p file-full)
(maprdir fn (concat file-full "/") file-types t)
(when (filter-name file file-types)
(funcall fn file-full)))))))
(defun inc-hash-value (key h-table &optional value)
"Increment value for KEY in H-TABLE with VALUE.
If VALUE is nil, use 1.
If KEY doesn't exist, set initial value to VALUE.
If end value of KEY is <=0, remove key.
Return new val if key is added/changed, nil if key is removed."
(let ((end-val (+ (or value 1) (gethash key h-table 0))))
(if (> end-val 0)
(puthash key end-val h-table)
(remhash key h-table))))
(defun hash-to-assoc (h-table)
"Turn a H-TABLE to assoc-list."
(let ((a-list nil))
(maphash (lambda (key val)
(push (cons key val) a-list))
h-table)
a-list))
(defun ir-pair-to-global-hash (key value)
"Add KEY VALUE to `*ir-global-hash*' and adjust global count of words."
(or (gethash key *ir-global-hash* nil)
(setq *ir-words-count* (1+ *ir-words-count*)))
(inc-hash-value key *ir-global-hash* value))
(defun ir-assoc-to-hash (a-list &optional size use-global-hash-p
parent-hash-p)
"Turn A-LIST to a hash-table with size SIZE.
If USE-GLOBAL-HASH-P, add to `*ir-global-hash*', return nil.
If PARENT-HASH-P, create new hash and add both to it
and `*ir-global-hash*', adjusting global counts,
return the newly created one."
(if (not use-global-hash-p)
(let ((h-table (make-hash-table :test 'equal :size size)))
(if parent-hash-p
(dolist (cell a-list h-table)
(let ((key (car cell))
(val (cdr cell)))
(ir-pair-to-global-hash key val)
(inc-hash-value key h-table val)))
(dolist (cell a-list h-table)
(inc-hash-value (car cell) h-table (cdr cell)))))
(or *ir-global-hash* ;else use global, return nil
(setq *ir-global-hash* (make-hash-table
:test 'equal :size size)))
(if parent-hash-p
(dolist (cell a-list)
(ir-pair-to-global-hash (car cell) (cdr cell)))
(dolist (cell a-list)
(inc-hash-value (car cell) *ir-global-hash* (cdr cell))))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Word processing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun bg-stem (word)
"Return stemmed version of WORD."
(if (string-match "\\(.*?[аÑоÑеийÑÑ]\\)\\(.*\\)" word)
(let ((prefix (match-string-no-properties 1 word))
(suffix (match-string-no-properties 2 word)))
(if (and prefix suffix)
(catch 'out
(dotimes (i (length suffix) word)
(let ((stem-suf (gethash (substring suffix i)
*ir-stem* nil)))
(when stem-suf
(throw 'out (concat prefix (substring suffix 0 i)
stem-suf))))))
word))
word))
(defun ir-process-new-word (word)
"Return processed WORD. This is default implementation.
After loading stop-words and stemmers will be fset."
word)
(defmacro ir-build-word-processor (&optional stop-p stem-p)
"Build optimized word processing function.
STOP-P determines whether stop words should be checked.
STEM-P determines whether stemming should be applied."
`(lambda (word)
,(if stop-p
`(if (gethash word *ir-stop* nil)
"" ;stop words are marked as ""
,(if stem-p
'(bg-stem word)
'word))
(if stem-p
'(bg-stem word)
'word))))
(defun ir-get-word-processor (stop-p stem-p)
"Return optimized word processing function.
STOP-P determines whether stop words should be checked.
STEM-P determines whether stemming should be applied."
(cond
((and stop-p stem-p) (ir-build-word-processor t t))
(stop-p (ir-build-word-processor t))
(stem-p (ir-build-word-processor nil t))
(t (ir-build-word-processor))))
(defun ir-process-word (word)
"Return hashed processed value for WORD.
If no such is found, process and cache."
(let ((hash-check (gethash word *ir-word-cache* nil)))
(or hash-check
(setq hash-check (puthash word (ir-process-new-word word)
*ir-word-cache*)))
(unless (equal "" hash-check) hash-check))) ;if not a stop word
(defun ir-load-stop-words (file)
"Load stop-words from FILE to the global hash *ir-stop*."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(dowords word (puthash word "1" *ir-stop*))))
;; (defun ir-load-stemmer-bg (file) ;freezes compilation
;; "Load stem entries from FILE to the global hash *ir-stem*."
;; (with-temp-buffer
;; (insert-file-contents file)
;; (goto-char (point-min))
;; (dowords (w1 w2 w3) ;does not byte compile!
;; (when w3
;; (setq w3 (car (read-from-string w3)))
;; (and (numberp w3)
;; (>= w3 *ir-stem-level*)
;; (puthash w1 w2 *ir-stem*))))))
(defun ir-load-stemmer-bg (file)
"Load stem entries from FILE to the global hash *ir-stem*."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(dowords w1
(let ((w2 (get-next-word))
(w3 (get-next-word)))
(when w3
(setq w3 (car (read-from-string w3)))
(and (numberp w3)
(>= w3 *ir-stem-level*)
(puthash w1 w2 *ir-stem*)))))))
(defun ir-load-auxiliary (&optional force)
"Load auxiliary files to hashes if not already done.
When FORCE is non-nil, re-fill."
(message "Loading auxiliary hashes...")
(let ((stop-dir (concat *ir-dir* "stop-words/")))
(when (and (file-exists-p stop-dir)
(or force (null *ir-stop*)))
(setq *ir-stop* (make-hash-table :test 'equal :size 1000))
(maprdir 'ir-load-stop-words stop-dir)))
(let ((stem-dir-bg (concat *ir-dir* "stem-rules/bg/")))
(when (and (file-exists-p stem-dir-bg)
(or force (null *ir-stem*)))
(setq *ir-stem* (make-hash-table :test 'equal :size 130514))
(maprdir 'ir-load-stemmer-bg stem-dir-bg)))
(fset 'ir-process-new-word (ir-get-word-processor *ir-stop*
*ir-stem*))
(message "Auxiliary hashes loaded."))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; File processing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+(defmacro assess-paragraph ()
+ "Remove a bit of boiler-plate from `ir-lm-extract-words'."
+ '(if (>= paragraph-total-count *ir-lm-min-words*)
+ (push (list paragraph-start paragraph-total-count
+ paragraph-words-count paragraph)
+ acc)
+ (setq *ir-total-count* ;if paragraph is too short, discard
+ (- *ir-total-count* paragraph-total-count))
+ (maphash (lambda (wrd cnt) ;and remove word counts
+ (or (inc-hash-value wrd *ir-global-hash* (- cnt))
+ (setq *ir-words-count* (1- *ir-words-count*))))
+ paragraph)))
+
(defun ir-lm-extract-words (full-file-name &optional encoding)
"Process paragraphs of current buffer holding FULL-FILE-NAME.
Save ENCODING for further operations."
- (macrolet ((assess-paragraph
- ()
- `(if (>= paragraph-total-count *ir-lm-min-words*)
- (push (list paragraph-start paragraph-total-count
- paragraph-words-count paragraph)
- acc)
- (setq *ir-total-count* ;if paragraph is too short, discard
- (- *ir-total-count* paragraph-total-count))
- (maphash (lambda (wrd cnt) ;and remove word counts
- (or (inc-hash-value wrd *ir-global-hash*
- (- cnt))
- (setq *ir-words-count*
- (1- *ir-words-count*))))
- paragraph))))
- (let* ((prev (point-min))
- (paragraph-start prev)
- (paragraph-total-count 0)
- (paragraph-words-count 0)
- (paragraph (make-hash-table :test 'equal))
- (acc (list (current-time) encoding full-file-name)))
+ (let ((prev (point-min))
+ (acc (list (current-time) encoding full-file-name)))
+ (let ((paragraph-start prev)
+ (paragraph-total-count 0)
+ (paragraph-words-count 0)
+ (paragraph (make-hash-table :test 'equal)))
(goto-char prev)
(dowords word
(setq word (ir-process-word (downcase word)))
(let ((curr (line-beginning-position)))
(when (string-match "\n.*\n" ;detect just ended paragraph
(buffer-substring-no-properties
prev curr))
(assess-paragraph)
(setq paragraph (make-hash-table :test 'equal)
paragraph-total-count 0
paragraph-words-count 0
paragraph-start curr))
(when word
(setq paragraph-total-count
(1+ paragraph-total-count)
*ir-total-count* (1+ *ir-total-count*))
(when (= 1 (inc-hash-value word paragraph)) ;new paragraph word
(setq paragraph-words-count
(1+ paragraph-words-count)))
(when (= 1 (inc-hash-value word *ir-global-hash*)) ;new global word
(setq *ir-words-count* (1+ *ir-words-count*))))
(setq prev curr)))
(kill-buffer (current-buffer))
- (assess-paragraph)
- (when acc (push (nreverse acc) *ir-hashes*)))))
+ (assess-paragraph))
+ (when acc (push (nreverse acc) *ir-hashes*))))
(defun ir-remove-post (post &optional save-globals-p)
"Subtract from global words hash key-values corresponding in POST.
SAVE-GLOBALS-P determines whether global indexes shouldn't be touched."
(setq *ir-total-count* (- *ir-total-count*
(ir-paragraph-total-words post)))
(if save-globals-p
(maphash (lambda (key val)
(inc-hash-value key *ir-global-hash* (- val)))
(ir-paragraph-hash post))
(maphash (lambda (key val)
(or (inc-hash-value key *ir-global-hash* (- val))
(setq *ir-words-count* (1- *ir-words-count*))))
(ir-paragraph-hash post))))
(defun ir-remove-postings (file &optional save-globals-p)
"Clean all info for FILE in hashes.
SAVE-GLOBALS-P determines whether global indexes shouldn't be touched."
(let ((file-posts (ir-file-paragraphs
(find-fn (lambda (post)
(equal file (ir-file-name post)))
*ir-hashes*))))
(dolist (post file-posts)
(ir-remove-post post save-globals-p))
(setq *ir-hashes* (delete-fn *ir-hashes*
(lambda (file-post)
(equal file (ir-file-name
file-post)))))))
(defun ir-lm-process-paragraphs (file &optional encoding)
"Load FILE to temp buffer and process its words.
If ENCODING is nil, use default encoding when loading FILE."
(ir-remove-postings file)
(with-temp-buffer
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(ir-lm-extract-words file encoding)))
(defun print-posting (lst)
"Get printed representation for posting for paragraph LST."
(princ "\n" (current-buffer))
(prin1 (nconc (list (ir-file-name lst) (ir-file-encoding lst)
(ir-file-time lst))
(mapcar (lambda (sublst)
(nconc
(list (ir-paragraph-point sublst)
(ir-paragraph-total-words sublst)
(ir-paragraph-distinct-words sublst))
(hash-to-assoc (ir-paragraph-hash
sublst))))
(ir-file-paragraphs lst)))
(current-buffer)))
(defun ir-lm-write-index (file)
"Write current index info to FILE."
(interactive
(list (read-file-name "Index file: " nil ".irlm" nil ".irlm")))
(message "Writing...")
(with-temp-file file
(prin1 (nconc (list *ir-total-count* *ir-words-count*) ;firstly write the global hash
(hash-to-assoc *ir-global-hash*))
(current-buffer))
(mapc 'print-posting *ir-hashes*)) ;write all postings
(message "Index written."))
(defun ir-lm-index (dir &optional file-types encoding append-p)
"Recursivelly process directory DIR and index all files.
FILE-TYPES determines file name patterns for indexing.
If ENCODING is nil, use default \(utf-8\) encoding for files.
If APPEND-P is non-nil, merge to the current index."
(interactive
(list
(read-directory-name "Top directory: " nil default-directory t)
(read-string "File names to be indexed: " "*.txt" nil "*.txt")
(unless (y-or-n-p "Use default encoding? ")
(read-coding-system "Choose encoding: " 'cp1251))
(when *ir-global-hash*
(y-or-n-p "Add to existing configuration? "))))
(or *ir-global-hash*
(setq append-p nil))
(or *ir-word-cache*
(setq *ir-word-cache* (make-hash-table :test 'equal)))
(unless append-p
(ir-clear)
(setq *ir-global-hash* (make-hash-table :test 'equal)))
(ir-load-auxiliary)
(message "Indexing...")
(maprdir (lambda (file)
(ir-lm-process-paragraphs file encoding))
dir file-types)
(message "Files successfully indexed.")
(ir-refresh-view))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Load existing index
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-get-file-posting (post &optional inc-globals-p)
"Convert file saved POST info to actually used structures.
INC-GLOBALS-P determines whether global word counts should be adjusted."
(nconc (list (ir-file-name post) (ir-file-encoding post)
(ir-file-time post))
(mapcar (lambda (subpost)
(let ((total-words (ir-paragraph-total-words
subpost))
(index-words (ir-paragraph-distinct-words
subpost)))
(when inc-globals-p
(setq *ir-total-count*
(+ *ir-total-count* total-words)))
(list (ir-paragraph-point subpost)
total-words index-words
- (ir-assoc-to-hash (cdddr subpost)
+ (ir-assoc-to-hash (cdr (cddr subpost))
index-words nil
inc-globals-p))))
(ir-file-paragraphs post))))
(defun ir-lm-load-file-posting (post &optional inc-globals-p)
"Get file saved POST. If newer posting already exists, discard.
INC-GLOBALS-P determines whether global word counts should be adjusted."
(let* ((file-path (ir-file-name post))
(existing-file-time
(ir-file-time (find-fn (lambda (post)
(equal file-path
(ir-file-name post)))
*ir-hashes*))))
(if existing-file-time ;check if file is already in index
(if (file-exists-p file-path)
(when (time-less-p existing-file-time
(ir-file-time post)) ;if post is newer
(ir-remove-postings file-path (not inc-globals-p)) ;remove old posting from *ir-hashes*
(ir-lm-get-file-posting post inc-globals-p))
- ;discard posting and remove existing from *ir-hashes*
+ ;;discard posting and remove existing from *ir-hashes*
(ir-remove-postings file-path (not inc-globals-p)) ;housekeeping
nil)
(when (file-exists-p file-path) ;load only existing files
(ir-lm-get-file-posting post inc-globals-p)))))
(defun ir-lm-load-index-from-file (file)
"Load existing index from FILE."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(let ((dont-inc-globals-p (null *ir-global-hash*)))
(when dont-inc-globals-p ;need global hash from file only if current is cleared
(let ((global-hash (read-from-whole-string
(buffer-substring-no-properties
(line-beginning-position)
(line-end-position)))))
(setq *ir-total-count* (car global-hash)
*ir-words-count* (cadr global-hash))
(ir-assoc-to-hash (cddr global-hash) *ir-words-count* t)))
(let ((point-max (point-max)))
(while (and (= 0 (forward-line 1))
(< (point) point-max))
(let ((file-sexp (ir-lm-load-file-posting
(read-from-whole-string
(buffer-substring-no-properties
(line-beginning-position)
(line-end-position)))
(not dont-inc-globals-p))))
(when file-sexp (push file-sexp *ir-hashes*))))))
(kill-buffer (current-buffer))))
(defun ir-lm-load-index (file &optional append-p)
"Load existing index FILE.
If APPEND-P is non-nil, keep previous index loaded as well."
(interactive
(list (read-file-name "Index file: " nil ".irlm" nil ".irlm")
(when *ir-global-hash*
(y-or-n-p
"Add to existing configuration or overwrite? "))))
(when (file-exists-p file)
(or (and *ir-global-hash* append-p)
(ir-clear))
(ir-load-auxiliary)
(message "Loading...")
(ir-lm-load-index-from-file file)
(message "Index loaded.")
(ir-refresh-view)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Scoring
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-posting-score (hash base words &optional lambda)
"Get score from paragraph represented as HASH.
BASE is the total number of words in the paragraph.
WORDS is list of words in query.
LAMBDA is LM parameter between 0 and 1."
(or lambda
(setq lambda 0.5))
(let ((result
(apply '*
(mapcar (lambda (word)
(let ((global-count
(gethash word *ir-global-hash* 0)))
(if (> global-count 0)
(+ (* lambda
(/ (float (gethash word hash 0))
base))
(* (- 1 lambda)
(/ (float global-count)
*ir-total-count*)))
1)))
words))))
(if (= result 1) 0 result)))
(defun ir-lm-posting-min-score (words &optional lambda)
"Get minimum score possible for a paragraph.
WORDS is list of words in query.
LAMBDA is LM parameter between 0 and 1."
(or lambda (setq lambda 0.5))
(apply '* (mapcar (lambda (word)
(let ((global-count
(gethash word *ir-global-hash* 0)))
(if (> global-count 0)
(* (- 1 lambda)
(/ (float global-count)
*ir-total-count*))
1)))
words)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Search
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-insert-post (new best cnt)
"Insert NEW post based on score into BEST array with CNT elements."
(let ((new-val (aref new 0))
(place (1+ cnt)))
(when (> new-val 0)
(while (and (>= place 1)
(> new-val (aref (aref best (1- place)) 0)))
(setq place (1- place)))
(while (> cnt place)
(aset best cnt (aref best (1- cnt)))
(setq cnt (1- cnt)))
(when (>= cnt place) (aset best place new))))
best)
(defun ir-lm-get-best-scores (query cnt)
"For QUERY which is list of search terms find best CNT results.
Return vector of vectors with info for best paragraphs."
(let ((best (make-vector cnt [0 "" -1 nil nil]))
(min-score (ir-lm-posting-min-score query *ir-lm-lambda*)))
(dolist (file *ir-hashes*)
(let ((file-path (ir-file-name file)))
(when (file-exists-p file-path)
(dolist (post (ir-file-paragraphs file))
(let ((score
(ir-lm-posting-score (ir-paragraph-hash post)
(ir-paragraph-total-words
post)
query *ir-lm-lambda*)))
(when (> score min-score)
(setq best (ir-lm-insert-post
(vector score file-path
(ir-paragraph-point post)
(ir-file-encoding file))
best (1- cnt)))))))))
best))
(defun highlight-search (pos query)
"Highlight words from POS on to the end of paragraph corresponding to QUERY."
(catch 'out
(let ((prev pos))
(dowords word
(let ((curr (point)))
(when (string-match "\n.*\n" ;detect just ended paragraph
(buffer-substring-no-properties
prev curr))
(throw 'out nil))
(when (member (ir-process-word (downcase word))
query)
(delete-char (- (length word)))
(insert
(propertize word
'face '((:foreground "green")))))
(setq prev curr))))))
(defun ir-lm-jump-to-result (file pos &optional encoding query)
"Open FILE and go to particular position POS.
If ENCODING is nil, use default encoding when loading result file.
QUERY is list of current search terms."
(interactive
(let ((point (point)))
(list (get-text-property point 'file)
(get-text-property point 'point)
(get-text-property point 'encoding)
(get-text-property point 'query))))
(let ((jump-buffer (generate-new-buffer
(car (nreverse (split-string file "/"))))))
(set-buffer jump-buffer)
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(goto-char pos)
(when query ;highlight search terms
(highlight-search pos query)
(goto-char pos))
(switch-to-buffer jump-buffer)))
(defun ir-lm-insert-results (best query)
"Insert in current buffer BEST results.
QUERY is list of current search terms."
(catch 'end-results
(mapc (lambda (post)
(let ((score (aref post 0))
(file (aref post 1))
(marker (aref post 2))
(encoding (aref post 3))
(preview ""))
(if (<= score 0)
(throw 'end-results nil) ;premature end of meaningful results
(insert "\n")
(insert (make-link (car (nreverse
(split-string file "/")))
'ir-lm-jump-to-result file marker t
encoding query))
(insert (format " [%f]" (* score 1000000)))
(when (number-or-marker-p marker)
(with-temp-buffer
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(goto-char marker)
(setq preview
(buffer-substring-no-properties marker
(line-end-position)))
(kill-buffer (current-buffer)))
(insert "\n")
(insert (make-link preview 'ir-lm-jump-to-result
file marker nil encoding
query))))))
best)))
(defun ir-lm-search (query-str &optional cnt)
"For QUERY-STR find best CNT results."
(interactive
(list (read-string "Search for: " nil t) nil))
(or cnt (setq cnt *ir-max-results*))
(if (null *ir-global-hash*)
(message "No index loaded.")
(or *ir-word-cache*
(setq *ir-word-cache* (make-hash-table :test 'equal)))
(let ((results (generate-new-buffer "*Search results*")))
(set-buffer results)
(local-set-key (kbd "<M-down>")
(lambda () (interactive) (forward-line 2)))
(local-set-key (kbd "<M-up>")
(lambda () (interactive) (forward-line -2)))
(local-set-key (kbd "q") (lambda () (interactive) (kill-buffer)))
(switch-to-buffer results)
(insert "Results for: " query-str)
(let ((query (delete nil
(mapcar (lambda (word)
(ir-process-word
(downcase word)))
(split-string query-str)))))
(ir-lm-insert-results (ir-lm-get-best-scores query cnt)
query))
(setq buffer-read-only t)
(goto-char (point-min))
(forward-line))
(ignore-errors (kill-buffer "*Quail Completions*"))
(message (concat "Results for: " query-str))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Visualisation
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-set-keys ()
"Set key bindings in the IR buffer."
(local-set-key (kbd "i") 'ir-lm-index)
(local-set-key (kbd "l") 'ir-lm-load-index)
(local-set-key (kbd "w") 'ir-lm-write-index)
(local-set-key (kbd "f") 'ir-lm-search)
(local-set-key (kbd "c") 'ir-clear)
(local-set-key (kbd "m") 'ir-lm-change-max-results)
(local-set-key (kbd "p") 'ir-lm-change-min-words)
(local-set-key (kbd "b") 'ir-lm-change-lambda)
(local-set-key (kbd "s") 'ir-change-stem-level)
(local-set-key (kbd "q") (lambda () (interactive) (kill-buffer)))
(local-set-key (kbd "r") (lambda () (interactive) (ir-refresh-view))))
(defun ir-lm ()
"Create buffer with information and shortcuts."
(interactive)
(let ((ir-buffer (get-buffer-create "*Information retrieval*")))
(set-buffer ir-buffer)
(switch-to-buffer ir-buffer)
(insert
(propertize "Information Retrieval - Basic Mixed Language Model"
'face '((:foreground "green") (:underline t)))
"\n\nOptions:\n"
(make-link "i -> index new directory"
'ir-lm-index)
"\n"
(make-link "l -> load existing index from file"
'ir-lm-load-index)
"\n"
(make-link "w -> write current index\(es\) to file"
'ir-lm-write-index)
"\n"
(make-link "f -> search in current loaded index\(es\)"
'ir-lm-search)
"\n"
(make-link "c -> clear current index\(es\)"
'ir-clear)
"\n"
(make-link "m -> change maximum search results"
'ir-lm-change-max-results)
"\n"
(make-link "p -> change minimum number of words in paragraph"
'ir-lm-change-min-words)
"\n"
(make-link "b -> change lambda"
'ir-lm-change-lambda)
"\n"
(make-link "s -> change stemming level"
'ir-change-stem-level)
"\n"
(make-link "q -> quit \(without clearing\)"
(lambda () (interactive) (kill-buffer)))
"\n\n"
"maximum results = " (format "%d\n" *ir-max-results*)
"minimum number of words in paragraph = "
(format "%d\n" *ir-lm-min-words*)
"lambda = " (format "%f\n" *ir-lm-lambda*)
"stemming level = " (format "%d\n" *ir-stem-level*)
"total words in texts = " (format "%d\n" *ir-total-count*)
"words in index = " (format "%d\n" *ir-words-count*)
"Currently indexed files [total words]:\n")
(ir-lm-set-keys)
(ir-list-index)
(setq buffer-read-only t)
(set-buffer-modified-p nil)
(goto-char (point-min))
(forward-line 3)))
(provide 'ir-lm)
;;; ir-lm.el ends here
|
m00natic/ir-lm | e6a87e11b9fc8062ec48db01bb90909026844371 | minor fixes (and undoing the previous 2 commits) | diff --git a/ir-lm.el b/ir-lm.el
index 5726f5b..9543101 100644
--- a/ir-lm.el
+++ b/ir-lm.el
@@ -1,991 +1,981 @@
;;; ir-lm.el --- Basic Mixed Language Model for Information Retrieval
;by Andrey Kotlarski [email protected]
;;; Commentary:
;;; History:
;; 5.VIII.2009 - Version 1.8
; Abstracting away file-postings
; structure
;; 31.VII.2009 - Version 1.7
; Generating word processing function
; on the fly, thus optimizing
; depending on whether stop words or
; stemmer are loaded
;; 18.VII.2009 - Version 1.6
; highlighting of search words
; minor bugfixes
;; 15.VII.2009 - Version 1.5
; bulgarian stemmer added
; stop-word and stemmer files
; are now stored in separate directories
; which are recursively processed
; added stemming parameter
; many corrections in merging
;; 14.VII.2009 - Version 1.4
; correctly merge postings and info
; on load or index (no duplicates,
; no loading of older than index files)
; added globs for filtering file types
;; 13.VII.2009 - Version 1.3
; remembering encoding for individual files
; prune non-existing files on load
;; 12.VII.2009 - Version 1.2
; new command `ir-lm' giving a unified
; interface of files and commands
; command to change lambda
; full cleaning of data
; minor bugfixes
;; 10.VII.2009 - Version 1.1
; added minumim possible score for query
; so that irrelevant results are discarded
; a bit of code refactoring and cleaning
;; 09.VII.2009 - Version 1.0
;;; Code:
(defconst *ir-dir*
(if (or (eq system-type 'windows-nt)
(eq system-type 'ms-dos))
"C:/ir/"
"~/.ir/")
"Directory for auxiliary files.")
;; *ir-hashes* structure is ((file-path encoding time (point-in-file total-words-in-paragraph
;; distinct-words-in-paragraph hash-of-word-counts) ...) ...)
(defvar *ir-hashes* nil "List of postings grouped in files.")
(defvar *ir-global-hash* nil "Global hash table of words and their count.")
(defvar *ir-total-count* 0 "Count of all words in index.")
(defvar *ir-words-count* 0 "Count of all distinct words in index.")
(defvar *ir-word-cache* nil "Cache of raw word -> transformation.")
(defvar *ir-stop* nil "Hash table of stop words.")
(defvar *ir-stem* nil "Hash table of stemmer.")
(defvar *ir-lm-lambda* 0.5 "Parameter in the mixed language model.")
(defvar *ir-max-results* 30 "Maximum number of search results.")
(defvar *ir-stem-level* 1 "Stemming level.")
(defvar *ir-lm-min-words* 20 "Minimal number of words in paragraph.")
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; *ir-hashes* selectors
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-(defmacro make-getter (getter-name arg)
- "Create a macro for writing getters with name MAKE- GETTER-NAME and argument ARG."
- `(defmacro ,(intern (concat "make-"
- (symbol-name getter-name)))
- (name &rest body)
- ,(concat "Create a selector for `*ir-hashes*' with name GET- NAME and BODY.
+(defmacro make-getter (getter-name)
+ "Create a macro for writing getters with name MAKE- GETTER-NAME -GETTER and argument GETTER-NAME."
+ (let ((getter-name-str (symbol-name getter-name)))
+ `(defmacro ,(intern (concat "make-" getter-name-str "-getter"))
+ (name &rest body)
+ ,(concat "Create a selector for `*ir-hashes*' named "
+ getter-name-str "- NAME and BODY.
This selector has one argument with structure as `*ir-hashes*'
-named `" (symbol-name arg)"'.
+named `" getter-name-str "'.
Do not use symbol `bla-arg' in the body.")
- (let ((bla-arg ',arg))
- `(defun ,(intern (concat "get-"
- (symbol-name name)))
- (,bla-arg)
- ,@body))))
+ (let ((bla-arg ',getter-name))
+ `(defun ,(intern (concat ,getter-name-str "-"
+ (symbol-name name)))
+ (,bla-arg)
+ ,@body)))))
;; getters for file structures
-(make-getter ir-file-getter ir-file)
+(make-getter ir-file)
-(make-ir-file-getter ir-file-name (car ir-file))
-(make-ir-file-getter ir-file-encoding (cadr ir-file))
-(make-ir-file-getter ir-file-time (caddr ir-file))
-(make-ir-file-getter ir-file-paragraphs (cdddr ir-file))
+(make-ir-file-getter name (car ir-file))
+(make-ir-file-getter encoding (cadr ir-file))
+(make-ir-file-getter time (caddr ir-file))
+(make-ir-file-getter paragraphs (cdddr ir-file))
;; getters for paragraph structures
-(make-getter ir-paragraph-getter ir-paragraph)
+(make-getter ir-paragraph)
-(make-ir-paragraph-getter ir-paragraph-point (car ir-paragraph))
-(make-ir-paragraph-getter ir-paragraph-total-words (cadr ir-paragraph))
-(make-ir-paragraph-getter ir-paragraph-distinct-words
- (caddr ir-paragraph))
-(make-ir-paragraph-getter ir-paragraph-hash (cadddr ir-paragraph))
+(make-ir-paragraph-getter point (car ir-paragraph))
+(make-ir-paragraph-getter total-words (cadr ir-paragraph))
+(make-ir-paragraph-getter distinct-words (caddr ir-paragraph))
+(make-ir-paragraph-getter hash (cadddr ir-paragraph))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; visualisation and set-er commands
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun make-link (text cmd &optional file point underline-p encoding query)
"Return a TEXT propertized as a link that invokes CMD when clicked.
FILE is to be opened and cursor moved to position POINT.
UNDERLINE-P determines wether text should be underlined.
If ENCODING is nil, use default encoding when loading result file.
QUERY is list of search terms."
(let ((map (make-sparse-keymap)))
(define-key map [mouse-1] cmd)
(define-key map (kbd "RET") cmd)
(propertize text
'keymap map
'face (when underline-p
'((:foreground "green") (:underline t)))
'mouse-face 'highlight
'rear-nonsticky t
'read-only t
'file file
'point point
'encoding encoding
'query query)))
(defun ir-file-words (paragraphs)
"Get total count of words for file by summing count in its PARAGRAPHS."
(apply '+ (mapcar (lambda (sexp)
- (cadr sexp))
+ (ir-paragraph-total-words sexp))
paragraphs)))
(defun ir-list-index ()
"List all files currently in index."
(dolist (file *ir-hashes*)
- (let ((file-path (get-ir-file-name file)))
+ (let ((file-path (ir-file-name file)))
(when (file-exists-p file-path)
(insert "\n" (make-link file-path 'ir-lm-jump-to-result
file-path 1 nil
- (get-ir-file-encoding file))
- (format " [%d]"
- (ir-file-words (get-ir-file-paragraphs
- file))))))))
+ (ir-file-encoding file))
+ (format " [%d]" (ir-file-words (ir-file-paragraphs
+ file))))))))
(defun ir-refresh-view ()
"Refresh file names in current index."
(ignore-errors
(with-current-buffer "*Information retrieval*"
(goto-char (point-min))
(forward-line 14)
(setq inhibit-read-only t)
(let ((start (point)))
(forward-line 5)
(delete-region start (line-end-position)))
(insert
"maximum results = " (format "%d\n" *ir-max-results*)
"minimum number of words in paragraph = "
(format "%d\n" *ir-lm-min-words*)
"lambda = " (format "%f\n" *ir-lm-lambda*)
"stemming level = " (format "%d\n" *ir-stem-level*)
"total words in texts = " (format "%d\n" *ir-total-count*)
"words in index = " (format "%d" *ir-words-count*))
(forward-line 2)
(delete-region (point) (point-max))
(ir-list-index)
(setq inhibit-read-only nil)
(set-buffer-modified-p nil)
(goto-char (point-min))
(forward-line 3))))
(defun ir-lm-change-lambda (new)
"Set NEW value of the `lambda' parameter."
(interactive
(list (read-number "New value for lambda (0 < lambda < 1) = ")))
(if (or (<= new 0) (>= new 1))
(message "Incorrect value for lambda.")
(setq *ir-lm-lambda* new)
(ir-refresh-view)))
(defun ir-change-stem-level (new)
"Set NEW value of the stemming parameter."
(interactive
(list (read-number "New level for stemming (> 0) = ")))
(if (< new 1)
(message "Incorrect value for stemming.")
(setq *ir-stem-level* new)
(ir-refresh-view)
(ir-load-auxiliary t)))
(defun ir-lm-change-max-results (new)
"Set NEW value for maximum number of search results."
(interactive
(list (read-number "Maximum number of search results = ")))
(setq *ir-max-results* new)
(ir-refresh-view))
(defun ir-lm-change-min-words (new)
"Set NEW minimum number of words for paragraph."
(interactive
(list (read-number "Minumun number of words in paragraph = ")))
(setq *ir-lm-min-words* new)
(ir-refresh-view))
(defun ir-clear (&optional all)
"Clear global hashes and reset global variables.
If ALL is non-nil - ask to clear words' cache as well."
(interactive
(list t))
(setq *ir-hashes* nil
+ *ir-global-hash* nil
*ir-total-count* 0
- *ir-words-count* 0
- *ir-global-hash* nil)
+ *ir-words-count* 0)
(when all
(and (or *ir-word-cache* *ir-stem* *ir-stop*)
(y-or-n-p "Clear auxiliary caches as well? ")
(setq *ir-stop* nil
*ir-stem* nil
*ir-word-cache* nil))
(message "Index cleared.")
(ir-refresh-view))
(garbage-collect))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; utilities
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun find-fn (fn lst)
"Return first item that satisfies FN in LST. Nil if no such."
(catch 'out
(dolist (item lst)
(when (funcall fn item)
(throw 'out item)))))
(defun delete-fn (lst fn)
"Destructively delete first element of LST for which FN is non-nil."
(if (funcall fn (car lst))
(cdr lst)
(let ((prev lst)
(curr (cdr lst)))
(catch 'out
(while curr
(if (not (funcall fn (car curr)))
(setq prev curr
curr (cdr curr))
(setcdr prev (cdr curr))
(throw 'out nil))))
lst)))
(defun get-next-word ()
- "Get next word (including hyphens and carrige return) after position."
+ "Get next word \(including hyphens and carrige return\) after position."
(when (forward-word)
(let ((word (current-word t t)))
(while (equal (char-to-string (following-char)) "-")
(when (forward-word)
- (setq word (concat word
- (if (equal (char-to-string
- (following-char)) "\n")
- ""
- "-")
+ (setq word (concat word (if (equal (char-to-string
+ (following-char)) "\n")
+ ""
+ "-")
(current-word t t)))))
word)))
(defmacro dowords (vars &rest body)
"Bind VARS to consecutive words and execute BODY."
(if (listp vars)
`(let ,(mapcar (lambda (var)
`(,var (get-next-word)))
vars)
(while ,(car vars)
,@body
(setq ,@(apply 'nconc
(mapcar (lambda (var)
`(,var (get-next-word)))
vars)))))
`(let ((,vars (get-next-word)))
(while ,vars
,@body
(setq ,vars (get-next-word))))))
(defun replace-regex-str (word regex str)
"In WORD replace REGEX with STR."
(mapconcat 'identity (split-string word regex) str))
(defun glob-to-regex (glob)
"Turn a GLOB to a reg-exp."
(replace-regex-str
(replace-regex-str (replace-regex-str glob "\\." "\\.")
"?" ".")
"\\*" ".*"))
(defun filter-name (file-name patterns)
"Check whether FILE-NAME is fully matched by any of the PATTERNS."
(when patterns
(let ((match (string-match (car patterns) file-name)))
- (if (and match
- (= 0 match))
- t
- (filter-name file-name (cdr patterns))))))
+ (or (and match (= 0 match))
+ (filter-name file-name (cdr patterns))))))
(defun maprdir (fn dir &optional file-types subdir-p)
"Apply FN over all files in DIR and its subdirectories.
FILE-TYPES determines file name patterns for calling FN upon.
Default is all files. If SUBDIR-P is nil,
we are in the top level directory, otherwize we are lower.
-This is used when recursing, when calling, should be nil."
+This is used when recursing, when calling, must be nil."
(or subdir-p ;executed only once, in top directory
(setq file-types (mapcar 'glob-to-regex
- (split-string (or file-types
- "*") nil t))))
+ (split-string (or file-types "*")
+ nil t))))
(dolist (file (directory-files dir))
(let ((file-full (concat dir file)))
(or (equal "." file)
(equal ".." file)
(if (file-directory-p file-full)
(maprdir fn (concat file-full "/") file-types t)
(when (filter-name file file-types)
(funcall fn file-full)))))))
(defun inc-hash-value (key h-table &optional value)
"Increment value for KEY in H-TABLE with VALUE.
If VALUE is nil, use 1.
If KEY doesn't exist, set initial value to VALUE.
If end value of KEY is <=0, remove key.
Return new val if key is added/changed, nil if key is removed."
- (let* ((num (gethash key h-table 0))
- (val (or value 1))
- (end-val (+ num val)))
+ (let ((end-val (+ (or value 1) (gethash key h-table 0))))
(if (> end-val 0)
(puthash key end-val h-table)
(remhash key h-table))))
(defun hash-to-assoc (h-table)
"Turn a H-TABLE to assoc-list."
(let ((a-list nil))
(maphash (lambda (key val)
(push (cons key val) a-list))
h-table)
a-list))
(defun ir-pair-to-global-hash (key value)
- "Add KEY VALUE to *ir-global-hash* and adjust global count of words."
+ "Add KEY VALUE to `*ir-global-hash*' and adjust global count of words."
(or (gethash key *ir-global-hash* nil)
(setq *ir-words-count* (1+ *ir-words-count*)))
(inc-hash-value key *ir-global-hash* value))
-(defun ir-assoc-to-hash (a-list &optional size use-global-hash-p parent-hash-p)
+(defun ir-assoc-to-hash (a-list &optional size use-global-hash-p
+ parent-hash-p)
"Turn A-LIST to a hash-table with size SIZE.
-If USE-GLOBAL-HASH-P, add to *ir-global-hash*, return nil.
+If USE-GLOBAL-HASH-P, add to `*ir-global-hash*', return nil.
If PARENT-HASH-P, create new hash and add both to it
-and *ir-global-hash*, adjusting global counts,
+and `*ir-global-hash*', adjusting global counts,
return the newly created one."
(if (not use-global-hash-p)
(let ((h-table (make-hash-table :test 'equal :size size)))
(if parent-hash-p
(dolist (cell a-list h-table)
(let ((key (car cell))
(val (cdr cell)))
(ir-pair-to-global-hash key val)
(inc-hash-value key h-table val)))
(dolist (cell a-list h-table)
(inc-hash-value (car cell) h-table (cdr cell)))))
(or *ir-global-hash* ;else use global, return nil
(setq *ir-global-hash* (make-hash-table
:test 'equal :size size)))
(if parent-hash-p
(dolist (cell a-list)
(ir-pair-to-global-hash (car cell) (cdr cell)))
(dolist (cell a-list)
(inc-hash-value (car cell) *ir-global-hash* (cdr cell))))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Word processing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun bg-stem (word)
"Return stemmed version of WORD."
(if (string-match "\\(.*?[аÑоÑеийÑÑ]\\)\\(.*\\)" word)
(let ((prefix (match-string-no-properties 1 word))
(suffix (match-string-no-properties 2 word)))
- (if (and prefix
- suffix)
+ (if (and prefix suffix)
(catch 'out
(dotimes (i (length suffix) word)
(let ((stem-suf (gethash (substring suffix i)
*ir-stem* nil)))
(when stem-suf
- (throw 'out (concat prefix
- (substring suffix 0 i)
+ (throw 'out (concat prefix (substring suffix 0 i)
stem-suf))))))
word))
word))
(defun ir-process-new-word (word)
- "Return processed WORD."
- (if (and *ir-stop*
- (gethash word *ir-stop* nil))
- "" ;stop words are marked as ""
- (if *ir-stem*
- (bg-stem word)
- word)))
+ "Return processed WORD. This is default implementation.
+After loading stop-words and stemmers will be fset."
+ word)
(defmacro ir-build-word-processor (&optional stop-p stem-p)
"Build optimized word processing function.
STOP-P determines whether stop words should be checked.
STEM-P determines whether stemming should be applied."
`(lambda (word)
,(if stop-p
`(if (gethash word *ir-stop* nil)
- ""
+ "" ;stop words are marked as ""
,(if stem-p
'(bg-stem word)
'word))
(if stem-p
'(bg-stem word)
'word))))
(defun ir-get-word-processor (stop-p stem-p)
"Return optimized word processing function.
STOP-P determines whether stop words should be checked.
STEM-P determines whether stemming should be applied."
(cond
((and stop-p stem-p) (ir-build-word-processor t t))
(stop-p (ir-build-word-processor t))
(stem-p (ir-build-word-processor nil t))
(t (ir-build-word-processor))))
(defun ir-process-word (word)
"Return hashed processed value for WORD.
If no such is found, process and cache."
(let ((hash-check (gethash word *ir-word-cache* nil)))
(or hash-check
- (setq hash-check
- (puthash word (ir-process-new-word word) *ir-word-cache*)))
- (or (equal "" hash-check) hash-check))) ;if not a stop word
+ (setq hash-check (puthash word (ir-process-new-word word)
+ *ir-word-cache*)))
+ (unless (equal "" hash-check) hash-check))) ;if not a stop word
(defun ir-load-stop-words (file)
"Load stop-words from FILE to the global hash *ir-stop*."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
- (dowords word
- (puthash word "1" *ir-stop*))))
+ (dowords word (puthash word "1" *ir-stop*))))
-;; (defun ir-load-stemmer (file) ;freezes compilation
+;; (defun ir-load-stemmer-bg (file) ;freezes compilation
;; "Load stem entries from FILE to the global hash *ir-stem*."
;; (with-temp-buffer
;; (insert-file-contents file)
;; (goto-char (point-min))
;; (dowords (w1 w2 w3) ;does not byte compile!
;; (when w3
;; (setq w3 (car (read-from-string w3)))
;; (and (numberp w3)
;; (>= w3 *ir-stem-level*)
;; (puthash w1 w2 *ir-stem*))))))
-(defun ir-load-stemmer (file)
+(defun ir-load-stemmer-bg (file)
"Load stem entries from FILE to the global hash *ir-stem*."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(dowords w1
(let ((w2 (get-next-word))
(w3 (get-next-word)))
(when w3
(setq w3 (car (read-from-string w3)))
(and (numberp w3)
(>= w3 *ir-stem-level*)
(puthash w1 w2 *ir-stem*)))))))
(defun ir-load-auxiliary (&optional force)
"Load auxiliary files to hashes if not already done.
When FORCE is non-nil, re-fill."
(message "Loading auxiliary hashes...")
(let ((stop-dir (concat *ir-dir* "stop-words/")))
(when (and (file-exists-p stop-dir)
- (or force
- (null *ir-stop*)))
- (setq *ir-stop* (make-hash-table :test 'equal :size 300))
+ (or force (null *ir-stop*)))
+ (setq *ir-stop* (make-hash-table :test 'equal :size 1000))
(maprdir 'ir-load-stop-words stop-dir)))
- (let ((stem-dir (concat *ir-dir* "stem-rules/")))
- (if (file-exists-p stem-dir)
- (when (and (file-exists-p stem-dir)
- (or force
- (null *ir-stop*)))
- (setq *ir-stem* (make-hash-table :test 'equal :size 130514))
- (maprdir 'ir-load-stemmer stem-dir))))
- (fset 'ir-process-new-word
- (ir-get-word-processor *ir-stop* *ir-stem*))
+ (let ((stem-dir-bg (concat *ir-dir* "stem-rules/bg/")))
+ (when (and (file-exists-p stem-dir-bg)
+ (or force (null *ir-stem*)))
+ (setq *ir-stem* (make-hash-table :test 'equal :size 130514))
+ (maprdir 'ir-load-stemmer-bg stem-dir-bg)))
+ (fset 'ir-process-new-word (ir-get-word-processor *ir-stop*
+ *ir-stem*))
(message "Auxiliary hashes loaded."))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; File processing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-extract-words (full-file-name &optional encoding)
"Process paragraphs of current buffer holding FULL-FILE-NAME.
Save ENCODING for further operations."
(macrolet ((assess-paragraph
()
`(if (>= paragraph-total-count *ir-lm-min-words*)
(push (list paragraph-start paragraph-total-count
paragraph-words-count paragraph)
acc)
(setq *ir-total-count* ;if paragraph is too short, discard
(- *ir-total-count* paragraph-total-count))
(maphash (lambda (wrd cnt) ;and remove word counts
(or (inc-hash-value wrd *ir-global-hash*
(- cnt))
(setq *ir-words-count*
(1- *ir-words-count*))))
paragraph))))
(let* ((prev (point-min))
(paragraph-start prev)
(paragraph-total-count 0)
(paragraph-words-count 0)
(paragraph (make-hash-table :test 'equal))
(acc (list (current-time) encoding full-file-name)))
(goto-char prev)
(dowords word
(setq word (ir-process-word (downcase word)))
(let ((curr (line-beginning-position)))
(when (string-match "\n.*\n" ;detect just ended paragraph
(buffer-substring-no-properties
prev curr))
(assess-paragraph)
(setq paragraph (make-hash-table :test 'equal)
paragraph-total-count 0
paragraph-words-count 0
paragraph-start curr))
(when word
- (setq paragraph-total-count (1+ paragraph-total-count)
+ (setq paragraph-total-count
+ (1+ paragraph-total-count)
*ir-total-count* (1+ *ir-total-count*))
(when (= 1 (inc-hash-value word paragraph)) ;new paragraph word
- (setq paragraph-words-count (1+ paragraph-words-count)))
+ (setq paragraph-words-count
+ (1+ paragraph-words-count)))
(when (= 1 (inc-hash-value word *ir-global-hash*)) ;new global word
(setq *ir-words-count* (1+ *ir-words-count*))))
(setq prev curr)))
(kill-buffer (current-buffer))
(assess-paragraph)
(when acc (push (nreverse acc) *ir-hashes*)))))
(defun ir-remove-post (post &optional save-globals-p)
"Subtract from global words hash key-values corresponding in POST.
SAVE-GLOBALS-P determines whether global indexes shouldn't be touched."
- (setq *ir-total-count* (- *ir-total-count* (cadr post)))
- (maphash (lambda (key val)
- (or (inc-hash-value key *ir-global-hash* (- val))
- save-globals-p
- (setq *ir-words-count* (1- *ir-words-count*))))
- (get-ir-paragraph-hash post)))
+ (setq *ir-total-count* (- *ir-total-count*
+ (ir-paragraph-total-words post)))
+ (if save-globals-p
+ (maphash (lambda (key val)
+ (inc-hash-value key *ir-global-hash* (- val)))
+ (ir-paragraph-hash post))
+ (maphash (lambda (key val)
+ (or (inc-hash-value key *ir-global-hash* (- val))
+ (setq *ir-words-count* (1- *ir-words-count*))))
+ (ir-paragraph-hash post))))
(defun ir-remove-postings (file &optional save-globals-p)
"Clean all info for FILE in hashes.
SAVE-GLOBALS-P determines whether global indexes shouldn't be touched."
- (let ((file-posts (get-ir-file-paragraphs
+ (let ((file-posts (ir-file-paragraphs
(find-fn (lambda (post)
- (equal file (get-ir-file-name post)))
+ (equal file (ir-file-name post)))
*ir-hashes*))))
(dolist (post file-posts)
(ir-remove-post post save-globals-p))
(setq *ir-hashes* (delete-fn *ir-hashes*
(lambda (file-post)
- (equal file (get-ir-file-name
+ (equal file (ir-file-name
file-post)))))))
(defun ir-lm-process-paragraphs (file &optional encoding)
"Load FILE to temp buffer and process its words.
If ENCODING is nil, use default encoding when loading FILE."
(ir-remove-postings file)
(with-temp-buffer
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(ir-lm-extract-words file encoding)))
(defun print-posting (lst)
"Get printed representation for posting for paragraph LST."
- (princ "\n")
- (prin1 (nconc (list (get-ir-file-name lst)
- (get-ir-file-encoding lst)
- (get-ir-file-time lst))
+ (princ "\n" (current-buffer))
+ (prin1 (nconc (list (ir-file-name lst) (ir-file-encoding lst)
+ (ir-file-time lst))
(mapcar (lambda (sublst)
(nconc
- (list (get-ir-paragraph-point sublst)
- (get-ir-paragraph-total-words sublst)
- (get-ir-paragraph-distinct-words sublst))
- (hash-to-assoc (get-ir-paragraph-hash
+ (list (ir-paragraph-point sublst)
+ (ir-paragraph-total-words sublst)
+ (ir-paragraph-distinct-words sublst))
+ (hash-to-assoc (ir-paragraph-hash
sublst))))
- (get-ir-file-paragraphs lst)))))
+ (ir-file-paragraphs lst)))
+ (current-buffer)))
(defun ir-lm-write-index (file)
"Write current index info to FILE."
(interactive
- (list (read-file-name "Index file: " nil
- ".irlm" nil ".irlm")))
+ (list (read-file-name "Index file: " nil ".irlm" nil ".irlm")))
(message "Writing...")
(with-temp-file file
(prin1 (nconc (list *ir-total-count* *ir-words-count*) ;firstly write the global hash
- (hash-to-assoc *ir-global-hash*)))
+ (hash-to-assoc *ir-global-hash*))
+ (current-buffer))
(mapc 'print-posting *ir-hashes*)) ;write all postings
(message "Index written."))
(defun ir-lm-index (dir &optional file-types encoding append-p)
"Recursivelly process directory DIR and index all files.
FILE-TYPES determines file name patterns for indexing.
If ENCODING is nil, use default \(utf-8\) encoding for files.
If APPEND-P is non-nil, merge to the current index."
(interactive
(list
(read-directory-name "Top directory: " nil default-directory t)
(read-string "File names to be indexed: " "*.txt" nil "*.txt")
(unless (y-or-n-p "Use default encoding? ")
(read-coding-system "Choose encoding: " 'cp1251))
(when *ir-global-hash*
(y-or-n-p "Add to existing configuration? "))))
(or *ir-global-hash*
(setq append-p nil))
(or *ir-word-cache*
(setq *ir-word-cache* (make-hash-table :test 'equal)))
(unless append-p
(ir-clear)
(setq *ir-global-hash* (make-hash-table :test 'equal)))
(ir-load-auxiliary)
(message "Indexing...")
- (maprdir (lambda (file) (ir-lm-process-paragraphs file encoding))
+ (maprdir (lambda (file)
+ (ir-lm-process-paragraphs file encoding))
dir file-types)
(message "Files successfully indexed.")
(ir-refresh-view))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Load existing index
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-get-file-posting (post &optional inc-globals-p)
"Convert file saved POST info to actually used structures.
INC-GLOBALS-P determines whether global word counts should be adjusted."
- (nconc (list (get-ir-file-name post)
- (get-ir-file-encoding post)
- (get-ir-file-time post))
+ (nconc (list (ir-file-name post) (ir-file-encoding post)
+ (ir-file-time post))
(mapcar (lambda (subpost)
- (let ((total-words (get-ir-paragraph-total-words
+ (let ((total-words (ir-paragraph-total-words
subpost))
- (index-words (get-ir-paragraph-distinct-words
+ (index-words (ir-paragraph-distinct-words
subpost)))
(when inc-globals-p
(setq *ir-total-count*
(+ *ir-total-count* total-words)))
- (list (get-ir-paragraph-point subpost)
+ (list (ir-paragraph-point subpost)
total-words index-words
- (ir-assoc-to-hash (cdddr subpost) index-words
- nil inc-globals-p))))
- (get-ir-file-paragraphs post))))
+ (ir-assoc-to-hash (cdddr subpost)
+ index-words nil
+ inc-globals-p))))
+ (ir-file-paragraphs post))))
(defun ir-lm-load-file-posting (post &optional inc-globals-p)
"Get file saved POST. If newer posting already exists, discard.
INC-GLOBALS-P determines whether global word counts should be adjusted."
- (let* ((file-path (get-ir-file-name post))
+ (let* ((file-path (ir-file-name post))
(existing-file-time
- (get-ir-file-time (find-fn (lambda (post)
- (equal file-path
- (get-ir-file-name post)))
- *ir-hashes*))))
- (if existing-file-time ;check if file is already in index
+ (ir-file-time (find-fn (lambda (post)
+ (equal file-path
+ (ir-file-name post)))
+ *ir-hashes*))))
+ (if existing-file-time ;check if file is already in index
(if (file-exists-p file-path)
(when (time-less-p existing-file-time
- (get-ir-file-time post)) ;if post is newer
+ (ir-file-time post)) ;if post is newer
(ir-remove-postings file-path (not inc-globals-p)) ;remove old posting from *ir-hashes*
(ir-lm-get-file-posting post inc-globals-p))
;discard posting and remove existing from *ir-hashes*
(ir-remove-postings file-path (not inc-globals-p)) ;housekeeping
nil)
(when (file-exists-p file-path) ;load only existing files
(ir-lm-get-file-posting post inc-globals-p)))))
(defun ir-lm-load-index-from-file (file)
"Load existing index from FILE."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
- (let ((not-inc-globals-p (null *ir-global-hash*)))
- (when not-inc-globals-p ;need global hash from file only if current is cleared
+ (let ((dont-inc-globals-p (null *ir-global-hash*)))
+ (when dont-inc-globals-p ;need global hash from file only if current is cleared
(let ((global-hash (read-from-whole-string
(buffer-substring-no-properties
(line-beginning-position)
(line-end-position)))))
(setq *ir-total-count* (car global-hash)
*ir-words-count* (cadr global-hash))
(ir-assoc-to-hash (cddr global-hash) *ir-words-count* t)))
(let ((point-max (point-max)))
(while (and (= 0 (forward-line 1))
(< (point) point-max))
(let ((file-sexp (ir-lm-load-file-posting
(read-from-whole-string
(buffer-substring-no-properties
(line-beginning-position)
(line-end-position)))
- (not not-inc-globals-p))))
+ (not dont-inc-globals-p))))
(when file-sexp (push file-sexp *ir-hashes*))))))
(kill-buffer (current-buffer))))
(defun ir-lm-load-index (file &optional append-p)
"Load existing index FILE.
If APPEND-P is non-nil, keep previous index loaded as well."
(interactive
- (list (read-file-name "Index file: " nil
- ".irlm" nil ".irlm")
+ (list (read-file-name "Index file: " nil ".irlm" nil ".irlm")
(when *ir-global-hash*
(y-or-n-p
"Add to existing configuration or overwrite? "))))
(when (file-exists-p file)
- (or (and *ir-global-hash*
- append-p)
+ (or (and *ir-global-hash* append-p)
(ir-clear))
(ir-load-auxiliary)
(message "Loading...")
(ir-lm-load-index-from-file file)
(message "Index loaded.")
(ir-refresh-view)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Scoring
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-posting-score (hash base words &optional lambda)
"Get score from paragraph represented as HASH.
BASE is the total number of words in the paragraph.
WORDS is list of words in query.
LAMBDA is LM parameter between 0 and 1."
(or lambda
(setq lambda 0.5))
(let ((result
(apply '*
(mapcar (lambda (word)
(let ((global-count
(gethash word *ir-global-hash* 0)))
(if (> global-count 0)
(+ (* lambda
(/ (float (gethash word hash 0))
base))
(* (- 1 lambda)
(/ (float global-count)
*ir-total-count*)))
1)))
words))))
(if (= result 1) 0 result)))
(defun ir-lm-posting-min-score (words &optional lambda)
"Get minimum score possible for a paragraph.
WORDS is list of words in query.
LAMBDA is LM parameter between 0 and 1."
- (or lambda
- (setq lambda 0.5))
+ (or lambda (setq lambda 0.5))
(apply '* (mapcar (lambda (word)
(let ((global-count
(gethash word *ir-global-hash* 0)))
(if (> global-count 0)
(* (- 1 lambda)
(/ (float global-count)
*ir-total-count*))
1)))
words)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Search
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-insert-post (new best cnt)
"Insert NEW post based on score into BEST array with CNT elements."
(let ((new-val (aref new 0))
(place (1+ cnt)))
(when (> new-val 0)
(while (and (>= place 1)
(> new-val (aref (aref best (1- place)) 0)))
(setq place (1- place)))
(while (> cnt place)
(aset best cnt (aref best (1- cnt)))
(setq cnt (1- cnt)))
- (when (>= cnt place)
- (aset best place new))))
+ (when (>= cnt place) (aset best place new))))
best)
(defun ir-lm-get-best-scores (query cnt)
"For QUERY which is list of search terms find best CNT results.
Return vector of vectors with info for best paragraphs."
(let ((best (make-vector cnt [0 "" -1 nil nil]))
(min-score (ir-lm-posting-min-score query *ir-lm-lambda*)))
(dolist (file *ir-hashes*)
- (let ((file-path (get-ir-file-name file)))
+ (let ((file-path (ir-file-name file)))
(when (file-exists-p file-path)
- (dolist (post (get-ir-file-paragraphs file))
+ (dolist (post (ir-file-paragraphs file))
(let ((score
- (ir-lm-posting-score (get-ir-paragraph-hash post)
- (get-ir-paragraph-total-words
+ (ir-lm-posting-score (ir-paragraph-hash post)
+ (ir-paragraph-total-words
post)
- query
- *ir-lm-lambda*)))
+ query *ir-lm-lambda*)))
(when (> score min-score)
- (setq best
- (ir-lm-insert-post
- (vector score file-path
- (get-ir-paragraph-point post)
- (get-ir-file-encoding file))
- best (1- cnt)))))))))
+ (setq best (ir-lm-insert-post
+ (vector score file-path
+ (ir-paragraph-point post)
+ (ir-file-encoding file))
+ best (1- cnt)))))))))
best))
(defun highlight-search (pos query)
"Highlight words from POS on to the end of paragraph corresponding to QUERY."
(catch 'out
(let ((prev pos))
(dowords word
(let ((curr (point)))
(when (string-match "\n.*\n" ;detect just ended paragraph
(buffer-substring-no-properties
prev curr))
(throw 'out nil))
(when (member (ir-process-word (downcase word))
query)
(delete-char (- (length word)))
(insert
(propertize word
'face '((:foreground "green")))))
(setq prev curr))))))
(defun ir-lm-jump-to-result (file pos &optional encoding query)
"Open FILE and go to particular position POS.
If ENCODING is nil, use default encoding when loading result file.
QUERY is list of current search terms."
(interactive
(let ((point (point)))
(list (get-text-property point 'file)
(get-text-property point 'point)
(get-text-property point 'encoding)
(get-text-property point 'query))))
- (let ((jump-buffer (generate-new-buffer (car (nreverse
- (split-string file "/"))))))
+ (let ((jump-buffer (generate-new-buffer
+ (car (nreverse (split-string file "/"))))))
(set-buffer jump-buffer)
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(goto-char pos)
(when query ;highlight search terms
(highlight-search pos query)
(goto-char pos))
(switch-to-buffer jump-buffer)))
(defun ir-lm-insert-results (best query)
"Insert in current buffer BEST results.
QUERY is list of current search terms."
- (mapc (lambda (post)
- (let ((score (aref post 0))
- (file (aref post 1))
- (marker (aref post 2))
- (encoding (aref post 3))
- (preview ""))
- (if (<= score 0)
- (throw 'end-results nil) ;premature end of meaningful results
- (insert "\n")
- (insert (make-link (car (nreverse (split-string file "/")))
- 'ir-lm-jump-to-result file marker
- t encoding query))
- (insert (format " [%f]" (* score 1000000)))
- (when (number-or-marker-p marker)
- (with-temp-buffer
- (let ((coding-system-for-read encoding))
- (insert-file-contents file))
- (goto-char marker)
- (setq preview
- (buffer-substring-no-properties marker
- (line-end-position)))
- (kill-buffer (current-buffer)))
+ (catch 'end-results
+ (mapc (lambda (post)
+ (let ((score (aref post 0))
+ (file (aref post 1))
+ (marker (aref post 2))
+ (encoding (aref post 3))
+ (preview ""))
+ (if (<= score 0)
+ (throw 'end-results nil) ;premature end of meaningful results
(insert "\n")
- (insert (make-link preview 'ir-lm-jump-to-result
- file marker nil encoding query))))))
- best))
+ (insert (make-link (car (nreverse
+ (split-string file "/")))
+ 'ir-lm-jump-to-result file marker t
+ encoding query))
+ (insert (format " [%f]" (* score 1000000)))
+ (when (number-or-marker-p marker)
+ (with-temp-buffer
+ (let ((coding-system-for-read encoding))
+ (insert-file-contents file))
+ (goto-char marker)
+ (setq preview
+ (buffer-substring-no-properties marker
+ (line-end-position)))
+ (kill-buffer (current-buffer)))
+ (insert "\n")
+ (insert (make-link preview 'ir-lm-jump-to-result
+ file marker nil encoding
+ query))))))
+ best)))
(defun ir-lm-search (query-str &optional cnt)
"For QUERY-STR find best CNT results."
(interactive
(list (read-string "Search for: " nil t) nil))
(or cnt (setq cnt *ir-max-results*))
(if (null *ir-global-hash*)
(message "No index loaded.")
(or *ir-word-cache*
(setq *ir-word-cache* (make-hash-table :test 'equal)))
(let ((results (generate-new-buffer "*Search results*")))
(set-buffer results)
(local-set-key (kbd "<M-down>")
(lambda () (interactive) (forward-line 2)))
(local-set-key (kbd "<M-up>")
(lambda () (interactive) (forward-line -2)))
(local-set-key (kbd "q") (lambda () (interactive) (kill-buffer)))
(switch-to-buffer results)
(insert "Results for: " query-str)
- (catch 'end-results
- (let ((query (mapcar (lambda (word)
- (ir-process-word (downcase word)))
- (split-string query-str))))
- (ir-lm-insert-results (ir-lm-get-best-scores query cnt) query)))
+ (let ((query (delete nil
+ (mapcar (lambda (word)
+ (ir-process-word
+ (downcase word)))
+ (split-string query-str)))))
+ (ir-lm-insert-results (ir-lm-get-best-scores query cnt)
+ query))
(setq buffer-read-only t)
(goto-char (point-min))
(forward-line))
- (ignore-errors
- (kill-buffer "*Quail Completions*"))
+ (ignore-errors (kill-buffer "*Quail Completions*"))
(message (concat "Results for: " query-str))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Visualisation
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-set-keys ()
"Set key bindings in the IR buffer."
(local-set-key (kbd "i") 'ir-lm-index)
(local-set-key (kbd "l") 'ir-lm-load-index)
(local-set-key (kbd "w") 'ir-lm-write-index)
(local-set-key (kbd "f") 'ir-lm-search)
(local-set-key (kbd "c") 'ir-clear)
(local-set-key (kbd "m") 'ir-lm-change-max-results)
(local-set-key (kbd "p") 'ir-lm-change-min-words)
(local-set-key (kbd "b") 'ir-lm-change-lambda)
(local-set-key (kbd "s") 'ir-change-stem-level)
(local-set-key (kbd "q") (lambda () (interactive) (kill-buffer)))
(local-set-key (kbd "r") (lambda () (interactive) (ir-refresh-view))))
(defun ir-lm ()
"Create buffer with information and shortcuts."
(interactive)
(let ((ir-buffer (get-buffer-create "*Information retrieval*")))
(set-buffer ir-buffer)
(switch-to-buffer ir-buffer)
(insert
(propertize "Information Retrieval - Basic Mixed Language Model"
'face '((:foreground "green") (:underline t)))
"\n\nOptions:\n"
(make-link "i -> index new directory"
'ir-lm-index)
"\n"
(make-link "l -> load existing index from file"
'ir-lm-load-index)
"\n"
(make-link "w -> write current index\(es\) to file"
'ir-lm-write-index)
"\n"
(make-link "f -> search in current loaded index\(es\)"
'ir-lm-search)
"\n"
(make-link "c -> clear current index\(es\)"
'ir-clear)
"\n"
(make-link "m -> change maximum search results"
'ir-lm-change-max-results)
"\n"
(make-link "p -> change minimum number of words in paragraph"
'ir-lm-change-min-words)
"\n"
(make-link "b -> change lambda"
'ir-lm-change-lambda)
"\n"
(make-link "s -> change stemming level"
'ir-change-stem-level)
"\n"
(make-link "q -> quit \(without clearing\)"
(lambda () (interactive) (kill-buffer)))
"\n\n"
"maximum results = " (format "%d\n" *ir-max-results*)
"minimum number of words in paragraph = "
(format "%d\n" *ir-lm-min-words*)
"lambda = " (format "%f\n" *ir-lm-lambda*)
"stemming level = " (format "%d\n" *ir-stem-level*)
"total words in texts = " (format "%d\n" *ir-total-count*)
"words in index = " (format "%d\n" *ir-words-count*)
"Currently indexed files [total words]:\n")
(ir-lm-set-keys)
(ir-list-index)
(setq buffer-read-only t)
(set-buffer-modified-p nil)
(goto-char (point-min))
(forward-line 3)))
(provide 'ir-lm)
;;; ir-lm.el ends here
|
m00natic/ir-lm | f55cc8c8f51517fde576cb0ce650f3279ed6723f | same as previous for print-posting | diff --git a/ir-lm.el b/ir-lm.el
index 9d334ed..5726f5b 100644
--- a/ir-lm.el
+++ b/ir-lm.el
@@ -72,921 +72,920 @@
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defmacro make-getter (getter-name arg)
"Create a macro for writing getters with name MAKE- GETTER-NAME and argument ARG."
`(defmacro ,(intern (concat "make-"
(symbol-name getter-name)))
(name &rest body)
,(concat "Create a selector for `*ir-hashes*' with name GET- NAME and BODY.
This selector has one argument with structure as `*ir-hashes*'
named `" (symbol-name arg)"'.
Do not use symbol `bla-arg' in the body.")
(let ((bla-arg ',arg))
`(defun ,(intern (concat "get-"
(symbol-name name)))
(,bla-arg)
,@body))))
;; getters for file structures
(make-getter ir-file-getter ir-file)
(make-ir-file-getter ir-file-name (car ir-file))
(make-ir-file-getter ir-file-encoding (cadr ir-file))
(make-ir-file-getter ir-file-time (caddr ir-file))
(make-ir-file-getter ir-file-paragraphs (cdddr ir-file))
;; getters for paragraph structures
(make-getter ir-paragraph-getter ir-paragraph)
(make-ir-paragraph-getter ir-paragraph-point (car ir-paragraph))
(make-ir-paragraph-getter ir-paragraph-total-words (cadr ir-paragraph))
(make-ir-paragraph-getter ir-paragraph-distinct-words
(caddr ir-paragraph))
(make-ir-paragraph-getter ir-paragraph-hash (cadddr ir-paragraph))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; visualisation and set-er commands
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun make-link (text cmd &optional file point underline-p encoding query)
"Return a TEXT propertized as a link that invokes CMD when clicked.
FILE is to be opened and cursor moved to position POINT.
UNDERLINE-P determines wether text should be underlined.
If ENCODING is nil, use default encoding when loading result file.
QUERY is list of search terms."
(let ((map (make-sparse-keymap)))
(define-key map [mouse-1] cmd)
(define-key map (kbd "RET") cmd)
(propertize text
'keymap map
'face (when underline-p
'((:foreground "green") (:underline t)))
'mouse-face 'highlight
'rear-nonsticky t
'read-only t
'file file
'point point
'encoding encoding
'query query)))
(defun ir-file-words (paragraphs)
"Get total count of words for file by summing count in its PARAGRAPHS."
(apply '+ (mapcar (lambda (sexp)
(cadr sexp))
paragraphs)))
(defun ir-list-index ()
"List all files currently in index."
(dolist (file *ir-hashes*)
(let ((file-path (get-ir-file-name file)))
(when (file-exists-p file-path)
(insert "\n" (make-link file-path 'ir-lm-jump-to-result
file-path 1 nil
(get-ir-file-encoding file))
(format " [%d]"
(ir-file-words (get-ir-file-paragraphs
file))))))))
(defun ir-refresh-view ()
"Refresh file names in current index."
(ignore-errors
(with-current-buffer "*Information retrieval*"
(goto-char (point-min))
(forward-line 14)
(setq inhibit-read-only t)
(let ((start (point)))
(forward-line 5)
(delete-region start (line-end-position)))
(insert
"maximum results = " (format "%d\n" *ir-max-results*)
"minimum number of words in paragraph = "
(format "%d\n" *ir-lm-min-words*)
"lambda = " (format "%f\n" *ir-lm-lambda*)
"stemming level = " (format "%d\n" *ir-stem-level*)
"total words in texts = " (format "%d\n" *ir-total-count*)
"words in index = " (format "%d" *ir-words-count*))
(forward-line 2)
(delete-region (point) (point-max))
(ir-list-index)
(setq inhibit-read-only nil)
(set-buffer-modified-p nil)
(goto-char (point-min))
(forward-line 3))))
(defun ir-lm-change-lambda (new)
"Set NEW value of the `lambda' parameter."
(interactive
(list (read-number "New value for lambda (0 < lambda < 1) = ")))
(if (or (<= new 0) (>= new 1))
(message "Incorrect value for lambda.")
(setq *ir-lm-lambda* new)
(ir-refresh-view)))
(defun ir-change-stem-level (new)
"Set NEW value of the stemming parameter."
(interactive
(list (read-number "New level for stemming (> 0) = ")))
(if (< new 1)
(message "Incorrect value for stemming.")
(setq *ir-stem-level* new)
(ir-refresh-view)
(ir-load-auxiliary t)))
(defun ir-lm-change-max-results (new)
"Set NEW value for maximum number of search results."
(interactive
(list (read-number "Maximum number of search results = ")))
(setq *ir-max-results* new)
(ir-refresh-view))
(defun ir-lm-change-min-words (new)
"Set NEW minimum number of words for paragraph."
(interactive
(list (read-number "Minumun number of words in paragraph = ")))
(setq *ir-lm-min-words* new)
(ir-refresh-view))
(defun ir-clear (&optional all)
"Clear global hashes and reset global variables.
If ALL is non-nil - ask to clear words' cache as well."
(interactive
(list t))
(setq *ir-hashes* nil
*ir-total-count* 0
*ir-words-count* 0
*ir-global-hash* nil)
(when all
(and (or *ir-word-cache* *ir-stem* *ir-stop*)
(y-or-n-p "Clear auxiliary caches as well? ")
(setq *ir-stop* nil
*ir-stem* nil
*ir-word-cache* nil))
(message "Index cleared.")
(ir-refresh-view))
(garbage-collect))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; utilities
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun find-fn (fn lst)
"Return first item that satisfies FN in LST. Nil if no such."
(catch 'out
(dolist (item lst)
(when (funcall fn item)
(throw 'out item)))))
(defun delete-fn (lst fn)
"Destructively delete first element of LST for which FN is non-nil."
(if (funcall fn (car lst))
(cdr lst)
(let ((prev lst)
(curr (cdr lst)))
(catch 'out
(while curr
(if (not (funcall fn (car curr)))
(setq prev curr
curr (cdr curr))
(setcdr prev (cdr curr))
(throw 'out nil))))
lst)))
(defun get-next-word ()
"Get next word (including hyphens and carrige return) after position."
(when (forward-word)
(let ((word (current-word t t)))
(while (equal (char-to-string (following-char)) "-")
(when (forward-word)
(setq word (concat word
(if (equal (char-to-string
(following-char)) "\n")
""
"-")
(current-word t t)))))
word)))
(defmacro dowords (vars &rest body)
"Bind VARS to consecutive words and execute BODY."
(if (listp vars)
`(let ,(mapcar (lambda (var)
`(,var (get-next-word)))
vars)
(while ,(car vars)
,@body
(setq ,@(apply 'nconc
(mapcar (lambda (var)
`(,var (get-next-word)))
vars)))))
`(let ((,vars (get-next-word)))
(while ,vars
,@body
(setq ,vars (get-next-word))))))
(defun replace-regex-str (word regex str)
"In WORD replace REGEX with STR."
(mapconcat 'identity (split-string word regex) str))
(defun glob-to-regex (glob)
"Turn a GLOB to a reg-exp."
(replace-regex-str
(replace-regex-str (replace-regex-str glob "\\." "\\.")
"?" ".")
"\\*" ".*"))
(defun filter-name (file-name patterns)
"Check whether FILE-NAME is fully matched by any of the PATTERNS."
(when patterns
(let ((match (string-match (car patterns) file-name)))
(if (and match
(= 0 match))
t
(filter-name file-name (cdr patterns))))))
(defun maprdir (fn dir &optional file-types subdir-p)
"Apply FN over all files in DIR and its subdirectories.
FILE-TYPES determines file name patterns for calling FN upon.
Default is all files. If SUBDIR-P is nil,
we are in the top level directory, otherwize we are lower.
This is used when recursing, when calling, should be nil."
(or subdir-p ;executed only once, in top directory
(setq file-types (mapcar 'glob-to-regex
(split-string (or file-types
"*") nil t))))
(dolist (file (directory-files dir))
(let ((file-full (concat dir file)))
(or (equal "." file)
(equal ".." file)
(if (file-directory-p file-full)
(maprdir fn (concat file-full "/") file-types t)
(when (filter-name file file-types)
(funcall fn file-full)))))))
(defun inc-hash-value (key h-table &optional value)
"Increment value for KEY in H-TABLE with VALUE.
If VALUE is nil, use 1.
If KEY doesn't exist, set initial value to VALUE.
If end value of KEY is <=0, remove key.
Return new val if key is added/changed, nil if key is removed."
(let* ((num (gethash key h-table 0))
(val (or value 1))
(end-val (+ num val)))
(if (> end-val 0)
(puthash key end-val h-table)
(remhash key h-table))))
(defun hash-to-assoc (h-table)
"Turn a H-TABLE to assoc-list."
(let ((a-list nil))
(maphash (lambda (key val)
(push (cons key val) a-list))
h-table)
a-list))
(defun ir-pair-to-global-hash (key value)
"Add KEY VALUE to *ir-global-hash* and adjust global count of words."
(or (gethash key *ir-global-hash* nil)
(setq *ir-words-count* (1+ *ir-words-count*)))
(inc-hash-value key *ir-global-hash* value))
(defun ir-assoc-to-hash (a-list &optional size use-global-hash-p parent-hash-p)
"Turn A-LIST to a hash-table with size SIZE.
If USE-GLOBAL-HASH-P, add to *ir-global-hash*, return nil.
If PARENT-HASH-P, create new hash and add both to it
and *ir-global-hash*, adjusting global counts,
return the newly created one."
(if (not use-global-hash-p)
(let ((h-table (make-hash-table :test 'equal :size size)))
(if parent-hash-p
(dolist (cell a-list h-table)
(let ((key (car cell))
(val (cdr cell)))
(ir-pair-to-global-hash key val)
(inc-hash-value key h-table val)))
(dolist (cell a-list h-table)
(inc-hash-value (car cell) h-table (cdr cell)))))
(or *ir-global-hash* ;else use global, return nil
(setq *ir-global-hash* (make-hash-table
:test 'equal :size size)))
(if parent-hash-p
(dolist (cell a-list)
(ir-pair-to-global-hash (car cell) (cdr cell)))
(dolist (cell a-list)
(inc-hash-value (car cell) *ir-global-hash* (cdr cell))))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Word processing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun bg-stem (word)
"Return stemmed version of WORD."
(if (string-match "\\(.*?[аÑоÑеийÑÑ]\\)\\(.*\\)" word)
(let ((prefix (match-string-no-properties 1 word))
(suffix (match-string-no-properties 2 word)))
(if (and prefix
suffix)
(catch 'out
(dotimes (i (length suffix) word)
(let ((stem-suf (gethash (substring suffix i)
*ir-stem* nil)))
(when stem-suf
(throw 'out (concat prefix
(substring suffix 0 i)
stem-suf))))))
word))
word))
(defun ir-process-new-word (word)
"Return processed WORD."
(if (and *ir-stop*
(gethash word *ir-stop* nil))
"" ;stop words are marked as ""
(if *ir-stem*
(bg-stem word)
word)))
(defmacro ir-build-word-processor (&optional stop-p stem-p)
"Build optimized word processing function.
STOP-P determines whether stop words should be checked.
STEM-P determines whether stemming should be applied."
`(lambda (word)
,(if stop-p
`(if (gethash word *ir-stop* nil)
""
,(if stem-p
'(bg-stem word)
'word))
(if stem-p
'(bg-stem word)
'word))))
(defun ir-get-word-processor (stop-p stem-p)
"Return optimized word processing function.
STOP-P determines whether stop words should be checked.
STEM-P determines whether stemming should be applied."
(cond
((and stop-p stem-p) (ir-build-word-processor t t))
(stop-p (ir-build-word-processor t))
(stem-p (ir-build-word-processor nil t))
(t (ir-build-word-processor))))
(defun ir-process-word (word)
"Return hashed processed value for WORD.
If no such is found, process and cache."
(let ((hash-check (gethash word *ir-word-cache* nil)))
(or hash-check
(setq hash-check
(puthash word (ir-process-new-word word) *ir-word-cache*)))
(or (equal "" hash-check) hash-check))) ;if not a stop word
(defun ir-load-stop-words (file)
"Load stop-words from FILE to the global hash *ir-stop*."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(dowords word
(puthash word "1" *ir-stop*))))
;; (defun ir-load-stemmer (file) ;freezes compilation
;; "Load stem entries from FILE to the global hash *ir-stem*."
;; (with-temp-buffer
;; (insert-file-contents file)
;; (goto-char (point-min))
;; (dowords (w1 w2 w3) ;does not byte compile!
;; (when w3
;; (setq w3 (car (read-from-string w3)))
;; (and (numberp w3)
;; (>= w3 *ir-stem-level*)
;; (puthash w1 w2 *ir-stem*))))))
(defun ir-load-stemmer (file)
"Load stem entries from FILE to the global hash *ir-stem*."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(dowords w1
(let ((w2 (get-next-word))
(w3 (get-next-word)))
(when w3
(setq w3 (car (read-from-string w3)))
(and (numberp w3)
(>= w3 *ir-stem-level*)
(puthash w1 w2 *ir-stem*)))))))
(defun ir-load-auxiliary (&optional force)
"Load auxiliary files to hashes if not already done.
When FORCE is non-nil, re-fill."
(message "Loading auxiliary hashes...")
(let ((stop-dir (concat *ir-dir* "stop-words/")))
(when (and (file-exists-p stop-dir)
(or force
(null *ir-stop*)))
(setq *ir-stop* (make-hash-table :test 'equal :size 300))
(maprdir 'ir-load-stop-words stop-dir)))
(let ((stem-dir (concat *ir-dir* "stem-rules/")))
(if (file-exists-p stem-dir)
(when (and (file-exists-p stem-dir)
(or force
(null *ir-stop*)))
(setq *ir-stem* (make-hash-table :test 'equal :size 130514))
(maprdir 'ir-load-stemmer stem-dir))))
(fset 'ir-process-new-word
(ir-get-word-processor *ir-stop* *ir-stem*))
(message "Auxiliary hashes loaded."))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; File processing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-extract-words (full-file-name &optional encoding)
"Process paragraphs of current buffer holding FULL-FILE-NAME.
Save ENCODING for further operations."
(macrolet ((assess-paragraph
()
`(if (>= paragraph-total-count *ir-lm-min-words*)
(push (list paragraph-start paragraph-total-count
paragraph-words-count paragraph)
acc)
(setq *ir-total-count* ;if paragraph is too short, discard
(- *ir-total-count* paragraph-total-count))
(maphash (lambda (wrd cnt) ;and remove word counts
(or (inc-hash-value wrd *ir-global-hash*
(- cnt))
(setq *ir-words-count*
(1- *ir-words-count*))))
paragraph))))
(let* ((prev (point-min))
(paragraph-start prev)
(paragraph-total-count 0)
(paragraph-words-count 0)
(paragraph (make-hash-table :test 'equal))
(acc (list (current-time) encoding full-file-name)))
(goto-char prev)
(dowords word
(setq word (ir-process-word (downcase word)))
(let ((curr (line-beginning-position)))
(when (string-match "\n.*\n" ;detect just ended paragraph
(buffer-substring-no-properties
prev curr))
(assess-paragraph)
(setq paragraph (make-hash-table :test 'equal)
paragraph-total-count 0
paragraph-words-count 0
paragraph-start curr))
(when word
(setq paragraph-total-count (1+ paragraph-total-count)
*ir-total-count* (1+ *ir-total-count*))
(when (= 1 (inc-hash-value word paragraph)) ;new paragraph word
(setq paragraph-words-count (1+ paragraph-words-count)))
(when (= 1 (inc-hash-value word *ir-global-hash*)) ;new global word
(setq *ir-words-count* (1+ *ir-words-count*))))
(setq prev curr)))
(kill-buffer (current-buffer))
(assess-paragraph)
(when acc (push (nreverse acc) *ir-hashes*)))))
(defun ir-remove-post (post &optional save-globals-p)
"Subtract from global words hash key-values corresponding in POST.
SAVE-GLOBALS-P determines whether global indexes shouldn't be touched."
(setq *ir-total-count* (- *ir-total-count* (cadr post)))
(maphash (lambda (key val)
(or (inc-hash-value key *ir-global-hash* (- val))
save-globals-p
(setq *ir-words-count* (1- *ir-words-count*))))
(get-ir-paragraph-hash post)))
(defun ir-remove-postings (file &optional save-globals-p)
"Clean all info for FILE in hashes.
SAVE-GLOBALS-P determines whether global indexes shouldn't be touched."
(let ((file-posts (get-ir-file-paragraphs
(find-fn (lambda (post)
(equal file (get-ir-file-name post)))
*ir-hashes*))))
(dolist (post file-posts)
(ir-remove-post post save-globals-p))
(setq *ir-hashes* (delete-fn *ir-hashes*
(lambda (file-post)
(equal file (get-ir-file-name
file-post)))))))
(defun ir-lm-process-paragraphs (file &optional encoding)
"Load FILE to temp buffer and process its words.
If ENCODING is nil, use default encoding when loading FILE."
(ir-remove-postings file)
(with-temp-buffer
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(ir-lm-extract-words file encoding)))
(defun print-posting (lst)
"Get printed representation for posting for paragraph LST."
- (princ "\n" (current-buffer))
+ (princ "\n")
(prin1 (nconc (list (get-ir-file-name lst)
(get-ir-file-encoding lst)
(get-ir-file-time lst))
(mapcar (lambda (sublst)
(nconc
(list (get-ir-paragraph-point sublst)
(get-ir-paragraph-total-words sublst)
(get-ir-paragraph-distinct-words sublst))
(hash-to-assoc (get-ir-paragraph-hash
sublst))))
- (get-ir-file-paragraphs lst)))
- (current-buffer)))
+ (get-ir-file-paragraphs lst)))))
(defun ir-lm-write-index (file)
"Write current index info to FILE."
(interactive
(list (read-file-name "Index file: " nil
".irlm" nil ".irlm")))
(message "Writing...")
(with-temp-file file
(prin1 (nconc (list *ir-total-count* *ir-words-count*) ;firstly write the global hash
(hash-to-assoc *ir-global-hash*)))
(mapc 'print-posting *ir-hashes*)) ;write all postings
(message "Index written."))
(defun ir-lm-index (dir &optional file-types encoding append-p)
"Recursivelly process directory DIR and index all files.
FILE-TYPES determines file name patterns for indexing.
If ENCODING is nil, use default \(utf-8\) encoding for files.
If APPEND-P is non-nil, merge to the current index."
(interactive
(list
(read-directory-name "Top directory: " nil default-directory t)
(read-string "File names to be indexed: " "*.txt" nil "*.txt")
(unless (y-or-n-p "Use default encoding? ")
(read-coding-system "Choose encoding: " 'cp1251))
(when *ir-global-hash*
(y-or-n-p "Add to existing configuration? "))))
(or *ir-global-hash*
(setq append-p nil))
(or *ir-word-cache*
(setq *ir-word-cache* (make-hash-table :test 'equal)))
(unless append-p
(ir-clear)
(setq *ir-global-hash* (make-hash-table :test 'equal)))
(ir-load-auxiliary)
(message "Indexing...")
(maprdir (lambda (file) (ir-lm-process-paragraphs file encoding))
dir file-types)
(message "Files successfully indexed.")
(ir-refresh-view))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Load existing index
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-get-file-posting (post &optional inc-globals-p)
"Convert file saved POST info to actually used structures.
INC-GLOBALS-P determines whether global word counts should be adjusted."
(nconc (list (get-ir-file-name post)
(get-ir-file-encoding post)
(get-ir-file-time post))
(mapcar (lambda (subpost)
(let ((total-words (get-ir-paragraph-total-words
subpost))
(index-words (get-ir-paragraph-distinct-words
subpost)))
(when inc-globals-p
(setq *ir-total-count*
(+ *ir-total-count* total-words)))
(list (get-ir-paragraph-point subpost)
total-words index-words
(ir-assoc-to-hash (cdddr subpost) index-words
nil inc-globals-p))))
(get-ir-file-paragraphs post))))
(defun ir-lm-load-file-posting (post &optional inc-globals-p)
"Get file saved POST. If newer posting already exists, discard.
INC-GLOBALS-P determines whether global word counts should be adjusted."
(let* ((file-path (get-ir-file-name post))
(existing-file-time
(get-ir-file-time (find-fn (lambda (post)
(equal file-path
(get-ir-file-name post)))
*ir-hashes*))))
(if existing-file-time ;check if file is already in index
(if (file-exists-p file-path)
(when (time-less-p existing-file-time
(get-ir-file-time post)) ;if post is newer
(ir-remove-postings file-path (not inc-globals-p)) ;remove old posting from *ir-hashes*
(ir-lm-get-file-posting post inc-globals-p))
;discard posting and remove existing from *ir-hashes*
(ir-remove-postings file-path (not inc-globals-p)) ;housekeeping
nil)
(when (file-exists-p file-path) ;load only existing files
(ir-lm-get-file-posting post inc-globals-p)))))
(defun ir-lm-load-index-from-file (file)
"Load existing index from FILE."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(let ((not-inc-globals-p (null *ir-global-hash*)))
(when not-inc-globals-p ;need global hash from file only if current is cleared
(let ((global-hash (read-from-whole-string
(buffer-substring-no-properties
(line-beginning-position)
(line-end-position)))))
(setq *ir-total-count* (car global-hash)
*ir-words-count* (cadr global-hash))
(ir-assoc-to-hash (cddr global-hash) *ir-words-count* t)))
(let ((point-max (point-max)))
(while (and (= 0 (forward-line 1))
(< (point) point-max))
(let ((file-sexp (ir-lm-load-file-posting
(read-from-whole-string
(buffer-substring-no-properties
(line-beginning-position)
(line-end-position)))
(not not-inc-globals-p))))
(when file-sexp (push file-sexp *ir-hashes*))))))
(kill-buffer (current-buffer))))
(defun ir-lm-load-index (file &optional append-p)
"Load existing index FILE.
If APPEND-P is non-nil, keep previous index loaded as well."
(interactive
(list (read-file-name "Index file: " nil
".irlm" nil ".irlm")
(when *ir-global-hash*
(y-or-n-p
"Add to existing configuration or overwrite? "))))
(when (file-exists-p file)
(or (and *ir-global-hash*
append-p)
(ir-clear))
(ir-load-auxiliary)
(message "Loading...")
(ir-lm-load-index-from-file file)
(message "Index loaded.")
(ir-refresh-view)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Scoring
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-posting-score (hash base words &optional lambda)
"Get score from paragraph represented as HASH.
BASE is the total number of words in the paragraph.
WORDS is list of words in query.
LAMBDA is LM parameter between 0 and 1."
(or lambda
(setq lambda 0.5))
(let ((result
(apply '*
(mapcar (lambda (word)
(let ((global-count
(gethash word *ir-global-hash* 0)))
(if (> global-count 0)
(+ (* lambda
(/ (float (gethash word hash 0))
base))
(* (- 1 lambda)
(/ (float global-count)
*ir-total-count*)))
1)))
words))))
(if (= result 1) 0 result)))
(defun ir-lm-posting-min-score (words &optional lambda)
"Get minimum score possible for a paragraph.
WORDS is list of words in query.
LAMBDA is LM parameter between 0 and 1."
(or lambda
(setq lambda 0.5))
(apply '* (mapcar (lambda (word)
(let ((global-count
(gethash word *ir-global-hash* 0)))
(if (> global-count 0)
(* (- 1 lambda)
(/ (float global-count)
*ir-total-count*))
1)))
words)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Search
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-insert-post (new best cnt)
"Insert NEW post based on score into BEST array with CNT elements."
(let ((new-val (aref new 0))
(place (1+ cnt)))
(when (> new-val 0)
(while (and (>= place 1)
(> new-val (aref (aref best (1- place)) 0)))
(setq place (1- place)))
(while (> cnt place)
(aset best cnt (aref best (1- cnt)))
(setq cnt (1- cnt)))
(when (>= cnt place)
(aset best place new))))
best)
(defun ir-lm-get-best-scores (query cnt)
"For QUERY which is list of search terms find best CNT results.
Return vector of vectors with info for best paragraphs."
(let ((best (make-vector cnt [0 "" -1 nil nil]))
(min-score (ir-lm-posting-min-score query *ir-lm-lambda*)))
(dolist (file *ir-hashes*)
(let ((file-path (get-ir-file-name file)))
(when (file-exists-p file-path)
(dolist (post (get-ir-file-paragraphs file))
(let ((score
(ir-lm-posting-score (get-ir-paragraph-hash post)
(get-ir-paragraph-total-words
post)
query
*ir-lm-lambda*)))
(when (> score min-score)
(setq best
(ir-lm-insert-post
(vector score file-path
(get-ir-paragraph-point post)
(get-ir-file-encoding file))
best (1- cnt)))))))))
best))
(defun highlight-search (pos query)
"Highlight words from POS on to the end of paragraph corresponding to QUERY."
(catch 'out
(let ((prev pos))
(dowords word
(let ((curr (point)))
(when (string-match "\n.*\n" ;detect just ended paragraph
(buffer-substring-no-properties
prev curr))
(throw 'out nil))
(when (member (ir-process-word (downcase word))
query)
(delete-char (- (length word)))
(insert
(propertize word
'face '((:foreground "green")))))
(setq prev curr))))))
(defun ir-lm-jump-to-result (file pos &optional encoding query)
"Open FILE and go to particular position POS.
If ENCODING is nil, use default encoding when loading result file.
QUERY is list of current search terms."
(interactive
(let ((point (point)))
(list (get-text-property point 'file)
(get-text-property point 'point)
(get-text-property point 'encoding)
(get-text-property point 'query))))
(let ((jump-buffer (generate-new-buffer (car (nreverse
(split-string file "/"))))))
(set-buffer jump-buffer)
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(goto-char pos)
(when query ;highlight search terms
(highlight-search pos query)
(goto-char pos))
(switch-to-buffer jump-buffer)))
(defun ir-lm-insert-results (best query)
"Insert in current buffer BEST results.
QUERY is list of current search terms."
(mapc (lambda (post)
(let ((score (aref post 0))
(file (aref post 1))
(marker (aref post 2))
(encoding (aref post 3))
(preview ""))
(if (<= score 0)
(throw 'end-results nil) ;premature end of meaningful results
(insert "\n")
(insert (make-link (car (nreverse (split-string file "/")))
'ir-lm-jump-to-result file marker
t encoding query))
(insert (format " [%f]" (* score 1000000)))
(when (number-or-marker-p marker)
(with-temp-buffer
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(goto-char marker)
(setq preview
(buffer-substring-no-properties marker
(line-end-position)))
(kill-buffer (current-buffer)))
(insert "\n")
(insert (make-link preview 'ir-lm-jump-to-result
file marker nil encoding query))))))
best))
(defun ir-lm-search (query-str &optional cnt)
"For QUERY-STR find best CNT results."
(interactive
(list (read-string "Search for: " nil t) nil))
(or cnt (setq cnt *ir-max-results*))
(if (null *ir-global-hash*)
(message "No index loaded.")
(or *ir-word-cache*
(setq *ir-word-cache* (make-hash-table :test 'equal)))
(let ((results (generate-new-buffer "*Search results*")))
(set-buffer results)
(local-set-key (kbd "<M-down>")
(lambda () (interactive) (forward-line 2)))
(local-set-key (kbd "<M-up>")
(lambda () (interactive) (forward-line -2)))
(local-set-key (kbd "q") (lambda () (interactive) (kill-buffer)))
(switch-to-buffer results)
(insert "Results for: " query-str)
(catch 'end-results
(let ((query (mapcar (lambda (word)
(ir-process-word (downcase word)))
(split-string query-str))))
(ir-lm-insert-results (ir-lm-get-best-scores query cnt) query)))
(setq buffer-read-only t)
(goto-char (point-min))
(forward-line))
(ignore-errors
(kill-buffer "*Quail Completions*"))
(message (concat "Results for: " query-str))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Visualisation
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-set-keys ()
"Set key bindings in the IR buffer."
(local-set-key (kbd "i") 'ir-lm-index)
(local-set-key (kbd "l") 'ir-lm-load-index)
(local-set-key (kbd "w") 'ir-lm-write-index)
(local-set-key (kbd "f") 'ir-lm-search)
(local-set-key (kbd "c") 'ir-clear)
(local-set-key (kbd "m") 'ir-lm-change-max-results)
(local-set-key (kbd "p") 'ir-lm-change-min-words)
(local-set-key (kbd "b") 'ir-lm-change-lambda)
(local-set-key (kbd "s") 'ir-change-stem-level)
(local-set-key (kbd "q") (lambda () (interactive) (kill-buffer)))
(local-set-key (kbd "r") (lambda () (interactive) (ir-refresh-view))))
(defun ir-lm ()
"Create buffer with information and shortcuts."
(interactive)
(let ((ir-buffer (get-buffer-create "*Information retrieval*")))
(set-buffer ir-buffer)
(switch-to-buffer ir-buffer)
(insert
(propertize "Information Retrieval - Basic Mixed Language Model"
'face '((:foreground "green") (:underline t)))
"\n\nOptions:\n"
(make-link "i -> index new directory"
'ir-lm-index)
"\n"
(make-link "l -> load existing index from file"
'ir-lm-load-index)
"\n"
(make-link "w -> write current index\(es\) to file"
'ir-lm-write-index)
"\n"
(make-link "f -> search in current loaded index\(es\)"
'ir-lm-search)
"\n"
(make-link "c -> clear current index\(es\)"
'ir-clear)
"\n"
(make-link "m -> change maximum search results"
'ir-lm-change-max-results)
"\n"
(make-link "p -> change minimum number of words in paragraph"
'ir-lm-change-min-words)
"\n"
(make-link "b -> change lambda"
'ir-lm-change-lambda)
"\n"
(make-link "s -> change stemming level"
'ir-change-stem-level)
"\n"
(make-link "q -> quit \(without clearing\)"
(lambda () (interactive) (kill-buffer)))
"\n\n"
"maximum results = " (format "%d\n" *ir-max-results*)
"minimum number of words in paragraph = "
(format "%d\n" *ir-lm-min-words*)
"lambda = " (format "%f\n" *ir-lm-lambda*)
"stemming level = " (format "%d\n" *ir-stem-level*)
"total words in texts = " (format "%d\n" *ir-total-count*)
"words in index = " (format "%d\n" *ir-words-count*)
"Currently indexed files [total words]:\n")
(ir-lm-set-keys)
(ir-list-index)
(setq buffer-read-only t)
(set-buffer-modified-p nil)
(goto-char (point-min))
(forward-line 3)))
(provide 'ir-lm)
;;; ir-lm.el ends here
|
m00natic/ir-lm | 123ab5911f2cdeaf2845515b2ea5860bce573052 | fix in ir-lm-write-index (mistaken argument to prin1) | diff --git a/ir-lm.el b/ir-lm.el
index a774e7d..9d334ed 100644
--- a/ir-lm.el
+++ b/ir-lm.el
@@ -94,900 +94,899 @@ Do not use symbol `bla-arg' in the body.")
(make-ir-file-getter ir-file-time (caddr ir-file))
(make-ir-file-getter ir-file-paragraphs (cdddr ir-file))
;; getters for paragraph structures
(make-getter ir-paragraph-getter ir-paragraph)
(make-ir-paragraph-getter ir-paragraph-point (car ir-paragraph))
(make-ir-paragraph-getter ir-paragraph-total-words (cadr ir-paragraph))
(make-ir-paragraph-getter ir-paragraph-distinct-words
(caddr ir-paragraph))
(make-ir-paragraph-getter ir-paragraph-hash (cadddr ir-paragraph))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; visualisation and set-er commands
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun make-link (text cmd &optional file point underline-p encoding query)
"Return a TEXT propertized as a link that invokes CMD when clicked.
FILE is to be opened and cursor moved to position POINT.
UNDERLINE-P determines wether text should be underlined.
If ENCODING is nil, use default encoding when loading result file.
QUERY is list of search terms."
(let ((map (make-sparse-keymap)))
(define-key map [mouse-1] cmd)
(define-key map (kbd "RET") cmd)
(propertize text
'keymap map
'face (when underline-p
'((:foreground "green") (:underline t)))
'mouse-face 'highlight
'rear-nonsticky t
'read-only t
'file file
'point point
'encoding encoding
'query query)))
(defun ir-file-words (paragraphs)
"Get total count of words for file by summing count in its PARAGRAPHS."
(apply '+ (mapcar (lambda (sexp)
(cadr sexp))
paragraphs)))
(defun ir-list-index ()
"List all files currently in index."
(dolist (file *ir-hashes*)
(let ((file-path (get-ir-file-name file)))
(when (file-exists-p file-path)
(insert "\n" (make-link file-path 'ir-lm-jump-to-result
file-path 1 nil
(get-ir-file-encoding file))
(format " [%d]"
(ir-file-words (get-ir-file-paragraphs
file))))))))
(defun ir-refresh-view ()
"Refresh file names in current index."
(ignore-errors
(with-current-buffer "*Information retrieval*"
(goto-char (point-min))
(forward-line 14)
(setq inhibit-read-only t)
(let ((start (point)))
(forward-line 5)
(delete-region start (line-end-position)))
(insert
"maximum results = " (format "%d\n" *ir-max-results*)
"minimum number of words in paragraph = "
(format "%d\n" *ir-lm-min-words*)
"lambda = " (format "%f\n" *ir-lm-lambda*)
"stemming level = " (format "%d\n" *ir-stem-level*)
"total words in texts = " (format "%d\n" *ir-total-count*)
"words in index = " (format "%d" *ir-words-count*))
(forward-line 2)
(delete-region (point) (point-max))
(ir-list-index)
(setq inhibit-read-only nil)
(set-buffer-modified-p nil)
(goto-char (point-min))
(forward-line 3))))
(defun ir-lm-change-lambda (new)
"Set NEW value of the `lambda' parameter."
(interactive
(list (read-number "New value for lambda (0 < lambda < 1) = ")))
(if (or (<= new 0) (>= new 1))
(message "Incorrect value for lambda.")
(setq *ir-lm-lambda* new)
(ir-refresh-view)))
(defun ir-change-stem-level (new)
"Set NEW value of the stemming parameter."
(interactive
(list (read-number "New level for stemming (> 0) = ")))
(if (< new 1)
(message "Incorrect value for stemming.")
(setq *ir-stem-level* new)
(ir-refresh-view)
(ir-load-auxiliary t)))
(defun ir-lm-change-max-results (new)
"Set NEW value for maximum number of search results."
(interactive
(list (read-number "Maximum number of search results = ")))
(setq *ir-max-results* new)
(ir-refresh-view))
(defun ir-lm-change-min-words (new)
"Set NEW minimum number of words for paragraph."
(interactive
(list (read-number "Minumun number of words in paragraph = ")))
(setq *ir-lm-min-words* new)
(ir-refresh-view))
(defun ir-clear (&optional all)
"Clear global hashes and reset global variables.
If ALL is non-nil - ask to clear words' cache as well."
(interactive
(list t))
(setq *ir-hashes* nil
*ir-total-count* 0
*ir-words-count* 0
*ir-global-hash* nil)
(when all
(and (or *ir-word-cache* *ir-stem* *ir-stop*)
(y-or-n-p "Clear auxiliary caches as well? ")
(setq *ir-stop* nil
*ir-stem* nil
*ir-word-cache* nil))
(message "Index cleared.")
(ir-refresh-view))
(garbage-collect))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; utilities
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun find-fn (fn lst)
"Return first item that satisfies FN in LST. Nil if no such."
(catch 'out
(dolist (item lst)
(when (funcall fn item)
(throw 'out item)))))
(defun delete-fn (lst fn)
"Destructively delete first element of LST for which FN is non-nil."
(if (funcall fn (car lst))
(cdr lst)
(let ((prev lst)
(curr (cdr lst)))
(catch 'out
(while curr
(if (not (funcall fn (car curr)))
(setq prev curr
curr (cdr curr))
(setcdr prev (cdr curr))
(throw 'out nil))))
lst)))
(defun get-next-word ()
"Get next word (including hyphens and carrige return) after position."
(when (forward-word)
(let ((word (current-word t t)))
(while (equal (char-to-string (following-char)) "-")
(when (forward-word)
(setq word (concat word
(if (equal (char-to-string
(following-char)) "\n")
""
"-")
(current-word t t)))))
word)))
(defmacro dowords (vars &rest body)
"Bind VARS to consecutive words and execute BODY."
(if (listp vars)
`(let ,(mapcar (lambda (var)
`(,var (get-next-word)))
vars)
(while ,(car vars)
,@body
(setq ,@(apply 'nconc
(mapcar (lambda (var)
`(,var (get-next-word)))
vars)))))
`(let ((,vars (get-next-word)))
(while ,vars
,@body
(setq ,vars (get-next-word))))))
(defun replace-regex-str (word regex str)
"In WORD replace REGEX with STR."
(mapconcat 'identity (split-string word regex) str))
(defun glob-to-regex (glob)
"Turn a GLOB to a reg-exp."
(replace-regex-str
(replace-regex-str (replace-regex-str glob "\\." "\\.")
"?" ".")
"\\*" ".*"))
(defun filter-name (file-name patterns)
"Check whether FILE-NAME is fully matched by any of the PATTERNS."
(when patterns
(let ((match (string-match (car patterns) file-name)))
(if (and match
(= 0 match))
t
(filter-name file-name (cdr patterns))))))
(defun maprdir (fn dir &optional file-types subdir-p)
"Apply FN over all files in DIR and its subdirectories.
FILE-TYPES determines file name patterns for calling FN upon.
Default is all files. If SUBDIR-P is nil,
we are in the top level directory, otherwize we are lower.
This is used when recursing, when calling, should be nil."
(or subdir-p ;executed only once, in top directory
(setq file-types (mapcar 'glob-to-regex
(split-string (or file-types
"*") nil t))))
(dolist (file (directory-files dir))
(let ((file-full (concat dir file)))
(or (equal "." file)
(equal ".." file)
(if (file-directory-p file-full)
(maprdir fn (concat file-full "/") file-types t)
(when (filter-name file file-types)
(funcall fn file-full)))))))
(defun inc-hash-value (key h-table &optional value)
"Increment value for KEY in H-TABLE with VALUE.
If VALUE is nil, use 1.
If KEY doesn't exist, set initial value to VALUE.
If end value of KEY is <=0, remove key.
Return new val if key is added/changed, nil if key is removed."
(let* ((num (gethash key h-table 0))
(val (or value 1))
(end-val (+ num val)))
(if (> end-val 0)
(puthash key end-val h-table)
(remhash key h-table))))
(defun hash-to-assoc (h-table)
"Turn a H-TABLE to assoc-list."
(let ((a-list nil))
(maphash (lambda (key val)
(push (cons key val) a-list))
h-table)
a-list))
(defun ir-pair-to-global-hash (key value)
"Add KEY VALUE to *ir-global-hash* and adjust global count of words."
(or (gethash key *ir-global-hash* nil)
(setq *ir-words-count* (1+ *ir-words-count*)))
(inc-hash-value key *ir-global-hash* value))
(defun ir-assoc-to-hash (a-list &optional size use-global-hash-p parent-hash-p)
"Turn A-LIST to a hash-table with size SIZE.
If USE-GLOBAL-HASH-P, add to *ir-global-hash*, return nil.
If PARENT-HASH-P, create new hash and add both to it
and *ir-global-hash*, adjusting global counts,
return the newly created one."
(if (not use-global-hash-p)
(let ((h-table (make-hash-table :test 'equal :size size)))
(if parent-hash-p
(dolist (cell a-list h-table)
(let ((key (car cell))
(val (cdr cell)))
(ir-pair-to-global-hash key val)
(inc-hash-value key h-table val)))
(dolist (cell a-list h-table)
(inc-hash-value (car cell) h-table (cdr cell)))))
(or *ir-global-hash* ;else use global, return nil
(setq *ir-global-hash* (make-hash-table
:test 'equal :size size)))
(if parent-hash-p
(dolist (cell a-list)
(ir-pair-to-global-hash (car cell) (cdr cell)))
(dolist (cell a-list)
(inc-hash-value (car cell) *ir-global-hash* (cdr cell))))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Word processing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun bg-stem (word)
"Return stemmed version of WORD."
(if (string-match "\\(.*?[аÑоÑеийÑÑ]\\)\\(.*\\)" word)
(let ((prefix (match-string-no-properties 1 word))
(suffix (match-string-no-properties 2 word)))
(if (and prefix
suffix)
(catch 'out
(dotimes (i (length suffix) word)
(let ((stem-suf (gethash (substring suffix i)
*ir-stem* nil)))
(when stem-suf
(throw 'out (concat prefix
(substring suffix 0 i)
stem-suf))))))
word))
word))
(defun ir-process-new-word (word)
"Return processed WORD."
(if (and *ir-stop*
(gethash word *ir-stop* nil))
"" ;stop words are marked as ""
(if *ir-stem*
(bg-stem word)
word)))
(defmacro ir-build-word-processor (&optional stop-p stem-p)
"Build optimized word processing function.
STOP-P determines whether stop words should be checked.
STEM-P determines whether stemming should be applied."
`(lambda (word)
,(if stop-p
`(if (gethash word *ir-stop* nil)
""
,(if stem-p
'(bg-stem word)
'word))
(if stem-p
'(bg-stem word)
'word))))
(defun ir-get-word-processor (stop-p stem-p)
"Return optimized word processing function.
STOP-P determines whether stop words should be checked.
STEM-P determines whether stemming should be applied."
(cond
((and stop-p stem-p) (ir-build-word-processor t t))
(stop-p (ir-build-word-processor t))
(stem-p (ir-build-word-processor nil t))
(t (ir-build-word-processor))))
(defun ir-process-word (word)
"Return hashed processed value for WORD.
If no such is found, process and cache."
(let ((hash-check (gethash word *ir-word-cache* nil)))
(or hash-check
(setq hash-check
(puthash word (ir-process-new-word word) *ir-word-cache*)))
(or (equal "" hash-check) hash-check))) ;if not a stop word
(defun ir-load-stop-words (file)
"Load stop-words from FILE to the global hash *ir-stop*."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(dowords word
(puthash word "1" *ir-stop*))))
;; (defun ir-load-stemmer (file) ;freezes compilation
;; "Load stem entries from FILE to the global hash *ir-stem*."
;; (with-temp-buffer
;; (insert-file-contents file)
;; (goto-char (point-min))
;; (dowords (w1 w2 w3) ;does not byte compile!
;; (when w3
;; (setq w3 (car (read-from-string w3)))
;; (and (numberp w3)
;; (>= w3 *ir-stem-level*)
;; (puthash w1 w2 *ir-stem*))))))
(defun ir-load-stemmer (file)
"Load stem entries from FILE to the global hash *ir-stem*."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(dowords w1
(let ((w2 (get-next-word))
(w3 (get-next-word)))
(when w3
(setq w3 (car (read-from-string w3)))
(and (numberp w3)
(>= w3 *ir-stem-level*)
(puthash w1 w2 *ir-stem*)))))))
(defun ir-load-auxiliary (&optional force)
"Load auxiliary files to hashes if not already done.
When FORCE is non-nil, re-fill."
(message "Loading auxiliary hashes...")
(let ((stop-dir (concat *ir-dir* "stop-words/")))
(when (and (file-exists-p stop-dir)
(or force
(null *ir-stop*)))
(setq *ir-stop* (make-hash-table :test 'equal :size 300))
(maprdir 'ir-load-stop-words stop-dir)))
(let ((stem-dir (concat *ir-dir* "stem-rules/")))
(if (file-exists-p stem-dir)
(when (and (file-exists-p stem-dir)
(or force
(null *ir-stop*)))
(setq *ir-stem* (make-hash-table :test 'equal :size 130514))
(maprdir 'ir-load-stemmer stem-dir))))
(fset 'ir-process-new-word
(ir-get-word-processor *ir-stop* *ir-stem*))
(message "Auxiliary hashes loaded."))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; File processing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-extract-words (full-file-name &optional encoding)
"Process paragraphs of current buffer holding FULL-FILE-NAME.
Save ENCODING for further operations."
(macrolet ((assess-paragraph
()
`(if (>= paragraph-total-count *ir-lm-min-words*)
(push (list paragraph-start paragraph-total-count
paragraph-words-count paragraph)
acc)
(setq *ir-total-count* ;if paragraph is too short, discard
(- *ir-total-count* paragraph-total-count))
(maphash (lambda (wrd cnt) ;and remove word counts
(or (inc-hash-value wrd *ir-global-hash*
(- cnt))
(setq *ir-words-count*
(1- *ir-words-count*))))
paragraph))))
(let* ((prev (point-min))
(paragraph-start prev)
(paragraph-total-count 0)
(paragraph-words-count 0)
(paragraph (make-hash-table :test 'equal))
(acc (list (current-time) encoding full-file-name)))
(goto-char prev)
(dowords word
(setq word (ir-process-word (downcase word)))
(let ((curr (line-beginning-position)))
(when (string-match "\n.*\n" ;detect just ended paragraph
(buffer-substring-no-properties
prev curr))
(assess-paragraph)
(setq paragraph (make-hash-table :test 'equal)
paragraph-total-count 0
paragraph-words-count 0
paragraph-start curr))
(when word
(setq paragraph-total-count (1+ paragraph-total-count)
*ir-total-count* (1+ *ir-total-count*))
(when (= 1 (inc-hash-value word paragraph)) ;new paragraph word
(setq paragraph-words-count (1+ paragraph-words-count)))
(when (= 1 (inc-hash-value word *ir-global-hash*)) ;new global word
(setq *ir-words-count* (1+ *ir-words-count*))))
(setq prev curr)))
(kill-buffer (current-buffer))
(assess-paragraph)
(when acc (push (nreverse acc) *ir-hashes*)))))
(defun ir-remove-post (post &optional save-globals-p)
"Subtract from global words hash key-values corresponding in POST.
SAVE-GLOBALS-P determines whether global indexes shouldn't be touched."
(setq *ir-total-count* (- *ir-total-count* (cadr post)))
(maphash (lambda (key val)
(or (inc-hash-value key *ir-global-hash* (- val))
save-globals-p
(setq *ir-words-count* (1- *ir-words-count*))))
(get-ir-paragraph-hash post)))
(defun ir-remove-postings (file &optional save-globals-p)
"Clean all info for FILE in hashes.
SAVE-GLOBALS-P determines whether global indexes shouldn't be touched."
(let ((file-posts (get-ir-file-paragraphs
(find-fn (lambda (post)
(equal file (get-ir-file-name post)))
*ir-hashes*))))
(dolist (post file-posts)
(ir-remove-post post save-globals-p))
(setq *ir-hashes* (delete-fn *ir-hashes*
(lambda (file-post)
(equal file (get-ir-file-name
file-post)))))))
(defun ir-lm-process-paragraphs (file &optional encoding)
"Load FILE to temp buffer and process its words.
If ENCODING is nil, use default encoding when loading FILE."
(ir-remove-postings file)
(with-temp-buffer
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(ir-lm-extract-words file encoding)))
(defun print-posting (lst)
"Get printed representation for posting for paragraph LST."
(princ "\n" (current-buffer))
(prin1 (nconc (list (get-ir-file-name lst)
(get-ir-file-encoding lst)
(get-ir-file-time lst))
(mapcar (lambda (sublst)
(nconc
(list (get-ir-paragraph-point sublst)
(get-ir-paragraph-total-words sublst)
(get-ir-paragraph-distinct-words sublst))
(hash-to-assoc (get-ir-paragraph-hash
sublst))))
(get-ir-file-paragraphs lst)))
(current-buffer)))
(defun ir-lm-write-index (file)
"Write current index info to FILE."
(interactive
(list (read-file-name "Index file: " nil
".irlm" nil ".irlm")))
(message "Writing...")
(with-temp-file file
(prin1 (nconc (list *ir-total-count* *ir-words-count*) ;firstly write the global hash
- (hash-to-assoc *ir-global-hash*))
- (current-buffer))
+ (hash-to-assoc *ir-global-hash*)))
(mapc 'print-posting *ir-hashes*)) ;write all postings
(message "Index written."))
(defun ir-lm-index (dir &optional file-types encoding append-p)
"Recursivelly process directory DIR and index all files.
FILE-TYPES determines file name patterns for indexing.
If ENCODING is nil, use default \(utf-8\) encoding for files.
If APPEND-P is non-nil, merge to the current index."
(interactive
(list
(read-directory-name "Top directory: " nil default-directory t)
(read-string "File names to be indexed: " "*.txt" nil "*.txt")
(unless (y-or-n-p "Use default encoding? ")
(read-coding-system "Choose encoding: " 'cp1251))
(when *ir-global-hash*
(y-or-n-p "Add to existing configuration? "))))
(or *ir-global-hash*
(setq append-p nil))
(or *ir-word-cache*
(setq *ir-word-cache* (make-hash-table :test 'equal)))
(unless append-p
(ir-clear)
(setq *ir-global-hash* (make-hash-table :test 'equal)))
(ir-load-auxiliary)
(message "Indexing...")
(maprdir (lambda (file) (ir-lm-process-paragraphs file encoding))
dir file-types)
(message "Files successfully indexed.")
(ir-refresh-view))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Load existing index
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-get-file-posting (post &optional inc-globals-p)
"Convert file saved POST info to actually used structures.
INC-GLOBALS-P determines whether global word counts should be adjusted."
(nconc (list (get-ir-file-name post)
(get-ir-file-encoding post)
(get-ir-file-time post))
(mapcar (lambda (subpost)
(let ((total-words (get-ir-paragraph-total-words
subpost))
(index-words (get-ir-paragraph-distinct-words
subpost)))
(when inc-globals-p
(setq *ir-total-count*
(+ *ir-total-count* total-words)))
(list (get-ir-paragraph-point subpost)
total-words index-words
(ir-assoc-to-hash (cdddr subpost) index-words
nil inc-globals-p))))
(get-ir-file-paragraphs post))))
(defun ir-lm-load-file-posting (post &optional inc-globals-p)
"Get file saved POST. If newer posting already exists, discard.
INC-GLOBALS-P determines whether global word counts should be adjusted."
(let* ((file-path (get-ir-file-name post))
(existing-file-time
(get-ir-file-time (find-fn (lambda (post)
(equal file-path
(get-ir-file-name post)))
*ir-hashes*))))
(if existing-file-time ;check if file is already in index
(if (file-exists-p file-path)
(when (time-less-p existing-file-time
(get-ir-file-time post)) ;if post is newer
(ir-remove-postings file-path (not inc-globals-p)) ;remove old posting from *ir-hashes*
(ir-lm-get-file-posting post inc-globals-p))
;discard posting and remove existing from *ir-hashes*
(ir-remove-postings file-path (not inc-globals-p)) ;housekeeping
nil)
(when (file-exists-p file-path) ;load only existing files
(ir-lm-get-file-posting post inc-globals-p)))))
(defun ir-lm-load-index-from-file (file)
"Load existing index from FILE."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(let ((not-inc-globals-p (null *ir-global-hash*)))
(when not-inc-globals-p ;need global hash from file only if current is cleared
(let ((global-hash (read-from-whole-string
(buffer-substring-no-properties
(line-beginning-position)
(line-end-position)))))
(setq *ir-total-count* (car global-hash)
*ir-words-count* (cadr global-hash))
(ir-assoc-to-hash (cddr global-hash) *ir-words-count* t)))
(let ((point-max (point-max)))
(while (and (= 0 (forward-line 1))
(< (point) point-max))
(let ((file-sexp (ir-lm-load-file-posting
(read-from-whole-string
(buffer-substring-no-properties
(line-beginning-position)
(line-end-position)))
(not not-inc-globals-p))))
(when file-sexp (push file-sexp *ir-hashes*))))))
(kill-buffer (current-buffer))))
(defun ir-lm-load-index (file &optional append-p)
"Load existing index FILE.
If APPEND-P is non-nil, keep previous index loaded as well."
(interactive
(list (read-file-name "Index file: " nil
".irlm" nil ".irlm")
(when *ir-global-hash*
(y-or-n-p
"Add to existing configuration or overwrite? "))))
(when (file-exists-p file)
(or (and *ir-global-hash*
append-p)
(ir-clear))
(ir-load-auxiliary)
(message "Loading...")
(ir-lm-load-index-from-file file)
(message "Index loaded.")
(ir-refresh-view)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Scoring
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-posting-score (hash base words &optional lambda)
"Get score from paragraph represented as HASH.
BASE is the total number of words in the paragraph.
WORDS is list of words in query.
LAMBDA is LM parameter between 0 and 1."
(or lambda
(setq lambda 0.5))
(let ((result
(apply '*
(mapcar (lambda (word)
(let ((global-count
(gethash word *ir-global-hash* 0)))
(if (> global-count 0)
(+ (* lambda
(/ (float (gethash word hash 0))
base))
(* (- 1 lambda)
(/ (float global-count)
*ir-total-count*)))
1)))
words))))
(if (= result 1) 0 result)))
(defun ir-lm-posting-min-score (words &optional lambda)
"Get minimum score possible for a paragraph.
WORDS is list of words in query.
LAMBDA is LM parameter between 0 and 1."
(or lambda
(setq lambda 0.5))
(apply '* (mapcar (lambda (word)
(let ((global-count
(gethash word *ir-global-hash* 0)))
(if (> global-count 0)
(* (- 1 lambda)
(/ (float global-count)
*ir-total-count*))
1)))
words)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Search
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-insert-post (new best cnt)
"Insert NEW post based on score into BEST array with CNT elements."
(let ((new-val (aref new 0))
(place (1+ cnt)))
(when (> new-val 0)
(while (and (>= place 1)
(> new-val (aref (aref best (1- place)) 0)))
(setq place (1- place)))
(while (> cnt place)
(aset best cnt (aref best (1- cnt)))
(setq cnt (1- cnt)))
(when (>= cnt place)
(aset best place new))))
best)
(defun ir-lm-get-best-scores (query cnt)
"For QUERY which is list of search terms find best CNT results.
Return vector of vectors with info for best paragraphs."
(let ((best (make-vector cnt [0 "" -1 nil nil]))
(min-score (ir-lm-posting-min-score query *ir-lm-lambda*)))
(dolist (file *ir-hashes*)
(let ((file-path (get-ir-file-name file)))
(when (file-exists-p file-path)
(dolist (post (get-ir-file-paragraphs file))
(let ((score
(ir-lm-posting-score (get-ir-paragraph-hash post)
(get-ir-paragraph-total-words
post)
query
*ir-lm-lambda*)))
(when (> score min-score)
(setq best
(ir-lm-insert-post
(vector score file-path
(get-ir-paragraph-point post)
(get-ir-file-encoding file))
best (1- cnt)))))))))
best))
(defun highlight-search (pos query)
"Highlight words from POS on to the end of paragraph corresponding to QUERY."
(catch 'out
(let ((prev pos))
(dowords word
(let ((curr (point)))
(when (string-match "\n.*\n" ;detect just ended paragraph
(buffer-substring-no-properties
prev curr))
(throw 'out nil))
(when (member (ir-process-word (downcase word))
query)
(delete-char (- (length word)))
(insert
(propertize word
'face '((:foreground "green")))))
(setq prev curr))))))
(defun ir-lm-jump-to-result (file pos &optional encoding query)
"Open FILE and go to particular position POS.
If ENCODING is nil, use default encoding when loading result file.
QUERY is list of current search terms."
(interactive
(let ((point (point)))
(list (get-text-property point 'file)
(get-text-property point 'point)
(get-text-property point 'encoding)
(get-text-property point 'query))))
(let ((jump-buffer (generate-new-buffer (car (nreverse
(split-string file "/"))))))
(set-buffer jump-buffer)
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(goto-char pos)
(when query ;highlight search terms
(highlight-search pos query)
(goto-char pos))
(switch-to-buffer jump-buffer)))
(defun ir-lm-insert-results (best query)
"Insert in current buffer BEST results.
QUERY is list of current search terms."
(mapc (lambda (post)
(let ((score (aref post 0))
(file (aref post 1))
(marker (aref post 2))
(encoding (aref post 3))
(preview ""))
(if (<= score 0)
(throw 'end-results nil) ;premature end of meaningful results
(insert "\n")
(insert (make-link (car (nreverse (split-string file "/")))
'ir-lm-jump-to-result file marker
t encoding query))
(insert (format " [%f]" (* score 1000000)))
(when (number-or-marker-p marker)
(with-temp-buffer
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(goto-char marker)
(setq preview
(buffer-substring-no-properties marker
(line-end-position)))
(kill-buffer (current-buffer)))
(insert "\n")
(insert (make-link preview 'ir-lm-jump-to-result
file marker nil encoding query))))))
best))
(defun ir-lm-search (query-str &optional cnt)
"For QUERY-STR find best CNT results."
(interactive
(list (read-string "Search for: " nil t) nil))
(or cnt (setq cnt *ir-max-results*))
(if (null *ir-global-hash*)
(message "No index loaded.")
(or *ir-word-cache*
(setq *ir-word-cache* (make-hash-table :test 'equal)))
(let ((results (generate-new-buffer "*Search results*")))
(set-buffer results)
(local-set-key (kbd "<M-down>")
(lambda () (interactive) (forward-line 2)))
(local-set-key (kbd "<M-up>")
(lambda () (interactive) (forward-line -2)))
(local-set-key (kbd "q") (lambda () (interactive) (kill-buffer)))
(switch-to-buffer results)
(insert "Results for: " query-str)
(catch 'end-results
(let ((query (mapcar (lambda (word)
(ir-process-word (downcase word)))
(split-string query-str))))
(ir-lm-insert-results (ir-lm-get-best-scores query cnt) query)))
(setq buffer-read-only t)
(goto-char (point-min))
(forward-line))
(ignore-errors
(kill-buffer "*Quail Completions*"))
(message (concat "Results for: " query-str))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Visualisation
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-set-keys ()
"Set key bindings in the IR buffer."
(local-set-key (kbd "i") 'ir-lm-index)
(local-set-key (kbd "l") 'ir-lm-load-index)
(local-set-key (kbd "w") 'ir-lm-write-index)
(local-set-key (kbd "f") 'ir-lm-search)
(local-set-key (kbd "c") 'ir-clear)
(local-set-key (kbd "m") 'ir-lm-change-max-results)
(local-set-key (kbd "p") 'ir-lm-change-min-words)
(local-set-key (kbd "b") 'ir-lm-change-lambda)
(local-set-key (kbd "s") 'ir-change-stem-level)
(local-set-key (kbd "q") (lambda () (interactive) (kill-buffer)))
(local-set-key (kbd "r") (lambda () (interactive) (ir-refresh-view))))
(defun ir-lm ()
"Create buffer with information and shortcuts."
(interactive)
(let ((ir-buffer (get-buffer-create "*Information retrieval*")))
(set-buffer ir-buffer)
(switch-to-buffer ir-buffer)
(insert
(propertize "Information Retrieval - Basic Mixed Language Model"
'face '((:foreground "green") (:underline t)))
"\n\nOptions:\n"
(make-link "i -> index new directory"
'ir-lm-index)
"\n"
(make-link "l -> load existing index from file"
'ir-lm-load-index)
"\n"
(make-link "w -> write current index\(es\) to file"
'ir-lm-write-index)
"\n"
(make-link "f -> search in current loaded index\(es\)"
'ir-lm-search)
"\n"
(make-link "c -> clear current index\(es\)"
'ir-clear)
"\n"
(make-link "m -> change maximum search results"
'ir-lm-change-max-results)
"\n"
(make-link "p -> change minimum number of words in paragraph"
'ir-lm-change-min-words)
"\n"
(make-link "b -> change lambda"
'ir-lm-change-lambda)
"\n"
(make-link "s -> change stemming level"
'ir-change-stem-level)
"\n"
(make-link "q -> quit \(without clearing\)"
(lambda () (interactive) (kill-buffer)))
"\n\n"
"maximum results = " (format "%d\n" *ir-max-results*)
"minimum number of words in paragraph = "
(format "%d\n" *ir-lm-min-words*)
"lambda = " (format "%f\n" *ir-lm-lambda*)
"stemming level = " (format "%d\n" *ir-stem-level*)
"total words in texts = " (format "%d\n" *ir-total-count*)
"words in index = " (format "%d\n" *ir-words-count*)
"Currently indexed files [total words]:\n")
(ir-lm-set-keys)
(ir-list-index)
(setq buffer-read-only t)
(set-buffer-modified-p nil)
(goto-char (point-min))
(forward-line 3)))
(provide 'ir-lm)
;;; ir-lm.el ends here
|
m00natic/ir-lm | 266edfd1ebd13b4c00859688db4c7fb1943b5ae0 | inserted a macrolet inside ir-lm-extract-words insted of defining a useless macro outside | diff --git a/ir-lm.el b/ir-lm.el
index 443394b..a774e7d 100644
--- a/ir-lm.el
+++ b/ir-lm.el
@@ -1,992 +1,993 @@
;;; ir-lm.el --- Basic Mixed Language Model for Information Retrieval
;by Andrey Kotlarski [email protected]
;;; Commentary:
;;; History:
;; 5.VIII.2009 - Version 1.8
; Abstracting away file-postings
; structure
;; 31.VII.2009 - Version 1.7
; Generating word processing function
; on the fly, thus optimizing
; depending on whether stop words or
; stemmer are loaded
;; 18.VII.2009 - Version 1.6
; highlighting of search words
; minor bugfixes
;; 15.VII.2009 - Version 1.5
; bulgarian stemmer added
; stop-word and stemmer files
; are now stored in separate directories
; which are recursively processed
; added stemming parameter
; many corrections in merging
;; 14.VII.2009 - Version 1.4
; correctly merge postings and info
; on load or index (no duplicates,
; no loading of older than index files)
; added globs for filtering file types
;; 13.VII.2009 - Version 1.3
; remembering encoding for individual files
; prune non-existing files on load
;; 12.VII.2009 - Version 1.2
; new command `ir-lm' giving a unified
; interface of files and commands
; command to change lambda
; full cleaning of data
; minor bugfixes
;; 10.VII.2009 - Version 1.1
; added minumim possible score for query
; so that irrelevant results are discarded
; a bit of code refactoring and cleaning
;; 09.VII.2009 - Version 1.0
;;; Code:
(defconst *ir-dir*
(if (or (eq system-type 'windows-nt)
(eq system-type 'ms-dos))
"C:/ir/"
"~/.ir/")
"Directory for auxiliary files.")
;; *ir-hashes* structure is ((file-path encoding time (point-in-file total-words-in-paragraph
;; distinct-words-in-paragraph hash-of-word-counts) ...) ...)
(defvar *ir-hashes* nil "List of postings grouped in files.")
(defvar *ir-global-hash* nil "Global hash table of words and their count.")
(defvar *ir-total-count* 0 "Count of all words in index.")
(defvar *ir-words-count* 0 "Count of all distinct words in index.")
(defvar *ir-word-cache* nil "Cache of raw word -> transformation.")
(defvar *ir-stop* nil "Hash table of stop words.")
(defvar *ir-stem* nil "Hash table of stemmer.")
(defvar *ir-lm-lambda* 0.5 "Parameter in the mixed language model.")
(defvar *ir-max-results* 30 "Maximum number of search results.")
(defvar *ir-stem-level* 1 "Stemming level.")
(defvar *ir-lm-min-words* 20 "Minimal number of words in paragraph.")
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; *ir-hashes* selectors
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defmacro make-getter (getter-name arg)
"Create a macro for writing getters with name MAKE- GETTER-NAME and argument ARG."
`(defmacro ,(intern (concat "make-"
(symbol-name getter-name)))
(name &rest body)
,(concat "Create a selector for `*ir-hashes*' with name GET- NAME and BODY.
This selector has one argument with structure as `*ir-hashes*'
named `" (symbol-name arg)"'.
Do not use symbol `bla-arg' in the body.")
(let ((bla-arg ',arg))
`(defun ,(intern (concat "get-"
(symbol-name name)))
(,bla-arg)
,@body))))
;; getters for file structures
(make-getter ir-file-getter ir-file)
(make-ir-file-getter ir-file-name (car ir-file))
(make-ir-file-getter ir-file-encoding (cadr ir-file))
(make-ir-file-getter ir-file-time (caddr ir-file))
(make-ir-file-getter ir-file-paragraphs (cdddr ir-file))
;; getters for paragraph structures
(make-getter ir-paragraph-getter ir-paragraph)
(make-ir-paragraph-getter ir-paragraph-point (car ir-paragraph))
(make-ir-paragraph-getter ir-paragraph-total-words (cadr ir-paragraph))
(make-ir-paragraph-getter ir-paragraph-distinct-words
(caddr ir-paragraph))
(make-ir-paragraph-getter ir-paragraph-hash (cadddr ir-paragraph))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; visualisation and set-er commands
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun make-link (text cmd &optional file point underline-p encoding query)
"Return a TEXT propertized as a link that invokes CMD when clicked.
FILE is to be opened and cursor moved to position POINT.
UNDERLINE-P determines wether text should be underlined.
If ENCODING is nil, use default encoding when loading result file.
QUERY is list of search terms."
(let ((map (make-sparse-keymap)))
(define-key map [mouse-1] cmd)
(define-key map (kbd "RET") cmd)
(propertize text
'keymap map
'face (when underline-p
'((:foreground "green") (:underline t)))
'mouse-face 'highlight
'rear-nonsticky t
'read-only t
'file file
'point point
'encoding encoding
'query query)))
(defun ir-file-words (paragraphs)
"Get total count of words for file by summing count in its PARAGRAPHS."
(apply '+ (mapcar (lambda (sexp)
(cadr sexp))
paragraphs)))
(defun ir-list-index ()
"List all files currently in index."
(dolist (file *ir-hashes*)
(let ((file-path (get-ir-file-name file)))
(when (file-exists-p file-path)
(insert "\n" (make-link file-path 'ir-lm-jump-to-result
file-path 1 nil
(get-ir-file-encoding file))
(format " [%d]"
(ir-file-words (get-ir-file-paragraphs
file))))))))
(defun ir-refresh-view ()
"Refresh file names in current index."
(ignore-errors
(with-current-buffer "*Information retrieval*"
(goto-char (point-min))
(forward-line 14)
(setq inhibit-read-only t)
(let ((start (point)))
(forward-line 5)
(delete-region start (line-end-position)))
(insert
"maximum results = " (format "%d\n" *ir-max-results*)
"minimum number of words in paragraph = "
(format "%d\n" *ir-lm-min-words*)
"lambda = " (format "%f\n" *ir-lm-lambda*)
"stemming level = " (format "%d\n" *ir-stem-level*)
"total words in texts = " (format "%d\n" *ir-total-count*)
"words in index = " (format "%d" *ir-words-count*))
(forward-line 2)
(delete-region (point) (point-max))
(ir-list-index)
(setq inhibit-read-only nil)
(set-buffer-modified-p nil)
(goto-char (point-min))
(forward-line 3))))
(defun ir-lm-change-lambda (new)
"Set NEW value of the `lambda' parameter."
(interactive
(list (read-number "New value for lambda (0 < lambda < 1) = ")))
(if (or (<= new 0) (>= new 1))
(message "Incorrect value for lambda.")
(setq *ir-lm-lambda* new)
(ir-refresh-view)))
(defun ir-change-stem-level (new)
"Set NEW value of the stemming parameter."
(interactive
(list (read-number "New level for stemming (> 0) = ")))
(if (< new 1)
(message "Incorrect value for stemming.")
(setq *ir-stem-level* new)
(ir-refresh-view)
(ir-load-auxiliary t)))
(defun ir-lm-change-max-results (new)
"Set NEW value for maximum number of search results."
(interactive
(list (read-number "Maximum number of search results = ")))
(setq *ir-max-results* new)
(ir-refresh-view))
(defun ir-lm-change-min-words (new)
"Set NEW minimum number of words for paragraph."
(interactive
(list (read-number "Minumun number of words in paragraph = ")))
(setq *ir-lm-min-words* new)
(ir-refresh-view))
(defun ir-clear (&optional all)
"Clear global hashes and reset global variables.
If ALL is non-nil - ask to clear words' cache as well."
(interactive
(list t))
(setq *ir-hashes* nil
*ir-total-count* 0
*ir-words-count* 0
*ir-global-hash* nil)
(when all
(and (or *ir-word-cache* *ir-stem* *ir-stop*)
(y-or-n-p "Clear auxiliary caches as well? ")
(setq *ir-stop* nil
*ir-stem* nil
*ir-word-cache* nil))
(message "Index cleared.")
(ir-refresh-view))
(garbage-collect))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; utilities
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun find-fn (fn lst)
"Return first item that satisfies FN in LST. Nil if no such."
(catch 'out
(dolist (item lst)
(when (funcall fn item)
(throw 'out item)))))
(defun delete-fn (lst fn)
"Destructively delete first element of LST for which FN is non-nil."
(if (funcall fn (car lst))
(cdr lst)
(let ((prev lst)
(curr (cdr lst)))
(catch 'out
(while curr
(if (not (funcall fn (car curr)))
(setq prev curr
curr (cdr curr))
(setcdr prev (cdr curr))
(throw 'out nil))))
lst)))
(defun get-next-word ()
"Get next word (including hyphens and carrige return) after position."
(when (forward-word)
(let ((word (current-word t t)))
(while (equal (char-to-string (following-char)) "-")
(when (forward-word)
(setq word (concat word
(if (equal (char-to-string
(following-char)) "\n")
""
"-")
(current-word t t)))))
word)))
(defmacro dowords (vars &rest body)
"Bind VARS to consecutive words and execute BODY."
(if (listp vars)
`(let ,(mapcar (lambda (var)
`(,var (get-next-word)))
vars)
(while ,(car vars)
,@body
(setq ,@(apply 'nconc
(mapcar (lambda (var)
`(,var (get-next-word)))
vars)))))
`(let ((,vars (get-next-word)))
(while ,vars
,@body
(setq ,vars (get-next-word))))))
(defun replace-regex-str (word regex str)
"In WORD replace REGEX with STR."
(mapconcat 'identity (split-string word regex) str))
(defun glob-to-regex (glob)
"Turn a GLOB to a reg-exp."
(replace-regex-str
(replace-regex-str (replace-regex-str glob "\\." "\\.")
"?" ".")
"\\*" ".*"))
(defun filter-name (file-name patterns)
"Check whether FILE-NAME is fully matched by any of the PATTERNS."
(when patterns
(let ((match (string-match (car patterns) file-name)))
(if (and match
(= 0 match))
t
(filter-name file-name (cdr patterns))))))
(defun maprdir (fn dir &optional file-types subdir-p)
"Apply FN over all files in DIR and its subdirectories.
FILE-TYPES determines file name patterns for calling FN upon.
Default is all files. If SUBDIR-P is nil,
we are in the top level directory, otherwize we are lower.
This is used when recursing, when calling, should be nil."
(or subdir-p ;executed only once, in top directory
(setq file-types (mapcar 'glob-to-regex
(split-string (or file-types
"*") nil t))))
(dolist (file (directory-files dir))
(let ((file-full (concat dir file)))
(or (equal "." file)
(equal ".." file)
(if (file-directory-p file-full)
(maprdir fn (concat file-full "/") file-types t)
(when (filter-name file file-types)
(funcall fn file-full)))))))
(defun inc-hash-value (key h-table &optional value)
"Increment value for KEY in H-TABLE with VALUE.
If VALUE is nil, use 1.
If KEY doesn't exist, set initial value to VALUE.
If end value of KEY is <=0, remove key.
Return new val if key is added/changed, nil if key is removed."
(let* ((num (gethash key h-table 0))
(val (or value 1))
(end-val (+ num val)))
(if (> end-val 0)
(puthash key end-val h-table)
(remhash key h-table))))
(defun hash-to-assoc (h-table)
"Turn a H-TABLE to assoc-list."
(let ((a-list nil))
(maphash (lambda (key val)
(push (cons key val) a-list))
h-table)
a-list))
(defun ir-pair-to-global-hash (key value)
"Add KEY VALUE to *ir-global-hash* and adjust global count of words."
(or (gethash key *ir-global-hash* nil)
(setq *ir-words-count* (1+ *ir-words-count*)))
(inc-hash-value key *ir-global-hash* value))
(defun ir-assoc-to-hash (a-list &optional size use-global-hash-p parent-hash-p)
"Turn A-LIST to a hash-table with size SIZE.
If USE-GLOBAL-HASH-P, add to *ir-global-hash*, return nil.
If PARENT-HASH-P, create new hash and add both to it
and *ir-global-hash*, adjusting global counts,
return the newly created one."
(if (not use-global-hash-p)
(let ((h-table (make-hash-table :test 'equal :size size)))
(if parent-hash-p
(dolist (cell a-list h-table)
(let ((key (car cell))
(val (cdr cell)))
(ir-pair-to-global-hash key val)
(inc-hash-value key h-table val)))
(dolist (cell a-list h-table)
(inc-hash-value (car cell) h-table (cdr cell)))))
(or *ir-global-hash* ;else use global, return nil
(setq *ir-global-hash* (make-hash-table
:test 'equal :size size)))
(if parent-hash-p
(dolist (cell a-list)
(ir-pair-to-global-hash (car cell) (cdr cell)))
(dolist (cell a-list)
(inc-hash-value (car cell) *ir-global-hash* (cdr cell))))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Word processing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun bg-stem (word)
"Return stemmed version of WORD."
(if (string-match "\\(.*?[аÑоÑеийÑÑ]\\)\\(.*\\)" word)
(let ((prefix (match-string-no-properties 1 word))
(suffix (match-string-no-properties 2 word)))
(if (and prefix
suffix)
(catch 'out
(dotimes (i (length suffix) word)
(let ((stem-suf (gethash (substring suffix i)
*ir-stem* nil)))
(when stem-suf
(throw 'out (concat prefix
(substring suffix 0 i)
stem-suf))))))
word))
word))
(defun ir-process-new-word (word)
"Return processed WORD."
(if (and *ir-stop*
(gethash word *ir-stop* nil))
"" ;stop words are marked as ""
(if *ir-stem*
(bg-stem word)
word)))
(defmacro ir-build-word-processor (&optional stop-p stem-p)
"Build optimized word processing function.
STOP-P determines whether stop words should be checked.
STEM-P determines whether stemming should be applied."
`(lambda (word)
,(if stop-p
`(if (gethash word *ir-stop* nil)
""
,(if stem-p
'(bg-stem word)
'word))
(if stem-p
'(bg-stem word)
'word))))
(defun ir-get-word-processor (stop-p stem-p)
"Return optimized word processing function.
STOP-P determines whether stop words should be checked.
STEM-P determines whether stemming should be applied."
(cond
((and stop-p stem-p) (ir-build-word-processor t t))
(stop-p (ir-build-word-processor t))
(stem-p (ir-build-word-processor nil t))
(t (ir-build-word-processor))))
(defun ir-process-word (word)
"Return hashed processed value for WORD.
If no such is found, process and cache."
(let ((hash-check (gethash word *ir-word-cache* nil)))
(or hash-check
(setq hash-check
(puthash word (ir-process-new-word word) *ir-word-cache*)))
(or (equal "" hash-check) hash-check))) ;if not a stop word
(defun ir-load-stop-words (file)
"Load stop-words from FILE to the global hash *ir-stop*."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(dowords word
(puthash word "1" *ir-stop*))))
;; (defun ir-load-stemmer (file) ;freezes compilation
;; "Load stem entries from FILE to the global hash *ir-stem*."
;; (with-temp-buffer
;; (insert-file-contents file)
;; (goto-char (point-min))
;; (dowords (w1 w2 w3) ;does not byte compile!
;; (when w3
;; (setq w3 (car (read-from-string w3)))
;; (and (numberp w3)
;; (>= w3 *ir-stem-level*)
;; (puthash w1 w2 *ir-stem*))))))
(defun ir-load-stemmer (file)
"Load stem entries from FILE to the global hash *ir-stem*."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(dowords w1
(let ((w2 (get-next-word))
(w3 (get-next-word)))
(when w3
(setq w3 (car (read-from-string w3)))
(and (numberp w3)
(>= w3 *ir-stem-level*)
(puthash w1 w2 *ir-stem*)))))))
(defun ir-load-auxiliary (&optional force)
"Load auxiliary files to hashes if not already done.
When FORCE is non-nil, re-fill."
(message "Loading auxiliary hashes...")
(let ((stop-dir (concat *ir-dir* "stop-words/")))
(when (and (file-exists-p stop-dir)
(or force
(null *ir-stop*)))
(setq *ir-stop* (make-hash-table :test 'equal :size 300))
(maprdir 'ir-load-stop-words stop-dir)))
(let ((stem-dir (concat *ir-dir* "stem-rules/")))
(if (file-exists-p stem-dir)
(when (and (file-exists-p stem-dir)
(or force
(null *ir-stop*)))
(setq *ir-stem* (make-hash-table :test 'equal :size 130514))
(maprdir 'ir-load-stemmer stem-dir))))
(fset 'ir-process-new-word
(ir-get-word-processor *ir-stop* *ir-stem*))
(message "Auxiliary hashes loaded."))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; File processing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-(defmacro assess-paragraph ()
- "Assess paragraph during word search.
-Beware, only usefull in `ir-lm-extract-words'."
- `(if (>= paragraph-total-count *ir-lm-min-words*)
- (push (list paragraph-start paragraph-total-count
- paragraph-words-count paragraph)
- acc)
- (setq *ir-total-count* ;if paragraph is too short, discard
- (- *ir-total-count* paragraph-total-count))
- (maphash (lambda (wrd cnt) ;and remove word counts
- (or (inc-hash-value wrd *ir-global-hash* (- cnt))
- (setq *ir-words-count* (1- *ir-words-count*))))
- paragraph)))
-
(defun ir-lm-extract-words (full-file-name &optional encoding)
"Process paragraphs of current buffer holding FULL-FILE-NAME.
Save ENCODING for further operations."
- (let* ((prev (point-min))
- (paragraph-start prev)
- (paragraph-total-count 0)
- (paragraph-words-count 0)
- (paragraph (make-hash-table :test 'equal))
- (acc (list (current-time) encoding full-file-name)))
- (goto-char prev)
- (dowords word
- (setq word (ir-process-word (downcase word)))
- (let ((curr (line-beginning-position)))
- (when (string-match "\n.*\n" ;detect just ended paragraph
- (buffer-substring-no-properties prev curr))
- (assess-paragraph)
- (setq paragraph (make-hash-table :test 'equal)
- paragraph-total-count 0
- paragraph-words-count 0
- paragraph-start curr))
- (when word
- (setq paragraph-total-count (1+ paragraph-total-count)
- *ir-total-count* (1+ *ir-total-count*))
- (when (= 1 (inc-hash-value word paragraph)) ;new paragraph word
- (setq paragraph-words-count (1+ paragraph-words-count)))
- (when (= 1 (inc-hash-value word *ir-global-hash*)) ;new global word
- (setq *ir-words-count* (1+ *ir-words-count*))))
- (setq prev curr)))
- (kill-buffer (current-buffer))
- (assess-paragraph)
- (when acc (push (nreverse acc) *ir-hashes*))))
+ (macrolet ((assess-paragraph
+ ()
+ `(if (>= paragraph-total-count *ir-lm-min-words*)
+ (push (list paragraph-start paragraph-total-count
+ paragraph-words-count paragraph)
+ acc)
+ (setq *ir-total-count* ;if paragraph is too short, discard
+ (- *ir-total-count* paragraph-total-count))
+ (maphash (lambda (wrd cnt) ;and remove word counts
+ (or (inc-hash-value wrd *ir-global-hash*
+ (- cnt))
+ (setq *ir-words-count*
+ (1- *ir-words-count*))))
+ paragraph))))
+ (let* ((prev (point-min))
+ (paragraph-start prev)
+ (paragraph-total-count 0)
+ (paragraph-words-count 0)
+ (paragraph (make-hash-table :test 'equal))
+ (acc (list (current-time) encoding full-file-name)))
+ (goto-char prev)
+ (dowords word
+ (setq word (ir-process-word (downcase word)))
+ (let ((curr (line-beginning-position)))
+ (when (string-match "\n.*\n" ;detect just ended paragraph
+ (buffer-substring-no-properties
+ prev curr))
+ (assess-paragraph)
+ (setq paragraph (make-hash-table :test 'equal)
+ paragraph-total-count 0
+ paragraph-words-count 0
+ paragraph-start curr))
+ (when word
+ (setq paragraph-total-count (1+ paragraph-total-count)
+ *ir-total-count* (1+ *ir-total-count*))
+ (when (= 1 (inc-hash-value word paragraph)) ;new paragraph word
+ (setq paragraph-words-count (1+ paragraph-words-count)))
+ (when (= 1 (inc-hash-value word *ir-global-hash*)) ;new global word
+ (setq *ir-words-count* (1+ *ir-words-count*))))
+ (setq prev curr)))
+ (kill-buffer (current-buffer))
+ (assess-paragraph)
+ (when acc (push (nreverse acc) *ir-hashes*)))))
(defun ir-remove-post (post &optional save-globals-p)
"Subtract from global words hash key-values corresponding in POST.
SAVE-GLOBALS-P determines whether global indexes shouldn't be touched."
(setq *ir-total-count* (- *ir-total-count* (cadr post)))
(maphash (lambda (key val)
(or (inc-hash-value key *ir-global-hash* (- val))
save-globals-p
(setq *ir-words-count* (1- *ir-words-count*))))
(get-ir-paragraph-hash post)))
(defun ir-remove-postings (file &optional save-globals-p)
"Clean all info for FILE in hashes.
SAVE-GLOBALS-P determines whether global indexes shouldn't be touched."
(let ((file-posts (get-ir-file-paragraphs
(find-fn (lambda (post)
(equal file (get-ir-file-name post)))
*ir-hashes*))))
(dolist (post file-posts)
(ir-remove-post post save-globals-p))
(setq *ir-hashes* (delete-fn *ir-hashes*
(lambda (file-post)
(equal file (get-ir-file-name
file-post)))))))
(defun ir-lm-process-paragraphs (file &optional encoding)
"Load FILE to temp buffer and process its words.
If ENCODING is nil, use default encoding when loading FILE."
(ir-remove-postings file)
(with-temp-buffer
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(ir-lm-extract-words file encoding)))
(defun print-posting (lst)
"Get printed representation for posting for paragraph LST."
(princ "\n" (current-buffer))
(prin1 (nconc (list (get-ir-file-name lst)
(get-ir-file-encoding lst)
(get-ir-file-time lst))
(mapcar (lambda (sublst)
(nconc
(list (get-ir-paragraph-point sublst)
(get-ir-paragraph-total-words sublst)
(get-ir-paragraph-distinct-words sublst))
(hash-to-assoc (get-ir-paragraph-hash
sublst))))
(get-ir-file-paragraphs lst)))
(current-buffer)))
(defun ir-lm-write-index (file)
"Write current index info to FILE."
(interactive
(list (read-file-name "Index file: " nil
".irlm" nil ".irlm")))
(message "Writing...")
(with-temp-file file
(prin1 (nconc (list *ir-total-count* *ir-words-count*) ;firstly write the global hash
(hash-to-assoc *ir-global-hash*))
(current-buffer))
(mapc 'print-posting *ir-hashes*)) ;write all postings
(message "Index written."))
(defun ir-lm-index (dir &optional file-types encoding append-p)
"Recursivelly process directory DIR and index all files.
FILE-TYPES determines file name patterns for indexing.
If ENCODING is nil, use default \(utf-8\) encoding for files.
If APPEND-P is non-nil, merge to the current index."
(interactive
(list
(read-directory-name "Top directory: " nil default-directory t)
(read-string "File names to be indexed: " "*.txt" nil "*.txt")
(unless (y-or-n-p "Use default encoding? ")
(read-coding-system "Choose encoding: " 'cp1251))
(when *ir-global-hash*
(y-or-n-p "Add to existing configuration? "))))
(or *ir-global-hash*
(setq append-p nil))
(or *ir-word-cache*
(setq *ir-word-cache* (make-hash-table :test 'equal)))
(unless append-p
(ir-clear)
(setq *ir-global-hash* (make-hash-table :test 'equal)))
(ir-load-auxiliary)
(message "Indexing...")
(maprdir (lambda (file) (ir-lm-process-paragraphs file encoding))
dir file-types)
(message "Files successfully indexed.")
(ir-refresh-view))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Load existing index
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-get-file-posting (post &optional inc-globals-p)
"Convert file saved POST info to actually used structures.
INC-GLOBALS-P determines whether global word counts should be adjusted."
(nconc (list (get-ir-file-name post)
(get-ir-file-encoding post)
(get-ir-file-time post))
(mapcar (lambda (subpost)
(let ((total-words (get-ir-paragraph-total-words
subpost))
(index-words (get-ir-paragraph-distinct-words
subpost)))
(when inc-globals-p
(setq *ir-total-count*
(+ *ir-total-count* total-words)))
(list (get-ir-paragraph-point subpost)
total-words index-words
(ir-assoc-to-hash (cdddr subpost) index-words
nil inc-globals-p))))
(get-ir-file-paragraphs post))))
(defun ir-lm-load-file-posting (post &optional inc-globals-p)
"Get file saved POST. If newer posting already exists, discard.
INC-GLOBALS-P determines whether global word counts should be adjusted."
(let* ((file-path (get-ir-file-name post))
(existing-file-time
(get-ir-file-time (find-fn (lambda (post)
(equal file-path
(get-ir-file-name post)))
*ir-hashes*))))
(if existing-file-time ;check if file is already in index
(if (file-exists-p file-path)
(when (time-less-p existing-file-time
(get-ir-file-time post)) ;if post is newer
(ir-remove-postings file-path (not inc-globals-p)) ;remove old posting from *ir-hashes*
(ir-lm-get-file-posting post inc-globals-p))
;discard posting and remove existing from *ir-hashes*
(ir-remove-postings file-path (not inc-globals-p)) ;housekeeping
nil)
(when (file-exists-p file-path) ;load only existing files
(ir-lm-get-file-posting post inc-globals-p)))))
(defun ir-lm-load-index-from-file (file)
"Load existing index from FILE."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(let ((not-inc-globals-p (null *ir-global-hash*)))
(when not-inc-globals-p ;need global hash from file only if current is cleared
(let ((global-hash (read-from-whole-string
(buffer-substring-no-properties
(line-beginning-position)
(line-end-position)))))
(setq *ir-total-count* (car global-hash)
*ir-words-count* (cadr global-hash))
(ir-assoc-to-hash (cddr global-hash) *ir-words-count* t)))
(let ((point-max (point-max)))
(while (and (= 0 (forward-line 1))
(< (point) point-max))
(let ((file-sexp (ir-lm-load-file-posting
(read-from-whole-string
(buffer-substring-no-properties
(line-beginning-position)
(line-end-position)))
(not not-inc-globals-p))))
(when file-sexp (push file-sexp *ir-hashes*))))))
(kill-buffer (current-buffer))))
(defun ir-lm-load-index (file &optional append-p)
"Load existing index FILE.
If APPEND-P is non-nil, keep previous index loaded as well."
(interactive
(list (read-file-name "Index file: " nil
".irlm" nil ".irlm")
(when *ir-global-hash*
(y-or-n-p
"Add to existing configuration or overwrite? "))))
(when (file-exists-p file)
(or (and *ir-global-hash*
append-p)
(ir-clear))
(ir-load-auxiliary)
(message "Loading...")
(ir-lm-load-index-from-file file)
(message "Index loaded.")
(ir-refresh-view)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Scoring
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-posting-score (hash base words &optional lambda)
"Get score from paragraph represented as HASH.
BASE is the total number of words in the paragraph.
WORDS is list of words in query.
LAMBDA is LM parameter between 0 and 1."
(or lambda
(setq lambda 0.5))
(let ((result
(apply '*
(mapcar (lambda (word)
(let ((global-count
(gethash word *ir-global-hash* 0)))
(if (> global-count 0)
(+ (* lambda
(/ (float (gethash word hash 0))
base))
(* (- 1 lambda)
(/ (float global-count)
*ir-total-count*)))
1)))
words))))
(if (= result 1) 0 result)))
(defun ir-lm-posting-min-score (words &optional lambda)
"Get minimum score possible for a paragraph.
WORDS is list of words in query.
LAMBDA is LM parameter between 0 and 1."
(or lambda
(setq lambda 0.5))
(apply '* (mapcar (lambda (word)
(let ((global-count
(gethash word *ir-global-hash* 0)))
(if (> global-count 0)
(* (- 1 lambda)
(/ (float global-count)
*ir-total-count*))
1)))
words)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Search
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-insert-post (new best cnt)
"Insert NEW post based on score into BEST array with CNT elements."
(let ((new-val (aref new 0))
(place (1+ cnt)))
(when (> new-val 0)
(while (and (>= place 1)
(> new-val (aref (aref best (1- place)) 0)))
(setq place (1- place)))
(while (> cnt place)
(aset best cnt (aref best (1- cnt)))
(setq cnt (1- cnt)))
(when (>= cnt place)
(aset best place new))))
best)
(defun ir-lm-get-best-scores (query cnt)
"For QUERY which is list of search terms find best CNT results.
Return vector of vectors with info for best paragraphs."
(let ((best (make-vector cnt [0 "" -1 nil nil]))
(min-score (ir-lm-posting-min-score query *ir-lm-lambda*)))
(dolist (file *ir-hashes*)
(let ((file-path (get-ir-file-name file)))
(when (file-exists-p file-path)
(dolist (post (get-ir-file-paragraphs file))
(let ((score
(ir-lm-posting-score (get-ir-paragraph-hash post)
(get-ir-paragraph-total-words
post)
query
*ir-lm-lambda*)))
(when (> score min-score)
(setq best
(ir-lm-insert-post
(vector score file-path
(get-ir-paragraph-point post)
(get-ir-file-encoding file))
best (1- cnt)))))))))
best))
(defun highlight-search (pos query)
"Highlight words from POS on to the end of paragraph corresponding to QUERY."
(catch 'out
(let ((prev pos))
(dowords word
(let ((curr (point)))
(when (string-match "\n.*\n" ;detect just ended paragraph
(buffer-substring-no-properties
prev curr))
(throw 'out nil))
(when (member (ir-process-word (downcase word))
query)
(delete-char (- (length word)))
(insert
(propertize word
'face '((:foreground "green")))))
(setq prev curr))))))
(defun ir-lm-jump-to-result (file pos &optional encoding query)
"Open FILE and go to particular position POS.
If ENCODING is nil, use default encoding when loading result file.
QUERY is list of current search terms."
(interactive
(let ((point (point)))
(list (get-text-property point 'file)
(get-text-property point 'point)
(get-text-property point 'encoding)
(get-text-property point 'query))))
(let ((jump-buffer (generate-new-buffer (car (nreverse
(split-string file "/"))))))
(set-buffer jump-buffer)
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(goto-char pos)
(when query ;highlight search terms
(highlight-search pos query)
(goto-char pos))
(switch-to-buffer jump-buffer)))
(defun ir-lm-insert-results (best query)
"Insert in current buffer BEST results.
QUERY is list of current search terms."
(mapc (lambda (post)
(let ((score (aref post 0))
(file (aref post 1))
(marker (aref post 2))
(encoding (aref post 3))
(preview ""))
(if (<= score 0)
(throw 'end-results nil) ;premature end of meaningful results
(insert "\n")
(insert (make-link (car (nreverse (split-string file "/")))
'ir-lm-jump-to-result file marker
t encoding query))
(insert (format " [%f]" (* score 1000000)))
(when (number-or-marker-p marker)
(with-temp-buffer
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(goto-char marker)
(setq preview
(buffer-substring-no-properties marker
(line-end-position)))
(kill-buffer (current-buffer)))
(insert "\n")
(insert (make-link preview 'ir-lm-jump-to-result
file marker nil encoding query))))))
best))
(defun ir-lm-search (query-str &optional cnt)
"For QUERY-STR find best CNT results."
(interactive
(list (read-string "Search for: " nil t) nil))
(or cnt (setq cnt *ir-max-results*))
(if (null *ir-global-hash*)
(message "No index loaded.")
(or *ir-word-cache*
(setq *ir-word-cache* (make-hash-table :test 'equal)))
(let ((results (generate-new-buffer "*Search results*")))
(set-buffer results)
(local-set-key (kbd "<M-down>")
(lambda () (interactive) (forward-line 2)))
(local-set-key (kbd "<M-up>")
(lambda () (interactive) (forward-line -2)))
(local-set-key (kbd "q") (lambda () (interactive) (kill-buffer)))
(switch-to-buffer results)
(insert "Results for: " query-str)
(catch 'end-results
(let ((query (mapcar (lambda (word)
(ir-process-word (downcase word)))
(split-string query-str))))
(ir-lm-insert-results (ir-lm-get-best-scores query cnt) query)))
(setq buffer-read-only t)
(goto-char (point-min))
(forward-line))
(ignore-errors
(kill-buffer "*Quail Completions*"))
(message (concat "Results for: " query-str))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Visualisation
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-set-keys ()
"Set key bindings in the IR buffer."
(local-set-key (kbd "i") 'ir-lm-index)
(local-set-key (kbd "l") 'ir-lm-load-index)
(local-set-key (kbd "w") 'ir-lm-write-index)
(local-set-key (kbd "f") 'ir-lm-search)
(local-set-key (kbd "c") 'ir-clear)
(local-set-key (kbd "m") 'ir-lm-change-max-results)
(local-set-key (kbd "p") 'ir-lm-change-min-words)
(local-set-key (kbd "b") 'ir-lm-change-lambda)
(local-set-key (kbd "s") 'ir-change-stem-level)
(local-set-key (kbd "q") (lambda () (interactive) (kill-buffer)))
(local-set-key (kbd "r") (lambda () (interactive) (ir-refresh-view))))
(defun ir-lm ()
"Create buffer with information and shortcuts."
(interactive)
(let ((ir-buffer (get-buffer-create "*Information retrieval*")))
(set-buffer ir-buffer)
(switch-to-buffer ir-buffer)
(insert
(propertize "Information Retrieval - Basic Mixed Language Model"
'face '((:foreground "green") (:underline t)))
"\n\nOptions:\n"
(make-link "i -> index new directory"
'ir-lm-index)
"\n"
(make-link "l -> load existing index from file"
'ir-lm-load-index)
"\n"
(make-link "w -> write current index\(es\) to file"
'ir-lm-write-index)
"\n"
(make-link "f -> search in current loaded index\(es\)"
'ir-lm-search)
"\n"
(make-link "c -> clear current index\(es\)"
'ir-clear)
"\n"
(make-link "m -> change maximum search results"
'ir-lm-change-max-results)
"\n"
(make-link "p -> change minimum number of words in paragraph"
'ir-lm-change-min-words)
"\n"
(make-link "b -> change lambda"
'ir-lm-change-lambda)
"\n"
(make-link "s -> change stemming level"
'ir-change-stem-level)
"\n"
(make-link "q -> quit \(without clearing\)"
(lambda () (interactive) (kill-buffer)))
"\n\n"
"maximum results = " (format "%d\n" *ir-max-results*)
"minimum number of words in paragraph = "
(format "%d\n" *ir-lm-min-words*)
"lambda = " (format "%f\n" *ir-lm-lambda*)
"stemming level = " (format "%d\n" *ir-stem-level*)
"total words in texts = " (format "%d\n" *ir-total-count*)
"words in index = " (format "%d\n" *ir-words-count*)
"Currently indexed files [total words]:\n")
(ir-lm-set-keys)
(ir-list-index)
(setq buffer-read-only t)
(set-buffer-modified-p nil)
(goto-char (point-min))
(forward-line 3)))
(provide 'ir-lm)
-;;; ir-lm.el ends here
\ No newline at end of file
+;;; ir-lm.el ends here
|
m00natic/ir-lm | a057f2e713c106613b39e7228a51235d35f7ec24 | minor fixes | diff --git a/README b/README
index 90a7ec2..950ed87 100644
--- a/README
+++ b/README
@@ -1,83 +1,83 @@
This is an Emacs Lisp extension realizing a simple mixed language
model for information retrieval in paragraphs of files grouped in
multiple directories.
Paragraphs are assumed to be separated by a blank line. The formula
used for sorting relevance is:
P(w|p) = lambda*P(w|Mp) + (1 - lambda)*P(w|Mc)
where Mp is probabilities model for a paragraph, Mc - probabilities
-model for the whole collection and 0 < lambda <= 1.
+model for the whole collection and 0 < lambda < 1.
It's only been tested on cvs version of GNU/Emacs 23.1.50 onwards.
Files:
ir-lm.el (elisp source)
optional:
bg-stop-words.txt, stem-rules.txt, stem-rules2.txt, stem-rules3.txt
Adding (require 'ir-lm) to .emacs allows automatic loading of the
extension (after adding to load-path). Optional files are grouped in
directory "~/.ir/" on posix systems or "C:\\ir\" on windows systems.
Stop word files should be put in subdirectory "stop-words/". Files
with stemming rules must be in subdirectory "stem-rules/". These
directories are recursively scanned so any sort of subdirectory
structures would suffice and all files will be used accordingly. Stop
word files may contain different languages but stemming rules
processing is tuned only for bulgarian at the moment (not really a
problem to make a directory for each needed language with stemming
rules and then adding some specific functions for loading these rules
and stemming for each such subdirectory).
Commands:
ir-lm-index
Creates and loads an index of all files of directory and its
subdirectories. Index is not saved on disk. The option for file
types allows multiple glob filters (separated with space) to be
applied to file names, thus indexing just specific files. The coding
option allows choosing non-default encoding for all files. The option
for adding to current index determines whether index is freshly loaded
deleting current index or merging current and new indexes thus
allowing search in multiple indexes (treated as one from that moment).
If auxiliary files (stop words, stem rules) have not been loaded -
attempts to load them.
ir-lm-write-index
Writes current index to a chosen file.
ir-lm-load-index
Loads an index from a chosen file. The option for adding to current
configuration is analogous to the option in ir-lm-index, allowing
merging of multiple indexes. Index information for files not present
in the file system (as recorded in the index) is not loaded. When
merging indexes having information for identical (according to path)
files, most recently indexed version for such files is chosen. If
auxiliary files (stop words, stem rules) have not been loaded -
attempts to load them.
ir-lm-search
Searches indexed paragraphs for words showing a line resumes in a new
buffer and links to the result paragraphs. When a result is clicked
(enter also suffices), marker is positioned upon the result paragraph
and search terms are coloured.
ir-clear
Freeing index data from memory. (there probably is a problem with the
way it's done, as Emacs keeps showing as high memory use)
ir-lm-change-lambda
Allowing modifying the lambda search parameter for this language model
(defaults to 0.5)
ir-change-stem-level
Changes stemming level (it only applies for newly indexing and when
auxiliary hashes, stop-words and stem-rules have been cleared or not
yet loaded).
ir-lm-change-max-results
-Changes maximum number of search results showed.
+Changes maximum number of search results showed (defaults to 30).
ir-lm-change-min-words
Changes the minimum number of words needed for a paragraph to be
indexed (defaults to 20) on new indexing.
ir-lm
A convenient interface for the above commands as well as links to all
files in current index.
diff --git a/ir-lm.el b/ir-lm.el
index 5ad7f61..443394b 100644
--- a/ir-lm.el
+++ b/ir-lm.el
@@ -1,990 +1,992 @@
;;; ir-lm.el --- Basic Mixed Language Model for Information Retrieval
;by Andrey Kotlarski [email protected]
;;; Commentary:
;;; History:
;; 5.VIII.2009 - Version 1.8
; Abstracting away file-postings
; structure
;; 31.VII.2009 - Version 1.7
; Generating word processing function
; on the fly, thus optimizing
; depending on whether stop words or
; stemmer are loaded
;; 18.VII.2009 - Version 1.6
; highlighting of search words
; minor bugfixes
;; 15.VII.2009 - Version 1.5
; bulgarian stemmer added
; stop-word and stemmer files
; are now stored in separate directories
; which are recursively processed
; added stemming parameter
; many corrections in merging
;; 14.VII.2009 - Version 1.4
; correctly merge postings and info
; on load or index (no duplicates,
; no loading of older than index files)
; added globs for filtering file types
;; 13.VII.2009 - Version 1.3
; remembering encoding for individual files
; prune non-existing files on load
;; 12.VII.2009 - Version 1.2
; new command `ir-lm' giving a unified
; interface of files and commands
; command to change lambda
; full cleaning of data
; minor bugfixes
;; 10.VII.2009 - Version 1.1
; added minumim possible score for query
; so that irrelevant results are discarded
; a bit of code refactoring and cleaning
;; 09.VII.2009 - Version 1.0
;;; Code:
(defconst *ir-dir*
(if (or (eq system-type 'windows-nt)
(eq system-type 'ms-dos))
"C:/ir/"
"~/.ir/")
"Directory for auxiliary files.")
;; *ir-hashes* structure is ((file-path encoding time (point-in-file total-words-in-paragraph
;; distinct-words-in-paragraph hash-of-word-counts) ...) ...)
(defvar *ir-hashes* nil "List of postings grouped in files.")
(defvar *ir-global-hash* nil "Global hash table of words and their count.")
(defvar *ir-total-count* 0 "Count of all words in index.")
(defvar *ir-words-count* 0 "Count of all distinct words in index.")
(defvar *ir-word-cache* nil "Cache of raw word -> transformation.")
(defvar *ir-stop* nil "Hash table of stop words.")
(defvar *ir-stem* nil "Hash table of stemmer.")
(defvar *ir-lm-lambda* 0.5 "Parameter in the mixed language model.")
(defvar *ir-max-results* 30 "Maximum number of search results.")
(defvar *ir-stem-level* 1 "Stemming level.")
(defvar *ir-lm-min-words* 20 "Minimal number of words in paragraph.")
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; *ir-hashes* selectors
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defmacro make-getter (getter-name arg)
"Create a macro for writing getters with name MAKE- GETTER-NAME and argument ARG."
`(defmacro ,(intern (concat "make-"
(symbol-name getter-name)))
(name &rest body)
,(concat "Create a selector for `*ir-hashes*' with name GET- NAME and BODY.
This selector has one argument with structure as `*ir-hashes*'
named `" (symbol-name arg)"'.
Do not use symbol `bla-arg' in the body.")
(let ((bla-arg ',arg))
`(defun ,(intern (concat "get-"
(symbol-name name)))
(,bla-arg)
,@body))))
;; getters for file structures
(make-getter ir-file-getter ir-file)
(make-ir-file-getter ir-file-name (car ir-file))
(make-ir-file-getter ir-file-encoding (cadr ir-file))
(make-ir-file-getter ir-file-time (caddr ir-file))
(make-ir-file-getter ir-file-paragraphs (cdddr ir-file))
;; getters for paragraph structures
(make-getter ir-paragraph-getter ir-paragraph)
(make-ir-paragraph-getter ir-paragraph-point (car ir-paragraph))
(make-ir-paragraph-getter ir-paragraph-total-words (cadr ir-paragraph))
(make-ir-paragraph-getter ir-paragraph-distinct-words
(caddr ir-paragraph))
(make-ir-paragraph-getter ir-paragraph-hash (cadddr ir-paragraph))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; visualisation and set-er commands
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun make-link (text cmd &optional file point underline-p encoding query)
"Return a TEXT propertized as a link that invokes CMD when clicked.
FILE is to be opened and cursor moved to position POINT.
UNDERLINE-P determines wether text should be underlined.
If ENCODING is nil, use default encoding when loading result file.
QUERY is list of search terms."
(let ((map (make-sparse-keymap)))
(define-key map [mouse-1] cmd)
(define-key map (kbd "RET") cmd)
(propertize text
'keymap map
- 'face (if underline-p
- '((:foreground "green") (:underline t)))
+ 'face (when underline-p
+ '((:foreground "green") (:underline t)))
'mouse-face 'highlight
'rear-nonsticky t
'read-only t
'file file
'point point
'encoding encoding
'query query)))
(defun ir-file-words (paragraphs)
"Get total count of words for file by summing count in its PARAGRAPHS."
(apply '+ (mapcar (lambda (sexp)
(cadr sexp))
paragraphs)))
(defun ir-list-index ()
"List all files currently in index."
(dolist (file *ir-hashes*)
(let ((file-path (get-ir-file-name file)))
- (if (file-exists-p file-path)
- (insert "\n" (make-link file-path 'ir-lm-jump-to-result
- file-path 1 nil
- (get-ir-file-encoding file))
- (format " [%d]"
- (ir-file-words (get-ir-file-paragraphs
- file))))))))
+ (when (file-exists-p file-path)
+ (insert "\n" (make-link file-path 'ir-lm-jump-to-result
+ file-path 1 nil
+ (get-ir-file-encoding file))
+ (format " [%d]"
+ (ir-file-words (get-ir-file-paragraphs
+ file))))))))
(defun ir-refresh-view ()
"Refresh file names in current index."
(ignore-errors
(with-current-buffer "*Information retrieval*"
(goto-char (point-min))
(forward-line 14)
(setq inhibit-read-only t)
(let ((start (point)))
(forward-line 5)
(delete-region start (line-end-position)))
(insert
"maximum results = " (format "%d\n" *ir-max-results*)
"minimum number of words in paragraph = "
(format "%d\n" *ir-lm-min-words*)
"lambda = " (format "%f\n" *ir-lm-lambda*)
"stemming level = " (format "%d\n" *ir-stem-level*)
"total words in texts = " (format "%d\n" *ir-total-count*)
"words in index = " (format "%d" *ir-words-count*))
(forward-line 2)
(delete-region (point) (point-max))
(ir-list-index)
(setq inhibit-read-only nil)
(set-buffer-modified-p nil)
(goto-char (point-min))
(forward-line 3))))
(defun ir-lm-change-lambda (new)
"Set NEW value of the `lambda' parameter."
(interactive
(list (read-number "New value for lambda (0 < lambda < 1) = ")))
- (if (and (> new 0) (< new 1))
- (progn (setq *ir-lm-lambda* new)
- (ir-refresh-view))
- (message "Incorrect value for lambda.")))
+ (if (or (<= new 0) (>= new 1))
+ (message "Incorrect value for lambda.")
+ (setq *ir-lm-lambda* new)
+ (ir-refresh-view)))
(defun ir-change-stem-level (new)
"Set NEW value of the stemming parameter."
(interactive
(list (read-number "New level for stemming (> 0) = ")))
(if (< new 1)
(message "Incorrect value for stemming.")
(setq *ir-stem-level* new)
(ir-refresh-view)
(ir-load-auxiliary t)))
(defun ir-lm-change-max-results (new)
"Set NEW value for maximum number of search results."
(interactive
(list (read-number "Maximum number of search results = ")))
(setq *ir-max-results* new)
(ir-refresh-view))
(defun ir-lm-change-min-words (new)
"Set NEW minimum number of words for paragraph."
(interactive
(list (read-number "Minumun number of words in paragraph = ")))
(setq *ir-lm-min-words* new)
(ir-refresh-view))
(defun ir-clear (&optional all)
"Clear global hashes and reset global variables.
If ALL is non-nil - ask to clear words' cache as well."
(interactive
(list t))
(setq *ir-hashes* nil
*ir-total-count* 0
*ir-words-count* 0
*ir-global-hash* nil)
(when all
- (if (and (or *ir-word-cache* *ir-stem* *ir-stop*)
- (y-or-n-p "Clear auxiliary caches as well? "))
- (setq *ir-stop* nil
- *ir-stem* nil
- *ir-word-cache* nil))
+ (and (or *ir-word-cache* *ir-stem* *ir-stop*)
+ (y-or-n-p "Clear auxiliary caches as well? ")
+ (setq *ir-stop* nil
+ *ir-stem* nil
+ *ir-word-cache* nil))
(message "Index cleared.")
(ir-refresh-view))
(garbage-collect))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; utilities
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun find-fn (fn lst)
"Return first item that satisfies FN in LST. Nil if no such."
(catch 'out
(dolist (item lst)
- (if (funcall fn item)
- (throw 'out item)))))
+ (when (funcall fn item)
+ (throw 'out item)))))
(defun delete-fn (lst fn)
"Destructively delete first element of LST for which FN is non-nil."
(if (funcall fn (car lst))
(cdr lst)
(let ((prev lst)
(curr (cdr lst)))
(catch 'out
(while curr
(if (not (funcall fn (car curr)))
(setq prev curr
curr (cdr curr))
(setcdr prev (cdr curr))
(throw 'out nil))))
lst)))
(defun get-next-word ()
"Get next word (including hyphens and carrige return) after position."
- (if (forward-word)
- (let ((word (current-word t t)))
- (while (equal (char-to-string (following-char)) "-")
- (if (forward-word)
- (setq word (concat word
- (if (equal (char-to-string
- (following-char)) "\n")
- ""
- "-")
- (current-word t t)))))
- word)))
+ (when (forward-word)
+ (let ((word (current-word t t)))
+ (while (equal (char-to-string (following-char)) "-")
+ (when (forward-word)
+ (setq word (concat word
+ (if (equal (char-to-string
+ (following-char)) "\n")
+ ""
+ "-")
+ (current-word t t)))))
+ word)))
(defmacro dowords (vars &rest body)
"Bind VARS to consecutive words and execute BODY."
(if (listp vars)
`(let ,(mapcar (lambda (var)
`(,var (get-next-word)))
vars)
(while ,(car vars)
,@body
(setq ,@(apply 'nconc
(mapcar (lambda (var)
`(,var (get-next-word)))
vars)))))
`(let ((,vars (get-next-word)))
(while ,vars
,@body
(setq ,vars (get-next-word))))))
(defun replace-regex-str (word regex str)
"In WORD replace REGEX with STR."
(mapconcat 'identity (split-string word regex) str))
(defun glob-to-regex (glob)
"Turn a GLOB to a reg-exp."
(replace-regex-str
(replace-regex-str (replace-regex-str glob "\\." "\\.")
"?" ".")
"\\*" ".*"))
(defun filter-name (file-name patterns)
"Check whether FILE-NAME is fully matched by any of the PATTERNS."
- (if patterns
- (let ((match (string-match (car patterns) file-name)))
- (if (and match
- (= 0 match))
- t
- (filter-name file-name (cdr patterns))))))
+ (when patterns
+ (let ((match (string-match (car patterns) file-name)))
+ (if (and match
+ (= 0 match))
+ t
+ (filter-name file-name (cdr patterns))))))
(defun maprdir (fn dir &optional file-types subdir-p)
"Apply FN over all files in DIR and its subdirectories.
FILE-TYPES determines file name patterns for calling FN upon.
Default is all files. If SUBDIR-P is nil,
we are in the top level directory, otherwize we are lower.
This is used when recursing, when calling, should be nil."
- (unless subdir-p ;executed only once, in top directory
- (setq file-types (mapcar 'glob-to-regex
- (split-string (or file-types
- "*") nil t))))
+ (or subdir-p ;executed only once, in top directory
+ (setq file-types (mapcar 'glob-to-regex
+ (split-string (or file-types
+ "*") nil t))))
(dolist (file (directory-files dir))
(let ((file-full (concat dir file)))
- (if (and (not (equal "." file))
- (not (equal ".." file)))
+ (or (equal "." file)
+ (equal ".." file)
(if (file-directory-p file-full)
(maprdir fn (concat file-full "/") file-types t)
- (if (filter-name file file-types)
- (funcall fn file-full)))))))
+ (when (filter-name file file-types)
+ (funcall fn file-full)))))))
(defun inc-hash-value (key h-table &optional value)
"Increment value for KEY in H-TABLE with VALUE.
If VALUE is nil, use 1.
If KEY doesn't exist, set initial value to VALUE.
If end value of KEY is <=0, remove key.
Return new val if key is added/changed, nil if key is removed."
(let* ((num (gethash key h-table 0))
(val (or value 1))
(end-val (+ num val)))
(if (> end-val 0)
(puthash key end-val h-table)
(remhash key h-table))))
(defun hash-to-assoc (h-table)
"Turn a H-TABLE to assoc-list."
(let ((a-list nil))
(maphash (lambda (key val)
(push (cons key val) a-list))
h-table)
a-list))
(defun ir-pair-to-global-hash (key value)
"Add KEY VALUE to *ir-global-hash* and adjust global count of words."
(or (gethash key *ir-global-hash* nil)
(setq *ir-words-count* (1+ *ir-words-count*)))
(inc-hash-value key *ir-global-hash* value))
(defun ir-assoc-to-hash (a-list &optional size use-global-hash-p parent-hash-p)
"Turn A-LIST to a hash-table with size SIZE.
If USE-GLOBAL-HASH-P, add to *ir-global-hash*, return nil.
If PARENT-HASH-P, create new hash and add both to it
and *ir-global-hash*, adjusting global counts,
return the newly created one."
(if (not use-global-hash-p)
(let ((h-table (make-hash-table :test 'equal :size size)))
(if parent-hash-p
(dolist (cell a-list h-table)
(let ((key (car cell))
(val (cdr cell)))
(ir-pair-to-global-hash key val)
(inc-hash-value key h-table val)))
(dolist (cell a-list h-table)
(inc-hash-value (car cell) h-table (cdr cell)))))
- (if (null *ir-global-hash*) ;else use global, return nil
+ (or *ir-global-hash* ;else use global, return nil
(setq *ir-global-hash* (make-hash-table
:test 'equal :size size)))
(if parent-hash-p
(dolist (cell a-list)
(ir-pair-to-global-hash (car cell) (cdr cell)))
(dolist (cell a-list)
(inc-hash-value (car cell) *ir-global-hash* (cdr cell))))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Word processing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun bg-stem (word)
"Return stemmed version of WORD."
(if (string-match "\\(.*?[аÑоÑеийÑÑ]\\)\\(.*\\)" word)
(let ((prefix (match-string-no-properties 1 word))
(suffix (match-string-no-properties 2 word)))
(if (and prefix
suffix)
(catch 'out
(dotimes (i (length suffix) word)
(let ((stem-suf (gethash (substring suffix i)
*ir-stem* nil)))
- (if stem-suf
- (throw 'out (concat prefix
- (substring suffix 0 i)
- stem-suf))))))
+ (when stem-suf
+ (throw 'out (concat prefix
+ (substring suffix 0 i)
+ stem-suf))))))
word))
word))
(defun ir-process-new-word (word)
"Return processed WORD."
(if (and *ir-stop*
(gethash word *ir-stop* nil))
"" ;stop words are marked as ""
(if *ir-stem*
(bg-stem word)
word)))
(defmacro ir-build-word-processor (&optional stop-p stem-p)
"Build optimized word processing function.
STOP-P determines whether stop words should be checked.
STEM-P determines whether stemming should be applied."
`(lambda (word)
,(if stop-p
`(if (gethash word *ir-stop* nil)
""
,(if stem-p
'(bg-stem word)
'word))
(if stem-p
'(bg-stem word)
'word))))
(defun ir-get-word-processor (stop-p stem-p)
"Return optimized word processing function.
STOP-P determines whether stop words should be checked.
STEM-P determines whether stemming should be applied."
(cond
((and stop-p stem-p) (ir-build-word-processor t t))
(stop-p (ir-build-word-processor t))
(stem-p (ir-build-word-processor nil t))
(t (ir-build-word-processor))))
(defun ir-process-word (word)
"Return hashed processed value for WORD.
If no such is found, process and cache."
(let ((hash-check (gethash word *ir-word-cache* nil)))
(or hash-check
(setq hash-check
(puthash word (ir-process-new-word word) *ir-word-cache*)))
- (if (not (equal "" hash-check)) hash-check))) ;if not a stop word
+ (or (equal "" hash-check) hash-check))) ;if not a stop word
(defun ir-load-stop-words (file)
"Load stop-words from FILE to the global hash *ir-stop*."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(dowords word
(puthash word "1" *ir-stop*))))
;; (defun ir-load-stemmer (file) ;freezes compilation
;; "Load stem entries from FILE to the global hash *ir-stem*."
;; (with-temp-buffer
;; (insert-file-contents file)
;; (goto-char (point-min))
;; (dowords (w1 w2 w3) ;does not byte compile!
;; (when w3
;; (setq w3 (car (read-from-string w3)))
-;; (if (and (numberp w3)
-;; (>= w3 *ir-stem-level*))
-;; (puthash w1 w2 *ir-stem*))))))
+;; (and (numberp w3)
+;; (>= w3 *ir-stem-level*)
+;; (puthash w1 w2 *ir-stem*))))))
(defun ir-load-stemmer (file)
"Load stem entries from FILE to the global hash *ir-stem*."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(dowords w1
(let ((w2 (get-next-word))
(w3 (get-next-word)))
(when w3
(setq w3 (car (read-from-string w3)))
- (if (and (numberp w3)
- (>= w3 *ir-stem-level*))
- (puthash w1 w2 *ir-stem*)))))))
+ (and (numberp w3)
+ (>= w3 *ir-stem-level*)
+ (puthash w1 w2 *ir-stem*)))))))
(defun ir-load-auxiliary (&optional force)
"Load auxiliary files to hashes if not already done.
When FORCE is non-nil, re-fill."
(message "Loading auxiliary hashes...")
(let ((stop-dir (concat *ir-dir* "stop-words/")))
- (if (file-exists-p stop-dir)
- (when (or force
- (null *ir-stop*))
- (setq *ir-stop* (make-hash-table :test 'equal :size 300))
- (maprdir 'ir-load-stop-words stop-dir))))
+ (when (and (file-exists-p stop-dir)
+ (or force
+ (null *ir-stop*)))
+ (setq *ir-stop* (make-hash-table :test 'equal :size 300))
+ (maprdir 'ir-load-stop-words stop-dir)))
(let ((stem-dir (concat *ir-dir* "stem-rules/")))
(if (file-exists-p stem-dir)
- (when (or force
- (null *ir-stem*))
+ (when (and (file-exists-p stem-dir)
+ (or force
+ (null *ir-stop*)))
(setq *ir-stem* (make-hash-table :test 'equal :size 130514))
(maprdir 'ir-load-stemmer stem-dir))))
(fset 'ir-process-new-word
(ir-get-word-processor *ir-stop* *ir-stem*))
(message "Auxiliary hashes loaded."))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; File processing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defmacro assess-paragraph ()
"Assess paragraph during word search.
Beware, only usefull in `ir-lm-extract-words'."
`(if (>= paragraph-total-count *ir-lm-min-words*)
(push (list paragraph-start paragraph-total-count
paragraph-words-count paragraph)
acc)
(setq *ir-total-count* ;if paragraph is too short, discard
(- *ir-total-count* paragraph-total-count))
(maphash (lambda (wrd cnt) ;and remove word counts
- (if (not (inc-hash-value wrd *ir-global-hash* (- cnt)))
+ (or (inc-hash-value wrd *ir-global-hash* (- cnt))
(setq *ir-words-count* (1- *ir-words-count*))))
paragraph)))
(defun ir-lm-extract-words (full-file-name &optional encoding)
"Process paragraphs of current buffer holding FULL-FILE-NAME.
Save ENCODING for further operations."
(let* ((prev (point-min))
(paragraph-start prev)
(paragraph-total-count 0)
(paragraph-words-count 0)
(paragraph (make-hash-table :test 'equal))
(acc (list (current-time) encoding full-file-name)))
(goto-char prev)
(dowords word
(setq word (ir-process-word (downcase word)))
(let ((curr (line-beginning-position)))
(when (string-match "\n.*\n" ;detect just ended paragraph
(buffer-substring-no-properties prev curr))
(assess-paragraph)
(setq paragraph (make-hash-table :test 'equal)
paragraph-total-count 0
paragraph-words-count 0
paragraph-start curr))
(when word
(setq paragraph-total-count (1+ paragraph-total-count)
*ir-total-count* (1+ *ir-total-count*))
- (if (= 1 (inc-hash-value word paragraph)) ;new paragraph word
- (setq paragraph-words-count (1+ paragraph-words-count)))
- (if (= 1 (inc-hash-value word *ir-global-hash*)) ;new global word
- (setq *ir-words-count* (1+ *ir-words-count*))))
+ (when (= 1 (inc-hash-value word paragraph)) ;new paragraph word
+ (setq paragraph-words-count (1+ paragraph-words-count)))
+ (when (= 1 (inc-hash-value word *ir-global-hash*)) ;new global word
+ (setq *ir-words-count* (1+ *ir-words-count*))))
(setq prev curr)))
(kill-buffer (current-buffer))
(assess-paragraph)
- (if acc (push (nreverse acc) *ir-hashes*))))
+ (when acc (push (nreverse acc) *ir-hashes*))))
(defun ir-remove-post (post &optional save-globals-p)
"Subtract from global words hash key-values corresponding in POST.
SAVE-GLOBALS-P determines whether global indexes shouldn't be touched."
(setq *ir-total-count* (- *ir-total-count* (cadr post)))
(maphash (lambda (key val)
- (if (and (not (inc-hash-value key *ir-global-hash* (- val)))
- (not save-globals-p))
+ (or (inc-hash-value key *ir-global-hash* (- val))
+ save-globals-p
(setq *ir-words-count* (1- *ir-words-count*))))
(get-ir-paragraph-hash post)))
(defun ir-remove-postings (file &optional save-globals-p)
"Clean all info for FILE in hashes.
SAVE-GLOBALS-P determines whether global indexes shouldn't be touched."
(let ((file-posts (get-ir-file-paragraphs
(find-fn (lambda (post)
(equal file (get-ir-file-name post)))
*ir-hashes*))))
(dolist (post file-posts)
(ir-remove-post post save-globals-p))
(setq *ir-hashes* (delete-fn *ir-hashes*
(lambda (file-post)
(equal file (get-ir-file-name
file-post)))))))
(defun ir-lm-process-paragraphs (file &optional encoding)
"Load FILE to temp buffer and process its words.
If ENCODING is nil, use default encoding when loading FILE."
(ir-remove-postings file)
(with-temp-buffer
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(ir-lm-extract-words file encoding)))
(defun print-posting (lst)
"Get printed representation for posting for paragraph LST."
(princ "\n" (current-buffer))
(prin1 (nconc (list (get-ir-file-name lst)
(get-ir-file-encoding lst)
(get-ir-file-time lst))
(mapcar (lambda (sublst)
(nconc
(list (get-ir-paragraph-point sublst)
(get-ir-paragraph-total-words sublst)
(get-ir-paragraph-distinct-words sublst))
(hash-to-assoc (get-ir-paragraph-hash
sublst))))
(get-ir-file-paragraphs lst)))
(current-buffer)))
(defun ir-lm-write-index (file)
"Write current index info to FILE."
(interactive
(list (read-file-name "Index file: " nil
".irlm" nil ".irlm")))
(message "Writing...")
(with-temp-file file
(prin1 (nconc (list *ir-total-count* *ir-words-count*) ;firstly write the global hash
(hash-to-assoc *ir-global-hash*))
(current-buffer))
(mapc 'print-posting *ir-hashes*)) ;write all postings
(message "Index written."))
(defun ir-lm-index (dir &optional file-types encoding append-p)
"Recursivelly process directory DIR and index all files.
FILE-TYPES determines file name patterns for indexing.
If ENCODING is nil, use default \(utf-8\) encoding for files.
If APPEND-P is non-nil, merge to the current index."
(interactive
(list
(read-directory-name "Top directory: " nil default-directory t)
(read-string "File names to be indexed: " "*.txt" nil "*.txt")
- (if (not (y-or-n-p "Use default encoding? "))
- (read-coding-system "Choose encoding: " 'cp1251))
- (if *ir-global-hash*
- (y-or-n-p "Add to existing configuration? "))))
+ (unless (y-or-n-p "Use default encoding? ")
+ (read-coding-system "Choose encoding: " 'cp1251))
+ (when *ir-global-hash*
+ (y-or-n-p "Add to existing configuration? "))))
(or *ir-global-hash*
(setq append-p nil))
(or *ir-word-cache*
(setq *ir-word-cache* (make-hash-table :test 'equal)))
(unless append-p
(ir-clear)
(setq *ir-global-hash* (make-hash-table :test 'equal)))
(ir-load-auxiliary)
(message "Indexing...")
(maprdir (lambda (file) (ir-lm-process-paragraphs file encoding))
dir file-types)
(message "Files successfully indexed.")
(ir-refresh-view))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Load existing index
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-get-file-posting (post &optional inc-globals-p)
"Convert file saved POST info to actually used structures.
INC-GLOBALS-P determines whether global word counts should be adjusted."
(nconc (list (get-ir-file-name post)
(get-ir-file-encoding post)
(get-ir-file-time post))
(mapcar (lambda (subpost)
(let ((total-words (get-ir-paragraph-total-words
subpost))
(index-words (get-ir-paragraph-distinct-words
subpost)))
- (if inc-globals-p
- (setq *ir-total-count*
- (+ *ir-total-count* total-words)))
+ (when inc-globals-p
+ (setq *ir-total-count*
+ (+ *ir-total-count* total-words)))
(list (get-ir-paragraph-point subpost)
total-words index-words
(ir-assoc-to-hash (cdddr subpost) index-words
nil inc-globals-p))))
(get-ir-file-paragraphs post))))
(defun ir-lm-load-file-posting (post &optional inc-globals-p)
"Get file saved POST. If newer posting already exists, discard.
INC-GLOBALS-P determines whether global word counts should be adjusted."
(let* ((file-path (get-ir-file-name post))
(existing-file-time
(get-ir-file-time (find-fn (lambda (post)
(equal file-path
(get-ir-file-name post)))
*ir-hashes*))))
(if existing-file-time ;check if file is already in index
(if (file-exists-p file-path)
(when (time-less-p existing-file-time
(get-ir-file-time post)) ;if post is newer
(ir-remove-postings file-path (not inc-globals-p)) ;remove old posting from *ir-hashes*
(ir-lm-get-file-posting post inc-globals-p))
;discard posting and remove existing from *ir-hashes*
(ir-remove-postings file-path (not inc-globals-p)) ;housekeeping
nil)
- (if (file-exists-p file-path) ;load only existing files
- (ir-lm-get-file-posting post inc-globals-p)))))
+ (when (file-exists-p file-path) ;load only existing files
+ (ir-lm-get-file-posting post inc-globals-p)))))
(defun ir-lm-load-index-from-file (file)
"Load existing index from FILE."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(let ((not-inc-globals-p (null *ir-global-hash*)))
- (if not-inc-globals-p ;need global hash from file only if current is cleared
- (let ((global-hash (read-from-whole-string
- (buffer-substring-no-properties
- (line-beginning-position)
- (line-end-position)))))
- (setq *ir-total-count* (car global-hash)
- *ir-words-count* (cadr global-hash))
- (ir-assoc-to-hash (cddr global-hash) *ir-words-count* t)))
+ (when not-inc-globals-p ;need global hash from file only if current is cleared
+ (let ((global-hash (read-from-whole-string
+ (buffer-substring-no-properties
+ (line-beginning-position)
+ (line-end-position)))))
+ (setq *ir-total-count* (car global-hash)
+ *ir-words-count* (cadr global-hash))
+ (ir-assoc-to-hash (cddr global-hash) *ir-words-count* t)))
(let ((point-max (point-max)))
(while (and (= 0 (forward-line 1))
(< (point) point-max))
(let ((file-sexp (ir-lm-load-file-posting
(read-from-whole-string
(buffer-substring-no-properties
(line-beginning-position)
(line-end-position)))
(not not-inc-globals-p))))
- (if file-sexp (push file-sexp *ir-hashes*))))))
+ (when file-sexp (push file-sexp *ir-hashes*))))))
(kill-buffer (current-buffer))))
(defun ir-lm-load-index (file &optional append-p)
"Load existing index FILE.
If APPEND-P is non-nil, keep previous index loaded as well."
(interactive
(list (read-file-name "Index file: " nil
".irlm" nil ".irlm")
- (if *ir-global-hash*
- (y-or-n-p
- "Add to existing configuration or overwrite? "))))
+ (when *ir-global-hash*
+ (y-or-n-p
+ "Add to existing configuration or overwrite? "))))
(when (file-exists-p file)
- (if (not (and *ir-global-hash*
- append-p))
+ (or (and *ir-global-hash*
+ append-p)
(ir-clear))
(ir-load-auxiliary)
(message "Loading...")
(ir-lm-load-index-from-file file)
(message "Index loaded.")
(ir-refresh-view)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Scoring
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-posting-score (hash base words &optional lambda)
"Get score from paragraph represented as HASH.
BASE is the total number of words in the paragraph.
WORDS is list of words in query.
LAMBDA is LM parameter between 0 and 1."
(or lambda
(setq lambda 0.5))
(let ((result
(apply '*
(mapcar (lambda (word)
(let ((global-count
(gethash word *ir-global-hash* 0)))
(if (> global-count 0)
(+ (* lambda
(/ (float (gethash word hash 0))
base))
(* (- 1 lambda)
(/ (float global-count)
*ir-total-count*)))
1)))
words))))
(if (= result 1) 0 result)))
(defun ir-lm-posting-min-score (words &optional lambda)
"Get minimum score possible for a paragraph.
WORDS is list of words in query.
LAMBDA is LM parameter between 0 and 1."
(or lambda
(setq lambda 0.5))
(apply '* (mapcar (lambda (word)
(let ((global-count
(gethash word *ir-global-hash* 0)))
(if (> global-count 0)
(* (- 1 lambda)
(/ (float global-count)
*ir-total-count*))
1)))
words)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Search
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-insert-post (new best cnt)
"Insert NEW post based on score into BEST array with CNT elements."
(let ((new-val (aref new 0))
(place (1+ cnt)))
(when (> new-val 0)
(while (and (>= place 1)
(> new-val (aref (aref best (1- place)) 0)))
(setq place (1- place)))
(while (> cnt place)
(aset best cnt (aref best (1- cnt)))
(setq cnt (1- cnt)))
- (if (>= cnt place)
- (aset best place new))))
+ (when (>= cnt place)
+ (aset best place new))))
best)
(defun ir-lm-get-best-scores (query cnt)
"For QUERY which is list of search terms find best CNT results.
Return vector of vectors with info for best paragraphs."
(let ((best (make-vector cnt [0 "" -1 nil nil]))
(min-score (ir-lm-posting-min-score query *ir-lm-lambda*)))
(dolist (file *ir-hashes*)
(let ((file-path (get-ir-file-name file)))
- (if (file-exists-p file-path)
- (dolist (post (get-ir-file-paragraphs file))
- (let ((score
- (ir-lm-posting-score (get-ir-paragraph-hash post)
- (get-ir-paragraph-total-words
- post)
- query
- *ir-lm-lambda*)))
- (if (> score min-score)
- (setq best
- (ir-lm-insert-post
- (vector score file-path
- (get-ir-paragraph-point post)
- (get-ir-file-encoding file))
- best (1- cnt)))))))))
+ (when (file-exists-p file-path)
+ (dolist (post (get-ir-file-paragraphs file))
+ (let ((score
+ (ir-lm-posting-score (get-ir-paragraph-hash post)
+ (get-ir-paragraph-total-words
+ post)
+ query
+ *ir-lm-lambda*)))
+ (when (> score min-score)
+ (setq best
+ (ir-lm-insert-post
+ (vector score file-path
+ (get-ir-paragraph-point post)
+ (get-ir-file-encoding file))
+ best (1- cnt)))))))))
best))
(defun highlight-search (pos query)
"Highlight words from POS on to the end of paragraph corresponding to QUERY."
(catch 'out
(let ((prev pos))
(dowords word
(let ((curr (point)))
- (if (string-match "\n.*\n" ;detect just ended paragraph
- (buffer-substring-no-properties
- prev curr))
- (throw 'out nil))
+ (when (string-match "\n.*\n" ;detect just ended paragraph
+ (buffer-substring-no-properties
+ prev curr))
+ (throw 'out nil))
(when (member (ir-process-word (downcase word))
query)
(delete-char (- (length word)))
(insert
(propertize word
'face '((:foreground "green")))))
(setq prev curr))))))
(defun ir-lm-jump-to-result (file pos &optional encoding query)
  "Open FILE and go to particular position POS.
If ENCODING is nil, use default encoding when loading result file.
QUERY is list of current search terms."
  (interactive
   (let ((point (point)))
     (list (get-text-property point 'file)
           (get-text-property point 'point)
           (get-text-property point 'encoding)
           (get-text-property point 'query))))
  (let* ((short-name (car (last (split-string file "/"))))
         (target (generate-new-buffer short-name)))
    (set-buffer target)
    ;; honour the encoding remembered for this file at index time
    (let ((coding-system-for-read encoding))
      (insert-file-contents file))
    (goto-char pos)
    (when query ;highlight search terms
      (highlight-search pos query)
      (goto-char pos)) ;highlighting moves point; restore it
    (switch-to-buffer target)))
(defun ir-lm-insert-results (best query)
"Insert in current buffer BEST results.
QUERY is list of current search terms."
(mapc (lambda (post)
- (let ((file (aref post 1))
- (score (aref post 0))
+ (let ((score (aref post 0))
+ (file (aref post 1))
(marker (aref post 2))
(encoding (aref post 3))
(preview ""))
(if (<= score 0)
(throw 'end-results nil) ;premature end of meaningful results
(insert "\n")
(insert (make-link (car (nreverse (split-string file "/")))
'ir-lm-jump-to-result file marker
t encoding query))
(insert (format " [%f]" (* score 1000000)))
(when (number-or-marker-p marker)
(with-temp-buffer
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(goto-char marker)
(setq preview
(buffer-substring-no-properties marker
(line-end-position)))
(kill-buffer (current-buffer)))
(insert "\n")
(insert (make-link preview 'ir-lm-jump-to-result
file marker nil encoding query))))))
best))
(defun ir-lm-search (query-str &optional cnt)
  "For QUERY-STR find best CNT results.
When CNT is nil, `*ir-max-results*' is used.
Results are listed in a dedicated read-only buffer; does nothing
useful unless an index is loaded in `*ir-global-hash*'."
  (interactive
   (list (read-string "Search for: " nil t) nil))
  (or cnt (setq cnt *ir-max-results*))
  (if (null *ir-global-hash*)
      (message "No index loaded.")
    (or *ir-word-cache*
	(setq *ir-word-cache* (make-hash-table :test 'equal)))
    (let ((results (generate-new-buffer "*Search results*")))
      (set-buffer results)
      ;; convenience navigation: results occupy two lines each
      (local-set-key (kbd "<M-down>")
		     (lambda () (interactive) (forward-line 2)))
      (local-set-key (kbd "<M-up>")
		     (lambda () (interactive) (forward-line -2)))
      (local-set-key (kbd "q") (lambda () (interactive) (kill-buffer)))
      (switch-to-buffer results)
      (insert "Results for: " query-str)
      ;; `ir-lm-insert-results' throws 'end-results on the first
      ;; zero-score post to cut off meaningless tail results
      (catch 'end-results
	(let ((query (mapcar (lambda (word)
			       (ir-process-word (downcase word)))
			     (split-string query-str))))
	  (ir-lm-insert-results (ir-lm-get-best-scores query cnt) query)))
      (setq buffer-read-only t)
      (goto-char (point-min))
      (forward-line))
    (ignore-errors
      (kill-buffer "*Quail Completions*"))
    ;; bug fix: pass user text through an explicit %s spec, otherwise a
    ;; `%' in the query is misread by `message' as a format directive
    (message "Results for: %s" query-str)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Visualisation
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-set-keys ()
  "Set key bindings in the IR buffer."
  ;; table-driven bindings for the plain command keys
  (dolist (binding '(("i" . ir-lm-index)
                     ("l" . ir-lm-load-index)
                     ("w" . ir-lm-write-index)
                     ("f" . ir-lm-search)
                     ("c" . ir-clear)
                     ("m" . ir-lm-change-max-results)
                     ("p" . ir-lm-change-min-words)
                     ("b" . ir-lm-change-lambda)
                     ("s" . ir-change-stem-level)))
    (local-set-key (kbd (car binding)) (cdr binding)))
  ;; these two need anonymous commands, so bind them separately
  (local-set-key (kbd "q") (lambda () (interactive) (kill-buffer)))
  (local-set-key (kbd "r") (lambda () (interactive) (ir-refresh-view))))
(defun ir-lm ()
"Create buffer with information and shortcuts."
(interactive)
(let ((ir-buffer (get-buffer-create "*Information retrieval*")))
(set-buffer ir-buffer)
(switch-to-buffer ir-buffer)
- (insert (propertize "Information Retrieval - Basic Mixed Language Model"
- 'face '((:foreground "green") (:underline t)))
- "\n\nOptions:\n"
- (make-link "i -> index new directory"
- 'ir-lm-index)
- "\n"
- (make-link "l -> load existing index from file"
- 'ir-lm-load-index)
- "\n"
- (make-link "w -> write current index\(es\) to file"
- 'ir-lm-write-index)
- "\n"
- (make-link "f -> search in current loaded index\(es\)"
- 'ir-lm-search)
- "\n"
- (make-link "c -> clear current index\(es\)"
- 'ir-clear)
- "\n"
- (make-link "m -> change maximum search results"
- 'ir-lm-change-max-results)
- "\n"
- (make-link "p -> change minimum number of words in paragraph"
- 'ir-lm-change-min-words)
- "\n"
- (make-link "b -> change lambda"
- 'ir-lm-change-lambda)
- "\n"
- (make-link "s -> change stemming level"
- 'ir-change-stem-level)
- "\n"
- (make-link "q -> quit \(without clearing\)"
- (lambda () (interactive) (kill-buffer)))
- "\n\n"
- "maximum results = " (format "%d\n" *ir-max-results*)
- "minimum number of words in paragraph = "
- (format "%d\n" *ir-lm-min-words*)
- "lambda = " (format "%f\n" *ir-lm-lambda*)
- "stemming level = " (format "%d\n" *ir-stem-level*)
- "total words in texts = " (format "%d\n" *ir-total-count*)
- "words in index = " (format "%d\n" *ir-words-count*)
- "Currently indexed files [total words]:\n")
+ (insert
+ (propertize "Information Retrieval - Basic Mixed Language Model"
+ 'face '((:foreground "green") (:underline t)))
+ "\n\nOptions:\n"
+ (make-link "i -> index new directory"
+ 'ir-lm-index)
+ "\n"
+ (make-link "l -> load existing index from file"
+ 'ir-lm-load-index)
+ "\n"
+ (make-link "w -> write current index\(es\) to file"
+ 'ir-lm-write-index)
+ "\n"
+ (make-link "f -> search in current loaded index\(es\)"
+ 'ir-lm-search)
+ "\n"
+ (make-link "c -> clear current index\(es\)"
+ 'ir-clear)
+ "\n"
+ (make-link "m -> change maximum search results"
+ 'ir-lm-change-max-results)
+ "\n"
+ (make-link "p -> change minimum number of words in paragraph"
+ 'ir-lm-change-min-words)
+ "\n"
+ (make-link "b -> change lambda"
+ 'ir-lm-change-lambda)
+ "\n"
+ (make-link "s -> change stemming level"
+ 'ir-change-stem-level)
+ "\n"
+ (make-link "q -> quit \(without clearing\)"
+ (lambda () (interactive) (kill-buffer)))
+ "\n\n"
+ "maximum results = " (format "%d\n" *ir-max-results*)
+ "minimum number of words in paragraph = "
+ (format "%d\n" *ir-lm-min-words*)
+ "lambda = " (format "%f\n" *ir-lm-lambda*)
+ "stemming level = " (format "%d\n" *ir-stem-level*)
+ "total words in texts = " (format "%d\n" *ir-total-count*)
+ "words in index = " (format "%d\n" *ir-words-count*)
+ "Currently indexed files [total words]:\n")
(ir-lm-set-keys)
(ir-list-index)
(setq buffer-read-only t)
(set-buffer-modified-p nil)
(goto-char (point-min))
(forward-line 3)))
(provide 'ir-lm)
;;; ir-lm.el ends here
\ No newline at end of file
|
m00natic/ir-lm | 0ea8b2a59578746c06da3273b182b221e30fdbd9 | Abstracting away file-postings structure | diff --git a/ir-lm.el b/ir-lm.el
index 1ece415..5ad7f61 100644
--- a/ir-lm.el
+++ b/ir-lm.el
@@ -1,926 +1,990 @@
;;; ir-lm.el --- Basic Mixed Language Model for Information Retrieval
;by Andrey Kotlarski [email protected]
;;; Commentary:
;;; History:
+;; 5.VIII.2009 - Version 1.8
+ ; Abstracting away file-postings
+ ; structure
;; 31.VII.2009 - Version 1.7
; Generating word processing function
; on the fly, thus optimizing
; depending on whether stop words or
; stemmer are loaded
;; 18.VII.2009 - Version 1.6
; highlighting of search words
; minor bugfixes
;; 15.VII.2009 - Version 1.5
; bulgarian stemmer added
; stop-word and stemmer files
; are now stored in separate directories
; which are recursively processed
; added stemming parameter
; many corrections in merging
;; 14.VII.2009 - Version 1.4
; correctly merge postings and info
; on load or index (no duplicates,
; no loading of older than index files)
; added globs for filtering file types
;; 13.VII.2009 - Version 1.3
; remembering encoding for individual files
; prune non-existing files on load
;; 12.VII.2009 - Version 1.2
; new command `ir-lm' giving a unified
; interface of files and commands
; command to change lambda
; full cleaning of data
; minor bugfixes
;; 10.VII.2009 - Version 1.1
; added minumim possible score for query
; so that irrelevant results are discarded
; a bit of code refactoring and cleaning
;; 09.VII.2009 - Version 1.0
;;; Code:
(defconst *ir-dir*
(if (or (eq system-type 'windows-nt)
(eq system-type 'ms-dos))
"C:/ir/"
"~/.ir/")
"Directory for auxiliary files.")
;; *ir-hashes* structure is ((file-path encoding time (point-in-file total-words-in-paragraph
;; distinct-words-in-paragraph hash-of-word-counts) ...) ...)
(defvar *ir-hashes* nil "List of postings grouped in files.")
(defvar *ir-global-hash* nil "Global hash table of words and their count.")
(defvar *ir-total-count* 0 "Count of all words in index.")
(defvar *ir-words-count* 0 "Count of all distinct words in index.")
(defvar *ir-word-cache* nil "Cache of raw word -> transformation.")
(defvar *ir-stop* nil "Hash table of stop words.")
(defvar *ir-stem* nil "Hash table of stemmer.")
(defvar *ir-lm-lambda* 0.5 "Parameter in the mixed language model.")
(defvar *ir-max-results* 30 "Maximum number of search results.")
(defvar *ir-stem-level* 1 "Stemming level.")
(defvar *ir-lm-min-words* 20 "Minimal number of words in paragraph.")
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;; *ir-hashes* selectors
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(defmacro make-getter (getter-name arg)
+ "Create a macro for writing getters with name MAKE- GETTER-NAME and argument ARG."
+ `(defmacro ,(intern (concat "make-"
+ (symbol-name getter-name)))
+ (name &rest body)
+ ,(concat "Create a selector for `*ir-hashes*' with name GET- NAME and BODY.
+This selector has one argument with structure as `*ir-hashes*'
+named `" (symbol-name arg)"'.
+Do not use symbol `bla-arg' in the body.")
+ (let ((bla-arg ',arg))
+ `(defun ,(intern (concat "get-"
+ (symbol-name name)))
+ (,bla-arg)
+ ,@body))))
+
+;; getters for file structures
+(make-getter ir-file-getter ir-file)
+
+(make-ir-file-getter ir-file-name (car ir-file))
+(make-ir-file-getter ir-file-encoding (cadr ir-file))
+(make-ir-file-getter ir-file-time (caddr ir-file))
+(make-ir-file-getter ir-file-paragraphs (cdddr ir-file))
+
+;; getters for paragraph structures
+(make-getter ir-paragraph-getter ir-paragraph)
+
+(make-ir-paragraph-getter ir-paragraph-point (car ir-paragraph))
+(make-ir-paragraph-getter ir-paragraph-total-words (cadr ir-paragraph))
+(make-ir-paragraph-getter ir-paragraph-distinct-words
+ (caddr ir-paragraph))
+(make-ir-paragraph-getter ir-paragraph-hash (cadddr ir-paragraph))
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;;; visualisation and set-er commands
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
(defun make-link (text cmd &optional file point underline-p encoding query)
"Return a TEXT propertized as a link that invokes CMD when clicked.
FILE is to be opened and cursor moved to position POINT.
UNDERLINE-P determines wether text should be underlined.
If ENCODING is nil, use default encoding when loading result file.
QUERY is list of search terms."
(let ((map (make-sparse-keymap)))
(define-key map [mouse-1] cmd)
(define-key map (kbd "RET") cmd)
(propertize text
'keymap map
'face (if underline-p
'((:foreground "green") (:underline t)))
'mouse-face 'highlight
'rear-nonsticky t
'read-only t
'file file
'point point
'encoding encoding
'query query)))
(defun ir-file-words (paragraphs)
"Get total count of words for file by summing count in its PARAGRAPHS."
(apply '+ (mapcar (lambda (sexp)
(cadr sexp))
paragraphs)))
(defun ir-list-index ()
"List all files currently in index."
(dolist (file *ir-hashes*)
- (let ((file-path (car file)))
+ (let ((file-path (get-ir-file-name file)))
(if (file-exists-p file-path)
(insert "\n" (make-link file-path 'ir-lm-jump-to-result
- file-path 1 nil (cadr file))
- (format " [%d]" (ir-file-words (cdddr file))))))))
+ file-path 1 nil
+ (get-ir-file-encoding file))
+ (format " [%d]"
+ (ir-file-words (get-ir-file-paragraphs
+ file))))))))
(defun ir-refresh-view ()
"Refresh file names in current index."
(ignore-errors
(with-current-buffer "*Information retrieval*"
(goto-char (point-min))
(forward-line 14)
(setq inhibit-read-only t)
(let ((start (point)))
(forward-line 5)
(delete-region start (line-end-position)))
(insert
"maximum results = " (format "%d\n" *ir-max-results*)
"minimum number of words in paragraph = "
(format "%d\n" *ir-lm-min-words*)
"lambda = " (format "%f\n" *ir-lm-lambda*)
"stemming level = " (format "%d\n" *ir-stem-level*)
"total words in texts = " (format "%d\n" *ir-total-count*)
"words in index = " (format "%d" *ir-words-count*))
(forward-line 2)
(delete-region (point) (point-max))
(ir-list-index)
(setq inhibit-read-only nil)
(set-buffer-modified-p nil)
(goto-char (point-min))
(forward-line 3))))
(defun ir-lm-change-lambda (new)
"Set NEW value of the `lambda' parameter."
(interactive
(list (read-number "New value for lambda (0 < lambda < 1) = ")))
(if (and (> new 0) (< new 1))
(progn (setq *ir-lm-lambda* new)
(ir-refresh-view))
(message "Incorrect value for lambda.")))
(defun ir-change-stem-level (new)
"Set NEW value of the stemming parameter."
(interactive
(list (read-number "New level for stemming (> 0) = ")))
(if (< new 1)
(message "Incorrect value for stemming.")
(setq *ir-stem-level* new)
(ir-refresh-view)
(ir-load-auxiliary t)))
(defun ir-lm-change-max-results (new)
"Set NEW value for maximum number of search results."
(interactive
(list (read-number "Maximum number of search results = ")))
(setq *ir-max-results* new)
(ir-refresh-view))
(defun ir-lm-change-min-words (new)
"Set NEW minimum number of words for paragraph."
(interactive
(list (read-number "Minumun number of words in paragraph = ")))
(setq *ir-lm-min-words* new)
(ir-refresh-view))
(defun ir-clear (&optional all)
"Clear global hashes and reset global variables.
If ALL is non-nil - ask to clear words' cache as well."
(interactive
(list t))
(setq *ir-hashes* nil
*ir-total-count* 0
*ir-words-count* 0
*ir-global-hash* nil)
(when all
(if (and (or *ir-word-cache* *ir-stem* *ir-stop*)
(y-or-n-p "Clear auxiliary caches as well? "))
(setq *ir-stop* nil
*ir-stem* nil
*ir-word-cache* nil))
(message "Index cleared.")
(ir-refresh-view))
(garbage-collect))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; utilities
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun find-fn (fn lst)
"Return first item that satisfies FN in LST. Nil if no such."
(catch 'out
(dolist (item lst)
(if (funcall fn item)
(throw 'out item)))))
(defun delete-fn (lst fn)
"Destructively delete first element of LST for which FN is non-nil."
(if (funcall fn (car lst))
(cdr lst)
(let ((prev lst)
(curr (cdr lst)))
(catch 'out
(while curr
(if (not (funcall fn (car curr)))
(setq prev curr
curr (cdr curr))
(setcdr prev (cdr curr))
(throw 'out nil))))
lst)))
(defun get-next-word ()
"Get next word (including hyphens and carrige return) after position."
(if (forward-word)
(let ((word (current-word t t)))
(while (equal (char-to-string (following-char)) "-")
(if (forward-word)
(setq word (concat word
(if (equal (char-to-string
(following-char)) "\n")
""
"-")
(current-word t t)))))
word)))
(defmacro dowords (vars &rest body)
"Bind VARS to consecutive words and execute BODY."
(if (listp vars)
`(let ,(mapcar (lambda (var)
`(,var (get-next-word)))
vars)
(while ,(car vars)
,@body
(setq ,@(apply 'nconc
(mapcar (lambda (var)
`(,var (get-next-word)))
vars)))))
`(let ((,vars (get-next-word)))
(while ,vars
,@body
(setq ,vars (get-next-word))))))
(defun replace-regex-str (word regex str)
"In WORD replace REGEX with STR."
(mapconcat 'identity (split-string word regex) str))
(defun glob-to-regex (glob)
"Turn a GLOB to a reg-exp."
(replace-regex-str
(replace-regex-str (replace-regex-str glob "\\." "\\.")
"?" ".")
"\\*" ".*"))
(defun filter-name (file-name patterns)
"Check whether FILE-NAME is fully matched by any of the PATTERNS."
(if patterns
(let ((match (string-match (car patterns) file-name)))
(if (and match
(= 0 match))
t
(filter-name file-name (cdr patterns))))))
(defun maprdir (fn dir &optional file-types subdir-p)
"Apply FN over all files in DIR and its subdirectories.
FILE-TYPES determines file name patterns for calling FN upon.
Default is all files. If SUBDIR-P is nil,
we are in the top level directory, otherwize we are lower.
This is used when recursing, when calling, should be nil."
(unless subdir-p ;executed only once, in top directory
(setq file-types (mapcar 'glob-to-regex
(split-string (or file-types
"*") nil t))))
(dolist (file (directory-files dir))
(let ((file-full (concat dir file)))
(if (and (not (equal "." file))
(not (equal ".." file)))
(if (file-directory-p file-full)
(maprdir fn (concat file-full "/") file-types t)
(if (filter-name file file-types)
(funcall fn file-full)))))))
(defun inc-hash-value (key h-table &optional value)
"Increment value for KEY in H-TABLE with VALUE.
If VALUE is nil, use 1.
If KEY doesn't exist, set initial value to VALUE.
If end value of KEY is <=0, remove key.
Return new val if key is added/changed, nil if key is removed."
(let* ((num (gethash key h-table 0))
(val (or value 1))
(end-val (+ num val)))
(if (> end-val 0)
(puthash key end-val h-table)
(remhash key h-table))))
(defun hash-to-assoc (h-table)
"Turn a H-TABLE to assoc-list."
(let ((a-list nil))
(maphash (lambda (key val)
(push (cons key val) a-list))
h-table)
a-list))
(defun ir-pair-to-global-hash (key value)
"Add KEY VALUE to *ir-global-hash* and adjust global count of words."
(or (gethash key *ir-global-hash* nil)
(setq *ir-words-count* (1+ *ir-words-count*)))
(inc-hash-value key *ir-global-hash* value))
(defun ir-assoc-to-hash (a-list &optional size use-global-hash-p parent-hash-p)
"Turn A-LIST to a hash-table with size SIZE.
If USE-GLOBAL-HASH-P, add to *ir-global-hash*, return nil.
If PARENT-HASH-P, create new hash and add both to it
and *ir-global-hash*, adjusting global counts,
return the newly created one."
(if (not use-global-hash-p)
(let ((h-table (make-hash-table :test 'equal :size size)))
(if parent-hash-p
(dolist (cell a-list h-table)
(let ((key (car cell))
(val (cdr cell)))
(ir-pair-to-global-hash key val)
(inc-hash-value key h-table val)))
(dolist (cell a-list h-table)
(inc-hash-value (car cell) h-table (cdr cell)))))
(if (null *ir-global-hash*) ;else use global, return nil
(setq *ir-global-hash* (make-hash-table
:test 'equal :size size)))
(if parent-hash-p
(dolist (cell a-list)
(ir-pair-to-global-hash (car cell) (cdr cell)))
(dolist (cell a-list)
(inc-hash-value (car cell) *ir-global-hash* (cdr cell))))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Word processing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun bg-stem (word)
"Return stemmed version of WORD."
(if (string-match "\\(.*?[аÑоÑеийÑÑ]\\)\\(.*\\)" word)
(let ((prefix (match-string-no-properties 1 word))
(suffix (match-string-no-properties 2 word)))
(if (and prefix
suffix)
(catch 'out
(dotimes (i (length suffix) word)
(let ((stem-suf (gethash (substring suffix i)
*ir-stem* nil)))
(if stem-suf
(throw 'out (concat prefix
(substring suffix 0 i)
stem-suf))))))
word))
word))
(defun ir-process-new-word (word)
"Return processed WORD."
(if (and *ir-stop*
(gethash word *ir-stop* nil))
"" ;stop words are marked as ""
(if *ir-stem*
(bg-stem word)
word)))
(defmacro ir-build-word-processor (&optional stop-p stem-p)
"Build optimized word processing function.
STOP-P determines whether stop words should be checked.
STEM-P determines whether stemming should be applied."
`(lambda (word)
,(if stop-p
`(if (gethash word *ir-stop* nil)
""
,(if stem-p
'(bg-stem word)
'word))
(if stem-p
'(bg-stem word)
'word))))
(defun ir-get-word-processor (stop-p stem-p)
"Return optimized word processing function.
STOP-P determines whether stop words should be checked.
STEM-P determines whether stemming should be applied."
(cond
((and stop-p stem-p) (ir-build-word-processor t t))
(stop-p (ir-build-word-processor t))
(stem-p (ir-build-word-processor nil t))
(t (ir-build-word-processor))))
(defun ir-process-word (word)
"Return hashed processed value for WORD.
If no such is found, process and cache."
(let ((hash-check (gethash word *ir-word-cache* nil)))
(or hash-check
(setq hash-check
(puthash word (ir-process-new-word word) *ir-word-cache*)))
(if (not (equal "" hash-check)) hash-check))) ;if not a stop word
(defun ir-load-stop-words (file)
"Load stop-words from FILE to the global hash *ir-stop*."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(dowords word
(puthash word "1" *ir-stop*))))
;; (defun ir-load-stemmer (file) ;freezes compilation
;; "Load stem entries from FILE to the global hash *ir-stem*."
;; (with-temp-buffer
;; (insert-file-contents file)
;; (goto-char (point-min))
;; (dowords (w1 w2 w3) ;does not byte compile!
;; (when w3
;; (setq w3 (car (read-from-string w3)))
;; (if (and (numberp w3)
;; (>= w3 *ir-stem-level*))
;; (puthash w1 w2 *ir-stem*))))))
(defun ir-load-stemmer (file)
"Load stem entries from FILE to the global hash *ir-stem*."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(dowords w1
(let ((w2 (get-next-word))
(w3 (get-next-word)))
(when w3
(setq w3 (car (read-from-string w3)))
(if (and (numberp w3)
(>= w3 *ir-stem-level*))
(puthash w1 w2 *ir-stem*)))))))
(defun ir-load-auxiliary (&optional force)
"Load auxiliary files to hashes if not already done.
When FORCE is non-nil, re-fill."
(message "Loading auxiliary hashes...")
(let ((stop-dir (concat *ir-dir* "stop-words/")))
(if (file-exists-p stop-dir)
(when (or force
(null *ir-stop*))
(setq *ir-stop* (make-hash-table :test 'equal :size 300))
(maprdir 'ir-load-stop-words stop-dir))))
(let ((stem-dir (concat *ir-dir* "stem-rules/")))
(if (file-exists-p stem-dir)
(when (or force
(null *ir-stem*))
(setq *ir-stem* (make-hash-table :test 'equal :size 130514))
(maprdir 'ir-load-stemmer stem-dir))))
(fset 'ir-process-new-word
(ir-get-word-processor *ir-stop* *ir-stem*))
(message "Auxiliary hashes loaded."))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; File processing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defmacro assess-paragraph ()
"Assess paragraph during word search.
Beware, only usefull in `ir-lm-extract-words'."
`(if (>= paragraph-total-count *ir-lm-min-words*)
(push (list paragraph-start paragraph-total-count
paragraph-words-count paragraph)
acc)
(setq *ir-total-count* ;if paragraph is too short, discard
(- *ir-total-count* paragraph-total-count))
(maphash (lambda (wrd cnt) ;and remove word counts
(if (not (inc-hash-value wrd *ir-global-hash* (- cnt)))
(setq *ir-words-count* (1- *ir-words-count*))))
paragraph)))
(defun ir-lm-extract-words (full-file-name &optional encoding)
"Process paragraphs of current buffer holding FULL-FILE-NAME.
Save ENCODING for further operations."
(let* ((prev (point-min))
(paragraph-start prev)
(paragraph-total-count 0)
(paragraph-words-count 0)
(paragraph (make-hash-table :test 'equal))
(acc (list (current-time) encoding full-file-name)))
(goto-char prev)
(dowords word
(setq word (ir-process-word (downcase word)))
(let ((curr (line-beginning-position)))
(when (string-match "\n.*\n" ;detect just ended paragraph
(buffer-substring-no-properties prev curr))
(assess-paragraph)
(setq paragraph (make-hash-table :test 'equal)
paragraph-total-count 0
paragraph-words-count 0
paragraph-start curr))
(when word
(setq paragraph-total-count (1+ paragraph-total-count)
*ir-total-count* (1+ *ir-total-count*))
(if (= 1 (inc-hash-value word paragraph)) ;new paragraph word
(setq paragraph-words-count (1+ paragraph-words-count)))
(if (= 1 (inc-hash-value word *ir-global-hash*)) ;new global word
(setq *ir-words-count* (1+ *ir-words-count*))))
(setq prev curr)))
(kill-buffer (current-buffer))
(assess-paragraph)
(if acc (push (nreverse acc) *ir-hashes*))))
(defun ir-remove-post (post &optional save-globals-p)
"Subtract from global words hash key-values corresponding in POST.
SAVE-GLOBALS-P determines whether global indexes shouldn't be touched."
(setq *ir-total-count* (- *ir-total-count* (cadr post)))
(maphash (lambda (key val)
(if (and (not (inc-hash-value key *ir-global-hash* (- val)))
(not save-globals-p))
(setq *ir-words-count* (1- *ir-words-count*))))
- (cadddr post)))
+ (get-ir-paragraph-hash post)))
(defun ir-remove-postings (file &optional save-globals-p)
"Clean all info for FILE in hashes.
SAVE-GLOBALS-P determines whether global indexes shouldn't be touched."
- (let ((file-posts (cdddr (find-fn (lambda (post)
- (equal file (car post)))
- *ir-hashes*))))
+ (let ((file-posts (get-ir-file-paragraphs
+ (find-fn (lambda (post)
+ (equal file (get-ir-file-name post)))
+ *ir-hashes*))))
(dolist (post file-posts)
(ir-remove-post post save-globals-p))
(setq *ir-hashes* (delete-fn *ir-hashes*
(lambda (file-post)
- (equal file (car file-post)))))))
+ (equal file (get-ir-file-name
+ file-post)))))))
(defun ir-lm-process-paragraphs (file &optional encoding)
"Load FILE to temp buffer and process its words.
If ENCODING is nil, use default encoding when loading FILE."
(ir-remove-postings file)
(with-temp-buffer
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(ir-lm-extract-words file encoding)))
(defun print-posting (lst)
"Get printed representation for posting for paragraph LST."
(princ "\n" (current-buffer))
- (prin1 (nconc (list (car lst) (cadr lst) (caddr lst)) ;(file-path encoding time)
+ (prin1 (nconc (list (get-ir-file-name lst)
+ (get-ir-file-encoding lst)
+ (get-ir-file-time lst))
(mapcar (lambda (sublst)
- (nconc (list (car sublst) ;(point total-words words words-hash)
- (cadr sublst) (caddr sublst))
- (hash-to-assoc (cadddr sublst))))
- (cdddr lst)))
+ (nconc
+ (list (get-ir-paragraph-point sublst)
+ (get-ir-paragraph-total-words sublst)
+ (get-ir-paragraph-distinct-words sublst))
+ (hash-to-assoc (get-ir-paragraph-hash
+ sublst))))
+ (get-ir-file-paragraphs lst)))
(current-buffer)))
(defun ir-lm-write-index (file)
"Write current index info to FILE."
(interactive
(list (read-file-name "Index file: " nil
".irlm" nil ".irlm")))
(message "Writing...")
(with-temp-file file
(prin1 (nconc (list *ir-total-count* *ir-words-count*) ;firstly write the global hash
(hash-to-assoc *ir-global-hash*))
(current-buffer))
(mapc 'print-posting *ir-hashes*)) ;write all postings
(message "Index written."))
(defun ir-lm-index (dir &optional file-types encoding append-p)
"Recursivelly process directory DIR and index all files.
FILE-TYPES determines file name patterns for indexing.
If ENCODING is nil, use default \(utf-8\) encoding for files.
If APPEND-P is non-nil, merge to the current index."
(interactive
(list
(read-directory-name "Top directory: " nil default-directory t)
(read-string "File names to be indexed: " "*.txt" nil "*.txt")
(if (not (y-or-n-p "Use default encoding? "))
(read-coding-system "Choose encoding: " 'cp1251))
(if *ir-global-hash*
(y-or-n-p "Add to existing configuration? "))))
(or *ir-global-hash*
(setq append-p nil))
(or *ir-word-cache*
(setq *ir-word-cache* (make-hash-table :test 'equal)))
(unless append-p
(ir-clear)
(setq *ir-global-hash* (make-hash-table :test 'equal)))
(ir-load-auxiliary)
(message "Indexing...")
(maprdir (lambda (file) (ir-lm-process-paragraphs file encoding))
dir file-types)
(message "Files successfully indexed.")
(ir-refresh-view))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Load existing index
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-get-file-posting (post &optional inc-globals-p)
"Convert file saved POST info to actually used structures.
INC-GLOBALS-P determines whether global word counts should be adjusted."
- (nconc (list (car post) (cadr post) (caddr post)) ;(file-name encoding time)
+ (nconc (list (get-ir-file-name post)
+ (get-ir-file-encoding post)
+ (get-ir-file-time post))
(mapcar (lambda (subpost)
- (let ((total-words (cadr subpost))
- (index-words (caddr subpost)))
+ (let ((total-words (get-ir-paragraph-total-words
+ subpost))
+ (index-words (get-ir-paragraph-distinct-words
+ subpost)))
(if inc-globals-p
(setq *ir-total-count*
(+ *ir-total-count* total-words)))
- (list (car subpost) total-words index-words
+ (list (get-ir-paragraph-point subpost)
+ total-words index-words
(ir-assoc-to-hash (cdddr subpost) index-words
nil inc-globals-p))))
- (cdddr post))))
+ (get-ir-file-paragraphs post))))
(defun ir-lm-load-file-posting (post &optional inc-globals-p)
"Get file saved POST. If newer posting already exists, discard.
INC-GLOBALS-P determines whether global word counts should be adjusted."
- (let* ((file-path (car post))
+ (let* ((file-path (get-ir-file-name post))
(existing-file-time
- (caddr (find-fn (lambda (post) (equal file-path (car post)))
- *ir-hashes*))))
+ (get-ir-file-time (find-fn (lambda (post)
+ (equal file-path
+ (get-ir-file-name post)))
+ *ir-hashes*))))
(if existing-file-time ;check if file is already in index
(if (file-exists-p file-path)
- (when (time-less-p existing-file-time (caddr post)) ;if post is newer
+ (when (time-less-p existing-file-time
+ (get-ir-file-time post)) ;if post is newer
(ir-remove-postings file-path (not inc-globals-p)) ;remove old posting from *ir-hashes*
(ir-lm-get-file-posting post inc-globals-p))
;discard posting and remove existing from *ir-hashes*
(ir-remove-postings file-path (not inc-globals-p)) ;housekeeping
nil)
(if (file-exists-p file-path) ;load only existing files
(ir-lm-get-file-posting post inc-globals-p)))))
(defun ir-lm-load-index-from-file (file)
"Load existing index from FILE."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(let ((not-inc-globals-p (null *ir-global-hash*)))
(if not-inc-globals-p ;need global hash from file only if current is cleared
(let ((global-hash (read-from-whole-string
(buffer-substring-no-properties
(line-beginning-position)
(line-end-position)))))
(setq *ir-total-count* (car global-hash)
*ir-words-count* (cadr global-hash))
(ir-assoc-to-hash (cddr global-hash) *ir-words-count* t)))
(let ((point-max (point-max)))
(while (and (= 0 (forward-line 1))
(< (point) point-max))
(let ((file-sexp (ir-lm-load-file-posting
(read-from-whole-string
(buffer-substring-no-properties
(line-beginning-position)
(line-end-position)))
(not not-inc-globals-p))))
(if file-sexp (push file-sexp *ir-hashes*))))))
(kill-buffer (current-buffer))))
(defun ir-lm-load-index (file &optional append-p)
"Load existing index FILE.
If APPEND-P is non-nil, keep previous index loaded as well."
(interactive
(list (read-file-name "Index file: " nil
".irlm" nil ".irlm")
(if *ir-global-hash*
(y-or-n-p
"Add to existing configuration or overwrite? "))))
(when (file-exists-p file)
(if (not (and *ir-global-hash*
append-p))
(ir-clear))
(ir-load-auxiliary)
(message "Loading...")
(ir-lm-load-index-from-file file)
(message "Index loaded.")
(ir-refresh-view)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Scoring
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-posting-score (hash base words &optional lambda)
"Get score from paragraph represented as HASH.
BASE is the total number of words in the paragraph.
WORDS is list of words in query.
LAMBDA is LM parameter between 0 and 1."
(or lambda
(setq lambda 0.5))
(let ((result
(apply '*
(mapcar (lambda (word)
(let ((global-count
(gethash word *ir-global-hash* 0)))
(if (> global-count 0)
(+ (* lambda
(/ (float (gethash word hash 0))
base))
(* (- 1 lambda)
(/ (float global-count)
*ir-total-count*)))
1)))
words))))
(if (= result 1) 0 result)))
(defun ir-lm-posting-min-score (words &optional lambda)
"Get minimum score possible for a paragraph.
WORDS is list of words in query.
LAMBDA is LM parameter between 0 and 1."
(or lambda
(setq lambda 0.5))
(apply '* (mapcar (lambda (word)
(let ((global-count
(gethash word *ir-global-hash* 0)))
(if (> global-count 0)
(* (- 1 lambda)
(/ (float global-count)
*ir-total-count*))
1)))
words)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Search
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-insert-post (new best cnt)
"Insert NEW post based on score into BEST array with CNT elements."
(let ((new-val (aref new 0))
(place (1+ cnt)))
(when (> new-val 0)
(while (and (>= place 1)
(> new-val (aref (aref best (1- place)) 0)))
(setq place (1- place)))
(while (> cnt place)
(aset best cnt (aref best (1- cnt)))
(setq cnt (1- cnt)))
(if (>= cnt place)
(aset best place new))))
best)
(defun ir-lm-get-best-scores (query cnt)
"For QUERY which is list of search terms find best CNT results.
Return vector of vectors with info for best paragraphs."
(let ((best (make-vector cnt [0 "" -1 nil nil]))
(min-score (ir-lm-posting-min-score query *ir-lm-lambda*)))
(dolist (file *ir-hashes*)
- (let ((file-path (car file)))
+ (let ((file-path (get-ir-file-name file)))
(if (file-exists-p file-path)
- (dolist (post (cdddr file))
+ (dolist (post (get-ir-file-paragraphs file))
(let ((score
- (ir-lm-posting-score (cadddr post)
- (cadr post)
+ (ir-lm-posting-score (get-ir-paragraph-hash post)
+ (get-ir-paragraph-total-words
+ post)
query
*ir-lm-lambda*)))
(if (> score min-score)
(setq best
- (ir-lm-insert-post ;[score file point encoding]
+ (ir-lm-insert-post
(vector score file-path
- (car post) (cadr file) (cadr post))
+ (get-ir-paragraph-point post)
+ (get-ir-file-encoding file))
best (1- cnt)))))))))
best))
(defun highlight-search (pos query)
"Highlight words from POS on to the end of paragraph corresponding to QUERY."
(catch 'out
(let ((prev pos))
(dowords word
(let ((curr (point)))
(if (string-match "\n.*\n" ;detect just ended paragraph
(buffer-substring-no-properties
prev curr))
(throw 'out nil))
(when (member (ir-process-word (downcase word))
query)
(delete-char (- (length word)))
(insert
(propertize word
'face '((:foreground "green")))))
(setq prev curr))))))
(defun ir-lm-jump-to-result (file pos &optional encoding query)
"Open FILE and go to particular position POS.
If ENCODING is nil, use default encoding when loading result file.
QUERY is list of current search terms."
(interactive
(let ((point (point)))
(list (get-text-property point 'file)
(get-text-property point 'point)
(get-text-property point 'encoding)
(get-text-property point 'query))))
(let ((jump-buffer (generate-new-buffer (car (nreverse
(split-string file "/"))))))
(set-buffer jump-buffer)
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(goto-char pos)
(when query ;highlight search terms
(highlight-search pos query)
(goto-char pos))
(switch-to-buffer jump-buffer)))
(defun ir-lm-insert-results (best query)
"Insert in current buffer BEST results.
QUERY is list of current search terms."
(mapc (lambda (post)
(let ((file (aref post 1))
(score (aref post 0))
(marker (aref post 2))
(encoding (aref post 3))
(preview ""))
(if (<= score 0)
(throw 'end-results nil) ;premature end of meaningful results
(insert "\n")
(insert (make-link (car (nreverse (split-string file "/")))
'ir-lm-jump-to-result file marker
t encoding query))
(insert (format " [%f]" (* score 1000000)))
(when (number-or-marker-p marker)
(with-temp-buffer
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(goto-char marker)
(setq preview
(buffer-substring-no-properties marker
(line-end-position)))
(kill-buffer (current-buffer)))
(insert "\n")
(insert (make-link preview 'ir-lm-jump-to-result
file marker nil encoding query))))))
best))
(defun ir-lm-search (query-str &optional cnt)
"For QUERY-STR find best CNT results."
(interactive
(list (read-string "Search for: " nil t) nil))
(or cnt (setq cnt *ir-max-results*))
(if (null *ir-global-hash*)
(message "No index loaded.")
(or *ir-word-cache*
(setq *ir-word-cache* (make-hash-table :test 'equal)))
(let ((results (generate-new-buffer "*Search results*")))
(set-buffer results)
(local-set-key (kbd "<M-down>")
(lambda () (interactive) (forward-line 2)))
(local-set-key (kbd "<M-up>")
(lambda () (interactive) (forward-line -2)))
(local-set-key (kbd "q") (lambda () (interactive) (kill-buffer)))
(switch-to-buffer results)
(insert "Results for: " query-str)
(catch 'end-results
(let ((query (mapcar (lambda (word)
(ir-process-word (downcase word)))
(split-string query-str))))
(ir-lm-insert-results (ir-lm-get-best-scores query cnt) query)))
(setq buffer-read-only t)
(goto-char (point-min))
(forward-line))
(ignore-errors
(kill-buffer "*Quail Completions*"))
(message (concat "Results for: " query-str))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Visualisation
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-set-keys ()
"Set key bindings in the IR buffer."
(local-set-key (kbd "i") 'ir-lm-index)
(local-set-key (kbd "l") 'ir-lm-load-index)
(local-set-key (kbd "w") 'ir-lm-write-index)
(local-set-key (kbd "f") 'ir-lm-search)
(local-set-key (kbd "c") 'ir-clear)
(local-set-key (kbd "m") 'ir-lm-change-max-results)
(local-set-key (kbd "p") 'ir-lm-change-min-words)
(local-set-key (kbd "b") 'ir-lm-change-lambda)
(local-set-key (kbd "s") 'ir-change-stem-level)
(local-set-key (kbd "q") (lambda () (interactive) (kill-buffer)))
(local-set-key (kbd "r") (lambda () (interactive) (ir-refresh-view))))
(defun ir-lm ()
"Create buffer with information and shortcuts."
(interactive)
(let ((ir-buffer (get-buffer-create "*Information retrieval*")))
(set-buffer ir-buffer)
(switch-to-buffer ir-buffer)
(insert (propertize "Information Retrieval - Basic Mixed Language Model"
'face '((:foreground "green") (:underline t)))
"\n\nOptions:\n"
(make-link "i -> index new directory"
'ir-lm-index)
"\n"
(make-link "l -> load existing index from file"
'ir-lm-load-index)
"\n"
(make-link "w -> write current index\(es\) to file"
'ir-lm-write-index)
"\n"
(make-link "f -> search in current loaded index\(es\)"
'ir-lm-search)
"\n"
(make-link "c -> clear current index\(es\)"
'ir-clear)
"\n"
(make-link "m -> change maximum search results"
'ir-lm-change-max-results)
"\n"
(make-link "p -> change minimum number of words in paragraph"
'ir-lm-change-min-words)
"\n"
(make-link "b -> change lambda"
'ir-lm-change-lambda)
"\n"
(make-link "s -> change stemming level"
'ir-change-stem-level)
"\n"
(make-link "q -> quit \(without clearing\)"
(lambda () (interactive) (kill-buffer)))
"\n\n"
"maximum results = " (format "%d\n" *ir-max-results*)
"minimum number of words in paragraph = "
(format "%d\n" *ir-lm-min-words*)
"lambda = " (format "%f\n" *ir-lm-lambda*)
"stemming level = " (format "%d\n" *ir-stem-level*)
"total words in texts = " (format "%d\n" *ir-total-count*)
"words in index = " (format "%d\n" *ir-words-count*)
"Currently indexed files [total words]:\n")
(ir-lm-set-keys)
(ir-list-index)
(setq buffer-read-only t)
(set-buffer-modified-p nil)
(goto-char (point-min))
(forward-line 3)))
(provide 'ir-lm)
;;; ir-lm.el ends here
\ No newline at end of file
|
m00natic/ir-lm | 0bfa9f8dc31dd1751f07e8c4f66b9f5f0315174d | minor fixes | diff --git a/README b/README
index 9a23e3a..90a7ec2 100644
--- a/README
+++ b/README
@@ -1,83 +1,83 @@
This is an Emacs Lisp extension realizing a simple mixed language
model for information retrieval in paragraphs of files grouped in
multiple directories.
Paragraphs are assumed to be separated by a blank line. The formula
used for sorting relevance is:
P(w|p) = lambda*P(w|Mp) + (1 - lambda)*P(w|Mc)
where Mp is probabilities model for a paragraph, Mc - probabilities
model for the whole collection and 0 < lambda <= 1.
It's only been tested on cvs version of GNU/Emacs 23.1.50 onwards.
Files:
ir-lm.el (elisp source)
optional:
bg-stop-words.txt, stem-rules.txt, stem-rules2.txt, stem-rules3.txt
Adding (require 'ir-lm) to .emacs allows automatic loading of the
extension (after adding to load-path). Optional files are grouped in
directory "~/.ir/" on posix systems or "C:\\ir\" on windows systems.
Stop word files should be put in subdirectory "stop-words/". Files
with stemming rules must be in subdirectory "stem-rules/". These
directories are recursively scanned so any sort of subdirectory
structures would suffice and all files will be used accordingly. Stop
word files may contain different languages but stemming rules
processing is tuned only for bulgarian at the moment (not really a
problem to make a directory for each needed language with stemming
rules and then adding some specific functions for loading these rules
and stemming for each such subdirectory).
Commands:
ir-lm-index
Creates and loads an index of all files of directory and its
-subdirectories. Index is not saved on the disk. The option for file
+subdirectories. Index is not saved on disk. The option for file
types allows multiple glob filters (separated with space) to be
applied to file names, thus indexing just specific files. The coding
option allows choosing non-default encoding for all files. The option
for adding to current index determines whether index is freshly loaded
deleting current index or merging current and new indexes thus
-allowing search in multiple indexes (treated as one from that
-moment). If auxiliary files (stop words, stem rules) have not been
-loaded - attempts to load them.
+allowing search in multiple indexes (treated as one from that moment).
+If auxiliary files (stop words, stem rules) have not been loaded -
+attempts to load them.
ir-lm-write-index
Writes current index to a chosen file.
ir-lm-load-index
Loads an index from a chosen file. The option for adding to current
configuration is analogous to the option in ir-lm-index, allowing
merging of multiple indexes. Index information for files not present
in the file system (as recorded in the index) is not loaded. When
merging indexes having information for identical (according to path)
files, most recently indexed version for such files is chosen. If
auxiliary files (stop words, stem rules) have not been loaded -
attempts to load them.
ir-lm-search
Searches indexed paragraphs for words showing a line resumes in a new
buffer and links to the result paragraphs. When a result is clicked
(enter also suffices), marker is positioned upon the result paragraph
and search terms are coloured.
ir-clear
Freeing index data from memory. (there probably is a problem with the
way it's done, as Emacs keeps showing as high memory use)
ir-lm-change-lambda
Allowing modifying the lambda search parameter for this language model
(defaults to 0.5)
ir-change-stem-level
Changes stemming level (it only applies for newly indexing and when
auxiliary hashes, stop-words and stem-rules have been cleared or not
yet loaded).
ir-lm-change-max-results
Changes maximum number of search results showed.
ir-lm-change-min-words
Changes the minimum number of words needed for a paragraph to be
indexed (defaults to 20) on new indexing.
ir-lm
A convenient interface for the above commands as well as links to all
files in current index.
diff --git a/ir-lm.el b/ir-lm.el
index 18678ee..1ece415 100644
--- a/ir-lm.el
+++ b/ir-lm.el
@@ -1,926 +1,926 @@
;;; ir-lm.el --- Basic Mixed Language Model for Information Retrieval
;by Andrey Kotlarski [email protected]
;;; Commentary:
;;; History:
;; 31.VII.2009 - Version 1.7
; Generating word processing function
; on the fly, thus optimizing
; depending on whether stop words or
; stemmer are loaded
;; 18.VII.2009 - Version 1.6
; highlighting of search words
; minor bugfixes
;; 15.VII.2009 - Version 1.5
; bulgarian stemmer added
; stop-word and stemmer files
; are now stored in separate directories
; which are recursively processed
; added stemming parameter
; many corrections in merging
;; 14.VII.2009 - Version 1.4
; correctly merge postings and info
; on load or index (no duplicates,
; no loading of older than index files)
; added globs for filtering file types
;; 13.VII.2009 - Version 1.3
; remembering encoding for individual files
; prune non-existing files on load
;; 12.VII.2009 - Version 1.2
; new command `ir-lm' giving a unified
; interface of files and commands
; command to change lambda
; full cleaning of data
; minor bugfixes
;; 10.VII.2009 - Version 1.1
; added minumim possible score for query
; so that irrelevant results are discarded
; a bit of code refactoring and cleaning
;; 09.VII.2009 - Version 1.0
;;; Code:
(defconst *ir-dir*
(if (or (eq system-type 'windows-nt)
(eq system-type 'ms-dos))
"C:/ir/"
"~/.ir/")
"Directory for auxiliary files.")
;; *ir-hashes* structure is ((file-path encoding time (point-in-file total-words-in-paragraph
;; distinct-words-in-paragraph hash-of-word-counts) ...) ...)
(defvar *ir-hashes* nil "List of postings grouped in files.")
(defvar *ir-global-hash* nil "Global hash table of words and their count.")
(defvar *ir-total-count* 0 "Count of all words in index.")
(defvar *ir-words-count* 0 "Count of all distinct words in index.")
(defvar *ir-word-cache* nil "Cache of raw word -> transformation.")
(defvar *ir-stop* nil "Hash table of stop words.")
(defvar *ir-stem* nil "Hash table of stemmer.")
(defvar *ir-lm-lambda* 0.5 "Parameter in the mixed language model.")
(defvar *ir-max-results* 30 "Maximum number of search results.")
(defvar *ir-stem-level* 1 "Stemming level.")
(defvar *ir-lm-min-words* 20 "Minimal number of words in paragraph.")
(defun make-link (text cmd &optional file point underline-p encoding query)
"Return a TEXT propertized as a link that invokes CMD when clicked.
FILE is to be opened and cursor moved to position POINT.
UNDERLINE-P determines wether text should be underlined.
If ENCODING is nil, use default encoding when loading result file.
QUERY is list of search terms."
(let ((map (make-sparse-keymap)))
(define-key map [mouse-1] cmd)
(define-key map (kbd "RET") cmd)
(propertize text
'keymap map
'face (if underline-p
'((:foreground "green") (:underline t)))
'mouse-face 'highlight
'rear-nonsticky t
'read-only t
'file file
'point point
'encoding encoding
'query query)))
(defun ir-file-words (paragraphs)
"Get total count of words for file by summing count in its PARAGRAPHS."
- (apply '+ (mapcar '(lambda (sexp)
- (cadr sexp))
+ (apply '+ (mapcar (lambda (sexp)
+ (cadr sexp))
paragraphs)))
(defun ir-list-index ()
"List all files currently in index."
(dolist (file *ir-hashes*)
(let ((file-path (car file)))
(if (file-exists-p file-path)
(insert "\n" (make-link file-path 'ir-lm-jump-to-result
file-path 1 nil (cadr file))
(format " [%d]" (ir-file-words (cdddr file))))))))
(defun ir-refresh-view ()
"Refresh file names in current index."
(ignore-errors
(with-current-buffer "*Information retrieval*"
(goto-char (point-min))
(forward-line 14)
(setq inhibit-read-only t)
(let ((start (point)))
(forward-line 5)
(delete-region start (line-end-position)))
(insert
"maximum results = " (format "%d\n" *ir-max-results*)
"minimum number of words in paragraph = "
(format "%d\n" *ir-lm-min-words*)
"lambda = " (format "%f\n" *ir-lm-lambda*)
"stemming level = " (format "%d\n" *ir-stem-level*)
"total words in texts = " (format "%d\n" *ir-total-count*)
"words in index = " (format "%d" *ir-words-count*))
(forward-line 2)
(delete-region (point) (point-max))
(ir-list-index)
(setq inhibit-read-only nil)
(set-buffer-modified-p nil)
(goto-char (point-min))
(forward-line 3))))
(defun ir-lm-change-lambda (new)
"Set NEW value of the `lambda' parameter."
(interactive
(list (read-number "New value for lambda (0 < lambda < 1) = ")))
(if (and (> new 0) (< new 1))
(progn (setq *ir-lm-lambda* new)
(ir-refresh-view))
(message "Incorrect value for lambda.")))
(defun ir-change-stem-level (new)
"Set NEW value of the stemming parameter."
(interactive
(list (read-number "New level for stemming (> 0) = ")))
(if (< new 1)
(message "Incorrect value for stemming.")
(setq *ir-stem-level* new)
(ir-refresh-view)
(ir-load-auxiliary t)))
(defun ir-lm-change-max-results (new)
"Set NEW value for maximum number of search results."
(interactive
(list (read-number "Maximum number of search results = ")))
(setq *ir-max-results* new)
(ir-refresh-view))
(defun ir-lm-change-min-words (new)
"Set NEW minimum number of words for paragraph."
(interactive
(list (read-number "Minumun number of words in paragraph = ")))
(setq *ir-lm-min-words* new)
(ir-refresh-view))
(defun ir-clear (&optional all)
"Clear global hashes and reset global variables.
If ALL is non-nil - ask to clear words' cache as well."
(interactive
(list t))
(setq *ir-hashes* nil
*ir-total-count* 0
*ir-words-count* 0
*ir-global-hash* nil)
(when all
(if (and (or *ir-word-cache* *ir-stem* *ir-stop*)
(y-or-n-p "Clear auxiliary caches as well? "))
(setq *ir-stop* nil
*ir-stem* nil
*ir-word-cache* nil))
(message "Index cleared.")
(ir-refresh-view))
(garbage-collect))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; utilities
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun find-fn (fn lst)
"Return first item that satisfies FN in LST. Nil if no such."
(catch 'out
(dolist (item lst)
(if (funcall fn item)
(throw 'out item)))))
(defun delete-fn (lst fn)
"Destructively delete first element of LST for which FN is non-nil."
(if (funcall fn (car lst))
(cdr lst)
(let ((prev lst)
(curr (cdr lst)))
(catch 'out
(while curr
(if (not (funcall fn (car curr)))
(setq prev curr
curr (cdr curr))
(setcdr prev (cdr curr))
(throw 'out nil))))
lst)))
(defun get-next-word ()
"Get next word (including hyphens and carrige return) after position."
(if (forward-word)
(let ((word (current-word t t)))
(while (equal (char-to-string (following-char)) "-")
(if (forward-word)
(setq word (concat word
(if (equal (char-to-string
(following-char)) "\n")
""
"-")
(current-word t t)))))
word)))
(defmacro dowords (vars &rest body)
"Bind VARS to consecutive words and execute BODY."
(if (listp vars)
- `(let ,(mapcar '(lambda (var)
- `(,var (get-next-word)))
+ `(let ,(mapcar (lambda (var)
+ `(,var (get-next-word)))
vars)
(while ,(car vars)
,@body
(setq ,@(apply 'nconc
- (mapcar '(lambda (var)
- `(,var (get-next-word)))
+ (mapcar (lambda (var)
+ `(,var (get-next-word)))
vars)))))
`(let ((,vars (get-next-word)))
(while ,vars
,@body
(setq ,vars (get-next-word))))))
(defun replace-regex-str (word regex str)
"In WORD replace REGEX with STR."
(mapconcat 'identity (split-string word regex) str))
(defun glob-to-regex (glob)
"Turn a GLOB to a reg-exp."
(replace-regex-str
(replace-regex-str (replace-regex-str glob "\\." "\\.")
"?" ".")
"\\*" ".*"))
(defun filter-name (file-name patterns)
"Check whether FILE-NAME is fully matched by any of the PATTERNS."
(if patterns
(let ((match (string-match (car patterns) file-name)))
(if (and match
(= 0 match))
t
(filter-name file-name (cdr patterns))))))
(defun maprdir (fn dir &optional file-types subdir-p)
"Apply FN over all files in DIR and its subdirectories.
FILE-TYPES determines file name patterns for calling FN upon.
Default is all files. If SUBDIR-P is nil,
we are in the top level directory, otherwize we are lower.
This is used when recursing, when calling, should be nil."
(unless subdir-p ;executed only once, in top directory
(setq file-types (mapcar 'glob-to-regex
(split-string (or file-types
"*") nil t))))
(dolist (file (directory-files dir))
(let ((file-full (concat dir file)))
(if (and (not (equal "." file))
(not (equal ".." file)))
(if (file-directory-p file-full)
(maprdir fn (concat file-full "/") file-types t)
(if (filter-name file file-types)
(funcall fn file-full)))))))
(defun inc-hash-value (key h-table &optional value)
"Increment value for KEY in H-TABLE with VALUE.
If VALUE is nil, use 1.
If KEY doesn't exist, set initial value to VALUE.
If end value of KEY is <=0, remove key.
Return new val if key is added/changed, nil if key is removed."
(let* ((num (gethash key h-table 0))
(val (or value 1))
(end-val (+ num val)))
(if (> end-val 0)
(puthash key end-val h-table)
(remhash key h-table))))
(defun hash-to-assoc (h-table)
"Turn a H-TABLE to assoc-list."
(let ((a-list nil))
- (maphash '(lambda (key val)
- (push (cons key val) a-list))
+ (maphash (lambda (key val)
+ (push (cons key val) a-list))
h-table)
a-list))
(defun ir-pair-to-global-hash (key value)
"Add KEY VALUE to *ir-global-hash* and adjust global count of words."
(or (gethash key *ir-global-hash* nil)
(setq *ir-words-count* (1+ *ir-words-count*)))
(inc-hash-value key *ir-global-hash* value))
(defun ir-assoc-to-hash (a-list &optional size use-global-hash-p parent-hash-p)
"Turn A-LIST to a hash-table with size SIZE.
If USE-GLOBAL-HASH-P, add to *ir-global-hash*, return nil.
If PARENT-HASH-P, create new hash and add both to it
and *ir-global-hash*, adjusting global counts,
return the newly created one."
(if (not use-global-hash-p)
(let ((h-table (make-hash-table :test 'equal :size size)))
(if parent-hash-p
(dolist (cell a-list h-table)
(let ((key (car cell))
(val (cdr cell)))
(ir-pair-to-global-hash key val)
(inc-hash-value key h-table val)))
(dolist (cell a-list h-table)
(inc-hash-value (car cell) h-table (cdr cell)))))
(if (null *ir-global-hash*) ;else use global, return nil
(setq *ir-global-hash* (make-hash-table
:test 'equal :size size)))
(if parent-hash-p
(dolist (cell a-list)
(ir-pair-to-global-hash (car cell) (cdr cell)))
(dolist (cell a-list)
(inc-hash-value (car cell) *ir-global-hash* (cdr cell))))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Word processing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun bg-stem (word)
"Return stemmed version of WORD."
(if (string-match "\\(.*?[аÑоÑеийÑÑ]\\)\\(.*\\)" word)
(let ((prefix (match-string-no-properties 1 word))
(suffix (match-string-no-properties 2 word)))
(if (and prefix
suffix)
(catch 'out
(dotimes (i (length suffix) word)
(let ((stem-suf (gethash (substring suffix i)
*ir-stem* nil)))
(if stem-suf
(throw 'out (concat prefix
(substring suffix 0 i)
stem-suf))))))
word))
word))
(defun ir-process-new-word (word)
"Return processed WORD."
(if (and *ir-stop*
(gethash word *ir-stop* nil))
"" ;stop words are marked as ""
(if *ir-stem*
(bg-stem word)
word)))
(defmacro ir-build-word-processor (&optional stop-p stem-p)
"Build optimized word processing function.
STOP-P determines whether stop words should be checked.
STEM-P determines whether stemming should be applied."
`(lambda (word)
,(if stop-p
`(if (gethash word *ir-stop* nil)
""
,(if stem-p
'(bg-stem word)
'word))
(if stem-p
'(bg-stem word)
'word))))
(defun ir-get-word-processor (stop-p stem-p)
"Return optimized word processing function.
STOP-P determines whether stop words should be checked.
STEM-P determines whether stemming should be applied."
(cond
((and stop-p stem-p) (ir-build-word-processor t t))
(stop-p (ir-build-word-processor t))
(stem-p (ir-build-word-processor nil t))
(t (ir-build-word-processor))))
(defun ir-process-word (word)
"Return hashed processed value for WORD.
If no such is found, process and cache."
(let ((hash-check (gethash word *ir-word-cache* nil)))
(or hash-check
(setq hash-check
(puthash word (ir-process-new-word word) *ir-word-cache*)))
(if (not (equal "" hash-check)) hash-check))) ;if not a stop word
(defun ir-load-stop-words (file)
"Load stop-words from FILE to the global hash *ir-stop*."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(dowords word
(puthash word "1" *ir-stop*))))
;; (defun ir-load-stemmer (file) ;freezes compilation
;; "Load stem entries from FILE to the global hash *ir-stem*."
;; (with-temp-buffer
;; (insert-file-contents file)
;; (goto-char (point-min))
;; (dowords (w1 w2 w3) ;does not byte compile!
;; (when w3
;; (setq w3 (car (read-from-string w3)))
;; (if (and (numberp w3)
;; (>= w3 *ir-stem-level*))
;; (puthash w1 w2 *ir-stem*))))))
(defun ir-load-stemmer (file)
"Load stem entries from FILE to the global hash *ir-stem*."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(dowords w1
(let ((w2 (get-next-word))
(w3 (get-next-word)))
(when w3
(setq w3 (car (read-from-string w3)))
(if (and (numberp w3)
(>= w3 *ir-stem-level*))
(puthash w1 w2 *ir-stem*)))))))
(defun ir-load-auxiliary (&optional force)
"Load auxiliary files to hashes if not already done.
When FORCE is non-nil, re-fill."
(message "Loading auxiliary hashes...")
(let ((stop-dir (concat *ir-dir* "stop-words/")))
(if (file-exists-p stop-dir)
(when (or force
(null *ir-stop*))
(setq *ir-stop* (make-hash-table :test 'equal :size 300))
(maprdir 'ir-load-stop-words stop-dir))))
(let ((stem-dir (concat *ir-dir* "stem-rules/")))
(if (file-exists-p stem-dir)
(when (or force
(null *ir-stem*))
(setq *ir-stem* (make-hash-table :test 'equal :size 130514))
(maprdir 'ir-load-stemmer stem-dir))))
(fset 'ir-process-new-word
(ir-get-word-processor *ir-stop* *ir-stem*))
(message "Auxiliary hashes loaded."))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; File processing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defmacro assess-paragraph ()
"Assess paragraph during word search.
Beware, only usefull in `ir-lm-extract-words'."
`(if (>= paragraph-total-count *ir-lm-min-words*)
(push (list paragraph-start paragraph-total-count
paragraph-words-count paragraph)
acc)
(setq *ir-total-count* ;if paragraph is too short, discard
(- *ir-total-count* paragraph-total-count))
- (maphash '(lambda (wrd cnt) ;and remove word counts
- (if (not (inc-hash-value wrd *ir-global-hash* (- cnt)))
- (setq *ir-words-count* (1- *ir-words-count*))))
+ (maphash (lambda (wrd cnt) ;and remove word counts
+ (if (not (inc-hash-value wrd *ir-global-hash* (- cnt)))
+ (setq *ir-words-count* (1- *ir-words-count*))))
paragraph)))
(defun ir-lm-extract-words (full-file-name &optional encoding)
"Process paragraphs of current buffer holding FULL-FILE-NAME.
Save ENCODING for further operations."
(let* ((prev (point-min))
(paragraph-start prev)
(paragraph-total-count 0)
(paragraph-words-count 0)
(paragraph (make-hash-table :test 'equal))
(acc (list (current-time) encoding full-file-name)))
(goto-char prev)
(dowords word
(setq word (ir-process-word (downcase word)))
(let ((curr (line-beginning-position)))
(when (string-match "\n.*\n" ;detect just ended paragraph
(buffer-substring-no-properties prev curr))
(assess-paragraph)
(setq paragraph (make-hash-table :test 'equal)
paragraph-total-count 0
paragraph-words-count 0
paragraph-start curr))
(when word
(setq paragraph-total-count (1+ paragraph-total-count)
*ir-total-count* (1+ *ir-total-count*))
(if (= 1 (inc-hash-value word paragraph)) ;new paragraph word
(setq paragraph-words-count (1+ paragraph-words-count)))
(if (= 1 (inc-hash-value word *ir-global-hash*)) ;new global word
(setq *ir-words-count* (1+ *ir-words-count*))))
(setq prev curr)))
(kill-buffer (current-buffer))
(assess-paragraph)
(if acc (push (nreverse acc) *ir-hashes*))))
(defun ir-remove-post (post &optional save-globals-p)
"Subtract from global words hash key-values corresponding in POST.
SAVE-GLOBALS-P determines whether global indexes shouldn't be touched."
(setq *ir-total-count* (- *ir-total-count* (cadr post)))
- (maphash '(lambda (key val)
- (if (and (not (inc-hash-value key *ir-global-hash* (- val)))
- (not save-globals-p))
- (setq *ir-words-count* (1- *ir-words-count*))))
+ (maphash (lambda (key val)
+ (if (and (not (inc-hash-value key *ir-global-hash* (- val)))
+ (not save-globals-p))
+ (setq *ir-words-count* (1- *ir-words-count*))))
(cadddr post)))
(defun ir-remove-postings (file &optional save-globals-p)
"Clean all info for FILE in hashes.
SAVE-GLOBALS-P determines whether global indexes shouldn't be touched."
- (let ((file-posts (cdddr (find-fn '(lambda (post)
- (equal file (car post)))
+ (let ((file-posts (cdddr (find-fn (lambda (post)
+ (equal file (car post)))
*ir-hashes*))))
(dolist (post file-posts)
(ir-remove-post post save-globals-p))
(setq *ir-hashes* (delete-fn *ir-hashes*
- '(lambda (file-post)
- (equal file (car file-post)))))))
+ (lambda (file-post)
+ (equal file (car file-post)))))))
(defun ir-lm-process-paragraphs (file &optional encoding)
"Load FILE to temp buffer and process its words.
If ENCODING is nil, use default encoding when loading FILE."
(ir-remove-postings file)
(with-temp-buffer
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(ir-lm-extract-words file encoding)))
(defun print-posting (lst)
"Get printed representation for posting for paragraph LST."
(princ "\n" (current-buffer))
(prin1 (nconc (list (car lst) (cadr lst) (caddr lst)) ;(file-path encoding time)
- (mapcar '(lambda (sublst)
- (nconc (list (car sublst) ;(point total-words words words-hash)
- (cadr sublst) (caddr sublst))
- (hash-to-assoc (cadddr sublst))))
+ (mapcar (lambda (sublst)
+ (nconc (list (car sublst) ;(point total-words words words-hash)
+ (cadr sublst) (caddr sublst))
+ (hash-to-assoc (cadddr sublst))))
(cdddr lst)))
(current-buffer)))
(defun ir-lm-write-index (file)
"Write current index info to FILE."
(interactive
(list (read-file-name "Index file: " nil
".irlm" nil ".irlm")))
(message "Writing...")
(with-temp-file file
(prin1 (nconc (list *ir-total-count* *ir-words-count*) ;firstly write the global hash
(hash-to-assoc *ir-global-hash*))
(current-buffer))
(mapc 'print-posting *ir-hashes*)) ;write all postings
(message "Index written."))
(defun ir-lm-index (dir &optional file-types encoding append-p)
"Recursivelly process directory DIR and index all files.
FILE-TYPES determines file name patterns for indexing.
If ENCODING is nil, use default \(utf-8\) encoding for files.
If APPEND-P is non-nil, merge to the current index."
(interactive
(list
(read-directory-name "Top directory: " nil default-directory t)
(read-string "File names to be indexed: " "*.txt" nil "*.txt")
(if (not (y-or-n-p "Use default encoding? "))
(read-coding-system "Choose encoding: " 'cp1251))
(if *ir-global-hash*
(y-or-n-p "Add to existing configuration? "))))
(or *ir-global-hash*
(setq append-p nil))
(or *ir-word-cache*
(setq *ir-word-cache* (make-hash-table :test 'equal)))
(unless append-p
(ir-clear)
(setq *ir-global-hash* (make-hash-table :test 'equal)))
(ir-load-auxiliary)
(message "Indexing...")
- (maprdir '(lambda (file) (ir-lm-process-paragraphs file encoding))
+ (maprdir (lambda (file) (ir-lm-process-paragraphs file encoding))
dir file-types)
(message "Files successfully indexed.")
(ir-refresh-view))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Load existing index
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-get-file-posting (post &optional inc-globals-p)
"Convert file saved POST info to actually used structures.
INC-GLOBALS-P determines whether global word counts should be adjusted."
(nconc (list (car post) (cadr post) (caddr post)) ;(file-name encoding time)
- (mapcar '(lambda (subpost)
- (let ((total-words (cadr subpost))
- (index-words (caddr subpost)))
- (if inc-globals-p
- (setq *ir-total-count*
- (+ *ir-total-count* total-words)))
- (list (car subpost) total-words index-words
- (ir-assoc-to-hash (cdddr subpost) index-words
- nil inc-globals-p))))
+ (mapcar (lambda (subpost)
+ (let ((total-words (cadr subpost))
+ (index-words (caddr subpost)))
+ (if inc-globals-p
+ (setq *ir-total-count*
+ (+ *ir-total-count* total-words)))
+ (list (car subpost) total-words index-words
+ (ir-assoc-to-hash (cdddr subpost) index-words
+ nil inc-globals-p))))
(cdddr post))))
(defun ir-lm-load-file-posting (post &optional inc-globals-p)
"Get file saved POST. If newer posting already exists, discard.
INC-GLOBALS-P determines whether global word counts should be adjusted."
(let* ((file-path (car post))
(existing-file-time
- (caddr (find-fn '(lambda (post) (equal file-path (car post)))
+ (caddr (find-fn (lambda (post) (equal file-path (car post)))
*ir-hashes*))))
(if existing-file-time ;check if file is already in index
(if (file-exists-p file-path)
(when (time-less-p existing-file-time (caddr post)) ;if post is newer
(ir-remove-postings file-path (not inc-globals-p)) ;remove old posting from *ir-hashes*
(ir-lm-get-file-posting post inc-globals-p))
;discard posting and remove existing from *ir-hashes*
(ir-remove-postings file-path (not inc-globals-p)) ;housekeeping
nil)
(if (file-exists-p file-path) ;load only existing files
(ir-lm-get-file-posting post inc-globals-p)))))
(defun ir-lm-load-index-from-file (file)
"Load existing index from FILE."
(with-temp-buffer
(insert-file-contents file)
(goto-char (point-min))
(let ((not-inc-globals-p (null *ir-global-hash*)))
(if not-inc-globals-p ;need global hash from file only if current is cleared
(let ((global-hash (read-from-whole-string
(buffer-substring-no-properties
(line-beginning-position)
(line-end-position)))))
(setq *ir-total-count* (car global-hash)
*ir-words-count* (cadr global-hash))
(ir-assoc-to-hash (cddr global-hash) *ir-words-count* t)))
(let ((point-max (point-max)))
(while (and (= 0 (forward-line 1))
(< (point) point-max))
(let ((file-sexp (ir-lm-load-file-posting
(read-from-whole-string
(buffer-substring-no-properties
(line-beginning-position)
(line-end-position)))
(not not-inc-globals-p))))
(if file-sexp (push file-sexp *ir-hashes*))))))
(kill-buffer (current-buffer))))
(defun ir-lm-load-index (file &optional append-p)
"Load existing index FILE.
If APPEND-P is non-nil, keep previous index loaded as well."
(interactive
(list (read-file-name "Index file: " nil
".irlm" nil ".irlm")
(if *ir-global-hash*
(y-or-n-p
"Add to existing configuration or overwrite? "))))
(when (file-exists-p file)
(if (not (and *ir-global-hash*
append-p))
(ir-clear))
(ir-load-auxiliary)
(message "Loading...")
(ir-lm-load-index-from-file file)
(message "Index loaded.")
(ir-refresh-view)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Scoring
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-posting-score (hash base words &optional lambda)
"Get score from paragraph represented as HASH.
BASE is the total number of words in the paragraph.
WORDS is list of words in query.
LAMBDA is LM parameter between 0 and 1."
(or lambda
(setq lambda 0.5))
(let ((result
(apply '*
- (mapcar '(lambda (word)
- (let ((global-count
- (gethash word *ir-global-hash* 0)))
- (if (> global-count 0)
- (+ (* lambda
- (/ (float (gethash word hash 0))
- base))
- (* (- 1 lambda)
- (/ (float global-count)
- *ir-total-count*)))
- 1)))
+ (mapcar (lambda (word)
+ (let ((global-count
+ (gethash word *ir-global-hash* 0)))
+ (if (> global-count 0)
+ (+ (* lambda
+ (/ (float (gethash word hash 0))
+ base))
+ (* (- 1 lambda)
+ (/ (float global-count)
+ *ir-total-count*)))
+ 1)))
words))))
(if (= result 1) 0 result)))
(defun ir-lm-posting-min-score (words &optional lambda)
"Get minimum score possible for a paragraph.
WORDS is list of words in query.
LAMBDA is LM parameter between 0 and 1."
(or lambda
(setq lambda 0.5))
- (apply '* (mapcar '(lambda (word)
- (let ((global-count
- (gethash word *ir-global-hash* 0)))
- (if (> global-count 0)
- (* (- 1 lambda)
- (/ (float global-count)
- *ir-total-count*))
- 1)))
+ (apply '* (mapcar (lambda (word)
+ (let ((global-count
+ (gethash word *ir-global-hash* 0)))
+ (if (> global-count 0)
+ (* (- 1 lambda)
+ (/ (float global-count)
+ *ir-total-count*))
+ 1)))
words)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Search
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-insert-post (new best cnt)
"Insert NEW post based on score into BEST array with CNT elements."
(let ((new-val (aref new 0))
(place (1+ cnt)))
(when (> new-val 0)
(while (and (>= place 1)
(> new-val (aref (aref best (1- place)) 0)))
(setq place (1- place)))
(while (> cnt place)
(aset best cnt (aref best (1- cnt)))
(setq cnt (1- cnt)))
(if (>= cnt place)
(aset best place new))))
best)
(defun ir-lm-get-best-scores (query cnt)
"For QUERY which is list of search terms find best CNT results.
Return vector of vectors with info for best paragraphs."
(let ((best (make-vector cnt [0 "" -1 nil nil]))
(min-score (ir-lm-posting-min-score query *ir-lm-lambda*)))
(dolist (file *ir-hashes*)
(let ((file-path (car file)))
(if (file-exists-p file-path)
(dolist (post (cdddr file))
(let ((score
(ir-lm-posting-score (cadddr post)
(cadr post)
query
*ir-lm-lambda*)))
(if (> score min-score)
(setq best
(ir-lm-insert-post ;[score file point encoding]
(vector score file-path
(car post) (cadr file) (cadr post))
best (1- cnt)))))))))
best))
(defun highlight-search (pos query)
"Highlight words from POS on to the end of paragraph corresponding to QUERY."
(catch 'out
(let ((prev pos))
(dowords word
(let ((curr (point)))
(if (string-match "\n.*\n" ;detect just ended paragraph
(buffer-substring-no-properties
prev curr))
(throw 'out nil))
(when (member (ir-process-word (downcase word))
query)
(delete-char (- (length word)))
(insert
(propertize word
'face '((:foreground "green")))))
(setq prev curr))))))
(defun ir-lm-jump-to-result (file pos &optional encoding query)
"Open FILE and go to particular position POS.
If ENCODING is nil, use default encoding when loading result file.
QUERY is list of current search terms."
(interactive
(let ((point (point)))
(list (get-text-property point 'file)
(get-text-property point 'point)
(get-text-property point 'encoding)
(get-text-property point 'query))))
(let ((jump-buffer (generate-new-buffer (car (nreverse
(split-string file "/"))))))
(set-buffer jump-buffer)
(let ((coding-system-for-read encoding))
(insert-file-contents file))
(goto-char pos)
(when query ;highlight search terms
(highlight-search pos query)
(goto-char pos))
(switch-to-buffer jump-buffer)))
(defun ir-lm-insert-results (best query)
"Insert in current buffer BEST results.
QUERY is list of current search terms."
- (mapc '(lambda (post)
- (let ((file (aref post 1))
- (score (aref post 0))
- (marker (aref post 2))
- (encoding (aref post 3))
- (preview ""))
- (if (<= score 0)
- (throw 'end-results nil) ;premature end of meaningful results
- (insert "\n")
- (insert (make-link (car (nreverse (split-string file "/")))
- 'ir-lm-jump-to-result file marker
- t encoding query))
- (insert (format " [%f]" (* score 1000000)))
- (when (number-or-marker-p marker)
- (with-temp-buffer
- (let ((coding-system-for-read encoding))
- (insert-file-contents file))
- (goto-char marker)
- (setq preview
- (buffer-substring-no-properties marker
- (line-end-position)))
- (kill-buffer (current-buffer)))
- (insert "\n")
- (insert (make-link preview 'ir-lm-jump-to-result
- file marker nil encoding query))))))
+ (mapc (lambda (post)
+ (let ((file (aref post 1))
+ (score (aref post 0))
+ (marker (aref post 2))
+ (encoding (aref post 3))
+ (preview ""))
+ (if (<= score 0)
+ (throw 'end-results nil) ;premature end of meaningful results
+ (insert "\n")
+ (insert (make-link (car (nreverse (split-string file "/")))
+ 'ir-lm-jump-to-result file marker
+ t encoding query))
+ (insert (format " [%f]" (* score 1000000)))
+ (when (number-or-marker-p marker)
+ (with-temp-buffer
+ (let ((coding-system-for-read encoding))
+ (insert-file-contents file))
+ (goto-char marker)
+ (setq preview
+ (buffer-substring-no-properties marker
+ (line-end-position)))
+ (kill-buffer (current-buffer)))
+ (insert "\n")
+ (insert (make-link preview 'ir-lm-jump-to-result
+ file marker nil encoding query))))))
best))
(defun ir-lm-search (query-str &optional cnt)
"For QUERY-STR find best CNT results."
(interactive
(list (read-string "Search for: " nil t) nil))
(or cnt (setq cnt *ir-max-results*))
(if (null *ir-global-hash*)
(message "No index loaded.")
(or *ir-word-cache*
(setq *ir-word-cache* (make-hash-table :test 'equal)))
(let ((results (generate-new-buffer "*Search results*")))
(set-buffer results)
(local-set-key (kbd "<M-down>")
- '(lambda () (interactive) (forward-line 2)))
+ (lambda () (interactive) (forward-line 2)))
(local-set-key (kbd "<M-up>")
- '(lambda () (interactive) (forward-line -2)))
- (local-set-key (kbd "q") '(lambda () (interactive) (kill-buffer)))
+ (lambda () (interactive) (forward-line -2)))
+ (local-set-key (kbd "q") (lambda () (interactive) (kill-buffer)))
(switch-to-buffer results)
(insert "Results for: " query-str)
(catch 'end-results
- (let ((query (mapcar '(lambda (word)
- (ir-process-word (downcase word)))
+ (let ((query (mapcar (lambda (word)
+ (ir-process-word (downcase word)))
(split-string query-str))))
(ir-lm-insert-results (ir-lm-get-best-scores query cnt) query)))
(setq buffer-read-only t)
(goto-char (point-min))
(forward-line))
(ignore-errors
(kill-buffer "*Quail Completions*"))
(message (concat "Results for: " query-str))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;; Visualisation
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun ir-lm-set-keys ()
"Set key bindings in the IR buffer."
(local-set-key (kbd "i") 'ir-lm-index)
(local-set-key (kbd "l") 'ir-lm-load-index)
(local-set-key (kbd "w") 'ir-lm-write-index)
(local-set-key (kbd "f") 'ir-lm-search)
(local-set-key (kbd "c") 'ir-clear)
(local-set-key (kbd "m") 'ir-lm-change-max-results)
(local-set-key (kbd "p") 'ir-lm-change-min-words)
(local-set-key (kbd "b") 'ir-lm-change-lambda)
(local-set-key (kbd "s") 'ir-change-stem-level)
- (local-set-key (kbd "q") '(lambda () (interactive) (kill-buffer)))
- (local-set-key (kbd "r") '(lambda () (interactive) (ir-refresh-view))))
+ (local-set-key (kbd "q") (lambda () (interactive) (kill-buffer)))
+ (local-set-key (kbd "r") (lambda () (interactive) (ir-refresh-view))))
(defun ir-lm ()
"Create buffer with information and shortcuts."
(interactive)
(let ((ir-buffer (get-buffer-create "*Information retrieval*")))
(set-buffer ir-buffer)
(switch-to-buffer ir-buffer)
(insert (propertize "Information Retrieval - Basic Mixed Language Model"
'face '((:foreground "green") (:underline t)))
"\n\nOptions:\n"
(make-link "i -> index new directory"
'ir-lm-index)
"\n"
(make-link "l -> load existing index from file"
'ir-lm-load-index)
"\n"
(make-link "w -> write current index\(es\) to file"
'ir-lm-write-index)
"\n"
(make-link "f -> search in current loaded index\(es\)"
'ir-lm-search)
"\n"
(make-link "c -> clear current index\(es\)"
'ir-clear)
"\n"
(make-link "m -> change maximum search results"
'ir-lm-change-max-results)
"\n"
(make-link "p -> change minimum number of words in paragraph"
'ir-lm-change-min-words)
"\n"
(make-link "b -> change lambda"
'ir-lm-change-lambda)
"\n"
(make-link "s -> change stemming level"
'ir-change-stem-level)
"\n"
(make-link "q -> quit \(without clearing\)"
- '(lambda () (interactive) (kill-buffer)))
+ (lambda () (interactive) (kill-buffer)))
"\n\n"
"maximum results = " (format "%d\n" *ir-max-results*)
"minimum number of words in paragraph = "
(format "%d\n" *ir-lm-min-words*)
"lambda = " (format "%f\n" *ir-lm-lambda*)
"stemming level = " (format "%d\n" *ir-stem-level*)
"total words in texts = " (format "%d\n" *ir-total-count*)
"words in index = " (format "%d\n" *ir-words-count*)
"Currently indexed files [total words]:\n")
(ir-lm-set-keys)
(ir-list-index)
(setq buffer-read-only t)
(set-buffer-modified-p nil)
(goto-char (point-min))
(forward-line 3)))
(provide 'ir-lm)
;;; ir-lm.el ends here
\ No newline at end of file
|
techniker/cs1504_barcode_tool | cbb224ea68a340beb00351a9b6f2bc6700465da2 | fixed Kaisercraft barcode stripping | diff --git a/cs1504.py b/cs1504.py
index 500c0de..a1ef1cd 100644
--- a/cs1504.py
+++ b/cs1504.py
@@ -1,335 +1,334 @@
#!/usr/bin/env python
# Copyright (c) 2010, Bjoern Heller. All rights reserved
# This code is licensed under GNU/ GPL
import sys, time, datetime, serial, struct, pprint #global variables
#declare serial com port (may change under macosx
if sys.platform == 'darwin':
serial_port = 'cu.usbserial-00402126'
elif sys.platform == 'linux':
serial_port = '/dev/ttyUSB0'
elif sys.platform == 'win32':
# this port varies from PC to PC
serial_port = 'COM8'
else:
serial_port = 0
version = '$Id: cs1504.py,v 2.0 15/03/2009 05:47:42 majid Exp majid $' #version string
print >> sys.stderr, ''
print >> sys.stderr, 'Hellercom.de Symbol CS1504 Barcode Scanner Software'
print >> sys.stderr, '---------------------------------------------------'
print >> sys.stderr, ''
print >> sys.stderr, 'This software is licensed under GNU/ GPL'
print >> sys.stderr, ''
print >> sys.stderr, '[email protected] http://www.hellercom.de'
print >> sys.stderr, ''
# Revision history:
# $Log: cs1504.py,v $
#
########################################################################
# bar code conventions
def format_isbn(isbn):
"""Produce an ISBN check digit"""
# calculate check digit
isbn = isbn.replace('-', '')
assert len(isbn) >= 9 and len(isbn) <= 10
check = 0
for i in range(9):
check += (10 - i) * (ord(isbn[i]) - ord('0'))
check = -check % 11
if check == 10:
check = 'X'
else:
check = str(check)
if len(isbn) > 9:
assert isbn[-1] == check
else:
isbn = isbn + check
# lookup ISBN specs at http://www.isbn-international.org/en/userman/chapter4.html
#
return isbn
def expand(symbology, code):
"""Expand certain types of common book codes"""
# 10-digit ISBNs are encoded as EAN-13 with the charming fictitious country
# code 978, a.k.a. "bookland"
# see http://www.adams1.com/pub/russadam/isbn.html
if symbology.startswith('EAN-13') and code.startswith('978'):
symbology = 'ISBN'
code = format_isbn(code[3:12])
return symbology, code
########################################################################
# the Symbol CS 1504 protocol (ref. to PDF file) got it from symbol directly
#also some setup commands supported but not yet implemented (such as led settings an
#stay awake commands
symbologies = {
0x16: 'Bookland',
0x0E: 'MSI',
0x02: 'Codabar',
0x11: 'PDF-417',
0x0c: 'Code 11',
0x26: 'Postbar (Canada)',
0x20: 'Code 32',
0x1e: 'Postnet (US)',
0x03: 'Code 128',
0x23: 'Postal (Australia)',
0x01: 'Code 39',
0x22: 'Postal (Japan)',
0x13: 'Code 39 Full ASCII',
0x27: 'Postal (UK)',
0x07: 'Code 93',
0x1c: 'QR code',
0x1d: 'Composite',
0x31: 'RSS limited',
0x17: 'Coupon',
0x30: 'RSS-14',
0x04: 'D25',
0x32: 'RSS Expanded',
0x1b: 'Data Matrix',
0x24: 'Signature',
0x0f: 'EAN-128',
0x15: 'Trioptic Code 39',
0x0b: 'EAN-13',
0x08: 'UPCA',
0x4b: 'EAN-13+2',
0x48: 'UPCA+2',
0x8b: 'EAN-13+5',
0x88: 'UPCA+5',
0x0a: 'EAN-8',
0x09: 'UPCE',
0x4a: 'EAN-8+2',
0x49: 'UPCE+2',
0x8a: 'EAN-8+5',
0x89: 'UPCE+5',
0x05: 'IATA',
0x10: 'UPCE1',
0x19: 'ISBT-128',
0x50: 'UPCE1+2',
0x21: 'ISBT-128 concatenated',
0x90: 'UPCE1+5',
0x06: 'ITF',
0x28: 'Macro PDF'
}
MAX_RESP = 6144
class CS1504: #comm code
def __init__(self, port='/dev/cu.usbserial'):
attempts = 0
connected = False
while not connected:
try:
attempts += 1
self.ser = serial.Serial(port,
baudrate=9600,
bytesize=8,
parity=serial.PARITY_ODD,
stopbits=serial.STOPBITS_ONE,
timeout=2)
connected = True
except serial.SerialException:
if attempts <= 3:
print >> sys.stderr, 'connection on', port, 'failed, retrying'
time.sleep(2.0)
else:
print >> sys.stderr, 'giving up :( bye'
print >> 'try changing the com port settings'
print >> 'or check your scanner for battery'
raise
self.delta = datetime.timedelta(0)
self.serial = None
self.sw_ver = None
self.last_barcodes = []
def interrogate(self):
"""Initiate communications with the scanner"""
print >> sys.stderr, 'Using serial device:', self.ser.portstr + '... ',
count = 0
while count < 50:
self.send('\x01\x02\x00')
try:
data = self.recv(23)
except AssertionError:
time.sleep(1.0)
data = None
if not data:
count += 1
time.sleep(0.2)
continue
print >> sys.stderr, 'connected'
break
if not data:
raise IOError
version, status = map(ord, data[2:4])
assert status in [0, 22]
if status == 22:
print >> sys.stderr, '!!!!Scanner Battery is low!!!!'
self.serial = data[4:12]
self.sw_ver = data[12:20]
assert data[20] == '\0'
print >> sys.stderr, 'serial#', self.serial.encode('hex')
print >> sys.stderr, 'Scanner Software version:', self.sw_ver
def get_time(self):
"""Getting the time set in the scanner and calculating the drift..."""
print >> sys.stderr, 'reading clock for drift...'
self.send('\x0a\x02\x00')
self.time_response(True)
def set_time(self):
"""clearing scanner time..."""
now = list(datetime.datetime.now().timetuple()[0:6])
now[0] -= 2000
now.reverse()
self.send('\x09\x02\x06' + ''.join(map(chr, now)) + '\0')
self.time_response()
print >> sys.stderr, 'done!'
def time_response(self, calculate_drift=False):
now = datetime.datetime.now()
data = self.recv(12)
assert data[2] == '\x06'
s, mi, h, d, m, y = map(ord, data[3:9])
y += 2000
ts = datetime.datetime(y, m, d, h, mi, s)
# determine the clock drift so we can correct timestamps
if calculate_drift:
self.delta = now - ts
print >> sys.stderr, 'clock drift is:', self.delta
if abs(self.delta).seconds > 60:
print >> sys.stderr, '!!!!Found big difference between scanner RTC and host clock!!!!',
print >> sys.stderr, self.delta
def get_barcodes(self):
"""Retrieving bar codes and timestamps from scanner's memory, and
correcting clock drift...
"""
print >> sys.stderr, 'reading barcodes...',
count = 0
# retry up to 5 times to read scanner
while count < 5:
try:
self.send('\x07\x02\x00')
data = self.recv()
assert data[2:10] == self.serial, data[2:10].encode('hex')
break
except AssertionError:
count += 1
time.sleep(0.2)
self.last_barcodes = []
data = data[10:-3]
while data:
length = ord(data[0])
first, data = data[1:length+1], data[length+1:]
symbology = symbologies.get(ord(first[0]), 'UNKNOWN')
code = first[1:-4]
t = struct.unpack('>I', first[-4:])[0]
y = 2000 + int(t & 0x3f)
t >>= 6
m = int(t & 0x0f)
t >>= 4
d = int(t & 0x1f)
t >>= 5
h = int(t & 0x1f)
t >>= 5
mi = int(t & 0x3f)
t >>= 6
s = int(t & 0x3f)
ts = datetime.datetime(y, m, d, h, mi, s) + self.delta
- symbology, code = expand(symbology, code)
self.last_barcodes.append((symbology, code, ts))
print >> sys.stderr, 'done (%d read)' % len(self.last_barcodes)
return self.last_barcodes
def clear_barcodes(self):
"""Clearing the bar codes in the scanner's memory..."""
print >> sys.stderr, 'clearing barcodes...',
self.send('\x02\x02\x00')
data = self.recv(5)
print >> sys.stderr, 'done!'
def power_down(self):
"""Shutting the scanner down to save battery life..."""
print >> sys.stderr, 'powering down scanner...',
self.send('\x05\x02\x00')
data = self.recv(5)
print >> sys.stderr, 'done!'
def send(self, cmd):
"""Sending a command to the scanner..."""
self.ser.write(cmd)
self.ser.write(crc16(cmd))
def recv(self, length=MAX_RESP):
"""Receive a response. For fixed-size responses, specifying it will take
less time as we won't need to wait for the timeout to return data
"""
data = self.ser.read(length)
if data:
assert data.startswith('\x06\x02'), data.encode('hex')
assert data[-2:] == crc16(data[:-2])
assert data[-3] == '\0'
return data
def close(self):
self.ser.close()
def __del__(self):
self.close()
del self.ser
########################################################################
# Modified from:
# http://news.hping.org/comp.lang.python.archive/18112.html
# to use the algorithm as specified by Symbol
# original crc16.py by Bryan G. Olson, 2005
# This module is free software and may be used and
# distributed under the same terms as Python itself.
import array
def crc16(string, value=0):
"""CRC function using Symbol's specified algorithm
"""
value = 0xffff
for ch in string:
value = table[ord(ch) ^ (value & 0xff)] ^ (value >> 8)
#return value
return struct.pack('>H', ~value) #here i get an error ->get it fixed
# CRC-16 poly: p(x) = x**16 + x**15 + x**2 + 1
# top bit implicit, reflected
poly = 0xa001
table = array.array('H')
for byte in range(256):
crc = 0
for bit in range(8):
if (byte ^ crc) & 1:
crc = (crc >> 1) ^ poly
else:
crc >>= 1
byte >>= 1
table.append(crc)
assert crc16('\x01\x02\x00') == '\x9f\xde', \
map(hex, map(ord, crc16('\x01\x02\x00')))
if __name__ == '__main__':
scanner = CS1504(serial_port)
scanner.interrogate()
scanner.get_time()
scanner.set_time()
barcodes = scanner.get_barcodes()
for symbology, code, timestamp in barcodes:
print '%s,%s,%s' % (symbology, code, str(timestamp).split('.')[0])
if barcodes:
scanner.clear_barcodes()
scanner.power_down()
print >> sys.stderr, 'good bye!'
print >> sys.stderr, ''
print >> sys.stderr, '----------------'
|
techniker/cs1504_barcode_tool | 606a2460d725a20ec521ac2595733d2ab1e3f9b7 | date updated | diff --git a/serial/.DS_Store b/serial/.DS_Store
index b53d431..fb50077 100644
Binary files a/serial/.DS_Store and b/serial/.DS_Store differ
|
techniker/cs1504_barcode_tool | 687bac60eb13e551c53a1b52ff8c8602b17d6d62 | serial libs | diff --git a/serial/.DS_Store b/serial/.DS_Store
new file mode 100644
index 0000000..b53d431
Binary files /dev/null and b/serial/.DS_Store differ
diff --git a/serial/.cvsignore b/serial/.cvsignore
new file mode 100644
index 0000000..7e99e36
--- /dev/null
+++ b/serial/.cvsignore
@@ -0,0 +1 @@
+*.pyc
\ No newline at end of file
diff --git a/serial/__init__.py b/serial/__init__.py
new file mode 100644
index 0000000..681ad5c
--- /dev/null
+++ b/serial/__init__.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+#portable serial port access with python
+#this is a wrapper module for different platform implementations
+#
+# (C)2001-2002 Chris Liechti <[email protected]>
+# this is distributed under a free software license, see license.txt
+
+VERSION = '2.4'
+
+import sys
+
+if sys.platform == 'cli':
+ from serialcli import *
+else:
+ import os
+ #chose an implementation, depending on os
+ if os.name == 'nt': #sys.platform == 'win32':
+ from serialwin32 import *
+ elif os.name == 'posix':
+ from serialposix import *
+ elif os.name == 'java':
+ from serialjava import *
+ else:
+ raise Exception("Sorry: no implementation for your platform ('%s') available" % os.name)
+
diff --git a/serial/__init__.pyc b/serial/__init__.pyc
new file mode 100644
index 0000000..e0a425a
Binary files /dev/null and b/serial/__init__.pyc differ
diff --git a/serial/serialcli.py b/serial/serialcli.py
new file mode 100644
index 0000000..9864848
--- /dev/null
+++ b/serial/serialcli.py
@@ -0,0 +1,247 @@
+#! python
+# Python Serial Port Extension for Win32, Linux, BSD, Jython and .NET/Mono
+# serial driver for .NET/Mono (IronPython), .NET >= 2
+# see __init__.py
+#
+# (C) 2008 Chris Liechti <[email protected]>
+# this is distributed under a free software license, see license.txt
+
+import clr
+import System
+import System.IO.Ports
+from serialutil import *
+
+def device(portnum):
+ """Turn a port number into a device name"""
+ return System.IO.Ports.SerialPort.GetPortNames()[portnum]
+
+# must invoke function with byte array, make a helper to convert strings
+# to byte arrays
+sab = System.Array[System.Byte]
+def as_byte_array(string):
+ return sab([ord(x) for x in string])
+
+class Serial(SerialBase):
+ """Serial port implemenation for .NET/Mono."""
+
+ BAUDRATES = (50,75,110,134,150,200,300,600,1200,1800,2400,4800,9600,
+ 19200,38400,57600,115200)
+
+ def open(self):
+ """Open port with current settings. This may throw a SerialException
+ if the port cannot be opened."""
+ if self._port is None:
+ raise SerialException("Port must be configured before it can be used.")
+ try:
+ self._port_handle = System.IO.Ports.SerialPort(self.portstr)
+ except Exception, msg:
+ self._port_handle = None
+ raise SerialException("could not open port %s: %s" % (self.portstr, msg))
+
+ self._reconfigurePort()
+ self._port_handle.Open()
+ self._isOpen = True
+ if not self._rtscts:
+ self.setRTS(True)
+ self.setDTR(True)
+ self.flushInput()
+ self.flushOutput()
+
+ def _reconfigurePort(self):
+ """Set communication parameters on opened port."""
+ if not self._port_handle:
+ raise SerialException("Can only operate on a valid port handle")
+
+ self.ReceivedBytesThreshold = 1
+
+ if self._timeout is None:
+ self._port_handle.ReadTimeout = System.IO.Ports.SerialPort.InfiniteTimeout
+ else:
+ self._port_handle.ReadTimeout = int(self._timeout*1000)
+
+ # if self._timeout != 0 and self._interCharTimeout is not None:
+ # timeouts = (int(self._interCharTimeout * 1000),) + timeouts[1:]
+
+ if self._writeTimeout is None:
+ self._port_handle.WriteTimeout = System.IO.Ports.SerialPort.InfiniteTimeout
+ else:
+ self._port_handle.WriteTimeout = int(self._writeTimeout*1000)
+
+
+ # Setup the connection info.
+ try:
+ self._port_handle.BaudRate = self._baudrate
+ except IOError, e:
+ # catch errors from illegal baudrate settings
+ raise ValueError(str(e))
+
+ if self._bytesize == FIVEBITS:
+ self._port_handle.DataBits = 5
+ elif self._bytesize == SIXBITS:
+ self._port_handle.DataBits = 6
+ elif self._bytesize == SEVENBITS:
+ self._port_handle.DataBits = 7
+ elif self._bytesize == EIGHTBITS:
+ self._port_handle.DataBits = 8
+ else:
+ raise ValueError("Unsupported number of data bits: %r" % self._bytesize)
+
+ if self._parity == PARITY_NONE:
+ self._port_handle.Parity = System.IO.Ports.Parity.None
+ elif self._parity == PARITY_EVEN:
+ self._port_handle.Parity = System.IO.Ports.Parity.Even
+ elif self._parity == PARITY_ODD:
+ self._port_handle.Parity = System.IO.Ports.Parity.Odd
+ elif self._parity == PARITY_MARK:
+ self._port_handle.Parity = System.IO.Ports.Parity.Mark
+ elif self._parity == PARITY_SPACE:
+ self._port_handle.Parity = System.IO.Ports.Parity.Space
+ else:
+ raise ValueError("Unsupported parity mode: %r" % self._parity)
+
+ if self._stopbits == STOPBITS_ONE:
+ self._port_handle.StopBits = System.IO.Ports.StopBits.One
+ elif self._stopbits == STOPBITS_TWO:
+ self._port_handle.StopBits = System.IO.Ports.StopBits.Two
+ else:
+ raise ValueError("Unsupported number of stop bits: %r" % self._stopbits)
+
+ if self._rtscts and self._xonxoff:
+ self._port_handle.Handshake = System.IO.Ports.Handshake.RequestToSendXOnXOff
+ elif self._rtscts:
+ self._port_handle.Handshake = System.IO.Ports.Handshake.RequestToSend
+ elif self._xonxoff:
+ self._port_handle.Handshake = System.IO.Ports.Handshake.XOnXOff
+ else:
+ self._port_handle.Handshake = System.IO.Ports.Handshake.None
+
+ #~ def __del__(self):
+ #~ self.close()
+
+ def close(self):
+ """Close port"""
+ if self._isOpen:
+ if self._port_handle:
+ try:
+ self._port_handle.Close()
+ except System.IO.Ports.InvalidOperationException:
+ # ignore errors. can happen for unplugged USB serial devices
+ pass
+ self._port_handle = None
+ self._isOpen = False
+
+ def makeDeviceName(self, port):
+ try:
+ return device(port)
+ except TypeError, e:
+ raise SerialException(str(e))
+
+ # - - - - - - - - - - - - - - - - - - - - - - - -
+
+ def inWaiting(self):
+ """Return the number of characters currently in the input buffer."""
+ if not self._port_handle: raise portNotOpenError
+ return self._port_handle.BytesToRead
+
+ def read(self, size=1):
+ """Read size bytes from the serial port. If a timeout is set it may
+ return less characters as requested. With no timeout it will block
+ until the requested number of bytes is read."""
+ if not self._port_handle: raise portNotOpenError
+ # must use single byte reads as this is the only way to read
+ # without applying encodings
+ data = []
+ while size:
+ try:
+ data.append(chr(self._port_handle.ReadByte()))
+ except System.TimeoutException, e:
+ break
+ else:
+ size -= 1
+ return ''.join(data)
+
+ def write(self, data):
+ """Output the given string over the serial port."""
+ if not self._port_handle: raise portNotOpenError
+ if not isinstance(data, str):
+ raise TypeError('expected str, got %s' % type(data))
+ try:
+ # must call overloaded method with byte array argument
+ # as this is the only one not applying encodings
+ self._port_handle.Write(as_byte_array(data), 0, len(data))
+ except System.TimeoutException, e:
+ raise writeTimeoutError
+
+ def flushInput(self):
+ """Clear input buffer, discarding all that is in the buffer."""
+ if not self._port_handle: raise portNotOpenError
+ self._port_handle.DiscardInBuffer()
+
+ def flushOutput(self):
+ """Clear output buffer, aborting the current output and
+ discarding all that is in the buffer."""
+ if not self._port_handle: raise portNotOpenError
+ self._port_handle.DiscardOutBuffer()
+
+ def sendBreak(self, duration=0.25):
+ """Send break condition. Timed, returns to idle state after given duration."""
+ if not self._port_handle: raise portNotOpenError
+ import time
+ self._port_handle.BreakState = True
+ time.sleep(duration)
+ self._port_handle.BreakState = False
+
+ def setBreak(self, level=True):
+ """Set break: Controls TXD. When active, to transmitting is possible."""
+ if not self._port_handle: raise portNotOpenError
+ self._port_handle.BreakState = bool(level)
+
+ def setRTS(self, level=True):
+ """Set terminal status line: Request To Send"""
+ if not self._port_handle: raise portNotOpenError
+ self._port_handle.RtsEnable = bool(level)
+
+ def setDTR(self, level=True):
+ """Set terminal status line: Data Terminal Ready"""
+ if not self._port_handle: raise portNotOpenError
+ self._port_handle.DtrEnable = bool(level)
+
+ def getCTS(self):
+ """Read terminal status line: Clear To Send"""
+ if not self._port_handle: raise portNotOpenError
+ return self._port_handle.CtsHolding
+
+ def getDSR(self):
+ """Read terminal status line: Data Set Ready"""
+ if not self._port_handle: raise portNotOpenError
+ return self._port_handle.DsrHolding
+
+ def getRI(self):
+ """Read terminal status line: Ring Indicator"""
+ if not self._port_handle: raise portNotOpenError
+ #~ return self._port_handle.XXX
+ return False #XXX an error would be better
+
+ def getCD(self):
+ """Read terminal status line: Carrier Detect"""
+ if not self._port_handle: raise portNotOpenError
+ return self._port_handle.CDHolding
+
+ # - - platform specific - - - -
+
+#Nur Testfunktion!!
+if __name__ == '__main__':
+ s = Serial(0)
+ print s
+
+ s = Serial()
+ print s
+
+
+ s.baudrate = 19200
+ s.databits = 7
+ s.close()
+ s.port = 0
+ s.open()
+ print s
+
diff --git a/serial/serialjava.py b/serial/serialjava.py
new file mode 100644
index 0000000..cca46dc
--- /dev/null
+++ b/serial/serialjava.py
@@ -0,0 +1,240 @@
+#!jython
+#
+# Python Serial Port Extension for Win32, Linux, BSD, Jython
+# module for serial IO for Jython and JavaComm
+# see __init__.py
+#
+# (C) 2002-2008 Chris Liechti <[email protected]>
+# this is distributed under a free software license, see license.txt
+
+from serialutil import *
+
+def my_import(name):
+ mod = __import__(name)
+ components = name.split('.')
+ for comp in components[1:]:
+ mod = getattr(mod, comp)
+ return mod
+
+def detect_java_comm(names):
+ """try given list of modules and return that imports"""
+ for name in names:
+ try:
+ mod = my_import(name)
+ mod.SerialPort
+ return mod
+ except (ImportError, AttributeError):
+ pass
+ raise ImportError("No Java Communications API implementation found")
+
+# Java Communications API implementations
+# http://mho.republika.pl/java/comm/
+
+comm = detect_java_comm([
+ 'javax.comm', # Sun/IBM
+ 'gnu.io', # RXTX
+])
+
+
+def device(portnumber):
+ """Turn a port number into a device name"""
+ enum = comm.CommPortIdentifier.getPortIdentifiers()
+ ports = []
+ while enum.hasMoreElements():
+ el = enum.nextElement()
+ if el.getPortType() == comm.CommPortIdentifier.PORT_SERIAL:
+ ports.append(el)
+ return ports[portnumber].getName()
+
+class Serial(SerialBase):
+ """Serial port class, implemented with Java Communications API and
+ thus usable with jython and the appropriate java extension."""
+
+ def open(self):
+ """Open port with current settings. This may throw a SerialException
+ if the port cannot be opened."""
+ if self._port is None:
+ raise SerialException("Port must be configured before it can be used.")
+ if type(self._port) == type(''): #strings are taken directly
+ portId = comm.CommPortIdentifier.getPortIdentifier(self._port)
+ else:
+ portId = comm.CommPortIdentifier.getPortIdentifier(device(self._port)) #numbers are transformed to a comportid obj
+ try:
+ self.sPort = portId.open("python serial module", 10)
+ except Exception, msg:
+ self.sPort = None
+ raise SerialException("Could not open port: %s" % msg)
+ self._reconfigurePort()
+ self._instream = self.sPort.getInputStream()
+ self._outstream = self.sPort.getOutputStream()
+ self._isOpen = True
+
+ def _reconfigurePort(self):
+ """Set commuication parameters on opened port."""
+ if not self.sPort:
+ raise SerialException("Can only operate on a valid port handle")
+
+ self.sPort.enableReceiveTimeout(30)
+ if self._bytesize == FIVEBITS:
+ jdatabits = comm.SerialPort.DATABITS_5
+ elif self._bytesize == SIXBITS:
+ jdatabits = comm.SerialPort.DATABITS_6
+ elif self._bytesize == SEVENBITS:
+ jdatabits = comm.SerialPort.DATABITS_7
+ elif self._bytesize == EIGHTBITS:
+ jdatabits = comm.SerialPort.DATABITS_8
+ else:
+ raise ValueError("unsupported bytesize: %r" % self._bytesize)
+
+ if self._stopbits == STOPBITS_ONE:
+ jstopbits = comm.SerialPort.STOPBITS_1
+ elif stopbits == STOPBITS_ONE_HALVE:
+ self._jstopbits = comm.SerialPort.STOPBITS_1_5
+ elif self._stopbits == STOPBITS_TWO:
+ jstopbits = comm.SerialPort.STOPBITS_2
+ else:
+ raise ValueError("unsupported number of stopbits: %r" % self._stopbits)
+
+ if self._parity == PARITY_NONE:
+ jparity = comm.SerialPort.PARITY_NONE
+ elif self._parity == PARITY_EVEN:
+ jparity = comm.SerialPort.PARITY_EVEN
+ elif self._parity == PARITY_ODD:
+ jparity = comm.SerialPort.PARITY_ODD
+ elif self._parity == PARITY_MARK:
+ jparity = comm.SerialPort.PARITY_MARK
+ elif self._parity == PARITY_SPACE:
+ jparity = comm.SerialPort.PARITY_SPACE
+ else:
+ raise ValueError("unsupported parity type: %r" % self._parity)
+
+ jflowin = jflowout = 0
+ if self._rtscts:
+ jflowin |= comm.SerialPort.FLOWCONTROL_RTSCTS_IN
+ jflowout |= comm.SerialPort.FLOWCONTROL_RTSCTS_OUT
+ if self._xonxoff:
+ jflowin |= comm.SerialPort.FLOWCONTROL_XONXOFF_IN
+ jflowout |= comm.SerialPort.FLOWCONTROL_XONXOFF_OUT
+
+ self.sPort.setSerialPortParams(baudrate, jdatabits, jstopbits, jparity)
+ self.sPort.setFlowControlMode(jflowin | jflowout)
+
+ if self._timeout >= 0:
+ self.sPort.enableReceiveTimeout(self._timeout*1000)
+ else:
+ self.sPort.disableReceiveTimeout()
+
+ def close(self):
+ """Close port"""
+ if self._isOpen:
+ if self.sPort:
+ self._instream.close()
+ self._outstream.close()
+ self.sPort.close()
+ self.sPort = None
+ self._isOpen = False
+
+ def makeDeviceName(self, port):
+ return device(port)
+
+ # - - - - - - - - - - - - - - - - - - - - - - - -
+
+ def inWaiting(self):
+ """Return the number of characters currently in the input buffer."""
+ if not self.sPort: raise portNotOpenError
+ return self._instream.available()
+
+ def read(self, size=1):
+ """Read size bytes from the serial port. If a timeout is set it may
+        return fewer characters than requested. With no timeout it will block
+ until the requested number of bytes is read."""
+ if not self.sPort: raise portNotOpenError
+ read = ''
+ if size > 0:
+ while len(read) < size:
+ x = self._instream.read()
+ if x == -1:
+ if self.timeout >= 0:
+ break
+ else:
+ read = read + chr(x)
+ return read
+
+ def write(self, data):
+ """Output the given string over the serial port."""
+ if not self.sPort: raise portNotOpenError
+ self._outstream.write(data)
+
+ def flushInput(self):
+ """Clear input buffer, discarding all that is in the buffer."""
+ if not self.sPort: raise portNotOpenError
+ self._instream.skip(self._instream.available())
+
+ def flushOutput(self):
+ """Clear output buffer, aborting the current output and
+ discarding all that is in the buffer."""
+ if not self.sPort: raise portNotOpenError
+ self._outstream.flush()
+
+ def sendBreak(self, duration=0.25):
+ """Send break condition. Timed, returns to idle state after given duration."""
+ if not self.sPort: raise portNotOpenError
+ self.sPort.sendBreak(duration*1000.0)
+
+ def setBreak(self, level=1):
+ """Set break: Controls TXD. When active, to transmitting is possible."""
+ if self.fd is None: raise portNotOpenError
+ raise SerialException("The setBreak function is not implemented in java.")
+
+ def setRTS(self, level=1):
+ """Set terminal status line: Request To Send"""
+ if not self.sPort: raise portNotOpenError
+ self.sPort.setRTS(level)
+
+ def setDTR(self, level=1):
+ """Set terminal status line: Data Terminal Ready"""
+ if not self.sPort: raise portNotOpenError
+ self.sPort.setDTR(level)
+
+ def getCTS(self):
+ """Read terminal status line: Clear To Send"""
+ if not self.sPort: raise portNotOpenError
+ self.sPort.isCTS()
+
+ def getDSR(self):
+ """Read terminal status line: Data Set Ready"""
+ if not self.sPort: raise portNotOpenError
+ self.sPort.isDSR()
+
+ def getRI(self):
+ """Read terminal status line: Ring Indicator"""
+ if not self.sPort: raise portNotOpenError
+ self.sPort.isRI()
+
+ def getCD(self):
+ """Read terminal status line: Carrier Detect"""
+ if not self.sPort: raise portNotOpenError
+ self.sPort.isCD()
+
+
+
+if __name__ == '__main__':
+ s = Serial(0,
+ baudrate=19200, #baudrate
+ bytesize=EIGHTBITS, #number of databits
+ parity=PARITY_EVEN, #enable parity checking
+ stopbits=STOPBITS_ONE, #number of stopbits
+ timeout=3, #set a timeout value, None for waiting forever
+ xonxoff=0, #enable software flow control
+ rtscts=0, #enable RTS/CTS flow control
+ )
+ s.setRTS(1)
+ s.setDTR(1)
+ s.flushInput()
+ s.flushOutput()
+ s.write('hello')
+ print repr(s.read(5))
+ print s.inWaiting()
+ del s
+
+
diff --git a/serial/serialposix.py b/serial/serialposix.py
new file mode 100644
index 0000000..174e2f7
--- /dev/null
+++ b/serial/serialposix.py
@@ -0,0 +1,492 @@
+#!/usr/bin/env python
+# Python Serial Port Extension for Win32, Linux, BSD, Jython
+# module for serial IO for POSIX compatible systems, like Linux
+# see __init__.py
+#
+# (C) 2001-2008 Chris Liechti <[email protected]>
+# this is distributed under a free software license, see license.txt
+#
+# parts based on code from Grant B. Edwards <[email protected]>:
+# ftp://ftp.visi.com/users/grante/python/PosixSerial.py
+# references: http://www.easysw.com/~mike/serial/serial.html
+
+import sys, os, fcntl, termios, struct, select, errno
+from serialutil import *
+
+#Do check the Python version as some constants have moved.
+if (sys.hexversion < 0x020100f0):
+ import TERMIOS
+else:
+ TERMIOS = termios
+
+if (sys.hexversion < 0x020200f0):
+ import FCNTL
+else:
+ FCNTL = fcntl
+
+#try to detect the os so that a device can be selected...
+plat = sys.platform.lower()
+
+if plat[:5] == 'linux': #Linux (confirmed)
+ def device(port):
+ return '/dev/ttyS%d' % port
+
+elif plat == 'cygwin': #cywin/win32 (confirmed)
+ def device(port):
+ return '/dev/com%d' % (port + 1)
+
+elif plat == 'openbsd3': #BSD (confirmed)
+ def device(port):
+ return '/dev/ttyp%d' % port
+
+elif plat[:3] == 'bsd' or \
+ plat[:7] == 'freebsd' or \
+ plat[:7] == 'openbsd' or \
+ plat[:6] == 'darwin': #BSD (confirmed for freebsd4: cuaa%d)
+ def device(port):
+ return '/dev/cuad%d' % port
+
+elif plat[:6] == 'netbsd': #NetBSD 1.6 testing by Erk
+ def device(port):
+ return '/dev/dty%02d' % port
+
+elif plat[:4] == 'irix': #IRIX (partially tested)
+ def device(port):
+ return '/dev/ttyf%d' % (port+1) #XXX different device names depending on flow control
+
+elif plat[:2] == 'hp': #HP-UX (not tested)
+ def device(port):
+ return '/dev/tty%dp0' % (port+1)
+
+elif plat[:5] == 'sunos': #Solaris/SunOS (confirmed)
+ def device(port):
+ return '/dev/tty%c' % (ord('a')+port)
+
+elif plat[:3] == 'aix': #aix
+ def device(port):
+ return '/dev/tty%d' % (port)
+
+else:
+ #platform detection has failed...
+ print """don't know how to number ttys on this system.
+! Use an explicit path (eg /dev/ttyS1) or send this information to
+! the author of this module:
+
+sys.platform = %r
+os.name = %r
+serialposix.py version = %s
+
+also add the device name of the serial port and where the
+counting starts for the first serial port.
+e.g. 'first serial port: /dev/ttyS0'
+and with a bit luck you can get this module running...
+""" % (sys.platform, os.name, VERSION)
+ #no exception, just continue with a brave attempt to build a device name
+ #even if the device name is not correct for the platform it has chances
+    #to work using a string with the real device name as port parameter.
+ def device(portum):
+ return '/dev/ttyS%d' % portnum
+ #~ raise Exception, "this module does not run on this platform, sorry."
+
+#whats up with "aix", "beos", ....
+#they should work, just need to know the device names.
+
+
+#load some constants for later use.
+#try to use values from TERMIOS, use defaults from linux otherwise
+TIOCMGET = hasattr(TERMIOS, 'TIOCMGET') and TERMIOS.TIOCMGET or 0x5415
+TIOCMBIS = hasattr(TERMIOS, 'TIOCMBIS') and TERMIOS.TIOCMBIS or 0x5416
+TIOCMBIC = hasattr(TERMIOS, 'TIOCMBIC') and TERMIOS.TIOCMBIC or 0x5417
+TIOCMSET = hasattr(TERMIOS, 'TIOCMSET') and TERMIOS.TIOCMSET or 0x5418
+
+#TIOCM_LE = hasattr(TERMIOS, 'TIOCM_LE') and TERMIOS.TIOCM_LE or 0x001
+TIOCM_DTR = hasattr(TERMIOS, 'TIOCM_DTR') and TERMIOS.TIOCM_DTR or 0x002
+TIOCM_RTS = hasattr(TERMIOS, 'TIOCM_RTS') and TERMIOS.TIOCM_RTS or 0x004
+#TIOCM_ST = hasattr(TERMIOS, 'TIOCM_ST') and TERMIOS.TIOCM_ST or 0x008
+#TIOCM_SR = hasattr(TERMIOS, 'TIOCM_SR') and TERMIOS.TIOCM_SR or 0x010
+
+TIOCM_CTS = hasattr(TERMIOS, 'TIOCM_CTS') and TERMIOS.TIOCM_CTS or 0x020
+TIOCM_CAR = hasattr(TERMIOS, 'TIOCM_CAR') and TERMIOS.TIOCM_CAR or 0x040
+TIOCM_RNG = hasattr(TERMIOS, 'TIOCM_RNG') and TERMIOS.TIOCM_RNG or 0x080
+TIOCM_DSR = hasattr(TERMIOS, 'TIOCM_DSR') and TERMIOS.TIOCM_DSR or 0x100
+TIOCM_CD = hasattr(TERMIOS, 'TIOCM_CD') and TERMIOS.TIOCM_CD or TIOCM_CAR
+TIOCM_RI = hasattr(TERMIOS, 'TIOCM_RI') and TERMIOS.TIOCM_RI or TIOCM_RNG
+#TIOCM_OUT1 = hasattr(TERMIOS, 'TIOCM_OUT1') and TERMIOS.TIOCM_OUT1 or 0x2000
+#TIOCM_OUT2 = hasattr(TERMIOS, 'TIOCM_OUT2') and TERMIOS.TIOCM_OUT2 or 0x4000
+TIOCINQ = hasattr(TERMIOS, 'FIONREAD') and TERMIOS.FIONREAD or 0x541B
+
+TIOCM_zero_str = struct.pack('I', 0)
+TIOCM_RTS_str = struct.pack('I', TIOCM_RTS)
+TIOCM_DTR_str = struct.pack('I', TIOCM_DTR)
+
+TIOCSBRK = hasattr(TERMIOS, 'TIOCSBRK') and TERMIOS.TIOCSBRK or 0x5427
+TIOCCBRK = hasattr(TERMIOS, 'TIOCCBRK') and TERMIOS.TIOCCBRK or 0x5428
+
+ASYNC_SPD_MASK = 0x1030
+ASYNC_SPD_CUST = 0x0030
+
+baudrate_constants = {
+ 0: 0000000, # hang up
+ 50: 0000001,
+ 75: 0000002,
+ 110: 0000003,
+ 134: 0000004,
+ 150: 0000005,
+ 200: 0000006,
+ 300: 0000007,
+ 600: 0000010,
+ 1200: 0000011,
+ 1800: 0000012,
+ 2400: 0000013,
+ 4800: 0000014,
+ 9600: 0000015,
+ 19200: 0000016,
+ 38400: 0000017,
+ 57600: 0010001,
+ 115200: 0010002,
+ 230400: 0010003,
+ 460800: 0010004,
+ 500000: 0010005,
+ 576000: 0010006,
+ 921600: 0010007,
+ 1000000: 0010010,
+ 1152000: 0010011,
+ 1500000: 0010012,
+ 2000000: 0010013,
+ 2500000: 0010014,
+ 3000000: 0010015,
+ 3500000: 0010016,
+ 4000000: 0010017
+}
+
+
+class Serial(SerialBase):
+ """Serial port class POSIX implementation. Serial port configuration is
+ done with termios and fcntl. Runs on Linux and many other Un*x like
+ systems."""
+
+ def open(self):
+ """Open port with current settings. This may throw a SerialException
+ if the port cannot be opened."""
+ if self._port is None:
+ raise SerialException("Port must be configured before it can be used.")
+ self.fd = None
+ #open
+ try:
+ self.fd = os.open(self.portstr, os.O_RDWR|os.O_NOCTTY|os.O_NONBLOCK)
+ except Exception, msg:
+ self.fd = None
+ raise SerialException("could not open port %s: %s" % (self._port, msg))
+ #~ fcntl.fcntl(self.fd, FCNTL.F_SETFL, 0) #set blocking
+
+ try:
+ self._reconfigurePort()
+ except:
+ os.close(self.fd)
+ self.fd = None
+ else:
+ self._isOpen = True
+ #~ self.flushInput()
+
+
+ def _reconfigurePort(self):
+ """Set communication parameters on opened port."""
+ if self.fd is None:
+ raise SerialException("Can only operate on a valid port handle")
+ custom_baud = None
+
+ vmin = vtime = 0 #timeout is done via select
+ if self._interCharTimeout is not None:
+ vmin = 1
+ vtime = int(self._interCharTimeout * 10)
+ try:
+ iflag, oflag, cflag, lflag, ispeed, ospeed, cc = termios.tcgetattr(self.fd)
+ except termios.error, msg: #if a port is nonexistent but has a /dev file, it'll fail here
+ raise SerialException("Could not configure port: %s" % msg)
+ #set up raw mode / no echo / binary
+ cflag |= (TERMIOS.CLOCAL|TERMIOS.CREAD)
+ lflag &= ~(TERMIOS.ICANON|TERMIOS.ECHO|TERMIOS.ECHOE|TERMIOS.ECHOK|TERMIOS.ECHONL|
+ TERMIOS.ISIG|TERMIOS.IEXTEN) #|TERMIOS.ECHOPRT
+ for flag in ('ECHOCTL', 'ECHOKE'): #netbsd workaround for Erk
+ if hasattr(TERMIOS, flag):
+ lflag &= ~getattr(TERMIOS, flag)
+
+ oflag &= ~(TERMIOS.OPOST)
+ iflag &= ~(TERMIOS.INLCR|TERMIOS.IGNCR|TERMIOS.ICRNL|TERMIOS.IGNBRK)
+ if hasattr(TERMIOS, 'IUCLC'):
+ iflag &= ~TERMIOS.IUCLC
+ if hasattr(TERMIOS, 'PARMRK'):
+ iflag &= ~TERMIOS.PARMRK
+
+ #setup baudrate
+ try:
+ ispeed = ospeed = getattr(TERMIOS,'B%s' % (self._baudrate))
+ except AttributeError:
+ try:
+ ispeed = ospeed = baudrate_constants[self._baudrate]
+ except KeyError:
+ #~ raise ValueError('Invalid baud rate: %r' % self._baudrate)
+ # may need custom baud rate, it isnt in our list.
+ ispeed = ospeed = getattr(TERMIOS, 'B38400')
+ custom_baud = int(self._baudrate) # store for later
+
+ #setup char len
+ cflag &= ~TERMIOS.CSIZE
+ if self._bytesize == 8:
+ cflag |= TERMIOS.CS8
+ elif self._bytesize == 7:
+ cflag |= TERMIOS.CS7
+ elif self._bytesize == 6:
+ cflag |= TERMIOS.CS6
+ elif self._bytesize == 5:
+ cflag |= TERMIOS.CS5
+ else:
+ raise ValueError('Invalid char len: %r' % self._bytesize)
+ #setup stopbits
+ if self._stopbits == STOPBITS_ONE:
+ cflag &= ~(TERMIOS.CSTOPB)
+ elif self._stopbits == STOPBITS_TWO:
+ cflag |= (TERMIOS.CSTOPB)
+ else:
+ raise ValueError('Invalid stopit specification: %r' % self._stopbits)
+ #setup parity
+ iflag &= ~(TERMIOS.INPCK|TERMIOS.ISTRIP)
+ if self._parity == PARITY_NONE:
+ cflag &= ~(TERMIOS.PARENB|TERMIOS.PARODD)
+ elif self._parity == PARITY_EVEN:
+ cflag &= ~(TERMIOS.PARODD)
+ cflag |= (TERMIOS.PARENB)
+ elif self._parity == PARITY_ODD:
+ cflag |= (TERMIOS.PARENB|TERMIOS.PARODD)
+ else:
+ raise ValueError('Invalid parity: %r' % self._parity)
+ #setup flow control
+ #xonxoff
+ if hasattr(TERMIOS, 'IXANY'):
+ if self._xonxoff:
+ iflag |= (TERMIOS.IXON|TERMIOS.IXOFF) #|TERMIOS.IXANY)
+ else:
+ iflag &= ~(TERMIOS.IXON|TERMIOS.IXOFF|TERMIOS.IXANY)
+ else:
+ if self._xonxoff:
+ iflag |= (TERMIOS.IXON|TERMIOS.IXOFF)
+ else:
+ iflag &= ~(TERMIOS.IXON|TERMIOS.IXOFF)
+ #rtscts
+ if hasattr(TERMIOS, 'CRTSCTS'):
+ if self._rtscts:
+ cflag |= (TERMIOS.CRTSCTS)
+ else:
+ cflag &= ~(TERMIOS.CRTSCTS)
+ elif hasattr(TERMIOS, 'CNEW_RTSCTS'): #try it with alternate constant name
+ if self._rtscts:
+ cflag |= (TERMIOS.CNEW_RTSCTS)
+ else:
+ cflag &= ~(TERMIOS.CNEW_RTSCTS)
+ #XXX should there be a warning if setting up rtscts (and xonxoff etc) fails??
+
+ #buffer
+ #vmin "minimal number of characters to be read. = for non blocking"
+ if vmin < 0 or vmin > 255:
+ raise ValueError('Invalid vmin: %r ' % vmin)
+ cc[TERMIOS.VMIN] = vmin
+ #vtime
+ if vtime < 0 or vtime > 255:
+ raise ValueError('Invalid vtime: %r' % vtime)
+ cc[TERMIOS.VTIME] = vtime
+ #activate settings
+ termios.tcsetattr(self.fd, TERMIOS.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
+
+ # apply custom baud rate, if any
+ if custom_baud is not None:
+ import array
+ buf = array.array('i', [0] * 32)
+
+ # get serial_struct
+ FCNTL.ioctl(self.fd, TERMIOS.TIOCGSERIAL, buf)
+
+ # set custom divisor
+ buf[6] = buf[7] / custom_baud
+
+ # update flags
+ buf[4] &= ~ASYNC_SPD_MASK
+ buf[4] |= ASYNC_SPD_CUST
+
+ # set serial_struct
+ try:
+ res = FCNTL.ioctl(self.fd, TERMIOS.TIOCSSERIAL, buf)
+ except IOError:
+ raise ValueError('Failed to set custom baud rate: %r' % self._baudrate)
+
+ def close(self):
+ """Close port"""
+ if self._isOpen:
+ if self.fd is not None:
+ os.close(self.fd)
+ self.fd = None
+ self._isOpen = False
+
+ def makeDeviceName(self, port):
+ return device(port)
+
+ # - - - - - - - - - - - - - - - - - - - - - - - -
+
+ def inWaiting(self):
+ """Return the number of characters currently in the input buffer."""
+ #~ s = fcntl.ioctl(self.fd, TERMIOS.FIONREAD, TIOCM_zero_str)
+ s = fcntl.ioctl(self.fd, TIOCINQ, TIOCM_zero_str)
+ return struct.unpack('I',s)[0]
+
+ def read(self, size=1):
+ """Read size bytes from the serial port. If a timeout is set it may
+        return fewer characters than requested. With no timeout it will block
+ until the requested number of bytes is read."""
+ if self.fd is None: raise portNotOpenError
+ read = ''
+ inp = None
+ if size > 0:
+ while len(read) < size:
+ #print "\tread(): size",size, "have", len(read) #debug
+ ready,_,_ = select.select([self.fd],[],[], self._timeout)
+ if not ready:
+ break #timeout
+ buf = os.read(self.fd, size-len(read))
+ read = read + buf
+ if (self._timeout >= 0 or self._interCharTimeout > 0) and not buf:
+ break #early abort on timeout
+ return read
+
+ def write(self, data):
+ """Output the given string over the serial port."""
+ if self.fd is None: raise portNotOpenError
+ if not isinstance(data, str):
+ raise TypeError('expected str, got %s' % type(data))
+ t = len(data)
+ d = data
+ while t > 0:
+ try:
+ if self._writeTimeout is not None and self._writeTimeout > 0:
+ _,ready,_ = select.select([],[self.fd],[], self._writeTimeout)
+ if not ready:
+ raise writeTimeoutError
+ n = os.write(self.fd, d)
+ if self._writeTimeout is not None and self._writeTimeout > 0:
+ _,ready,_ = select.select([],[self.fd],[], self._writeTimeout)
+ if not ready:
+ raise writeTimeoutError
+ d = d[n:]
+ t = t - n
+ except OSError,v:
+ if v.errno != errno.EAGAIN:
+ raise
+
+ def flush(self):
+ """Flush of file like objects. In this case, wait until all data
+ is written."""
+ self.drainOutput()
+
+ def flushInput(self):
+ """Clear input buffer, discarding all that is in the buffer."""
+ if self.fd is None:
+ raise portNotOpenError
+ termios.tcflush(self.fd, TERMIOS.TCIFLUSH)
+
+ def flushOutput(self):
+ """Clear output buffer, aborting the current output and
+ discarding all that is in the buffer."""
+ if self.fd is None:
+ raise portNotOpenError
+ termios.tcflush(self.fd, TERMIOS.TCOFLUSH)
+
+ def sendBreak(self, duration=0.25):
+ """Send break condition. Timed, returns to idle state after given duration."""
+ if self.fd is None:
+ raise portNotOpenError
+ termios.tcsendbreak(self.fd, int(duration/0.25))
+
+ def setBreak(self, level=1):
+ """Set break: Controls TXD. When active, to transmitting is possible."""
+ if self.fd is None: raise portNotOpenError
+ if level:
+ fcntl.ioctl(self.fd, TIOCSBRK)
+ else:
+ fcntl.ioctl(self.fd, TIOCCBRK)
+
+ def setRTS(self, level=1):
+ """Set terminal status line: Request To Send"""
+ if self.fd is None: raise portNotOpenError
+ if level:
+ fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_RTS_str)
+ else:
+ fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_RTS_str)
+
+ def setDTR(self, level=1):
+ """Set terminal status line: Data Terminal Ready"""
+ if self.fd is None: raise portNotOpenError
+ if level:
+ fcntl.ioctl(self.fd, TIOCMBIS, TIOCM_DTR_str)
+ else:
+ fcntl.ioctl(self.fd, TIOCMBIC, TIOCM_DTR_str)
+
+ def getCTS(self):
+ """Read terminal status line: Clear To Send"""
+ if self.fd is None: raise portNotOpenError
+ s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
+ return struct.unpack('I',s)[0] & TIOCM_CTS != 0
+
+ def getDSR(self):
+ """Read terminal status line: Data Set Ready"""
+ if self.fd is None: raise portNotOpenError
+ s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
+ return struct.unpack('I',s)[0] & TIOCM_DSR != 0
+
+ def getRI(self):
+ """Read terminal status line: Ring Indicator"""
+ if self.fd is None: raise portNotOpenError
+ s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
+ return struct.unpack('I',s)[0] & TIOCM_RI != 0
+
+ def getCD(self):
+ """Read terminal status line: Carrier Detect"""
+ if self.fd is None: raise portNotOpenError
+ s = fcntl.ioctl(self.fd, TIOCMGET, TIOCM_zero_str)
+ return struct.unpack('I',s)[0] & TIOCM_CD != 0
+
+ # - - platform specific - - - -
+
+ def drainOutput(self):
+ """internal - not portable!"""
+ if self.fd is None: raise portNotOpenError
+ termios.tcdrain(self.fd)
+
+ def nonblocking(self):
+ """internal - not portable!"""
+ if self.fd is None:
+ raise portNotOpenError
+ fcntl.fcntl(self.fd, FCNTL.F_SETFL, FCNTL.O_NONBLOCK)
+
+ def fileno(self):
+ """For easier of the serial port instance with select.
+ WARNING: this function is not portable to different platforms!"""
+ if self.fd is None: raise portNotOpenError
+ return self.fd
+
+if __name__ == '__main__':
+ s = Serial(0,
+ baudrate=19200, #baudrate
+ bytesize=EIGHTBITS, #number of databits
+ parity=PARITY_EVEN, #enable parity checking
+ stopbits=STOPBITS_ONE, #number of stopbits
+ timeout=3, #set a timeout value, None for waiting forever
+ xonxoff=0, #enable software flow control
+ rtscts=0, #enable RTS/CTS flow control
+ )
+ s.setRTS(1)
+ s.setDTR(1)
+ s.flushInput()
+ s.flushOutput()
+ s.write('hello')
+ print repr(s.read(5))
+ print s.inWaiting()
+ del s
+
diff --git a/serial/serialposix.pyc b/serial/serialposix.pyc
new file mode 100644
index 0000000..49b9553
Binary files /dev/null and b/serial/serialposix.pyc differ
diff --git a/serial/serialutil.py b/serial/serialutil.py
new file mode 100644
index 0000000..fd466f2
--- /dev/null
+++ b/serial/serialutil.py
@@ -0,0 +1,400 @@
+#! python
+# Python Serial Port Extension for Win32, Linux, BSD, Jython
+# see __init__.py
+#
+# (C) 2001-2008 Chris Liechti <[email protected]>
+# this is distributed under a free software license, see license.txt
+
+PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK, PARITY_SPACE = 'N', 'E', 'O', 'M', 'S'
+STOPBITS_ONE, STOPBITS_TWO = (1, 2)
+FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS = (5,6,7,8)
+
+PARITY_NAMES = {
+ PARITY_NONE: 'None',
+ PARITY_EVEN: 'Even',
+ PARITY_ODD: 'Odd',
+ PARITY_MARK: 'Mark',
+ PARITY_SPACE:'Space',
+}
+
+XON = chr(17)
+XOFF = chr(19)
+
+#Python < 2.2.3 compatibility
+try:
+ True
+except:
+ True = 1
+ False = not True
+
+class SerialException(Exception):
+ """Base class for serial port related exceptions."""
+
+portNotOpenError = SerialException('Port not open')
+
+class SerialTimeoutException(SerialException):
+ """Write timeouts give an exception"""
+
+writeTimeoutError = SerialTimeoutException("Write timeout")
+
+class FileLike(object):
+ """An abstract file like class.
+
+ This class implements readline and readlines based on read and
+ writelines based on write.
+ This class is used to provide the above functions for to Serial
+ port objects.
+
+ Note that when the serial port was opened with _NO_ timeout that
+ readline blocks until it sees a newline (or the specified size is
+ reached) and that readlines would never return and therefore
+ refuses to work (it raises an exception in this case)!
+ """
+
+ def read(self, size): raise NotImplementedError
+ def write(self, s): raise NotImplementedError
+
+ def readline(self, size=None, eol='\n'):
+ """read a line which is terminated with end-of-line (eol) character
+ ('\n' by default) or until timeout"""
+ line = ''
+ while 1:
+ c = self.read(1)
+ if c:
+ line += c #not very efficient but lines are usually not that long
+ if c == eol:
+ break
+ if size is not None and len(line) >= size:
+ break
+ else:
+ break
+ return line
+
+ def readlines(self, sizehint=None, eol='\n'):
+ """read a list of lines, until timeout
+ sizehint is ignored"""
+ if self.timeout is None:
+ raise ValueError, "Serial port MUST have enabled timeout for this function!"
+ lines = []
+ while 1:
+ line = self.readline(eol=eol)
+ if line:
+ lines.append(line)
+ if line[-1] != eol: #was the line received with a timeout?
+ break
+ else:
+ break
+ return lines
+
+ def xreadlines(self, sizehint=None):
+ """just call readlines - here for compatibility"""
+ return self.readlines()
+
+ def writelines(self, sequence):
+ for line in sequence:
+ self.write(line)
+
+ def flush(self):
+ """flush of file like objects"""
+ pass
+
+ # iterator for e.g. "for line in Serial(0): ..." usage
+ def next(self):
+ line = self.readline()
+ if not line: raise StopIteration
+ return line
+
+ def __iter__(self):
+ return self
+
+
+class SerialBase(FileLike):
+ """Serial port base class. Provides __init__ function and properties to
+ get/set port settings."""
+
+    #default values, may be overridden in subclasses that do not support all values
+ BAUDRATES = (50,75,110,134,150,200,300,600,1200,1800,2400,4800,9600,
+ 19200,38400,57600,115200,230400,460800,500000,576000,921600,
+ 1000000,1152000,1500000,2000000,2500000,3000000,3500000,4000000)
+ BYTESIZES = (FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS)
+ PARITIES = (PARITY_NONE, PARITY_EVEN, PARITY_ODD)
+ STOPBITS = (STOPBITS_ONE, STOPBITS_TWO)
+
+ def __init__(self,
+ port = None, #number of device, numbering starts at
+ #zero. if everything fails, the user
+ #can specify a device string, note
+ #that this isn't portable anymore
+ #port will be opened if one is specified
+ baudrate=9600, #baudrate
+ bytesize=EIGHTBITS, #number of databits
+ parity=PARITY_NONE, #enable parity checking
+ stopbits=STOPBITS_ONE, #number of stopbits
+ timeout=None, #set a timeout value, None to wait forever
+ xonxoff=0, #enable software flow control
+ rtscts=0, #enable RTS/CTS flow control
+ writeTimeout=None, #set a timeout for writes
+ dsrdtr=None, #None: use rtscts setting, dsrdtr override if true or false
+ interCharTimeout=None #Inter-character timeout, None to disable
+ ):
+ """Initialize comm port object. If a port is given, then the port will be
+ opened immediately. Otherwise a Serial port object in closed state
+ is returned."""
+
+ self._isOpen = False
+        self._port = None #correct value is assigned below through properties
+        self._baudrate = None #correct value is assigned below through properties
+        self._bytesize = None #correct value is assigned below through properties
+        self._parity = None #correct value is assigned below through properties
+        self._stopbits = None #correct value is assigned below through properties
+        self._timeout = None #correct value is assigned below through properties
+        self._writeTimeout = None #correct value is assigned below through properties
+        self._xonxoff = None #correct value is assigned below through properties
+        self._rtscts = None #correct value is assigned below through properties
+        self._dsrdtr = None #correct value is assigned below through properties
+        self._interCharTimeout = None #correct value is assigned below through properties
+
+ #assign values using get/set methods using the properties feature
+ self.port = port
+ self.baudrate = baudrate
+ self.bytesize = bytesize
+ self.parity = parity
+ self.stopbits = stopbits
+ self.timeout = timeout
+ self.writeTimeout = writeTimeout
+ self.xonxoff = xonxoff
+ self.rtscts = rtscts
+ self.dsrdtr = dsrdtr
+ self.interCharTimeout = interCharTimeout
+
+ if port is not None:
+ self.open()
+
+ def isOpen(self):
+ """Check if the port is opened."""
+ return self._isOpen
+
+ # - - - - - - - - - - - - - - - - - - - - - - - -
+
+    #TODO: these are not really needed as there is the BAUDRATES etc. attribute...
+    #maybe I'll remove them before the final release...
+
+ def getSupportedBaudrates(self):
+ return [(str(b), b) for b in self.BAUDRATES]
+
+ def getSupportedByteSizes(self):
+ return [(str(b), b) for b in self.BYTESIZES]
+
+ def getSupportedStopbits(self):
+ return [(str(b), b) for b in self.STOPBITS]
+
+ def getSupportedParities(self):
+ return [(PARITY_NAMES[b], b) for b in self.PARITIES]
+
+ # - - - - - - - - - - - - - - - - - - - - - - - -
+
+ def setPort(self, port):
+ """Change the port. The attribute portstr is set to a string that
+ contains the name of the port."""
+
+ was_open = self._isOpen
+ if was_open: self.close()
+ if port is not None:
+ if type(port) in [type(''), type(u'')]: #strings are taken directly
+ self.portstr = port
+ else:
+ self.portstr = self.makeDeviceName(port)
+ else:
+ self.portstr = None
+ self._port = port
+ if was_open: self.open()
+
+ def getPort(self):
+ """Get the current port setting. The value that was passed on init or using
+ setPort() is passed back. See also the attribute portstr which contains
+ the name of the port as a string."""
+ return self._port
+
+ port = property(getPort, setPort, doc="Port setting")
+
+
+ def setBaudrate(self, baudrate):
+ """Change baudrate. It raises a ValueError if the port is open and the
+        baudrate is not possible. If the port is closed, then the value is
+ accepted and the exception is raised when the port is opened."""
+ #~ if baudrate not in self.BAUDRATES: raise ValueError("Not a valid baudrate: %r" % baudrate)
+ try:
+ self._baudrate = int(baudrate)
+ except TypeError:
+ raise ValueError("Not a valid baudrate: %r" % (baudrate,))
+ else:
+ if self._isOpen: self._reconfigurePort()
+
+ def getBaudrate(self):
+ """Get the current baudrate setting."""
+ return self._baudrate
+
+ baudrate = property(getBaudrate, setBaudrate, doc="Baudrate setting")
+
+
+ def setByteSize(self, bytesize):
+ """Change byte size."""
+ if bytesize not in self.BYTESIZES: raise ValueError("Not a valid byte size: %r" % (bytesize,))
+ self._bytesize = bytesize
+ if self._isOpen: self._reconfigurePort()
+
+ def getByteSize(self):
+ """Get the current byte size setting."""
+ return self._bytesize
+
+ bytesize = property(getByteSize, setByteSize, doc="Byte size setting")
+
+
+ def setParity(self, parity):
+ """Change parity setting."""
+ if parity not in self.PARITIES: raise ValueError("Not a valid parity: %r" % (parity,))
+ self._parity = parity
+ if self._isOpen: self._reconfigurePort()
+
+ def getParity(self):
+ """Get the current parity setting."""
+ return self._parity
+
+ parity = property(getParity, setParity, doc="Parity setting")
+
+
+ def setStopbits(self, stopbits):
+ """Change stopbits size."""
+ if stopbits not in self.STOPBITS: raise ValueError("Not a valid stopbit size: %r" % (stopbits,))
+ self._stopbits = stopbits
+ if self._isOpen: self._reconfigurePort()
+
+ def getStopbits(self):
+ """Get the current stopbits setting."""
+ return self._stopbits
+
+ stopbits = property(getStopbits, setStopbits, doc="Stopbits setting")
+
+
+ def setTimeout(self, timeout):
+ """Change timeout setting."""
+ if timeout is not None:
+ if timeout < 0: raise ValueError("Not a valid timeout: %r" % (timeout,))
+ try:
+ timeout + 1 #test if it's a number, will throw a TypeError if not...
+ except TypeError:
+ raise ValueError("Not a valid timeout: %r" % (timeout,))
+
+ self._timeout = timeout
+ if self._isOpen: self._reconfigurePort()
+
+ def getTimeout(self):
+ """Get the current timeout setting."""
+ return self._timeout
+
+ timeout = property(getTimeout, setTimeout, doc="Timeout setting for read()")
+
+
+ def setWriteTimeout(self, timeout):
+ """Change timeout setting."""
+ if timeout is not None:
+ if timeout < 0: raise ValueError("Not a valid timeout: %r" % (timeout,))
+ try:
+ timeout + 1 #test if it's a number, will throw a TypeError if not...
+ except TypeError:
+ raise ValueError("Not a valid timeout: %r" % timeout)
+
+ self._writeTimeout = timeout
+ if self._isOpen: self._reconfigurePort()
+
+ def getWriteTimeout(self):
+ """Get the current timeout setting."""
+ return self._writeTimeout
+
+ writeTimeout = property(getWriteTimeout, setWriteTimeout, doc="Timeout setting for write()")
+
+
+ def setXonXoff(self, xonxoff):
+ """Change XonXoff setting."""
+ self._xonxoff = xonxoff
+ if self._isOpen: self._reconfigurePort()
+
+ def getXonXoff(self):
+ """Get the current XonXoff setting."""
+ return self._xonxoff
+
+ xonxoff = property(getXonXoff, setXonXoff, doc="Xon/Xoff setting")
+
+ def setRtsCts(self, rtscts):
+ """Change RtsCts flow control setting."""
+ self._rtscts = rtscts
+ if self._isOpen: self._reconfigurePort()
+
+ def getRtsCts(self):
+ """Get the current RtsCts flow control setting."""
+ return self._rtscts
+
+ rtscts = property(getRtsCts, setRtsCts, doc="RTS/CTS flow control setting")
+
+ def setDsrDtr(self, dsrdtr=None):
+ """Change DsrDtr flow control setting."""
+ if dsrdtr is None:
+ #if not set, keep backwards compatibility and follow rtscts setting
+ self._dsrdtr = self._rtscts
+ else:
+ #if defined independently, follow its value
+ self._dsrdtr = dsrdtr
+ if self._isOpen: self._reconfigurePort()
+
+ def getDsrDtr(self):
+ """Get the current DsrDtr flow control setting."""
+ return self._dsrdtr
+
+ dsrdtr = property(getDsrDtr, setDsrDtr, "DSR/DTR flow control setting")
+
+ def setInterCharTimeout(self, interCharTimeout):
+ """Change inter-character timeout setting."""
+ if interCharTimeout is not None:
+ if interCharTimeout < 0: raise ValueError("Not a valid timeout: %r" % interCharTimeout)
+ try:
+ interCharTimeout + 1 #test if it's a number, will throw a TypeError if not...
+ except TypeError:
+ raise ValueError("Not a valid timeout: %r" % interCharTimeout)
+
+ self._interCharTimeout = interCharTimeout
+ if self._isOpen: self._reconfigurePort()
+
+ def getInterCharTimeout(self):
+ """Get the current inter-character timeout setting."""
+ return self._interCharTimeout
+
+ interCharTimeout = property(getInterCharTimeout, setInterCharTimeout, doc="Inter-character timeout setting for read()")
+
+
+ # - - - - - - - - - - - - - - - - - - - - - - - -
+
+ def __repr__(self):
+ """String representation of the current port settings and its state."""
+ return "%s<id=0x%x, open=%s>(port=%r, baudrate=%r, bytesize=%r, parity=%r, stopbits=%r, timeout=%r, xonxoff=%r, rtscts=%r, dsrdtr=%r)" % (
+ self.__class__.__name__,
+ id(self),
+ self._isOpen,
+ self.portstr,
+ self.baudrate,
+ self.bytesize,
+ self.parity,
+ self.stopbits,
+ self.timeout,
+ self.xonxoff,
+ self.rtscts,
+ self.dsrdtr,
+ )
+
+if __name__ == '__main__':
+ s = SerialBase()
+ print s.portstr
+ print s.getSupportedBaudrates()
+ print s.getSupportedByteSizes()
+ print s.getSupportedParities()
+ print s.getSupportedStopbits()
+ print s
diff --git a/serial/serialutil.pyc b/serial/serialutil.pyc
new file mode 100644
index 0000000..6917e42
Binary files /dev/null and b/serial/serialutil.pyc differ
diff --git a/serial/serialwin32.py b/serial/serialwin32.py
new file mode 100644
index 0000000..f5e8961
--- /dev/null
+++ b/serial/serialwin32.py
@@ -0,0 +1,336 @@
+#! python
+# Python Serial Port Extension for Win32, Linux, BSD, Jython
+# serial driver for win32
+# see __init__.py
+#
+# (C) 2001-2008 Chris Liechti <[email protected]>
+# this is distributed under a free software license, see license.txt
+
+import win32file # The base COM port and file IO functions.
+import win32event # We use events and the WaitFor[Single|Multiple]Objects functions.
+import win32con # constants.
+from serialutil import *
+
+#from winbase.h. these should realy be in win32con
+MS_CTS_ON = 16
+MS_DSR_ON = 32
+MS_RING_ON = 64
+MS_RLSD_ON = 128
+
+def device(portnum):
+ """Turn a port number into a device name"""
+ return 'COM%d' % (portnum+1) #numbers are transformed to a string
+
+class Serial(SerialBase):
+ """Serial port implemenation for Win32. This implemenatation requires a
+ win32all installation."""
+
+ BAUDRATES = (50,75,110,134,150,200,300,600,1200,1800,2400,4800,9600,
+ 19200,38400,57600,115200)
+
+ def open(self):
+ """Open port with current settings. This may throw a SerialException
+ if the port cannot be opened."""
+ if self._port is None:
+ raise SerialException("Port must be configured before it can be used.")
+ self.hComPort = None
+ # the "\\.\COMx" format is required for devices other than COM1-COM8
+ # not all versions of windows seem to support this properly
+ # so that the first few ports are used with the DOS device name
+ port = self.portstr
+ if port.upper().startswith('COM') and int(port[3:]) > 8:
+ port = '\\\\.\\' + port
+ try:
+ self.hComPort = win32file.CreateFile(port,
+ win32con.GENERIC_READ | win32con.GENERIC_WRITE,
+ 0, # exclusive access
+ None, # no security
+ win32con.OPEN_EXISTING,
+ win32con.FILE_FLAG_OVERLAPPED,
+ None)
+ except Exception, msg:
+ self.hComPort = None #'cause __del__ is called anyway
+ raise SerialException("could not open port %s: %s" % (self.portstr, msg))
+ # Setup a 4k buffer
+ win32file.SetupComm(self.hComPort, 4096, 4096)
+
+ #Save original timeout values:
+ self._orgTimeouts = win32file.GetCommTimeouts(self.hComPort)
+
+ self._rtsState = win32file.RTS_CONTROL_ENABLE
+ self._dtrState = win32file.DTR_CONTROL_ENABLE
+
+ self._reconfigurePort()
+
+ # Clear buffers:
+ # Remove anything that was there
+ win32file.PurgeComm(self.hComPort,
+ win32file.PURGE_TXCLEAR | win32file.PURGE_TXABORT |
+ win32file.PURGE_RXCLEAR | win32file.PURGE_RXABORT)
+
+ self._overlappedRead = win32file.OVERLAPPED()
+ self._overlappedRead.hEvent = win32event.CreateEvent(None, 1, 0, None)
+ self._overlappedWrite = win32file.OVERLAPPED()
+ #~ self._overlappedWrite.hEvent = win32event.CreateEvent(None, 1, 0, None)
+ self._overlappedWrite.hEvent = win32event.CreateEvent(None, 0, 0, None)
+ self._isOpen = True
+
+ def _reconfigurePort(self):
+ """Set communication parameters on opened port."""
+ if not self.hComPort:
+ raise SerialException("Can only operate on a valid port handle")
+
+ #Set Windows timeout values
+ #timeouts is a tuple with the following items:
+ #(ReadIntervalTimeout,ReadTotalTimeoutMultiplier,
+ # ReadTotalTimeoutConstant,WriteTotalTimeoutMultiplier,
+ # WriteTotalTimeoutConstant)
+ if self._timeout is None:
+ timeouts = (0, 0, 0, 0, 0)
+ elif self._timeout == 0:
+ timeouts = (win32con.MAXDWORD, 0, 0, 0, 0)
+ else:
+ timeouts = (0, 0, int(self._timeout*1000), 0, 0)
+ if self._timeout != 0 and self._interCharTimeout is not None:
+ timeouts = (int(self._interCharTimeout * 1000),) + timeouts[1:]
+
+ if self._writeTimeout is None:
+ pass
+ elif self._writeTimeout == 0:
+ timeouts = timeouts[:-2] + (0, win32con.MAXDWORD)
+ else:
+ timeouts = timeouts[:-2] + (0, int(self._writeTimeout*1000))
+ win32file.SetCommTimeouts(self.hComPort, timeouts)
+
+ win32file.SetCommMask(self.hComPort, win32file.EV_ERR)
+
+ # Setup the connection info.
+ # Get state and modify it:
+ comDCB = win32file.GetCommState(self.hComPort)
+ comDCB.BaudRate = self._baudrate
+
+ if self._bytesize == FIVEBITS:
+ comDCB.ByteSize = 5
+ elif self._bytesize == SIXBITS:
+ comDCB.ByteSize = 6
+ elif self._bytesize == SEVENBITS:
+ comDCB.ByteSize = 7
+ elif self._bytesize == EIGHTBITS:
+ comDCB.ByteSize = 8
+ else:
+ raise ValueError("Unsupported number of data bits: %r" % self._bytesize)
+
+ if self._parity == PARITY_NONE:
+ comDCB.Parity = win32file.NOPARITY
+ comDCB.fParity = 0 # Dis/Enable Parity Check
+ elif self._parity == PARITY_EVEN:
+ comDCB.Parity = win32file.EVENPARITY
+ comDCB.fParity = 1 # Dis/Enable Parity Check
+ elif self._parity == PARITY_ODD:
+ comDCB.Parity = win32file.ODDPARITY
+ comDCB.fParity = 1 # Dis/Enable Parity Check
+ elif self._parity == PARITY_MARK:
+ comDCB.Parity = win32file.MARKPARITY
+ comDCB.fParity = 1 # Dis/Enable Parity Check
+ elif self._parity == PARITY_SPACE:
+ comDCB.Parity = win32file.SPACEPARITY
+ comDCB.fParity = 1 # Dis/Enable Parity Check
+ else:
+ raise ValueError("Unsupported parity mode: %r" % self._parity)
+
+ if self._stopbits == STOPBITS_ONE:
+ comDCB.StopBits = win32file.ONESTOPBIT
+ elif self._stopbits == STOPBITS_TWO:
+ comDCB.StopBits = win32file.TWOSTOPBITS
+ else:
+ raise ValueError("Unsupported number of stop bits: %r" % self._stopbits)
+
+ comDCB.fBinary = 1 # Enable Binary Transmission
+ # Char. w/ Parity-Err are replaced with 0xff (if fErrorChar is set to TRUE)
+ if self._rtscts:
+ comDCB.fRtsControl = win32file.RTS_CONTROL_HANDSHAKE
+ else:
+ comDCB.fRtsControl = self._rtsState
+ if self._dsrdtr:
+ comDCB.fDtrControl = win32file.DTR_CONTROL_HANDSHAKE
+ else:
+ comDCB.fDtrControl = self._dtrState
+ comDCB.fOutxCtsFlow = self._rtscts
+ comDCB.fOutxDsrFlow = self._dsrdtr
+ comDCB.fOutX = self._xonxoff
+ comDCB.fInX = self._xonxoff
+ comDCB.fNull = 0
+ comDCB.fErrorChar = 0
+ comDCB.fAbortOnError = 0
+ comDCB.XonChar = XON
+ comDCB.XoffChar = XOFF
+
+ try:
+ win32file.SetCommState(self.hComPort, comDCB)
+ except win32file.error, e:
+ raise ValueError("Cannot configure port, some setting was wrong. Original message: %s" % e)
+
+ #~ def __del__(self):
+ #~ self.close()
+
+ def close(self):
+ """Close port"""
+ if self._isOpen:
+ if self.hComPort:
+ try:
+ # Restore original timeout values:
+ win32file.SetCommTimeouts(self.hComPort, self._orgTimeouts)
+ except win32file.error:
+ # ignore errors. can happen for unplugged USB serial devices
+ pass
+ # Close COM-Port:
+ win32file.CloseHandle(self.hComPort)
+ win32file.CloseHandle(self._overlappedRead.hEvent)
+ win32file.CloseHandle(self._overlappedWrite.hEvent)
+ self.hComPort = None
+ self._isOpen = False
+
+ def makeDeviceName(self, port):
+ return device(port)
+
+ # - - - - - - - - - - - - - - - - - - - - - - - -
+
+ def inWaiting(self):
+ """Return the number of characters currently in the input buffer."""
+ flags, comstat = win32file.ClearCommError(self.hComPort)
+ return comstat.cbInQue
+
+ def read(self, size=1):
+ """Read size bytes from the serial port. If a timeout is set it may
+ return less characters as requested. With no timeout it will block
+ until the requested number of bytes is read."""
+ if not self.hComPort: raise portNotOpenError
+ if size > 0:
+ win32event.ResetEvent(self._overlappedRead.hEvent)
+ flags, comstat = win32file.ClearCommError(self.hComPort)
+ if self.timeout == 0:
+ n = min(comstat.cbInQue, size)
+ if n > 0:
+ rc, buf = win32file.ReadFile(self.hComPort, win32file.AllocateReadBuffer(n), self._overlappedRead)
+ win32event.WaitForSingleObject(self._overlappedRead.hEvent, win32event.INFINITE)
+ read = str(buf)
+ else:
+ read = ''
+ else:
+ rc, buf = win32file.ReadFile(self.hComPort, win32file.AllocateReadBuffer(size), self._overlappedRead)
+ n = win32file.GetOverlappedResult(self.hComPort, self._overlappedRead, 1)
+ read = str(buf[:n])
+ else:
+ read = ''
+ return read
+
+ def write(self, data):
+ """Output the given string over the serial port."""
+ if not self.hComPort: raise portNotOpenError
+ if not isinstance(data, str):
+ raise TypeError('expected str, got %s' % type(data))
+ #print repr(s),
+ if data:
+ #~ win32event.ResetEvent(self._overlappedWrite.hEvent)
+ err, n = win32file.WriteFile(self.hComPort, data, self._overlappedWrite)
+ if err: #will be ERROR_IO_PENDING:
+ # Wait for the write to complete.
+ #~ win32event.WaitForSingleObject(self._overlappedWrite.hEvent, win32event.INFINITE)
+ n = win32file.GetOverlappedResult(self.hComPort, self._overlappedWrite, 1)
+ if n != len(data):
+ raise writeTimeoutError
+
+
+ def flushInput(self):
+ """Clear input buffer, discarding all that is in the buffer."""
+ if not self.hComPort: raise portNotOpenError
+ win32file.PurgeComm(self.hComPort, win32file.PURGE_RXCLEAR | win32file.PURGE_RXABORT)
+
+ def flushOutput(self):
+ """Clear output buffer, aborting the current output and
+ discarding all that is in the buffer."""
+ if not self.hComPort: raise portNotOpenError
+ win32file.PurgeComm(self.hComPort, win32file.PURGE_TXCLEAR | win32file.PURGE_TXABORT)
+
+ def sendBreak(self, duration=0.25):
+ """Send break condition. Timed, returns to idle state after given duration."""
+ if not self.hComPort: raise portNotOpenError
+ import time
+ win32file.SetCommBreak(self.hComPort)
+ time.sleep(duration)
+ win32file.ClearCommBreak(self.hComPort)
+
+ def setBreak(self, level=1):
+ """Set break: Controls TXD. When active, to transmitting is possible."""
+ if not self.hComPort: raise portNotOpenError
+ if level:
+ win32file.SetCommBreak(self.hComPort)
+ else:
+ win32file.ClearCommBreak(self.hComPort)
+
+ def setRTS(self, level=1):
+ """Set terminal status line: Request To Send"""
+ if not self.hComPort: raise portNotOpenError
+ if level:
+ self._rtsState = win32file.RTS_CONTROL_ENABLE
+ win32file.EscapeCommFunction(self.hComPort, win32file.SETRTS)
+ else:
+ self._rtsState = win32file.RTS_CONTROL_DISABLE
+ win32file.EscapeCommFunction(self.hComPort, win32file.CLRRTS)
+
+ def setDTR(self, level=1):
+ """Set terminal status line: Data Terminal Ready"""
+ if not self.hComPort: raise portNotOpenError
+ if level:
+ self._dtrState = win32file.DTR_CONTROL_ENABLE
+ win32file.EscapeCommFunction(self.hComPort, win32file.SETDTR)
+ else:
+ self._dtrState = win32file.DTR_CONTROL_DISABLE
+ win32file.EscapeCommFunction(self.hComPort, win32file.CLRDTR)
+
+ def getCTS(self):
+ """Read terminal status line: Clear To Send"""
+ if not self.hComPort: raise portNotOpenError
+ return MS_CTS_ON & win32file.GetCommModemStatus(self.hComPort) != 0
+
+ def getDSR(self):
+ """Read terminal status line: Data Set Ready"""
+ if not self.hComPort: raise portNotOpenError
+ return MS_DSR_ON & win32file.GetCommModemStatus(self.hComPort) != 0
+
+ def getRI(self):
+ """Read terminal status line: Ring Indicator"""
+ if not self.hComPort: raise portNotOpenError
+ return MS_RING_ON & win32file.GetCommModemStatus(self.hComPort) != 0
+
+ def getCD(self):
+ """Read terminal status line: Carrier Detect"""
+ if not self.hComPort: raise portNotOpenError
+ return MS_RLSD_ON & win32file.GetCommModemStatus(self.hComPort) != 0
+
+ # - - platform specific - - - -
+
+ def setXON(self, level=True):
+ """Platform specific - set flow state."""
+ if not self.hComPort: raise portNotOpenError
+ if level:
+ win32file.EscapeCommFunction(self.hComPort, win32file.SETXON)
+ else:
+ win32file.EscapeCommFunction(self.hComPort, win32file.SETXOFF)
+
+#Nur Testfunktion!!
+if __name__ == '__main__':
+ s = Serial(0)
+ print s
+
+ s = Serial()
+ print s
+
+
+ s.baudrate = 19200
+ s.databits = 7
+ s.close()
+ s.port = 0
+ s.open()
+ print s
+
diff --git a/serial/sermsdos.py b/serial/sermsdos.py
new file mode 100644
index 0000000..a516118
--- /dev/null
+++ b/serial/sermsdos.py
@@ -0,0 +1,215 @@
+# sermsdos.py
+#
+# History:
+#
+# 3rd September 2002 Dave Haynes
+# 1. First defined
+#
+# Although this code should run under the latest versions of
+# Python, on DOS-based platforms such as Windows 95 and 98,
+# it has been specifically written to be compatible with
+# PyDOS, available at:
+# http://www.python.org/ftp/python/wpy/dos.html
+#
+# PyDOS is a stripped-down version of Python 1.5.2 for
+# DOS machines. Therefore, in making changes to this file,
+# please respect Python 1.5.2 syntax. In addition, please
+# limit the width of this file to 60 characters.
+#
+# Note also that the modules in PyDOS contain fewer members
+# than other versions, so we are restricted to using the
+# following:
+#
+# In module os:
+# -------------
+# environ, chdir, getcwd, getpid, umask, fdopen, close,
+# dup, dup2, fstat, lseek, open, read, write, O_RDONLY,
+# O_WRONLY, O_RDWR, O_APPEND, O_CREAT, O_EXCL, O_TRUNC,
+# access, F_OK, R_OK, W_OK, X_OK, chmod, listdir, mkdir,
+# remove, rename, renames, rmdir, stat, unlink, utime,
+# execl, execle, execlp, execlpe, execvp, execvpe, _exit,
+# system.
+#
+# In module os.path:
+# ------------------
+# curdir, pardir, sep, altsep, pathsep, defpath, linesep.
+#
+
+import os
+import sys
+import string
+import serialutil
+
+BAUD_RATES = {
+ 110: "11",
+ 150: "15",
+ 300: "30",
+ 600: "60",
+ 1200: "12",
+ 2400: "24",
+ 4800: "48",
+ 9600: "96",
+ 19200: "19"}
+
+(PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK,
+PARITY_SPACE) = range(5)
+(STOPBITS_ONE, STOPBITS_ONEANDAHALF,
+STOPBITS_TWO) = (1, 1.5, 2)
+FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS = (5,6,7,8)
+(RETURN_ERROR, RETURN_BUSY, RETURN_RETRY, RETURN_READY,
+RETURN_NONE) = ('E', 'B', 'P', 'R', 'N')
+portNotOpenError = ValueError('port not open')
+
+def device(portnum):
+ return 'COM%d' % (portnum+1)
+
+class Serial(serialutil.FileLike):
+ """
+ port: number of device; numbering starts at
+ zero. if everything fails, the user can
+ specify a device string, note that this
+ isn't portable any more
+ baudrate: baud rate
+ bytesize: number of databits
+ parity: enable parity checking
+ stopbits: number of stopbits
+ timeout: set a timeout (None for waiting forever)
+ xonxoff: enable software flow control
+ rtscts: enable RTS/CTS flow control
+ retry: DOS retry mode
+ """
+ def __init__(self,
+ port,
+ baudrate = 9600,
+ bytesize = EIGHTBITS,
+ parity = PARITY_NONE,
+ stopbits = STOPBITS_ONE,
+ timeout = None,
+ xonxoff = 0,
+ rtscts = 0,
+ retry = RETURN_RETRY
+ ):
+
+ if type(port) == type(''):
+ #strings are taken directly
+ self.portstr = port
+ else:
+ #numbers are transformed to a string
+ self.portstr = device(port+1)
+
+ self.baud = BAUD_RATES[baudrate]
+ self.bytesize = str(bytesize)
+
+ if parity == PARITY_NONE:
+ self.parity = 'N'
+ elif parity == PARITY_EVEN:
+ self.parity = 'E'
+ elif parity == PARITY_ODD:
+ self.parity = 'O'
+ elif parity == PARITY_MARK:
+ self.parity = 'M'
+ elif parity == PARITY_SPACE:
+ self.parity = 'S'
+
+ self.stop = str(stopbits)
+ self.retry = retry
+ self.filename = "sermsdos.tmp"
+
+ self._config(self.portstr, self.baud, self.parity,
+ self.bytesize, self.stop, self.retry, self.filename)
+
+ def __del__(self):
+ self.close()
+
+ def close(self):
+ pass
+
+ def _config(self, port, baud, parity, data, stop, retry,
+ filename):
+ comString = string.join(("MODE ", port, ":"
+ , " BAUD= ", baud, " PARITY= ", parity
+ , " DATA= ", data, " STOP= ", stop, " RETRY= ",
+ retry, " > ", filename ), '')
+ os.system(comString)
+
+ def setBaudrate(self, baudrate):
+ self._config(self.portstr, BAUD_RATES[baudrate],
+ self.parity, self.bytesize, self.stop, self.retry,
+ self.filename)
+
+ def inWaiting(self):
+ """returns the number of bytes waiting to be read"""
+ raise NotImplementedError
+
+ def read(self, num = 1):
+ """Read num bytes from serial port"""
+ handle = os.open(self.portstr,
+ os.O_RDONLY | os.O_BINARY)
+ # print os.fstat(handle)
+ rv = os.read(handle, num)
+ os.close(handle)
+ return rv
+
+ def write(self, s):
+ """Write string to serial port"""
+ handle = os.open(self.portstr,
+ os.O_WRONLY | os.O_BINARY)
+ rv = os.write(handle, s)
+ os.close(handle)
+ return rv
+
+ def flushInput(self):
+ raise NotImplementedError
+
+ def flushOutput(self):
+ raise NotImplementedError
+
+ def sendBreak(self):
+ raise NotImplementedError
+
+ def setRTS(self,level=1):
+ """Set terminal status line"""
+ raise NotImplementedError
+
+ def setDTR(self,level=1):
+ """Set terminal status line"""
+ raise NotImplementedError
+
+ def getCTS(self):
+ """Eead terminal status line"""
+ raise NotImplementedError
+
+ def getDSR(self):
+ """Eead terminal status line"""
+ raise NotImplementedError
+
+ def getRI(self):
+ """Eead terminal status line"""
+ raise NotImplementedError
+
+ def getCD(self):
+ """Eead terminal status line"""
+ raise NotImplementedError
+
+ def __repr__(self):
+ return string.join(( "<Serial>: ", self.portstr
+ , self.baud, self.parity, self.bytesize, self.stop,
+ self.retry , self.filename), ' ')
+
+if __name__ == '__main__':
+ print __name__
+ s = Serial(0)
+ print s
+
+
+
+
+
+
+
+
+
+
+
+
+
|
techniker/cs1504_barcode_tool | b4933fd09fca3b17b3226fde96f2cbf9e425f1d0 | cs1504 tool | diff --git a/cs1504.py b/cs1504.py
new file mode 100644
index 0000000..89acba9
--- /dev/null
+++ b/cs1504.py
@@ -0,0 +1,335 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Bjoern Heller. All rights reserved
+# This code is licensed under GNU/ GPL
+import sys, time, datetime, serial, struct, pprint #global variables
+
+#declare serial com port (may change under macosx
+if sys.platform == 'darwin':
+ serial_port = 'cu.usbserial-00402126'
+elif sys.platform == 'linux':
+ serial_port = '/dev/ttyUSB0'
+elif sys.platform == 'win32':
+ # this port varies from PC to PC
+ serial_port = 'COM8'
+else:
+ serial_port = 0
+
+version = '$Id: cs1504.py,v 2.0 15/03/2009 05:47:42 majid Exp majid $' #version string
+
+print >> sys.stderr, ''
+print >> sys.stderr, 'Hellercom.de Symbol CS1504 Barcode Scanner Software'
+print >> sys.stderr, '---------------------------------------------------'
+print >> sys.stderr, ''
+print >> sys.stderr, 'This software is licensed under GNU/ GPL'
+print >> sys.stderr, ''
+print >> sys.stderr, '[email protected] http://www.hellercom.de'
+print >> sys.stderr, ''
+
+# Revision history:
+# $Log: cs1504.py,v $
+#
+
+########################################################################
+# bar code conventions
+
+def format_isbn(isbn):
+ """Produce an ISBN check digit"""
+ # calculate check digit
+ isbn = isbn.replace('-', '')
+ assert len(isbn) >= 9 and len(isbn) <= 10
+ check = 0
+ for i in range(9):
+ check += (10 - i) * (ord(isbn[i]) - ord('0'))
+ check = -check % 11
+ if check == 10:
+ check = 'X'
+ else:
+ check = str(check)
+ if len(isbn) > 9:
+ assert isbn[-1] == check
+ else:
+ isbn = isbn + check
+ # lookup ISBN specs at http://www.isbn-international.org/en/userman/chapter4.html
+ #
+
+ return isbn
+
+def expand(symbology, code):
+ """Expand certain types of common book codes"""
+ # 10-digit ISBNs are encoded as EAN-13 with the charming fictitious country
+ # code 978, a.k.a. "bookland"
+ # see http://www.adams1.com/pub/russadam/isbn.html
+ if symbology.startswith('EAN-13') and code.startswith('978'):
+ symbology = 'ISBN'
+ code = format_isbn(code[3:12])
+ return symbology, code
+
+########################################################################
+# the Symbol CS 1504 protocol (ref. to PDF file) got it from symbol directly
+#also some setup commands supported but not yet implemented (such as led settings an
+#stay awake commands
+
+symbologies = {
+ 0x16: 'Bookland',
+ 0x0E: 'MSI',
+ 0x02: 'Codabar',
+ 0x11: 'PDF-417',
+ 0x0c: 'Code 11',
+ 0x26: 'Postbar (Canada)',
+ 0x20: 'Code 32',
+ 0x1e: 'Postnet (US)',
+ 0x03: 'Code 128',
+ 0x23: 'Postal (Australia)',
+ 0x01: 'Code 39',
+ 0x22: 'Postal (Japan)',
+ 0x13: 'Code 39 Full ASCII',
+ 0x27: 'Postal (UK)',
+ 0x07: 'Code 93',
+ 0x1c: 'QR code',
+ 0x1d: 'Composite',
+ 0x31: 'RSS limited',
+ 0x17: 'Coupon',
+ 0x30: 'RSS-14',
+ 0x04: 'D25',
+ 0x32: 'RSS Expanded',
+ 0x1b: 'Data Matrix',
+ 0x24: 'Signature',
+ 0x0f: 'EAN-128',
+ 0x15: 'Trioptic Code 39',
+ 0x0b: 'EAN-13',
+ 0x08: 'UPCA',
+ 0x4b: 'EAN-13+2',
+ 0x48: 'UPCA+2',
+ 0x8b: 'EAN-13+5',
+ 0x88: 'UPCA+5',
+ 0x0a: 'EAN-8',
+ 0x09: 'UPCE',
+ 0x4a: 'EAN-8+2',
+ 0x49: 'UPCE+2',
+ 0x8a: 'EAN-8+5',
+ 0x89: 'UPCE+5',
+ 0x05: 'IATA',
+ 0x10: 'UPCE1',
+ 0x19: 'ISBT-128',
+ 0x50: 'UPCE1+2',
+ 0x21: 'ISBT-128 concatenated',
+ 0x90: 'UPCE1+5',
+ 0x06: 'ITF',
+ 0x28: 'Macro PDF'
+ }
+MAX_RESP = 6144
+
+class CS1504: #comm code
+
+ def __init__(self, port='/dev/cu.usbserial'):
+ attempts = 0
+ connected = False
+ while not connected:
+ try:
+ attempts += 1
+ self.ser = serial.Serial(port,
+ baudrate=9600,
+ bytesize=8,
+ parity=serial.PARITY_ODD,
+ stopbits=serial.STOPBITS_ONE,
+ timeout=2)
+ connected = True
+ except serial.SerialException:
+ if attempts <= 3:
+ print >> sys.stderr, 'connection on', port, 'failed, retrying'
+ time.sleep(2.0)
+ else:
+ print >> sys.stderr, 'giving up :( bye'
+ print >> 'try changing the com port settings'
+ print >> 'or check your scanner for battery'
+ raise
+ self.delta = datetime.timedelta(0)
+ self.serial = None
+ self.sw_ver = None
+ self.last_barcodes = []
+
+ def interrogate(self):
+ """Initiate communications with the scanner"""
+ print >> sys.stderr, 'Using serial device:', self.ser.portstr + '... ',
+ count = 0
+ while count < 50:
+ self.send('\x01\x02\x00')
+ try:
+ data = self.recv(23)
+ except AssertionError:
+ time.sleep(1.0)
+ data = None
+ if not data:
+ count += 1
+ time.sleep(0.2)
+ continue
+ print >> sys.stderr, 'connected'
+ break
+ if not data:
+ raise IOError
+ version, status = map(ord, data[2:4])
+ assert status in [0, 22]
+ if status == 22:
+ print >> sys.stderr, '!!!!Scanner Battery is low!!!!'
+ self.serial = data[4:12]
+ self.sw_ver = data[12:20]
+ assert data[20] == '\0'
+ print >> sys.stderr, 'serial#', self.serial.encode('hex')
+ print >> sys.stderr, 'Scanner Software version:', self.sw_ver
+
+ def get_time(self):
+ """Getting the time set in the scanner and calculating the drift..."""
+ print >> sys.stderr, 'reading clock for drift...'
+ self.send('\x0a\x02\x00')
+ self.time_response(True)
+
+ def set_time(self):
+ """clearing scanner time..."""
+ now = list(datetime.datetime.now().timetuple()[0:6])
+ now[0] -= 2000
+ now.reverse()
+ self.send('\x09\x02\x06' + ''.join(map(chr, now)) + '\0')
+ self.time_response()
+ print >> sys.stderr, 'done!'
+
+ def time_response(self, calculate_drift=False):
+ now = datetime.datetime.now()
+ data = self.recv(12)
+ assert data[2] == '\x06'
+ s, mi, h, d, m, y = map(ord, data[3:9])
+ y += 2000
+ ts = datetime.datetime(y, m, d, h, mi, s)
+ # determine the clock drift so we can correct timestamps
+ if calculate_drift:
+ self.delta = now - ts
+ print >> sys.stderr, 'clock drift is:', self.delta
+ if abs(self.delta).seconds > 60:
+ print >> sys.stderr, '!!!!Found big difference between scanner RTC and host clock!!!!',
+ print >> sys.stderr, self.delta
+
+ def get_barcodes(self):
+ """Retrieving bar codes and timestamps from scanner's memory, and
+ correcting clock drift...
+ """
+ print >> sys.stderr, 'reading barcodes...',
+ count = 0
+ # retry up to 5 times to read scanner
+ while count < 5:
+ try:
+ self.send('\x07\x02\x00')
+ data = self.recv()
+ assert data[2:10] == self.serial, data[2:10].encode('hex')
+ break
+ except AssertionError:
+ count += 1
+ time.sleep(0.2)
+ self.last_barcodes = []
+ data = data[10:-3]
+ while data:
+ length = ord(data[0])
+ first, data = data[1:length+1], data[length+1:]
+ symbology = symbologies.get(ord(first[0]), 'UNKNOWN')
+ code = first[1:-4]
+ t = struct.unpack('>I', first[-4:])[0]
+ y = 2000 + int(t & 0x3f)
+ t >>= 6
+ m = int(t & 0x0f)
+ t >>= 4
+ d = int(t & 0x1f)
+ t >>= 5
+ h = int(t & 0x1f)
+ t >>= 5
+ mi = int(t & 0x3f)
+ t >>= 6
+ s = int(t & 0x3f)
+ ts = datetime.datetime(y, m, d, h, mi, s) + self.delta
+ symbology, code = expand(symbology, code)
+ self.last_barcodes.append((symbology, code, ts))
+ print >> sys.stderr, 'done (%d read)' % len(self.last_barcodes)
+ return self.last_barcodes
+
+ def clear_barcodes(self):
+ """Clearing the bar codes in the scanner's memory..."""
+ print >> sys.stderr, 'clearing barcodes...',
+ self.send('\x02\x02\x00')
+ data = self.recv(5)
+ print >> sys.stderr, 'done!'
+
+ def power_down(self):
+ """Shutting the scanner down to save battery life..."""
+ print >> sys.stderr, 'powering down scanner...',
+ self.send('\x05\x02\x00')
+ data = self.recv(5)
+ print >> sys.stderr, 'done!'
+
+ def send(self, cmd):
+ """Sending a command to the scanner..."""
+ self.ser.write(cmd)
+ self.ser.write(crc16(cmd))
+
+ def recv(self, length=MAX_RESP):
+ """Receive a response. For fixed-size responses, specifying it will take
+ less time as we won't need to wait for the timeout to return data
+ """
+ data = self.ser.read(length)
+ if data:
+ assert data.startswith('\x06\x02'), data.encode('hex')
+ assert data[-2:] == crc16(data[:-2])
+ assert data[-3] == '\0'
+ return data
+
+ def close(self):
+ self.ser.close()
+
+ def __del__(self):
+ self.close()
+ del self.ser
+
+########################################################################
+# Modified from:
+# http://news.hping.org/comp.lang.python.archive/18112.html
+# to use the algorithm as specified by Symbol
+# original crc16.py by Bryan G. Olson, 2005
+# This module is free software and may be used and
+# distributed under the same terms as Python itself.
+import array
+def crc16(string, value=0):
+ """CRC function using Symbol's specified algorithm
+ """
+ value = 0xffff
+ for ch in string:
+ value = table[ord(ch) ^ (value & 0xff)] ^ (value >> 8)
+ #return value
+ return struct.pack('>H', ~value) #here i get an error ->get it fixed
+
+# CRC-16 poly: p(x) = x**16 + x**15 + x**2 + 1
+# top bit implicit, reflected
+poly = 0xa001
+table = array.array('H')
+for byte in range(256):
+ crc = 0
+ for bit in range(8):
+ if (byte ^ crc) & 1:
+ crc = (crc >> 1) ^ poly
+ else:
+ crc >>= 1
+ byte >>= 1
+ table.append(crc)
+
+assert crc16('\x01\x02\x00') == '\x9f\xde', \
+ map(hex, map(ord, crc16('\x01\x02\x00')))
+
+if __name__ == '__main__':
+ scanner = CS1504(serial_port)
+ scanner.interrogate()
+ scanner.get_time()
+ scanner.set_time()
+ barcodes = scanner.get_barcodes()
+ for symbology, code, timestamp in barcodes:
+ print '%s,%s,%s' % (symbology, code, str(timestamp).split('.')[0])
+ if barcodes:
+ scanner.clear_barcodes()
+ scanner.power_down()
+ print >> sys.stderr, 'good bye!'
+ print >> sys.stderr, ''
+ print >> sys.stderr, '----------------'
|
liangwenke/jarray | ffd6b43ccadad45a911f61b8902aa19ee78e2a63 | cleanup document | diff --git a/README.rdoc b/README.rdoc
index 5a8bcbc..304bbcb 100644
--- a/README.rdoc
+++ b/README.rdoc
@@ -1,22 +1,21 @@
== jQueryæä»¶jarrayä¸»è¦æ¯æ©å±JavaScriptæ°ç»çæ¹æ³
== Usage
两个æ°ç»åå¹¶,å 餿°ç»ä¸éå¤å
ç´
-
- jQuery.uniq([1,2]) // return [1,2]
- jQuery.uniq([1,2], []) // return [1,2]
- jQuery.uniq([1,2], [2]) // return [1,2]
- jQuery.uniq([1,2], [2,3]) // return [1,2,3]
+ jQuery.uniq([1,2]) // return [1,2]
+ jQuery.uniq([1,2], []) // return [1,2]
+ jQuery.uniq([1,2], [2]) // return [1,2]
+ jQuery.uniq([1,2], [2,3]) // return [1,2,3]
两个æ°ç»ç¸å
- jQuery.sub([1,2]) // return [1]
- jQuery.sub([1,2], []) // return [1]
- jQuery.sub([1,2], [2]) // return [1]
- jQuery.sub([1,2], [2,3]) // return [1]
+ jQuery.sub([1,2]) // return [1]
+ jQuery.sub([1,2], []) // return [1]
+ jQuery.sub([1,2], [2]) // return [1]
+ jQuery.sub([1,2], [2,3]) // return [1]
== Note
Copyright (c) 2010 [email protected], released under the MIT license
|
zachbonham/Environment.PowerShell | 97fca8b8b38cdbd60cddbb299ceb5a87c9584d58 | Added README | diff --git a/README b/README
new file mode 100644
index 0000000..247acf7
--- /dev/null
+++ b/README
@@ -0,0 +1,21 @@
+In our development environment, we provision all of our servers with a configuration value in the machine.config which applications can use to determine what configuration value to use.
+
+This is highly specific to our development environment and not likely to have value elsewhere other than a rough example of a PowerShell Module.
+
+There are a couple of different ways to deploy Environment.PowerShell.dll module:
+
+- copy to %PSModulePath%\Environment.PowerShell\Environment.PowerShell.dll
+- copy to some other known location
+
+When running a PowerShell session, it might be necessary to import the module into the session. To do this execute:
+
+If module was copied to %PSModulePath%\Environment.PowerShell
+
+import-module Environment.PowerShell
+
+Otherwise provide the path to the location of the Environment.PowerShell.dll. For example, if you copied to C:\modules:
+
+import-module c:\modules\Environment.PowerShell
+
+You must be an administrator to run this as Environment.PowerShell is writing appSettings values to the available machine.configs.
+
|
dancrew32/AWESOME-JS | 19069f3318b6f58b7b35e52f8567e34232129350 | canvas setup methods | diff --git a/awesome.js b/awesome.js
index 09c84f2..b73e3c9 100644
--- a/awesome.js
+++ b/awesome.js
@@ -187,814 +187,822 @@ var AWESOME = (function (WIN, DOC) {
'r': '\r',
't': '\t'
};
function unescapeOne(_, ch, hex) {
return ch ? escapes[ch] : String.fromCharCode(parseInt(hex, 16));
}
var EMPTY_STRING = '';
var SLASH = '\\';
var firstTokenCtors = { '{': Object, '[': Array };
var hop = Object.hasOwnProperty;
var toks = str.match(jsonToken);
var tok = toks[0];
var topLevelPrimitive = false;
if ('{' === tok) {
result = {};
} else if ('[' === tok) {
result = [];
} else {
result = [];
topLevelPrimitive = true;
}
var key;
var stack = [result];
for (var i = 1 - topLevelPrimitive, n = toks.length; i < n; ++i) {
tok = toks[i];
var cont;
switch (tok.charCodeAt(0)) {
case 0x22: // '"'
tok = tok.substring(1, tok.length - 1);
if (tok.indexOf(SLASH) !== -1) {
tok = tok.replace(escapeSequence, unescapeOne);
}
cont = stack[0];
if (!key) {
if (cont instanceof Array) {
key = cont.length;
} else {
key = tok || EMPTY_STRING // Use as key for next value seen.
break;
}
}
cont[key] = tok;
key = void 0;
break;
case 0x5b: // '['
cont = stack[0];
stack.unshift(cont[key || cont.length] = []);
key = void 0;
break;
case 0x5d: // ']'
stack.shift();
break;
case 0x66: // 'f'
cont = stack[0];
cont[key || cont.length] = false;
key = void 0;
break;
case 0x6e: // 'n'
cont = stack[0];
cont[key || cont.length] = null;
key = void 0;
break;
case 0x74: // 't'
cont = stack[0];
cont[key || cont.length] = true;
key = void 0;
break;
case 0x7b: // '{'
cont = stack[0];
stack.unshift(cont[key || cont.length] = {});
key = void 0;
break;
case 0x7d: // '}'
stack.shift();
break;
default: // sign or digit
cont = stack[0];
cont[key || cont.length] = +(tok);
key = void 0;
break;
}
}
if (topLevelPrimitive) {
if (stack.length !== 1) { throw new Error(); }
result = result[0];
} else {
if (stack.length) { throw new Error(); }
}
break;
}
return result;
}
// PUBLIC
return {
ready: function (fn, ctx) {
var contentLoaded = 'DOMContentLoaded';
var ready;
var timer;
var onStateChange = function (e) {
// Mozilla & Opera
if (e && e.type === contentLoaded) {
fireDOMReady();
// Legacy
} else if (e && e.type === 'load') {
fireDOMReady();
// Safari & IE
} else if (DOC.readyState) {
if ((RXP.ready).test(DOC.readyState)) {
fireDOMReady();
// IE
} else if (!!DOCEL.doScroll) {
try {
ready || DOCEL.doScroll('left');
} catch (ex) {
return;
}
fireDOMReady();
}
}
};
var fireDOMReady = function () {
if (!ready) {
ready = true;
// onload function in given context or window object
fn.call(ctx || WIN);
// Clean up after the DOM is ready
if (CANATTACH)
DOC.removeEventListener(contentLoaded, onStateChange, false);
DOC.onreadystatechange = null;
WIN.onload = null;
clearInterval(timer);
timer = null;
}
};
// Mozilla & Opera
if (CANATTACH) DOC.addEventListener(contentLoaded, onStateChange, false);
// IE
DOC.onreadystatechange = onStateChange;
// Safari & IE
timer = setInterval(onStateChange, 5);
// Legacy
WIN.onload = onStateChange;
},
log: function (data, type) {
if (typeof console === 'undefined') return;
type = type || 'log'
if (isUndefined(console[type])) return;
console[type](data);
},
noop: noop,
cancelEvent: function (event) {
event = event || WIN.event;
if (event.preventDefault) {
event.preventDefault();
} else {
event.returnValue = false;
}
},
cancelPropagation: function (event) {
event = event || WIN.event;
if (event.stopPropagation) {
event.stopPropagation();
} else {
event.cancelBubble = true;
}
},
bind: function (obj, type, handler, capture) {
if (isNullOrUndefined(obj)) return;
capture = capture || false; // bubble
obj = this.toArray(obj);
var i = obj.length;
while (i--) {
if (CANATTACH) {
obj[i].addEventListener(type, handler, capture);
} else if (obj[i].attachEvent) {
obj[i].attachEvent('on'+ type, handler);
} else {
obj[i]['on'+ type] = handler;
}
}
},
unbind: function (obj, type, handler, capture) {
if (isNullOrUndefined(obj)) return;
capture = capture || false;
obj = this.toArray(obj);
var i = obj.length;
while (i--) {
if (CANATTACH) {
obj[i].removeEventListener(type, handler, capture);
} else if (obj[i].detachEvent) {
obj[i].detachEvent('on'+ type, handler);
} else {
obj[i]['on'+ type] = null;
}
}
},
fire: function(obj, ev, capture, cancelable) {
var evt;
if (DOC.createEventObject) { // ie
evt = DOC.createEventObject();
return obj.fireEvent('on'+ ev, evt);
}
capture = capture || false;
cancelable = cancelable || true;
evt = DOC.createEvent('HTMLEvents');
evt.initEvent(ev, capture, cancelable);
return !obj.dispatchEvent(evt);
},
hover: function (obj, over, out, capture) {
if (isUndefined(obj)) {return;}
var $this = this;
out = out || null;
$this.bind(obj, 'mouseover', over, capture);
if (out) $this.bind(obj, 'mouseout', out, capture);
},
toArray: function(obj) {
if (!isArray(obj)) obj = [obj];
return obj;
},
isObject: isObject,
isArray: isArray,
isString: isString,
isUndefined: isUndefined,
isNull: isNull,
isNullOrUndefined: isNullOrUndefined,
hasClass: function (el, cls) {
var re = el.className.split(' ');
if (isUndefined(re)) { return false; }
return -1 !== re.indexOf(cls);
},
addClass: function (el, cls) {
if (!this.hasClass(el, cls)) el.className += ' '+ cls;
},
removeClass: function (el, cls) {
if (!this.hasClass(el, cls)) return;
var re = el.className.split(' ');
if (isUndefined(re)) return;
re.splice(re.indexOf(cls), 1);
var i = re.length;
el.className = ''; // empty
while (i--) { // reload
el.className += re[i] +' ';
}
},
getId: function (id) {
return DOC.getElementById(id) || false;
},
getTag: function (tag, context) {
context = context || DOC;
tag = tag || '*';
return context.getElementsByTagName(tag);
},
getClass: function (searchClass, context, tag) {
var classElements = [];
var els = this.getTag(tag, context);
var elsLen = els.length;
var pattern = new RegExp('^|\\s' + searchClass + '\\s|$');
for (var i = 0, j = 0; i < elsLen; ++i) {
if (pattern.test(els[i].className)) {
classElements[j] = els[i];
j++;
}
}
return classElements;
},
is: function(el, type) {
if (isUndefined(type)) return el.nodeName;
return el.nodeName === type.toUpperCase();
},
toCamelCase: function (string) {
var strs = string.split('-');
if (strs.length === 1) return strs[0];
var ccstr = string.indexOf('-') === 0
? strs[0].charAt(0).toUpperCase() + strs[0].substring(1)
: strs[0];
for (var i = 1, len = strs.length; i < len; ++i) {
var s = strs[i];
ccstr += s.charAt(0).toUpperCase() + s.substring(1);
}
return ccstr;
},
style: function (el, prop, newVal) {
if (!isUndefined(el))
if (isUndefined(prop)) {
return el.currentStyle || getComputedStyle(el, null);
} else {
prop = this.toCamelCase(prop);
newVal = newVal || null;
if (newVal) {
if (prop === 'opacity') {
el.style.filter = "alpha(opacity=" + newVal * 100 + ")";
el.style.opacity = newVal;
} else {
prop = this.toCamelCase(prop);
el.style[prop] = newVal;
}
} else {
var view = DOC.defaultView;
if (view && view.getComputedStyle) {
return view.getComputedStyle(el, '')[prop] || null;
} else {
if (prop === 'opacity') {
if (el['filters'].length <= 0) {
el.style.filter = 'alpha(opacity = 100)';
}
var opacity = el['filters']('alpha').opacity;
return isNaN(opacity) ? 1 : (opacity ? opacity / 100 : 0);
}
return el.currentStyle[prop] || null;
}
}
}
},
hide: function(el) {
},
getPosition: function(obj) {
if (!obj) return;
var curLeft = 0;
var curTop = 0;
do {
curLeft += obj.offsetLeft;
curTop += obj.offsetTop;
} while (obj = obj.offsetParent);
return {
top: curTop,
left: curLeft
};
},
getMousePosition: function(event, relativeTo) {
var x = event.pageX;
var y = event.pageY;
if (isNull(x) && !isNull(event.clientX)) {
var xScroll = (DOCEL && DOCEL.scrollLeft || BODY && BODY.scrollLeft || 0);
var xClient = (DOCEL && DOCEL.clientLeft || BODY && BODY.clientLeft || 0);
var yScroll = (DOCEL && DOCEL.scrollTop || BODY && BODY.scrollTop || 0);
var yClient = (DOCEL && DOCEL.clientTop || BODY && BODY.clientTop || 0);
x = event.clientX + xScroll - xClient;
y = event.clientY + yScroll - yClient;
}
if (!isNullOrUndefined(relativeTo)) {
var tar = (typeof relativeTo === 'object') ? relativeTo : event.target;
var tarPos = this.getPosition(tar);
x = x - tarPos.left;
y = y - tarPos.top;
}
return {
x: x,
y: y
};
},
getScrollPosition: function() {
if (!isUndefined(WIN.pageYOffset)) {
return WIN.pageYOffset;
}
return DOCEL.scrollTop;
},
docHeight: function () {
return Math.max(
Math.max(BODY.scrollHeight, DOCEL.scrollHeight),
Math.max(BODY.offsetHeight, DOCEL.offsetHeight),
Math.max(BODY.clientHeight, DOCEL.clientHeight)
);
},
docWidth: function () {
return Math.max(BODY.clientWidth, DOCEL.clientWidth);
},
viewportHeight: function () {
if (!isUndefined(WIN.innerHeight)) {
return WIN.innerHeight;
} else if (!isUndefined(DOCEL)
&& !isUndefined(DOCEL.clientHeight)
&& DOCEL.clientHeight) { //ie6
return DOCEL.clientHeight;
}
return BODY.clientHeight;
},
viewportWidth: function () {
if (!isUndefined(WIN.innerWidth)) {
return WIN.innerWidth;
} else if (!isUndefined(DOCEL)
&& !isUndefined(DOCEL.clientWidth)
&& DOCEL.clientWidth) { //ie6
return DOCEL.clientWidth;
}
return BODY.clientWidth;
},
attr: function (ele, attr, newVal) {
newVal = newVal || null;
if (newVal) {
ele.setAttribute(attr, newVal);
} else {
var attrs = ele.attributes,
attrsLen = attrs.length,
result = ele.getAttribute(attr) || ele[attr] || null;
if (!result) {
while (attrsLen--) {
if (attr[attrsLen].nodeName === attr)
result = attr[i].nodeValue;
}
}
return result;
}
},
template: function(template, obj){
var cache = {};
var strCache = template;
var matches = 0;
template.replace(RXP.template, function(tmpl, val) { // #{oKey}
cache[tmpl] = val;
});
for (var key in cache) {
strCache = strCache.replace(new RegExp(key, 'g'), obj[cache[key]]);
}
return strCache;
},
html: function(obj, str, coerce, coercePar) {
coerse = coerce || false;
if (coerce) {
var temp = obj.ownerDocument.createElement('DIV');
temp.innerHTML = '<'+ coercePar +'>'+ str +'</'+ coercePar +'>';
this.swap(temp.firstChild.firstChild, obj);
} else {
obj.innerHTML = str;
}
},
encodeHTML: function (str) {
return str.replace(RXP.amp, '&')
.replace(RXP.lt, '<')
.replace(RXP.gt, '>')
.replace(RXP.quote, '"')
.replace(RXP.apos, ''');
},
stripHTML: function (str) {
return str.replace(/<.*?>/g,'');
},
text: function (obj, txt) {
if (isUndefined(obj)) return;
if (txt) {
if (!isUndefined(obj.innerText)) {
obj.innerText = txt;
}
obj.textContent = txt;
return;
}
return obj.innerText || obj.textContent || obj.text;
},
plural: function(count, singular, plural) {
return count === 1 ? singular : plural;
},
trim: function (str) {
return str.replace(/^\s+|\s+$/g);
},
prepend: function (newNode, node) {
node.insertBefore(this.toNode(newNode), node.childNodes[0]);
},
append: function (newNode, node) {
node.appendChild(this.toNode(newNode));
},
before: function (newNode, node) {
node.parentNode.insertBefore(this.toNode(newNode), node);
},
after: function (newNode, node) {
node.parentNode.insertBefore(this.toNode(newNode), node.nextSibling);
},
swap: function (a, b) {
a.parentNode.replaceChild(b, a);
},
remove: function (ele, recursive) {
if (!ele) return false;
recursive = recursive || true;
ele = this.toArray(ele);
var i = ele.length;
while (i--) {
if (!isUndefined(ele[i].parentNode)) {
if (recursive) {
this.destroy(ele[i]);
continue;
}
ele[i].parentNode.removeChild(ele[i]);
}
}
},
destroy: function(el) {
if (isUndefined(el)) return;
var trash = this.create('DIV');
trash.appendChild(el);
trash.innerHTML = '';
},
toNode: function(text) {
if (!isString(text)) return text;
return this.frag(text);
},
create: function (tag) {
if (tag.charAt(0) === '<') return this.frag(tag);
return DOC.createElement(tag.toUpperCase());
},
frag: function(str) {
var frag = DOC.createDocumentFragment();
var temp = this.create('DIV');
temp.innerHTML = str;
while (temp.firstChild) {
frag.appendChild(temp.firstChild);
}
return frag;
},
+ canvas: function(el) {
+ if (!el.getContext) {
+ this.addClass('no-canvas');
+ return false;
+ } else {
+ return el.getContext('2d');
+ }
+ },
// TODO: Execution Queue
// Cookies
createCookie: function (name, value, days, domain) {
var expires = '';
var cookie;
domain = domain || WIN.location.host;
if (days) {
var date = new Date();
date.setTime(date.getTime() + (days * 24 * 60 * 60 * 1000));
expires = '; expires=' + date.toGMTString();
}
cookie = name + '=' + value + expires + ';';
if (domain) {
cookie += ' domain=.'+ domain +' ;';
}
if (path) {
cookie += 'path='+ path;
}
DOC.cookie = cookie;
},
eraseCookie: function (name) {
this.createCookie(name, '', -1);
},
readCookie: function (c_name) {
if (DOC.cookie.length) {
var c_start = DOC.cookie.indexOf(c_name + "=");
if (c_start !== -1) {
c_start = c_start + c_name.length + 1;
var c_end = DOC.cookie.indexOf(";", c_start);
if (c_end === -1) {
c_end = DOC.cookie.length;
}
return unescape(DOC.cookie.substring(c_start, c_end));
}
}
return null;
},
// Math
getMax: function (array) {
var m = Math;
return m.max.apply(m, array);
},
getMin: function (array) {
var m = Math;
return m.min.apply(m, array);
},
getRandom: function(min, max) {
var m = Math;
- if (min) {
+ if (!isUndefined(min)) {
return m.floor(m.random() * (max - min + 1)) + min;
} else {
return m.round(m.random()); // 1 or 0
}
},
inArray: function(obj, arr) {
var i = arr.length;
while (i--) {
if (arr[i] === obj) {
return true;
}
}
return false;
},
isDescendant: function(p, c) {
var node = c.parentNode;
while (!isNull(node)) {
if (node === p) {
return true;
}
node = node.parentNode;
}
return false;
},
sort: function(options) {
options = this.setDefaults({
arr: [],
type: 'alphabetical',
order: 'desc',
property: null,
method: null
}, options);
var $this = this;
var method;
switch(options.type) {
case 'alphabetical':
method = function(a, b) {
var A = a.toLowerCase();
var B = b.toLowerCase();
if (options.order === 'asc') {
if (A < B) { return -1; }
else if (A > B) { return 1; }
else { return 0; }
} else {
if (A > B) { return -1; }
else if (A < B) { return 1; }
else { return 0; }
}
};
break;
case 'numerical':
if (options.order === 'asc') {
method = function(a, b) { return a - b; };
} else {
method = function(a, b) { return b - a; };
}
break;
case 'random':
method = function() {
return Math.round(Math.random()) - 0.5;
};
break;
}
return options.arr.sort(method);
},
animate: function (el, options) {
var $this = this;
options = this.setDefaults({
property: 'width',
from: $this.style(el, options.property),
to: '0px',
duration: 200,
easing: function(pos) {
return (-Math.cos(pos * Math.PI) / 2) + 0.5;
},
callback: noop
}, options);
var fromNum = parseFloat(options.from);
var fromUnit = getUnit(options.from);
var toNum = parseFloat(options.to);
var toUnit = getUnit(options.to) || fromUnit;
var interval;
var start = +new Date();
var finish = start + options.duration;
function interpolate(source, target, pos) {
return (source + (target - source) * pos).toFixed(3);
}
function getUnit(prop){
return prop.toString().replace(/^[\-\d\.]+/,'') || '';
}
interval = setInterval(function() {
var time = +new Date();
var pos = time > finish ? 1 : (time-start) / options.duration;
var interpolation = interpolate(fromNum, toNum, options.easing(pos));
$this.style(el, options.property, interpolation + toUnit);
if (time > finish) {
clearInterval(interval);
options.callback();
}
}, 10);
},
fadeIn: function(el, duration, callback) {
this.fade(el, duration, 1, callback);
},
fadeOut: function(el, duration, callback) {
this.fade(el, duration, 0, callback);
},
fade: function(el, duration, to, callback, from) {
callback = callback || noop;
this.animate(el, {
property: 'opacity',
to: to,
duration: duration,
callback: callback
});
},
// Ajax
getUrlVars: function () {
var vars = [];
var hash;
var hashes = WIN.location.href.slice(WIN.location.href.indexOf('?') + 1).split('&');
var hashlen = hashes.length;
for (var i = 0; i < hashlen; ++i) {
hash = hashes[i].split('=');
vars.push(hash[0]);
vars[hash[0]] = hash[1];
}
return vars;
},
serialize: function(obj) {
var viableNodes = ['input', 'select', 'textarea'];
var viableNodesLen = viableNodes.length;
var rawChildren = [];
var formChildren = [];
var returnObject = {};
var nodeList = [];
for (var i = 0; i < viableNodesLen; ++i) {
nodeList = obj.getElementsByTagName(viableNodes[i]);
var nodeListLen = nodeList.length;
for (var j = 0; j < nodeListLen; ++j) {
rawChildren.push(nodeList[j]);
}
}
// build list of viable form elements
var rawChildrenLen = rawChildren.length;
for (var k = 0; k < rawChildrenLen; ++k) {
var currentNode = rawChildren[k];
switch(rawChildren[k].nodeName.toLowerCase()) {
case 'input':
switch(currentNode.type) {
case 'text':
case 'hidden':
case 'password':
formChildren.push(currentNode);
break;
case 'radio':
case 'checkbox':
if (currentNode.checked === 'checked') {
formChildren.push(currentNode);
}
break;
}
break;
case 'select':
case 'textarea':
formChildren.push(currentNode);
break;
}
}
//build object of the name-value pairs
var formChildrenLen = formChildren.length;
for (var m = 0; m < formChildrenLen; ++m) {
var currentChild = formChildren[m];
if (!returnObject.hasOwnProperty(currentChild.name)) {
returnObject[currentChild.name] = currentChild.value;
} else {
if (typeof returnObject[currentChild.name] === 'string') {
returnObject[currentChild.name] = [returnObject[currentChild.name], currentChild.value.toString()];
} else {
returnObject[currentChild.name].push(currentChild.value.toString());
}
}
}
return returnObject;
},
formatParams: formatParams,
setDefaults: function(defaults, options) {
if (!options) {
options = defaults;
} else {
for (var index in defaults) {
if (isUndefined(options[index])) {
options[index] = defaults[index];
}
}
}
return options;
},
parse: parse,
addScript: function(url, id) {
var script = this.create('script');
script.type = 'text/javascript';
script.src = url || '#';
script.id = id || 'awesome-script'; // id to remove
this.append(script, this.getTag('head')[0]);
return true;
},
ajax: function(options) {
options = this.setDefaults({
url: null,
data: null, // key:val
dataType: null,
type: 'post',
disguise: false,
requestId: null,
beforeSend: noop,
sendPrepared: noop,
afterSend: noop,
complete: noop,
failure: noop
}, options);
// init
switch (options.type.toUpperCase()) {
case 'POST':
postRequest(options);
break;
case 'JSONP':
this.addScript(options.url, options.requestId || 'awesome-jsonp');
break;
default:
getRequest(options);
}
},
spinner: function(el, speed, slides) {
slides = slides || ['⊕', '⊗'];
var i = slides.length;
var cur = 0;
return setInterval(function() {
cur = cur + 1 === i ? 0 : cur + 1;
el.innerHTML = slides[cur];
}, speed || 100);
}
};
}(window, document));
|
dancrew32/AWESOME-JS | 9472c51df5f1e10835466e44c058c6241939f4d1 | broke the parser.. fixed it.. fixing some tests.. | diff --git a/awesome.js b/awesome.js
index 06d80a8..09c84f2 100644
--- a/awesome.js
+++ b/awesome.js
@@ -1,998 +1,1000 @@
// Awesome ensues
var AWESOME = (function (WIN, DOC) {
// PRIVATE
var BODY = DOC.body;
var DOCEL = DOC.documentElement;
var CANATTACH = isFunction(BODY.addEventListener) && isUndefined(BODY.attachEvent);
var CANCANVAS = null;
var RXP = {
ready: /loaded|complete/,
template: /#{([^}]*)}/g,
amp: /&/g,
lt: /</g,
gt: />/g,
quote: /"/g,
- apos: /'/g,
- number: '(?:-?\\b(?:0|[1-9][0-9]*)(?:\\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\\b)',
- oneChar: '(?:[^\\0-\\x08\\x0a-\\x1f\"\\\\]|\\\\(?:[\"/\\\\bfnrt]|u[0-9A-Fa-f]{4}))',
- jsonEscapeSeq: /\\\\(?:([^u])|u(.{4}))/g
+ apos: /'/g
};
if (!Array.indexOf) {
Array.prototype.indexOf = function(obj) {
for(var i = 0; i < this.length; i++) {
if (this[i] === obj){
return i;
}
}
return -1;
};
}
function noop() {}
// isTest's
function isObject(val) {
return typeof val === 'object';
}
function isArray(val) {
return isObject(val) && !isUndefined(val.length);
}
function isString(val) {
return typeof val === 'string';
}
function isFunction(val) {
return typeof val === 'function';
}
function isUndefined(val) {
return typeof val === 'undefined';
}
function isNull(val) {
return typeof val === 'null';
}
function isNullOrUndefined(val) {
return isNull(val) || isUndefined(val);
}
// ajax
function openRequest(options, method) {
var req = getHttpRequest();
if (isNull(req)) return;
var d = new Date();
var aborted = 'abort';
req.open(method, options.url, true);
if (method === 'POST') {
req.setRequestHeader('Content-Type', 'application/x-www-form-urlencoded');
}
if (!options.disguise) {
req.setRequestHeader('X-Requested-With', 'XMLHttpRequest');
}
req.setRequestHeader('X-Request-Id', d.getTime());
req.onreadystatechange = function(e) {
var data = '';
switch (req.readyState) {
case 0:
options.beforeSend();
break;
case 1:
options.sendPrepared();
break;
case 2:
options.afterSend();
break;
case 4:
-
if (!isNull(options.dataType)) {
try {
data = parse(req.responseText, options.dataType);
} catch (erD) { data = aborted; }
} else {
try {
data = req.responseText;
} catch (erT) { data = aborted; }
}
if (data !== aborted && req.status >= 200 && req.status < 300) {
options.complete(data);
} else if (data !== aborted && req.status === 0) { // file:/// ajax
options.complete(data);
} else {
options.failure(data);
}
break;
}
};
return req;
}
function getRequest(options) {
var req = openRequest(options, 'GET');
req.send('');
return req;
}
function postRequest(options) {
var req = openRequest(options, 'POST');
req.send(formatParams(options.data));
return req;
}
function getHttpRequest() {
+ var MSxml = 'Msxml2.XMLHTTP';
if (typeof XMLHttpRequest !== 'undefined')
return new XMLHttpRequest();
try {
return new ActiveXObject(MSxml +'.6.0');
} catch(e1) {}
try {
return new ActiveXObject(MSxml +'.3.0');
} catch(e2) {}
try {
return new ActiveXObject(MSxml);
} catch(e3) {}
try {
return new ActiveXObject('Microsoft.XMLHTTP');
} catch(e4) {}
}
function formatParams(obj) {
if (isNull(obj)) {return '';}
var q = [];
var encode = encodeURIComponent;
for (var prop in obj) {
if (obj.hasOwnProperty(prop)) {
q.push( encode(prop) +'='+ encode(obj[prop]) );
}
}
return q.join('&');
}
// parse
function parse(str, type) {
if (str === '') return;
type = type || 'json';
var result;
switch (type.toLowerCase()) {
case 'xml':
if (WIN.DOMParser) {
var parser = new DOMParser();
return parser.parseFromString(str, 'text/xml');
} else { // ie
var xmlDoc = new ActiveXObject('Microsoft.XMLDOM');
xmlDoc.async = 'false';
xmlDoc.loadXML(str);
return xmlDoc;
}
break;
case 'json':
- if (JSON.parse) {
+ if (typeof JSON !== 'undefined') {
return JSON.parse(str);
}
- var string = '(?:\"' + RXP.oneChar + '*\")';
+ var number = '(?:-?\\b(?:0|[1-9][0-9]*)(?:\\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\\b)';
+ var oneChar = '(?:[^\\0-\\x08\\x0a-\\x1f\"\\\\]'
+ + '|\\\\(?:[\"/\\\\bfnrt]|u[0-9A-Fa-f]{4}))';
+ var string = '(?:\"' + oneChar + '*\")';
+
var jsonToken = new RegExp(
'(?:false|true|null|[\\{\\}\\[\\]]'
- + '|' + RXP.number
+ + '|' + number
+ '|' + string
+ ')', 'g');
+ var escapeSequence = new RegExp('\\\\(?:([^u])|u(.{4}))', 'g');
var escapes = {
'"': '"',
'/': '/',
'\\': '\\',
'b': '\b',
'f': '\f',
'n': '\n',
'r': '\r',
't': '\t'
};
function unescapeOne(_, ch, hex) {
return ch ? escapes[ch] : String.fromCharCode(parseInt(hex, 16));
}
+ var EMPTY_STRING = '';
+ var SLASH = '\\';
+ var firstTokenCtors = { '{': Object, '[': Array };
+ var hop = Object.hasOwnProperty;
var toks = str.match(jsonToken);
var tok = toks[0];
var topLevelPrimitive = false;
if ('{' === tok) {
result = {};
} else if ('[' === tok) {
result = [];
} else {
result = [];
topLevelPrimitive = true;
}
var key;
var stack = [result];
for (var i = 1 - topLevelPrimitive, n = toks.length; i < n; ++i) {
tok = toks[i];
var cont;
switch (tok.charCodeAt(0)) {
case 0x22: // '"'
tok = tok.substring(1, tok.length - 1);
- if (tok.indexOf('\\') !== -1) {
- tok = tok.replace(RXP.jsonEscapeSeq, unescapeOne);
+ if (tok.indexOf(SLASH) !== -1) {
+ tok = tok.replace(escapeSequence, unescapeOne);
}
cont = stack[0];
if (!key) {
if (cont instanceof Array) {
key = cont.length;
} else {
- key = tok || ''; // Use as key for next value seen.
+ key = tok || EMPTY_STRING // Use as key for next value seen.
break;
}
}
cont[key] = tok;
key = void 0;
break;
case 0x5b: // '['
cont = stack[0];
stack.unshift(cont[key || cont.length] = []);
key = void 0;
break;
case 0x5d: // ']'
stack.shift();
break;
case 0x66: // 'f'
cont = stack[0];
cont[key || cont.length] = false;
key = void 0;
break;
case 0x6e: // 'n'
cont = stack[0];
cont[key || cont.length] = null;
key = void 0;
break;
case 0x74: // 't'
cont = stack[0];
cont[key || cont.length] = true;
key = void 0;
break;
case 0x7b: // '{'
cont = stack[0];
stack.unshift(cont[key || cont.length] = {});
key = void 0;
break;
case 0x7d: // '}'
stack.shift();
break;
default: // sign or digit
cont = stack[0];
cont[key || cont.length] = +(tok);
key = void 0;
break;
}
}
if (topLevelPrimitive) {
if (stack.length !== 1) { throw new Error(); }
result = result[0];
} else {
if (stack.length) { throw new Error(); }
}
break;
}
return result;
}
// PUBLIC
return {
ready: function (fn, ctx) {
var contentLoaded = 'DOMContentLoaded';
var ready;
var timer;
var onStateChange = function (e) {
// Mozilla & Opera
if (e && e.type === contentLoaded) {
fireDOMReady();
// Legacy
} else if (e && e.type === 'load') {
fireDOMReady();
// Safari & IE
} else if (DOC.readyState) {
if ((RXP.ready).test(DOC.readyState)) {
fireDOMReady();
// IE
} else if (!!DOCEL.doScroll) {
try {
ready || DOCEL.doScroll('left');
} catch (ex) {
return;
}
fireDOMReady();
}
}
};
var fireDOMReady = function () {
if (!ready) {
ready = true;
// onload function in given context or window object
fn.call(ctx || WIN);
// Clean up after the DOM is ready
if (CANATTACH)
DOC.removeEventListener(contentLoaded, onStateChange, false);
DOC.onreadystatechange = null;
WIN.onload = null;
clearInterval(timer);
timer = null;
}
};
// Mozilla & Opera
if (CANATTACH) DOC.addEventListener(contentLoaded, onStateChange, false);
// IE
DOC.onreadystatechange = onStateChange;
// Safari & IE
timer = setInterval(onStateChange, 5);
// Legacy
WIN.onload = onStateChange;
},
log: function (data, type) {
if (typeof console === 'undefined') return;
type = type || 'log'
- if (isUndefined(console)) return;
+ if (isUndefined(console[type])) return;
console[type](data);
},
noop: noop,
cancelEvent: function (event) {
event = event || WIN.event;
if (event.preventDefault) {
event.preventDefault();
} else {
event.returnValue = false;
}
},
cancelPropagation: function (event) {
event = event || WIN.event;
if (event.stopPropagation) {
event.stopPropagation();
} else {
event.cancelBubble = true;
}
},
bind: function (obj, type, handler, capture) {
if (isNullOrUndefined(obj)) return;
capture = capture || false; // bubble
obj = this.toArray(obj);
var i = obj.length;
while (i--) {
if (CANATTACH) {
obj[i].addEventListener(type, handler, capture);
} else if (obj[i].attachEvent) {
obj[i].attachEvent('on'+ type, handler);
} else {
obj[i]['on'+ type] = handler;
}
}
},
unbind: function (obj, type, handler, capture) {
if (isNullOrUndefined(obj)) return;
capture = capture || false;
obj = this.toArray(obj);
var i = obj.length;
while (i--) {
if (CANATTACH) {
obj[i].removeEventListener(type, handler, capture);
} else if (obj[i].detachEvent) {
obj[i].detachEvent('on'+ type, handler);
} else {
obj[i]['on'+ type] = null;
}
}
},
fire: function(obj, ev, capture, cancelable) {
var evt;
if (DOC.createEventObject) { // ie
evt = DOC.createEventObject();
return obj.fireEvent('on'+ ev, evt);
}
capture = capture || false;
cancelable = cancelable || true;
evt = DOC.createEvent('HTMLEvents');
evt.initEvent(ev, capture, cancelable);
return !obj.dispatchEvent(evt);
},
hover: function (obj, over, out, capture) {
if (isUndefined(obj)) {return;}
var $this = this;
out = out || null;
$this.bind(obj, 'mouseover', over, capture);
if (out) $this.bind(obj, 'mouseout', out, capture);
},
toArray: function(obj) {
if (!isArray(obj)) obj = [obj];
return obj;
},
isObject: isObject,
isArray: isArray,
isString: isString,
isUndefined: isUndefined,
isNull: isNull,
isNullOrUndefined: isNullOrUndefined,
hasClass: function (el, cls) {
var re = el.className.split(' ');
if (isUndefined(re)) { return false; }
return -1 !== re.indexOf(cls);
},
addClass: function (el, cls) {
if (!this.hasClass(el, cls)) el.className += ' '+ cls;
},
removeClass: function (el, cls) {
if (!this.hasClass(el, cls)) return;
var re = el.className.split(' ');
if (isUndefined(re)) return;
re.splice(re.indexOf(cls), 1);
var i = re.length;
el.className = ''; // empty
while (i--) { // reload
el.className += re[i] +' ';
}
},
getId: function (id) {
- return DOC.getElementById(id);
+ return DOC.getElementById(id) || false;
},
getTag: function (tag, context) {
context = context || DOC;
tag = tag || '*';
return context.getElementsByTagName(tag);
},
getClass: function (searchClass, context, tag) {
var classElements = [];
var els = this.getTag(tag, context);
var elsLen = els.length;
var pattern = new RegExp('^|\\s' + searchClass + '\\s|$');
for (var i = 0, j = 0; i < elsLen; ++i) {
if (pattern.test(els[i].className)) {
classElements[j] = els[i];
j++;
}
}
return classElements;
},
is: function(el, type) {
if (isUndefined(type)) return el.nodeName;
return el.nodeName === type.toUpperCase();
},
toCamelCase: function (string) {
var strs = string.split('-');
if (strs.length === 1) return strs[0];
var ccstr = string.indexOf('-') === 0
? strs[0].charAt(0).toUpperCase() + strs[0].substring(1)
: strs[0];
for (var i = 1, len = strs.length; i < len; ++i) {
var s = strs[i];
ccstr += s.charAt(0).toUpperCase() + s.substring(1);
}
return ccstr;
},
style: function (el, prop, newVal) {
if (!isUndefined(el))
if (isUndefined(prop)) {
return el.currentStyle || getComputedStyle(el, null);
} else {
prop = this.toCamelCase(prop);
newVal = newVal || null;
if (newVal) {
if (prop === 'opacity') {
el.style.filter = "alpha(opacity=" + newVal * 100 + ")";
el.style.opacity = newVal;
} else {
prop = this.toCamelCase(prop);
el.style[prop] = newVal;
}
} else {
var view = DOC.defaultView;
if (view && view.getComputedStyle) {
return view.getComputedStyle(el, '')[prop] || null;
} else {
if (prop === 'opacity') {
if (el['filters'].length <= 0) {
el.style.filter = 'alpha(opacity = 100)';
}
var opacity = el['filters']('alpha').opacity;
return isNaN(opacity) ? 1 : (opacity ? opacity / 100 : 0);
}
return el.currentStyle[prop] || null;
}
}
}
},
hide: function(el) {
},
getPosition: function(obj) {
if (!obj) return;
var curLeft = 0;
var curTop = 0;
do {
curLeft += obj.offsetLeft;
curTop += obj.offsetTop;
} while (obj = obj.offsetParent);
return {
top: curTop,
left: curLeft
};
},
getMousePosition: function(event, relativeTo) {
var x = event.pageX;
var y = event.pageY;
if (isNull(x) && !isNull(event.clientX)) {
var xScroll = (DOCEL && DOCEL.scrollLeft || BODY && BODY.scrollLeft || 0);
var xClient = (DOCEL && DOCEL.clientLeft || BODY && BODY.clientLeft || 0);
var yScroll = (DOCEL && DOCEL.scrollTop || BODY && BODY.scrollTop || 0);
var yClient = (DOCEL && DOCEL.clientTop || BODY && BODY.clientTop || 0);
x = event.clientX + xScroll - xClient;
y = event.clientY + yScroll - yClient;
}
if (!isNullOrUndefined(relativeTo)) {
var tar = (typeof relativeTo === 'object') ? relativeTo : event.target;
var tarPos = this.getPosition(tar);
x = x - tarPos.left;
y = y - tarPos.top;
}
return {
x: x,
y: y
};
},
getScrollPosition: function() {
if (!isUndefined(WIN.pageYOffset)) {
return WIN.pageYOffset;
}
return DOCEL.scrollTop;
},
docHeight: function () {
return Math.max(
Math.max(BODY.scrollHeight, DOCEL.scrollHeight),
Math.max(BODY.offsetHeight, DOCEL.offsetHeight),
Math.max(BODY.clientHeight, DOCEL.clientHeight)
);
},
docWidth: function () {
return Math.max(BODY.clientWidth, DOCEL.clientWidth);
},
viewportHeight: function () {
if (!isUndefined(WIN.innerHeight)) {
return WIN.innerHeight;
} else if (!isUndefined(DOCEL)
&& !isUndefined(DOCEL.clientHeight)
&& DOCEL.clientHeight) { //ie6
return DOCEL.clientHeight;
}
return BODY.clientHeight;
},
viewportWidth: function () {
if (!isUndefined(WIN.innerWidth)) {
return WIN.innerWidth;
} else if (!isUndefined(DOCEL)
&& !isUndefined(DOCEL.clientWidth)
&& DOCEL.clientWidth) { //ie6
return DOCEL.clientWidth;
}
return BODY.clientWidth;
},
attr: function (ele, attr, newVal) {
newVal = newVal || null;
if (newVal) {
ele.setAttribute(attr, newVal);
} else {
var attrs = ele.attributes,
attrsLen = attrs.length,
result = ele.getAttribute(attr) || ele[attr] || null;
if (!result) {
while (attrsLen--) {
if (attr[attrsLen].nodeName === attr)
result = attr[i].nodeValue;
}
}
return result;
}
},
template: function(template, obj){
var cache = {};
var strCache = template;
var matches = 0;
template.replace(RXP.template, function(tmpl, val) { // #{oKey}
cache[tmpl] = val;
});
for (var key in cache) {
strCache = strCache.replace(new RegExp(key, 'g'), obj[cache[key]]);
}
return strCache;
},
html: function(obj, str, coerce, coercePar) {
coerse = coerce || false;
if (coerce) {
var temp = obj.ownerDocument.createElement('DIV');
temp.innerHTML = '<'+ coercePar +'>'+ str +'</'+ coercePar +'>';
this.swap(temp.firstChild.firstChild, obj);
} else {
obj.innerHTML = str;
}
},
encodeHTML: function (str) {
return str.replace(RXP.amp, '&')
.replace(RXP.lt, '<')
.replace(RXP.gt, '>')
.replace(RXP.quote, '"')
.replace(RXP.apos, ''');
},
stripHTML: function (str) {
return str.replace(/<.*?>/g,'');
},
text: function (obj, txt) {
if (isUndefined(obj)) return;
if (txt) {
if (!isUndefined(obj.innerText)) {
obj.innerText = txt;
}
obj.textContent = txt;
return;
}
return obj.innerText || obj.textContent || obj.text;
},
plural: function(count, singular, plural) {
return count === 1 ? singular : plural;
},
trim: function (str) {
return str.replace(/^\s+|\s+$/g);
},
prepend: function (newNode, node) {
node.insertBefore(this.toNode(newNode), node.childNodes[0]);
},
append: function (newNode, node) {
node.appendChild(this.toNode(newNode));
},
before: function (newNode, node) {
- //if (node.parentNode === BODY) {
- //this.prepend(this.toNode(newNode), BODY);
- //return;
- //}
node.parentNode.insertBefore(this.toNode(newNode), node);
},
after: function (newNode, node) {
node.parentNode.insertBefore(this.toNode(newNode), node.nextSibling);
},
swap: function (a, b) {
a.parentNode.replaceChild(b, a);
},
remove: function (ele, recursive) {
if (!ele) return false;
recursive = recursive || true;
ele = this.toArray(ele);
var i = ele.length;
while (i--) {
if (!isUndefined(ele[i].parentNode)) {
if (recursive) {
this.destroy(ele[i]);
continue;
}
ele[i].parentNode.removeChild(ele[i]);
}
}
},
destroy: function(el) {
if (isUndefined(el)) return;
var trash = this.create('DIV');
trash.appendChild(el);
trash.innerHTML = '';
},
toNode: function(text) {
if (!isString(text)) return text;
- return this.create(text);
+ return this.frag(text);
},
create: function (tag) {
+ if (tag.charAt(0) === '<') return this.frag(tag);
return DOC.createElement(tag.toUpperCase());
},
frag: function(str) {
var frag = DOC.createDocumentFragment();
var temp = this.create('DIV');
temp.innerHTML = str;
while (temp.firstChild) {
frag.appendChild(temp.firstChild);
}
return frag;
},
// TODO: Execution Queue
// Cookies
createCookie: function (name, value, days, domain) {
var expires = '';
var cookie;
domain = domain || WIN.location.host;
if (days) {
var date = new Date();
date.setTime(date.getTime() + (days * 24 * 60 * 60 * 1000));
expires = '; expires=' + date.toGMTString();
}
cookie = name + '=' + value + expires + ';';
if (domain) {
cookie += ' domain=.'+ domain +' ;';
}
if (path) {
cookie += 'path='+ path;
}
DOC.cookie = cookie;
},
eraseCookie: function (name) {
this.createCookie(name, '', -1);
},
readCookie: function (c_name) {
if (DOC.cookie.length) {
var c_start = DOC.cookie.indexOf(c_name + "=");
if (c_start !== -1) {
c_start = c_start + c_name.length + 1;
var c_end = DOC.cookie.indexOf(";", c_start);
if (c_end === -1) {
c_end = DOC.cookie.length;
}
return unescape(DOC.cookie.substring(c_start, c_end));
}
}
return null;
},
// Math
getMax: function (array) {
var m = Math;
return m.max.apply(m, array);
},
getMin: function (array) {
var m = Math;
return m.min.apply(m, array);
},
getRandom: function(min, max) {
var m = Math;
if (min) {
return m.floor(m.random() * (max - min + 1)) + min;
} else {
return m.round(m.random()); // 1 or 0
}
},
inArray: function(obj, arr) {
var i = arr.length;
while (i--) {
if (arr[i] === obj) {
return true;
}
}
return false;
},
isDescendant: function(p, c) {
var node = c.parentNode;
while (!isNull(node)) {
if (node === p) {
return true;
}
node = node.parentNode;
}
return false;
},
sort: function(options) {
options = this.setDefaults({
arr: [],
type: 'alphabetical',
order: 'desc',
property: null,
method: null
}, options);
var $this = this;
var method;
switch(options.type) {
case 'alphabetical':
method = function(a, b) {
var A = a.toLowerCase();
var B = b.toLowerCase();
if (options.order === 'asc') {
if (A < B) { return -1; }
else if (A > B) { return 1; }
else { return 0; }
} else {
if (A > B) { return -1; }
else if (A < B) { return 1; }
else { return 0; }
}
};
break;
case 'numerical':
if (options.order === 'asc') {
method = function(a, b) { return a - b; };
} else {
method = function(a, b) { return b - a; };
}
break;
case 'random':
method = function() {
return Math.round(Math.random()) - 0.5;
};
break;
}
return options.arr.sort(method);
},
animate: function (el, options) {
var $this = this;
options = this.setDefaults({
property: 'width',
from: $this.style(el, options.property),
to: '0px',
duration: 200,
easing: function(pos) {
return (-Math.cos(pos * Math.PI) / 2) + 0.5;
},
callback: noop
}, options);
var fromNum = parseFloat(options.from);
var fromUnit = getUnit(options.from);
var toNum = parseFloat(options.to);
var toUnit = getUnit(options.to) || fromUnit;
var interval;
var start = +new Date();
var finish = start + options.duration;
function interpolate(source, target, pos) {
return (source + (target - source) * pos).toFixed(3);
}
function getUnit(prop){
return prop.toString().replace(/^[\-\d\.]+/,'') || '';
}
interval = setInterval(function() {
var time = +new Date();
var pos = time > finish ? 1 : (time-start) / options.duration;
var interpolation = interpolate(fromNum, toNum, options.easing(pos));
$this.style(el, options.property, interpolation + toUnit);
if (time > finish) {
clearInterval(interval);
options.callback();
}
}, 10);
},
fadeIn: function(el, duration, callback) {
this.fade(el, duration, 1, callback);
},
fadeOut: function(el, duration, callback) {
this.fade(el, duration, 0, callback);
},
fade: function(el, duration, to, callback, from) {
callback = callback || noop;
this.animate(el, {
property: 'opacity',
to: to,
duration: duration,
callback: callback
});
},
// Ajax
getUrlVars: function () {
var vars = [];
var hash;
var hashes = WIN.location.href.slice(WIN.location.href.indexOf('?') + 1).split('&');
var hashlen = hashes.length;
for (var i = 0; i < hashlen; ++i) {
hash = hashes[i].split('=');
vars.push(hash[0]);
vars[hash[0]] = hash[1];
}
return vars;
},
serialize: function(obj) {
var viableNodes = ['input', 'select', 'textarea'];
var viableNodesLen = viableNodes.length;
var rawChildren = [];
var formChildren = [];
var returnObject = {};
var nodeList = [];
for (var i = 0; i < viableNodesLen; ++i) {
nodeList = obj.getElementsByTagName(viableNodes[i]);
var nodeListLen = nodeList.length;
for (var j = 0; j < nodeListLen; ++j) {
rawChildren.push(nodeList[j]);
}
}
// build list of viable form elements
var rawChildrenLen = rawChildren.length;
for (var k = 0; k < rawChildrenLen; ++k) {
var currentNode = rawChildren[k];
switch(rawChildren[k].nodeName.toLowerCase()) {
case 'input':
switch(currentNode.type) {
case 'text':
case 'hidden':
case 'password':
formChildren.push(currentNode);
break;
case 'radio':
case 'checkbox':
if (currentNode.checked === 'checked') {
formChildren.push(currentNode);
}
break;
}
break;
case 'select':
case 'textarea':
formChildren.push(currentNode);
break;
}
}
//build object of the name-value pairs
var formChildrenLen = formChildren.length;
for (var m = 0; m < formChildrenLen; ++m) {
var currentChild = formChildren[m];
if (!returnObject.hasOwnProperty(currentChild.name)) {
returnObject[currentChild.name] = currentChild.value;
} else {
if (typeof returnObject[currentChild.name] === 'string') {
returnObject[currentChild.name] = [returnObject[currentChild.name], currentChild.value.toString()];
} else {
returnObject[currentChild.name].push(currentChild.value.toString());
}
}
}
return returnObject;
},
formatParams: formatParams,
setDefaults: function(defaults, options) {
if (!options) {
options = defaults;
} else {
for (var index in defaults) {
if (isUndefined(options[index])) {
options[index] = defaults[index];
}
}
}
return options;
},
parse: parse,
addScript: function(url, id) {
var script = this.create('script');
script.type = 'text/javascript';
script.src = url || '#';
script.id = id || 'awesome-script'; // id to remove
this.append(script, this.getTag('head')[0]);
return true;
},
ajax: function(options) {
options = this.setDefaults({
url: null,
data: null, // key:val
dataType: null,
type: 'post',
disguise: false,
requestId: null,
beforeSend: noop,
sendPrepared: noop,
afterSend: noop,
complete: noop,
failure: noop
}, options);
- var MSxml = 'Msxml2.XMLHTTP';
// init
switch (options.type.toUpperCase()) {
case 'POST':
postRequest(options);
break;
case 'JSONP':
this.addScript(options.url, options.requestId || 'awesome-jsonp');
break;
default:
getRequest(options);
}
},
spinner: function(el, speed, slides) {
slides = slides || ['⊕', '⊗'];
var i = slides.length;
var cur = 0;
return setInterval(function() {
cur = cur + 1 === i ? 0 : cur + 1;
el.innerHTML = slides[cur];
}, speed || 100);
}
};
}(window, document));
diff --git a/test/awesome.test.js b/test/awesome.test.js
index 7554754..d80527d 100644
--- a/test/awesome.test.js
+++ b/test/awesome.test.js
@@ -1,234 +1,233 @@
(function($) {
$.log('ready', 'time');
unitTest = {
pass : true,
flag : []
};
$.ready(function() {
$.log('ready', 'timeEnd');
$.log('a', 'time');
var info = $.create('DIV');
info.id = 'info';
$.attr(info, 'rel', 'yee');
$.append(info, document.body);
var pass = function(method, test) {
test = $.isUndefined(test) ? true : test;
if (test) {
info.innerHTML += method +' works.<BR>';
} else {
info.innerHTML += '<b>'+ method +' FAILED</b>.<BR>';
}
};
pass('ready');
pass('create');
pass('attr', $.attr($.getId('info'), 'rel') === 'yee');
$.log('Safe Log Works.');
pass('log');
var a = $.create('DIV');
- var b = $.create('DIV');
var c = $.create('DIV');
- a.id = 'a';
- b.id = 'b';
- c.id = 'c';
+ $.attr(a,'id','a');
+ $.attr(c,'id','c');
$.before(a, info);
pass('before');
- $.prepend(b, info);
- pass('prepend');
pass('append'); // see beginning
$.after(c, info);
pass('after');
var bindTest = {
prop : false,
pass : false
};
function bindMethod() {
bindTest.pass = true;
};
$.bind(a, 'click', bindMethod);
$.fire(a, 'click');
if (bindTest.pass === true) {
pass('bind');
pass('fire');
pass('hover');// it's just using bind.. i'll pass it
bindTest.pass = false;// reset
$.unbind(a, 'click', bindMethod);
$.fire(a, 'click');
pass('unbind', (bindTest.pass === false));
}
var linkTest = $.create('A');
linkTest.id = 'link';
linkTest.href = 'http://www.google.com';
- $.append(linkTest, $.getId('b'));
+ $.append(linkTest, $.getId('test_form'));
var propCanceled = true;
var propCanceled = true;
var linkPropCancelTest = function(e) {
propCanceled = false;
};
var linkCancelTest = function(e) {
$.cancelEvent(e);
$.cancelPropagation(e);
};
$.bind(linkTest, 'click', function(e) {
linkCancelTest(e);
});
$.bind(document.body, 'click', function(e) {
linkPropCancelTest(e);
});
$.fire(linkTest, 'click');
setTimeout(function() {
pass('cancelEvent');
if (propCanceled === true) {
pass('cancelPropagation');
}
}, 500);
if (typeof $.getId('a') === 'object') {
pass('getId');
}
if (typeof $.getTag('div')[0] === 'object') {
pass('getTag');
}
$.addClass($.getId('a'), 'test');
if (typeof $.getClass('test', document.body, 'DIV')[0] === 'object') {
pass('getClass');
}
if ($.hasClass($.getId('a'), 'fuuuuu')) {
pass('hasClass', false);
}
if ($.hasClass($.getId('a'), 'test')) {
pass('hasClass');
$.removeClass($.getId('a'), 'test');
if (!$.hasClass($.getId('a'), 'test')) {
pass('removeClass');
$.addClass($.getId('a'), 'testing');
if ($.hasClass($.getId('a'), 'testing')) {
pass('addClass');
}
}
}
- $.remove($.getId('b'));
- if ($.getId('b') === null) {
- pass('remove');
- }
-
var text = info.innerHTML.split('<BR>');
if (text.length === 1) {
text = info.innerHTML.split('<br>');
}
text.pop(); // clear end empty node
info.innerHTML = '';
var arr = $.sort({
arr: text
});
var arrLen = arr.length;
while (arrLen--) {
info.innerHTML += arr[arrLen] +'<BR>';
}
$.style(info, 'display', 'block');
if ($.style(info, 'display') === 'block') {
pass('style');
pass('toCamelCase');
}
if ($.docHeight() > 0) {
pass('docHeight');
}
if ($.docWidth() > 0) {
pass('docWidth');
}
var htmlStr = '<div>"hi there\'</div>';
htmlStr = $.encodeHTML(htmlStr);
if (htmlStr === "<div>"hi there'</div>") {
pass('encodeHTML');
}
$.text(linkTest, 'test');
if ($.text(linkTest) === 'test') {
pass('text');
}
$.remove(linkTest);
$.ajax({
url: 'test.json',
type: 'get',
dataType: 'json',
complete: function(data) {
if (data.glossary.title === 'example glossary') {
pass('ajax');
pass('parse(json)');
}
}
});
$.ajax({
url: 'test.xml',
type: 'get',
dataType: 'xml',
complete: function(data) {
var output = $.getTag('to', data)[0];
if (typeof $.text(output) === 'string') {
pass('parse(xml)');
}
}
});
var formArray = $.serialize($.getId('test_form'));
- if (formArray.a === 'test' && formArray.d[0] === '1') {
+ if (formArray.a === 'test') {
pass('form serialize (to array)');
}
var params = $.formatParams(formArray);
if (params = 'a=test&c=3&d=1%2C3&b=1') {
pass('format params');
}
var template = "hey, #{name}. Your name is #{name} #{last}.";
var greeting = $.template(template, {name: 'dan', last: 'masq'});
if (greeting === 'hey, dan. Your name is dan masq.') {
pass('templating');
}
var IS_TEST = [$.create('P'), $.create('optgroup'), $.create('div'), $.create('link')];
var IS_TEST_LEN = IS_TEST.length;
while (IS_TEST_LEN--) {
if ($.is(IS_TEST[IS_TEST_LEN], 'p')) {
pass('is');
}
}
var inarraytest = ['1', 2, '34', 'dan'];
if ($.inArray(2, inarraytest) && $.inArray('34', inarraytest)) {
pass('inArray');
}
- var passcount = info.innerHTML.split('<BR>');
- if (passcount.length === 1) {
- passcount = info.innerHTML.split('<br>');
- }
+ setTimeout(function() {
+ var passcount = info.innerHTML.split('<BR>');
+ if (passcount.length === 1) {
+ passcount = info.innerHTML.split('<br>');
+ }
- var finalResults = $.create('b')
- finalResults.innerHTML = passcount.length +' passed.<br>';
- $.prepend(finalResults, info);
+ var finalResults = $.create('b')
+ finalResults.innerHTML = passcount.length +' passed.<br>';
+ $.prepend(finalResults, info);
+ pass('prepend');
+ }, 5000);
$.log('a', 'timeEnd');
var spinner = $.create('div');
$.append(spinner, document.body);
var timer = $.spinner(spinner, 65);
setTimeout(function() {
$.fadeOut(spinner, 500, function() {
clearInterval(timer);
$.remove(spinner);
+ pass('remove');
});
- }, 10000);
+ }, 4000);
+
+ var foo = $.create('<div class="cheese"><strong id="cheese">cool</strong></div>');
+ $.append(foo, document.body);
+ pass('frag/create', $.is($.getId('cheese'), 'strong'))
});
}(AWESOME));
diff --git a/test/index.html b/test/index.html
index 6f23c5d..332d7b3 100644
--- a/test/index.html
+++ b/test/index.html
@@ -1,25 +1,25 @@
<!DOCTYPE html>
<html>
<head>
<title>Awesome Test</title>
</head>
<body>
-<script type="text/javascript" src="../awesome.js"></script>
-<script type="text/javascript" src="awesome.test.js"></script>
<form id="test_form" action="faux" method="get" name="test_form" style="display:none;">
<input type="text" name="a" value="test">
<select name="b">
<option value="1"></option>
<option value="2"></option>
</select>
<input type="radio" name="c" value="1"/>
<input type="radio" name="c" value="2"/>
<input type="radio" name="c" value="3" checked/>
<input type="checkbox" name="d" value="1" checked/>
<input type="checkbox" name="d" value="2"/>
<input type="checkbox" name="d" value="3" checked/>
</form>
+<script type="text/javascript" src="../awesome.js"></script>
+<script type="text/javascript" src="awesome.test.js"></script>
</body>
</html>
|
dancrew32/AWESOME-JS | bb25ffc8ca31af49f7cc5322640348c761c73f66 | text based spinners. closes #33 | diff --git a/awesome.js b/awesome.js
index 7e140e8..06d80a8 100644
--- a/awesome.js
+++ b/awesome.js
@@ -1,985 +1,998 @@
// Awesome ensues
var AWESOME = (function (WIN, DOC) {
// PRIVATE
var BODY = DOC.body;
var DOCEL = DOC.documentElement;
var CANATTACH = isFunction(BODY.addEventListener) && isUndefined(BODY.attachEvent);
var CANCANVAS = null;
var RXP = {
ready: /loaded|complete/,
template: /#{([^}]*)}/g,
amp: /&/g,
lt: /</g,
gt: />/g,
quote: /"/g,
apos: /'/g,
number: '(?:-?\\b(?:0|[1-9][0-9]*)(?:\\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\\b)',
oneChar: '(?:[^\\0-\\x08\\x0a-\\x1f\"\\\\]|\\\\(?:[\"/\\\\bfnrt]|u[0-9A-Fa-f]{4}))',
jsonEscapeSeq: /\\\\(?:([^u])|u(.{4}))/g
};
if (!Array.indexOf) {
Array.prototype.indexOf = function(obj) {
for(var i = 0; i < this.length; i++) {
if (this[i] === obj){
return i;
}
}
return -1;
};
}
function noop() {}
// isTest's
function isObject(val) {
return typeof val === 'object';
}
function isArray(val) {
return isObject(val) && !isUndefined(val.length);
}
function isString(val) {
return typeof val === 'string';
}
function isFunction(val) {
return typeof val === 'function';
}
function isUndefined(val) {
return typeof val === 'undefined';
}
function isNull(val) {
return typeof val === 'null';
}
function isNullOrUndefined(val) {
return isNull(val) || isUndefined(val);
}
// ajax
function openRequest(options, method) {
var req = getHttpRequest();
if (isNull(req)) return;
var d = new Date();
var aborted = 'abort';
req.open(method, options.url, true);
if (method === 'POST') {
req.setRequestHeader('Content-Type', 'application/x-www-form-urlencoded');
}
if (!options.disguise) {
req.setRequestHeader('X-Requested-With', 'XMLHttpRequest');
}
req.setRequestHeader('X-Request-Id', d.getTime());
req.onreadystatechange = function(e) {
var data = '';
switch (req.readyState) {
case 0:
options.beforeSend();
break;
case 1:
options.sendPrepared();
break;
case 2:
options.afterSend();
break;
case 4:
if (!isNull(options.dataType)) {
try {
data = parse(req.responseText, options.dataType);
} catch (erD) { data = aborted; }
} else {
try {
data = req.responseText;
} catch (erT) { data = aborted; }
}
if (data !== aborted && req.status >= 200 && req.status < 300) {
options.complete(data);
} else if (data !== aborted && req.status === 0) { // file:/// ajax
options.complete(data);
} else {
options.failure(data);
}
break;
}
};
return req;
}
function getRequest(options) {
var req = openRequest(options, 'GET');
req.send('');
return req;
}
function postRequest(options) {
var req = openRequest(options, 'POST');
req.send(formatParams(options.data));
return req;
}
function getHttpRequest() {
if (typeof XMLHttpRequest !== 'undefined')
return new XMLHttpRequest();
try {
return new ActiveXObject(MSxml +'.6.0');
} catch(e1) {}
try {
return new ActiveXObject(MSxml +'.3.0');
} catch(e2) {}
try {
return new ActiveXObject(MSxml);
} catch(e3) {}
try {
return new ActiveXObject('Microsoft.XMLHTTP');
} catch(e4) {}
}
function formatParams(obj) {
if (isNull(obj)) {return '';}
var q = [];
var encode = encodeURIComponent;
for (var prop in obj) {
if (obj.hasOwnProperty(prop)) {
q.push( encode(prop) +'='+ encode(obj[prop]) );
}
}
return q.join('&');
}
// parse
function parse(str, type) {
if (str === '') return;
type = type || 'json';
var result;
switch (type.toLowerCase()) {
case 'xml':
if (WIN.DOMParser) {
var parser = new DOMParser();
return parser.parseFromString(str, 'text/xml');
} else { // ie
var xmlDoc = new ActiveXObject('Microsoft.XMLDOM');
xmlDoc.async = 'false';
xmlDoc.loadXML(str);
return xmlDoc;
}
break;
case 'json':
if (JSON.parse) {
return JSON.parse(str);
}
var string = '(?:\"' + RXP.oneChar + '*\")';
var jsonToken = new RegExp(
'(?:false|true|null|[\\{\\}\\[\\]]'
+ '|' + RXP.number
+ '|' + string
+ ')', 'g');
var escapes = {
'"': '"',
'/': '/',
'\\': '\\',
'b': '\b',
'f': '\f',
'n': '\n',
'r': '\r',
't': '\t'
};
function unescapeOne(_, ch, hex) {
return ch ? escapes[ch] : String.fromCharCode(parseInt(hex, 16));
}
var toks = str.match(jsonToken);
var tok = toks[0];
var topLevelPrimitive = false;
if ('{' === tok) {
result = {};
} else if ('[' === tok) {
result = [];
} else {
result = [];
topLevelPrimitive = true;
}
var key;
var stack = [result];
for (var i = 1 - topLevelPrimitive, n = toks.length; i < n; ++i) {
tok = toks[i];
var cont;
switch (tok.charCodeAt(0)) {
case 0x22: // '"'
tok = tok.substring(1, tok.length - 1);
if (tok.indexOf('\\') !== -1) {
tok = tok.replace(RXP.jsonEscapeSeq, unescapeOne);
}
cont = stack[0];
if (!key) {
if (cont instanceof Array) {
key = cont.length;
} else {
key = tok || ''; // Use as key for next value seen.
break;
}
}
cont[key] = tok;
key = void 0;
break;
case 0x5b: // '['
cont = stack[0];
stack.unshift(cont[key || cont.length] = []);
key = void 0;
break;
case 0x5d: // ']'
stack.shift();
break;
case 0x66: // 'f'
cont = stack[0];
cont[key || cont.length] = false;
key = void 0;
break;
case 0x6e: // 'n'
cont = stack[0];
cont[key || cont.length] = null;
key = void 0;
break;
case 0x74: // 't'
cont = stack[0];
cont[key || cont.length] = true;
key = void 0;
break;
case 0x7b: // '{'
cont = stack[0];
stack.unshift(cont[key || cont.length] = {});
key = void 0;
break;
case 0x7d: // '}'
stack.shift();
break;
default: // sign or digit
cont = stack[0];
cont[key || cont.length] = +(tok);
key = void 0;
break;
}
}
if (topLevelPrimitive) {
if (stack.length !== 1) { throw new Error(); }
result = result[0];
} else {
if (stack.length) { throw new Error(); }
}
break;
}
return result;
}
// PUBLIC
return {
ready: function (fn, ctx) {
var contentLoaded = 'DOMContentLoaded';
var ready;
var timer;
var onStateChange = function (e) {
// Mozilla & Opera
if (e && e.type === contentLoaded) {
fireDOMReady();
// Legacy
} else if (e && e.type === 'load') {
fireDOMReady();
// Safari & IE
} else if (DOC.readyState) {
if ((RXP.ready).test(DOC.readyState)) {
fireDOMReady();
// IE
} else if (!!DOCEL.doScroll) {
try {
ready || DOCEL.doScroll('left');
} catch (ex) {
return;
}
fireDOMReady();
}
}
};
var fireDOMReady = function () {
if (!ready) {
ready = true;
// onload function in given context or window object
fn.call(ctx || WIN);
// Clean up after the DOM is ready
if (CANATTACH)
DOC.removeEventListener(contentLoaded, onStateChange, false);
DOC.onreadystatechange = null;
WIN.onload = null;
clearInterval(timer);
timer = null;
}
};
// Mozilla & Opera
if (CANATTACH) DOC.addEventListener(contentLoaded, onStateChange, false);
// IE
DOC.onreadystatechange = onStateChange;
// Safari & IE
timer = setInterval(onStateChange, 5);
// Legacy
WIN.onload = onStateChange;
},
log: function (data, type) {
if (typeof console === 'undefined') return;
type = type || 'log'
if (isUndefined(console)) return;
console[type](data);
},
noop: noop,
cancelEvent: function (event) {
event = event || WIN.event;
if (event.preventDefault) {
event.preventDefault();
} else {
event.returnValue = false;
}
},
cancelPropagation: function (event) {
event = event || WIN.event;
if (event.stopPropagation) {
event.stopPropagation();
} else {
event.cancelBubble = true;
}
},
bind: function (obj, type, handler, capture) {
if (isNullOrUndefined(obj)) return;
capture = capture || false; // bubble
obj = this.toArray(obj);
var i = obj.length;
while (i--) {
if (CANATTACH) {
obj[i].addEventListener(type, handler, capture);
} else if (obj[i].attachEvent) {
obj[i].attachEvent('on'+ type, handler);
} else {
obj[i]['on'+ type] = handler;
}
}
},
unbind: function (obj, type, handler, capture) {
if (isNullOrUndefined(obj)) return;
capture = capture || false;
obj = this.toArray(obj);
var i = obj.length;
while (i--) {
if (CANATTACH) {
obj[i].removeEventListener(type, handler, capture);
} else if (obj[i].detachEvent) {
obj[i].detachEvent('on'+ type, handler);
} else {
obj[i]['on'+ type] = null;
}
}
},
fire: function(obj, ev, capture, cancelable) {
var evt;
if (DOC.createEventObject) { // ie
evt = DOC.createEventObject();
return obj.fireEvent('on'+ ev, evt);
}
capture = capture || false;
cancelable = cancelable || true;
evt = DOC.createEvent('HTMLEvents');
evt.initEvent(ev, capture, cancelable);
return !obj.dispatchEvent(evt);
},
hover: function (obj, over, out, capture) {
if (isUndefined(obj)) {return;}
var $this = this;
out = out || null;
$this.bind(obj, 'mouseover', over, capture);
if (out) $this.bind(obj, 'mouseout', out, capture);
},
toArray: function(obj) {
if (!isArray(obj)) obj = [obj];
return obj;
},
isObject: isObject,
isArray: isArray,
isString: isString,
isUndefined: isUndefined,
isNull: isNull,
isNullOrUndefined: isNullOrUndefined,
hasClass: function (el, cls) {
var re = el.className.split(' ');
if (isUndefined(re)) { return false; }
return -1 !== re.indexOf(cls);
},
addClass: function (el, cls) {
if (!this.hasClass(el, cls)) el.className += ' '+ cls;
},
removeClass: function (el, cls) {
if (!this.hasClass(el, cls)) return;
var re = el.className.split(' ');
if (isUndefined(re)) return;
re.splice(re.indexOf(cls), 1);
var i = re.length;
el.className = ''; // empty
while (i--) { // reload
el.className += re[i] +' ';
}
},
getId: function (id) {
return DOC.getElementById(id);
},
getTag: function (tag, context) {
context = context || DOC;
tag = tag || '*';
return context.getElementsByTagName(tag);
},
getClass: function (searchClass, context, tag) {
var classElements = [];
var els = this.getTag(tag, context);
var elsLen = els.length;
var pattern = new RegExp('^|\\s' + searchClass + '\\s|$');
for (var i = 0, j = 0; i < elsLen; ++i) {
if (pattern.test(els[i].className)) {
classElements[j] = els[i];
j++;
}
}
return classElements;
},
is: function(el, type) {
if (isUndefined(type)) return el.nodeName;
return el.nodeName === type.toUpperCase();
},
toCamelCase: function (string) {
var strs = string.split('-');
if (strs.length === 1) return strs[0];
var ccstr = string.indexOf('-') === 0
? strs[0].charAt(0).toUpperCase() + strs[0].substring(1)
: strs[0];
for (var i = 1, len = strs.length; i < len; ++i) {
var s = strs[i];
ccstr += s.charAt(0).toUpperCase() + s.substring(1);
}
return ccstr;
},
style: function (el, prop, newVal) {
if (!isUndefined(el))
if (isUndefined(prop)) {
return el.currentStyle || getComputedStyle(el, null);
} else {
prop = this.toCamelCase(prop);
newVal = newVal || null;
if (newVal) {
if (prop === 'opacity') {
el.style.filter = "alpha(opacity=" + newVal * 100 + ")";
el.style.opacity = newVal;
} else {
prop = this.toCamelCase(prop);
el.style[prop] = newVal;
}
} else {
var view = DOC.defaultView;
if (view && view.getComputedStyle) {
return view.getComputedStyle(el, '')[prop] || null;
} else {
if (prop === 'opacity') {
if (el['filters'].length <= 0) {
el.style.filter = 'alpha(opacity = 100)';
}
var opacity = el['filters']('alpha').opacity;
return isNaN(opacity) ? 1 : (opacity ? opacity / 100 : 0);
}
return el.currentStyle[prop] || null;
}
}
}
+ },
+ hide: function(el) {
+
},
getPosition: function(obj) {
if (!obj) return;
var curLeft = 0;
var curTop = 0;
do {
curLeft += obj.offsetLeft;
curTop += obj.offsetTop;
} while (obj = obj.offsetParent);
return {
top: curTop,
left: curLeft
};
},
getMousePosition: function(event, relativeTo) {
var x = event.pageX;
var y = event.pageY;
if (isNull(x) && !isNull(event.clientX)) {
var xScroll = (DOCEL && DOCEL.scrollLeft || BODY && BODY.scrollLeft || 0);
var xClient = (DOCEL && DOCEL.clientLeft || BODY && BODY.clientLeft || 0);
var yScroll = (DOCEL && DOCEL.scrollTop || BODY && BODY.scrollTop || 0);
var yClient = (DOCEL && DOCEL.clientTop || BODY && BODY.clientTop || 0);
x = event.clientX + xScroll - xClient;
y = event.clientY + yScroll - yClient;
}
if (!isNullOrUndefined(relativeTo)) {
var tar = (typeof relativeTo === 'object') ? relativeTo : event.target;
var tarPos = this.getPosition(tar);
x = x - tarPos.left;
y = y - tarPos.top;
}
return {
x: x,
y: y
};
},
getScrollPosition: function() {
if (!isUndefined(WIN.pageYOffset)) {
return WIN.pageYOffset;
}
return DOCEL.scrollTop;
},
docHeight: function () {
return Math.max(
Math.max(BODY.scrollHeight, DOCEL.scrollHeight),
Math.max(BODY.offsetHeight, DOCEL.offsetHeight),
Math.max(BODY.clientHeight, DOCEL.clientHeight)
);
},
docWidth: function () {
return Math.max(BODY.clientWidth, DOCEL.clientWidth);
},
viewportHeight: function () {
if (!isUndefined(WIN.innerHeight)) {
return WIN.innerHeight;
} else if (!isUndefined(DOCEL)
&& !isUndefined(DOCEL.clientHeight)
&& DOCEL.clientHeight) { //ie6
return DOCEL.clientHeight;
}
return BODY.clientHeight;
},
viewportWidth: function () {
if (!isUndefined(WIN.innerWidth)) {
return WIN.innerWidth;
} else if (!isUndefined(DOCEL)
&& !isUndefined(DOCEL.clientWidth)
&& DOCEL.clientWidth) { //ie6
return DOCEL.clientWidth;
}
return BODY.clientWidth;
},
attr: function (ele, attr, newVal) {
newVal = newVal || null;
if (newVal) {
ele.setAttribute(attr, newVal);
} else {
var attrs = ele.attributes,
attrsLen = attrs.length,
result = ele.getAttribute(attr) || ele[attr] || null;
if (!result) {
while (attrsLen--) {
if (attr[attrsLen].nodeName === attr)
result = attr[i].nodeValue;
}
}
return result;
}
},
template: function(template, obj){
var cache = {};
var strCache = template;
var matches = 0;
template.replace(RXP.template, function(tmpl, val) { // #{oKey}
cache[tmpl] = val;
});
for (var key in cache) {
strCache = strCache.replace(new RegExp(key, 'g'), obj[cache[key]]);
}
return strCache;
},
html: function(obj, str, coerce, coercePar) {
coerse = coerce || false;
if (coerce) {
var temp = obj.ownerDocument.createElement('DIV');
temp.innerHTML = '<'+ coercePar +'>'+ str +'</'+ coercePar +'>';
this.swap(temp.firstChild.firstChild, obj);
} else {
obj.innerHTML = str;
}
},
encodeHTML: function (str) {
return str.replace(RXP.amp, '&')
.replace(RXP.lt, '<')
.replace(RXP.gt, '>')
.replace(RXP.quote, '"')
.replace(RXP.apos, ''');
},
stripHTML: function (str) {
return str.replace(/<.*?>/g,'');
},
text: function (obj, txt) {
if (isUndefined(obj)) return;
if (txt) {
if (!isUndefined(obj.innerText)) {
obj.innerText = txt;
}
obj.textContent = txt;
return;
}
return obj.innerText || obj.textContent || obj.text;
},
plural: function(count, singular, plural) {
return count === 1 ? singular : plural;
},
trim: function (str) {
return str.replace(/^\s+|\s+$/g);
},
prepend: function (newNode, node) {
node.insertBefore(this.toNode(newNode), node.childNodes[0]);
},
append: function (newNode, node) {
node.appendChild(this.toNode(newNode));
},
before: function (newNode, node) {
//if (node.parentNode === BODY) {
//this.prepend(this.toNode(newNode), BODY);
//return;
//}
node.parentNode.insertBefore(this.toNode(newNode), node);
},
after: function (newNode, node) {
node.parentNode.insertBefore(this.toNode(newNode), node.nextSibling);
},
swap: function (a, b) {
a.parentNode.replaceChild(b, a);
},
remove: function (ele, recursive) {
if (!ele) return false;
recursive = recursive || true;
ele = this.toArray(ele);
var i = ele.length;
while (i--) {
if (!isUndefined(ele[i].parentNode)) {
if (recursive) {
this.destroy(ele[i]);
continue;
}
ele[i].parentNode.removeChild(ele[i]);
}
}
},
destroy: function(el) {
if (isUndefined(el)) return;
var trash = this.create('DIV');
trash.appendChild(el);
trash.innerHTML = '';
},
toNode: function(text) {
if (!isString(text)) return text;
return this.create(text);
},
create: function (tag) {
return DOC.createElement(tag.toUpperCase());
},
frag: function(str) {
var frag = DOC.createDocumentFragment();
var temp = this.create('DIV');
temp.innerHTML = str;
while (temp.firstChild) {
frag.appendChild(temp.firstChild);
}
return frag;
},
// TODO: Execution Queue
// Cookies
createCookie: function (name, value, days, domain) {
var expires = '';
var cookie;
domain = domain || WIN.location.host;
if (days) {
var date = new Date();
date.setTime(date.getTime() + (days * 24 * 60 * 60 * 1000));
expires = '; expires=' + date.toGMTString();
}
cookie = name + '=' + value + expires + ';';
if (domain) {
cookie += ' domain=.'+ domain +' ;';
}
if (path) {
cookie += 'path='+ path;
}
DOC.cookie = cookie;
},
eraseCookie: function (name) {
this.createCookie(name, '', -1);
},
readCookie: function (c_name) {
if (DOC.cookie.length) {
var c_start = DOC.cookie.indexOf(c_name + "=");
if (c_start !== -1) {
c_start = c_start + c_name.length + 1;
var c_end = DOC.cookie.indexOf(";", c_start);
if (c_end === -1) {
c_end = DOC.cookie.length;
}
return unescape(DOC.cookie.substring(c_start, c_end));
}
}
return null;
},
// Math
getMax: function (array) {
var m = Math;
return m.max.apply(m, array);
},
getMin: function (array) {
var m = Math;
return m.min.apply(m, array);
},
getRandom: function(min, max) {
var m = Math;
if (min) {
return m.floor(m.random() * (max - min + 1)) + min;
} else {
return m.round(m.random()); // 1 or 0
}
},
inArray: function(obj, arr) {
var i = arr.length;
while (i--) {
if (arr[i] === obj) {
return true;
}
}
return false;
},
isDescendant: function(p, c) {
var node = c.parentNode;
while (!isNull(node)) {
if (node === p) {
return true;
}
node = node.parentNode;
}
return false;
},
sort: function(options) {
options = this.setDefaults({
arr: [],
type: 'alphabetical',
order: 'desc',
property: null,
method: null
}, options);
var $this = this;
var method;
switch(options.type) {
case 'alphabetical':
method = function(a, b) {
var A = a.toLowerCase();
var B = b.toLowerCase();
if (options.order === 'asc') {
if (A < B) { return -1; }
else if (A > B) { return 1; }
else { return 0; }
} else {
if (A > B) { return -1; }
else if (A < B) { return 1; }
else { return 0; }
}
};
break;
case 'numerical':
if (options.order === 'asc') {
method = function(a, b) { return a - b; };
} else {
method = function(a, b) { return b - a; };
}
break;
case 'random':
method = function() {
return Math.round(Math.random()) - 0.5;
};
break;
}
return options.arr.sort(method);
},
animate: function (el, options) {
var $this = this;
options = this.setDefaults({
property: 'width',
from: $this.style(el, options.property),
to: '0px',
duration: 200,
easing: function(pos) {
return (-Math.cos(pos * Math.PI) / 2) + 0.5;
},
callback: noop
}, options);
var fromNum = parseFloat(options.from);
var fromUnit = getUnit(options.from);
var toNum = parseFloat(options.to);
var toUnit = getUnit(options.to) || fromUnit;
var interval;
var start = +new Date();
var finish = start + options.duration;
function interpolate(source, target, pos) {
return (source + (target - source) * pos).toFixed(3);
}
function getUnit(prop){
return prop.toString().replace(/^[\-\d\.]+/,'') || '';
}
interval = setInterval(function() {
var time = +new Date();
var pos = time > finish ? 1 : (time-start) / options.duration;
var interpolation = interpolate(fromNum, toNum, options.easing(pos));
$this.style(el, options.property, interpolation + toUnit);
if (time > finish) {
clearInterval(interval);
options.callback();
}
}, 10);
},
fadeIn: function(el, duration, callback) {
this.fade(el, duration, 1, callback);
},
fadeOut: function(el, duration, callback) {
this.fade(el, duration, 0, callback);
},
- fade: function(el, duration, to, callback) {
+ fade: function(el, duration, to, callback, from) {
callback = callback || noop;
this.animate(el, {
property: 'opacity',
to: to,
duration: duration,
callback: callback
});
},
// Ajax
getUrlVars: function () {
var vars = [];
var hash;
var hashes = WIN.location.href.slice(WIN.location.href.indexOf('?') + 1).split('&');
var hashlen = hashes.length;
for (var i = 0; i < hashlen; ++i) {
hash = hashes[i].split('=');
vars.push(hash[0]);
vars[hash[0]] = hash[1];
}
return vars;
},
serialize: function(obj) {
var viableNodes = ['input', 'select', 'textarea'];
var viableNodesLen = viableNodes.length;
var rawChildren = [];
var formChildren = [];
var returnObject = {};
var nodeList = [];
for (var i = 0; i < viableNodesLen; ++i) {
nodeList = obj.getElementsByTagName(viableNodes[i]);
var nodeListLen = nodeList.length;
for (var j = 0; j < nodeListLen; ++j) {
rawChildren.push(nodeList[j]);
}
}
// build list of viable form elements
var rawChildrenLen = rawChildren.length;
for (var k = 0; k < rawChildrenLen; ++k) {
var currentNode = rawChildren[k];
switch(rawChildren[k].nodeName.toLowerCase()) {
case 'input':
switch(currentNode.type) {
case 'text':
case 'hidden':
case 'password':
formChildren.push(currentNode);
break;
case 'radio':
case 'checkbox':
- if (currentNode.checked) {
+ if (currentNode.checked === 'checked') {
formChildren.push(currentNode);
}
break;
}
break;
case 'select':
case 'textarea':
formChildren.push(currentNode);
break;
}
}
//build object of the name-value pairs
var formChildrenLen = formChildren.length;
for (var m = 0; m < formChildrenLen; ++m) {
var currentChild = formChildren[m];
if (!returnObject.hasOwnProperty(currentChild.name)) {
returnObject[currentChild.name] = currentChild.value;
} else {
if (typeof returnObject[currentChild.name] === 'string') {
returnObject[currentChild.name] = [returnObject[currentChild.name], currentChild.value.toString()];
} else {
returnObject[currentChild.name].push(currentChild.value.toString());
}
}
}
return returnObject;
},
formatParams: formatParams,
setDefaults: function(defaults, options) {
if (!options) {
options = defaults;
} else {
for (var index in defaults) {
if (isUndefined(options[index])) {
options[index] = defaults[index];
}
}
}
return options;
},
parse: parse,
addScript: function(url, id) {
var script = this.create('script');
script.type = 'text/javascript';
script.src = url || '#';
script.id = id || 'awesome-script'; // id to remove
this.append(script, this.getTag('head')[0]);
return true;
},
ajax: function(options) {
options = this.setDefaults({
url: null,
data: null, // key:val
dataType: null,
type: 'post',
disguise: false,
requestId: null,
beforeSend: noop,
sendPrepared: noop,
afterSend: noop,
complete: noop,
failure: noop
}, options);
var MSxml = 'Msxml2.XMLHTTP';
// init
switch (options.type.toUpperCase()) {
case 'POST':
postRequest(options);
break;
case 'JSONP':
this.addScript(options.url, options.requestId || 'awesome-jsonp');
break;
default:
getRequest(options);
}
+ },
+ spinner: function(el, speed, slides) {
+ slides = slides || ['⊕', '⊗'];
+ var i = slides.length;
+ var cur = 0;
+
+ return setInterval(function() {
+ cur = cur + 1 === i ? 0 : cur + 1;
+ el.innerHTML = slides[cur];
+ }, speed || 100);
}
};
}(window, document));
diff --git a/test/awesome.test.js b/test/awesome.test.js
index caf3e03..7554754 100644
--- a/test/awesome.test.js
+++ b/test/awesome.test.js
@@ -1,224 +1,234 @@
(function($) {
$.log('ready', 'time');
unitTest = {
pass : true,
flag : []
};
$.ready(function() {
$.log('ready', 'timeEnd');
$.log('a', 'time');
var info = $.create('DIV');
info.id = 'info';
$.attr(info, 'rel', 'yee');
$.append(info, document.body);
var pass = function(method, test) {
test = $.isUndefined(test) ? true : test;
if (test) {
info.innerHTML += method +' works.<BR>';
} else {
info.innerHTML += '<b>'+ method +' FAILED</b>.<BR>';
}
};
pass('ready');
pass('create');
pass('attr', $.attr($.getId('info'), 'rel') === 'yee');
$.log('Safe Log Works.');
pass('log');
var a = $.create('DIV');
var b = $.create('DIV');
var c = $.create('DIV');
a.id = 'a';
b.id = 'b';
c.id = 'c';
$.before(a, info);
pass('before');
$.prepend(b, info);
pass('prepend');
pass('append'); // see beginning
$.after(c, info);
pass('after');
var bindTest = {
prop : false,
pass : false
};
function bindMethod() {
bindTest.pass = true;
};
$.bind(a, 'click', bindMethod);
$.fire(a, 'click');
if (bindTest.pass === true) {
pass('bind');
pass('fire');
pass('hover');// it's just using bind.. i'll pass it
bindTest.pass = false;// reset
$.unbind(a, 'click', bindMethod);
$.fire(a, 'click');
pass('unbind', (bindTest.pass === false));
}
var linkTest = $.create('A');
linkTest.id = 'link';
linkTest.href = 'http://www.google.com';
$.append(linkTest, $.getId('b'));
var propCanceled = true;
var propCanceled = true;
var linkPropCancelTest = function(e) {
propCanceled = false;
};
var linkCancelTest = function(e) {
$.cancelEvent(e);
$.cancelPropagation(e);
};
$.bind(linkTest, 'click', function(e) {
linkCancelTest(e);
});
$.bind(document.body, 'click', function(e) {
linkPropCancelTest(e);
});
$.fire(linkTest, 'click');
setTimeout(function() {
pass('cancelEvent');
if (propCanceled === true) {
pass('cancelPropagation');
}
}, 500);
if (typeof $.getId('a') === 'object') {
pass('getId');
}
if (typeof $.getTag('div')[0] === 'object') {
pass('getTag');
}
$.addClass($.getId('a'), 'test');
if (typeof $.getClass('test', document.body, 'DIV')[0] === 'object') {
pass('getClass');
}
if ($.hasClass($.getId('a'), 'fuuuuu')) {
pass('hasClass', false);
}
if ($.hasClass($.getId('a'), 'test')) {
pass('hasClass');
$.removeClass($.getId('a'), 'test');
if (!$.hasClass($.getId('a'), 'test')) {
pass('removeClass');
$.addClass($.getId('a'), 'testing');
if ($.hasClass($.getId('a'), 'testing')) {
pass('addClass');
}
}
}
$.remove($.getId('b'));
if ($.getId('b') === null) {
pass('remove');
}
var text = info.innerHTML.split('<BR>');
if (text.length === 1) {
text = info.innerHTML.split('<br>');
}
text.pop(); // clear end empty node
info.innerHTML = '';
var arr = $.sort({
arr: text
});
var arrLen = arr.length;
while (arrLen--) {
info.innerHTML += arr[arrLen] +'<BR>';
}
$.style(info, 'display', 'block');
if ($.style(info, 'display') === 'block') {
pass('style');
pass('toCamelCase');
}
if ($.docHeight() > 0) {
pass('docHeight');
}
if ($.docWidth() > 0) {
pass('docWidth');
}
var htmlStr = '<div>"hi there\'</div>';
htmlStr = $.encodeHTML(htmlStr);
if (htmlStr === "<div>"hi there'</div>") {
pass('encodeHTML');
}
$.text(linkTest, 'test');
if ($.text(linkTest) === 'test') {
pass('text');
}
$.remove(linkTest);
$.ajax({
url: 'test.json',
type: 'get',
dataType: 'json',
complete: function(data) {
if (data.glossary.title === 'example glossary') {
pass('ajax');
pass('parse(json)');
}
}
});
$.ajax({
url: 'test.xml',
type: 'get',
dataType: 'xml',
complete: function(data) {
var output = $.getTag('to', data)[0];
if (typeof $.text(output) === 'string') {
pass('parse(xml)');
}
}
});
var formArray = $.serialize($.getId('test_form'));
if (formArray.a === 'test' && formArray.d[0] === '1') {
pass('form serialize (to array)');
}
var params = $.formatParams(formArray);
if (params = 'a=test&c=3&d=1%2C3&b=1') {
pass('format params');
}
var template = "hey, #{name}. Your name is #{name} #{last}.";
var greeting = $.template(template, {name: 'dan', last: 'masq'});
if (greeting === 'hey, dan. Your name is dan masq.') {
pass('templating');
}
var IS_TEST = [$.create('P'), $.create('optgroup'), $.create('div'), $.create('link')];
var IS_TEST_LEN = IS_TEST.length;
while (IS_TEST_LEN--) {
if ($.is(IS_TEST[IS_TEST_LEN], 'p')) {
pass('is');
}
}
var inarraytest = ['1', 2, '34', 'dan'];
if ($.inArray(2, inarraytest) && $.inArray('34', inarraytest)) {
pass('inArray');
}
var passcount = info.innerHTML.split('<BR>');
if (passcount.length === 1) {
passcount = info.innerHTML.split('<br>');
}
var finalResults = $.create('b')
finalResults.innerHTML = passcount.length +' passed.<br>';
$.prepend(finalResults, info);
$.log('a', 'timeEnd');
+
+ var spinner = $.create('div');
+ $.append(spinner, document.body);
+ var timer = $.spinner(spinner, 65);
+ setTimeout(function() {
+ $.fadeOut(spinner, 500, function() {
+ clearInterval(timer);
+ $.remove(spinner);
+ });
+ }, 10000);
});
}(AWESOME));
|
dancrew32/AWESOME-JS | f869f91e1ecaf4f73707cd1490c75fd12ce236a7 | more optimizations (moving into private) | diff --git a/awesome.js b/awesome.js
index 7c174d3..7e140e8 100644
--- a/awesome.js
+++ b/awesome.js
@@ -1,977 +1,985 @@
// Awesome ensues
var AWESOME = (function (WIN, DOC) {
// PRIVATE
var BODY = DOC.body;
var DOCEL = DOC.documentElement;
var CANATTACH = isFunction(BODY.addEventListener) && isUndefined(BODY.attachEvent);
+ var CANCANVAS = null;
var RXP = {
ready: /loaded|complete/,
template: /#{([^}]*)}/g,
amp: /&/g,
lt: /</g,
gt: />/g,
quote: /"/g,
apos: /'/g,
number: '(?:-?\\b(?:0|[1-9][0-9]*)(?:\\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\\b)',
oneChar: '(?:[^\\0-\\x08\\x0a-\\x1f\"\\\\]|\\\\(?:[\"/\\\\bfnrt]|u[0-9A-Fa-f]{4}))',
jsonEscapeSeq: /\\\\(?:([^u])|u(.{4}))/g
};
if (!Array.indexOf) {
Array.prototype.indexOf = function(obj) {
for(var i = 0; i < this.length; i++) {
if (this[i] === obj){
return i;
}
}
return -1;
};
}
+ function noop() {}
// isTest's
function isObject(val) {
return typeof val === 'object';
}
function isArray(val) {
return isObject(val) && !isUndefined(val.length);
}
function isString(val) {
return typeof val === 'string';
}
function isFunction(val) {
return typeof val === 'function';
}
function isUndefined(val) {
return typeof val === 'undefined';
}
function isNull(val) {
return typeof val === 'null';
}
function isNullOrUndefined(val) {
return isNull(val) || isUndefined(val);
}
+ // ajax
+ function openRequest(options, method) {
+ var req = getHttpRequest();
+ if (isNull(req)) return;
+ var d = new Date();
+ var aborted = 'abort';
+
+ req.open(method, options.url, true);
+
+ if (method === 'POST') {
+ req.setRequestHeader('Content-Type', 'application/x-www-form-urlencoded');
+ }
+ if (!options.disguise) {
+ req.setRequestHeader('X-Requested-With', 'XMLHttpRequest');
+ }
+ req.setRequestHeader('X-Request-Id', d.getTime());
+
+ req.onreadystatechange = function(e) {
+ var data = '';
+
+ switch (req.readyState) {
+ case 0:
+ options.beforeSend();
+ break;
+ case 1:
+ options.sendPrepared();
+ break;
+ case 2:
+ options.afterSend();
+ break;
+ case 4:
+
+ if (!isNull(options.dataType)) {
+ try {
+ data = parse(req.responseText, options.dataType);
+ } catch (erD) { data = aborted; }
+ } else {
+ try {
+ data = req.responseText;
+ } catch (erT) { data = aborted; }
+ }
+
+ if (data !== aborted && req.status >= 200 && req.status < 300) {
+ options.complete(data);
+ } else if (data !== aborted && req.status === 0) { // file:/// ajax
+ options.complete(data);
+ } else {
+ options.failure(data);
+ }
+ break;
+ }
+ };
+ return req;
+ }
+ function getRequest(options) {
+ var req = openRequest(options, 'GET');
+ req.send('');
+ return req;
+ }
+ function postRequest(options) {
+ var req = openRequest(options, 'POST');
+ req.send(formatParams(options.data));
+ return req;
+ }
+ function getHttpRequest() {
+ if (typeof XMLHttpRequest !== 'undefined')
+ return new XMLHttpRequest();
+ try {
+ return new ActiveXObject(MSxml +'.6.0');
+ } catch(e1) {}
+ try {
+ return new ActiveXObject(MSxml +'.3.0');
+ } catch(e2) {}
+ try {
+ return new ActiveXObject(MSxml);
+ } catch(e3) {}
+ try {
+ return new ActiveXObject('Microsoft.XMLHTTP');
+ } catch(e4) {}
+ }
+
+ function formatParams(obj) {
+ if (isNull(obj)) {return '';}
+ var q = [];
+ var encode = encodeURIComponent;
+ for (var prop in obj) {
+ if (obj.hasOwnProperty(prop)) {
+ q.push( encode(prop) +'='+ encode(obj[prop]) );
+ }
+ }
+ return q.join('&');
+ }
+
+
+ // parse
+ function parse(str, type) {
+ if (str === '') return;
+ type = type || 'json';
+ var result;
+ switch (type.toLowerCase()) {
+ case 'xml':
+ if (WIN.DOMParser) {
+ var parser = new DOMParser();
+ return parser.parseFromString(str, 'text/xml');
+ } else { // ie
+ var xmlDoc = new ActiveXObject('Microsoft.XMLDOM');
+ xmlDoc.async = 'false';
+ xmlDoc.loadXML(str);
+ return xmlDoc;
+ }
+ break;
+ case 'json':
+ if (JSON.parse) {
+ return JSON.parse(str);
+ }
+ var string = '(?:\"' + RXP.oneChar + '*\")';
+ var jsonToken = new RegExp(
+ '(?:false|true|null|[\\{\\}\\[\\]]'
+ + '|' + RXP.number
+ + '|' + string
+ + ')', 'g');
+ var escapes = {
+ '"': '"',
+ '/': '/',
+ '\\': '\\',
+ 'b': '\b',
+ 'f': '\f',
+ 'n': '\n',
+ 'r': '\r',
+ 't': '\t'
+ };
+ function unescapeOne(_, ch, hex) {
+ return ch ? escapes[ch] : String.fromCharCode(parseInt(hex, 16));
+ }
+
+ var toks = str.match(jsonToken);
+ var tok = toks[0];
+ var topLevelPrimitive = false;
+ if ('{' === tok) {
+ result = {};
+ } else if ('[' === tok) {
+ result = [];
+ } else {
+ result = [];
+ topLevelPrimitive = true;
+ }
+ var key;
+ var stack = [result];
+ for (var i = 1 - topLevelPrimitive, n = toks.length; i < n; ++i) {
+ tok = toks[i];
+ var cont;
+ switch (tok.charCodeAt(0)) {
+ case 0x22: // '"'
+ tok = tok.substring(1, tok.length - 1);
+ if (tok.indexOf('\\') !== -1) {
+ tok = tok.replace(RXP.jsonEscapeSeq, unescapeOne);
+ }
+ cont = stack[0];
+ if (!key) {
+ if (cont instanceof Array) {
+ key = cont.length;
+ } else {
+ key = tok || ''; // Use as key for next value seen.
+ break;
+ }
+ }
+ cont[key] = tok;
+ key = void 0;
+ break;
+ case 0x5b: // '['
+ cont = stack[0];
+ stack.unshift(cont[key || cont.length] = []);
+ key = void 0;
+ break;
+ case 0x5d: // ']'
+ stack.shift();
+ break;
+ case 0x66: // 'f'
+ cont = stack[0];
+ cont[key || cont.length] = false;
+ key = void 0;
+ break;
+ case 0x6e: // 'n'
+ cont = stack[0];
+ cont[key || cont.length] = null;
+ key = void 0;
+ break;
+ case 0x74: // 't'
+ cont = stack[0];
+ cont[key || cont.length] = true;
+ key = void 0;
+ break;
+ case 0x7b: // '{'
+ cont = stack[0];
+ stack.unshift(cont[key || cont.length] = {});
+ key = void 0;
+ break;
+ case 0x7d: // '}'
+ stack.shift();
+ break;
+ default: // sign or digit
+ cont = stack[0];
+ cont[key || cont.length] = +(tok);
+ key = void 0;
+ break;
+ }
+ }
+ if (topLevelPrimitive) {
+ if (stack.length !== 1) { throw new Error(); }
+ result = result[0];
+ } else {
+ if (stack.length) { throw new Error(); }
+ }
+ break;
+ }
+ return result;
+ }
+
+
// PUBLIC
return {
ready: function (fn, ctx) {
var contentLoaded = 'DOMContentLoaded';
var ready;
var timer;
var onStateChange = function (e) {
// Mozilla & Opera
if (e && e.type === contentLoaded) {
fireDOMReady();
// Legacy
} else if (e && e.type === 'load') {
fireDOMReady();
// Safari & IE
} else if (DOC.readyState) {
if ((RXP.ready).test(DOC.readyState)) {
fireDOMReady();
// IE
} else if (!!DOCEL.doScroll) {
try {
ready || DOCEL.doScroll('left');
} catch (ex) {
return;
}
fireDOMReady();
}
}
};
var fireDOMReady = function () {
if (!ready) {
ready = true;
// onload function in given context or window object
fn.call(ctx || WIN);
// Clean up after the DOM is ready
if (CANATTACH)
DOC.removeEventListener(contentLoaded, onStateChange, false);
DOC.onreadystatechange = null;
WIN.onload = null;
clearInterval(timer);
timer = null;
}
};
// Mozilla & Opera
if (CANATTACH) DOC.addEventListener(contentLoaded, onStateChange, false);
// IE
DOC.onreadystatechange = onStateChange;
// Safari & IE
timer = setInterval(onStateChange, 5);
// Legacy
WIN.onload = onStateChange;
},
log: function (data, type) {
if (typeof console === 'undefined') return;
type = type || 'log'
if (isUndefined(console)) return;
console[type](data);
},
- noop: function() {},
+ noop: noop,
cancelEvent: function (event) {
event = event || WIN.event;
if (event.preventDefault) {
event.preventDefault();
} else {
event.returnValue = false;
}
},
cancelPropagation: function (event) {
event = event || WIN.event;
if (event.stopPropagation) {
event.stopPropagation();
} else {
event.cancelBubble = true;
}
},
bind: function (obj, type, handler, capture) {
if (isNullOrUndefined(obj)) return;
capture = capture || false; // bubble
obj = this.toArray(obj);
var i = obj.length;
while (i--) {
if (CANATTACH) {
obj[i].addEventListener(type, handler, capture);
} else if (obj[i].attachEvent) {
obj[i].attachEvent('on'+ type, handler);
} else {
obj[i]['on'+ type] = handler;
}
}
},
unbind: function (obj, type, handler, capture) {
if (isNullOrUndefined(obj)) return;
capture = capture || false;
obj = this.toArray(obj);
var i = obj.length;
while (i--) {
if (CANATTACH) {
obj[i].removeEventListener(type, handler, capture);
} else if (obj[i].detachEvent) {
obj[i].detachEvent('on'+ type, handler);
} else {
obj[i]['on'+ type] = null;
}
}
},
fire: function(obj, ev, capture, cancelable) {
var evt;
if (DOC.createEventObject) { // ie
evt = DOC.createEventObject();
return obj.fireEvent('on'+ ev, evt);
}
capture = capture || false;
cancelable = cancelable || true;
evt = DOC.createEvent('HTMLEvents');
evt.initEvent(ev, capture, cancelable);
return !obj.dispatchEvent(evt);
},
hover: function (obj, over, out, capture) {
if (isUndefined(obj)) {return;}
var $this = this;
out = out || null;
$this.bind(obj, 'mouseover', over, capture);
if (out) $this.bind(obj, 'mouseout', out, capture);
},
toArray: function(obj) {
if (!isArray(obj)) obj = [obj];
return obj;
},
isObject: isObject,
isArray: isArray,
isString: isString,
isUndefined: isUndefined,
isNull: isNull,
isNullOrUndefined: isNullOrUndefined,
hasClass: function (el, cls) {
var re = el.className.split(' ');
if (isUndefined(re)) { return false; }
return -1 !== re.indexOf(cls);
},
addClass: function (el, cls) {
if (!this.hasClass(el, cls)) el.className += ' '+ cls;
},
removeClass: function (el, cls) {
if (!this.hasClass(el, cls)) return;
var re = el.className.split(' ');
if (isUndefined(re)) return;
re.splice(re.indexOf(cls), 1);
var i = re.length;
el.className = ''; // empty
while (i--) { // reload
el.className += re[i] +' ';
}
},
getId: function (id) {
return DOC.getElementById(id);
},
getTag: function (tag, context) {
context = context || DOC;
tag = tag || '*';
return context.getElementsByTagName(tag);
},
getClass: function (searchClass, context, tag) {
var classElements = [];
var els = this.getTag(tag, context);
var elsLen = els.length;
var pattern = new RegExp('^|\\s' + searchClass + '\\s|$');
for (var i = 0, j = 0; i < elsLen; ++i) {
if (pattern.test(els[i].className)) {
classElements[j] = els[i];
j++;
}
}
return classElements;
},
is: function(el, type) {
if (isUndefined(type)) return el.nodeName;
return el.nodeName === type.toUpperCase();
},
toCamelCase: function (string) {
var strs = string.split('-');
if (strs.length === 1) return strs[0];
var ccstr = string.indexOf('-') === 0
? strs[0].charAt(0).toUpperCase() + strs[0].substring(1)
: strs[0];
for (var i = 1, len = strs.length; i < len; ++i) {
var s = strs[i];
ccstr += s.charAt(0).toUpperCase() + s.substring(1);
}
return ccstr;
},
style: function (el, prop, newVal) {
if (!isUndefined(el))
if (isUndefined(prop)) {
return el.currentStyle || getComputedStyle(el, null);
} else {
prop = this.toCamelCase(prop);
newVal = newVal || null;
if (newVal) {
if (prop === 'opacity') {
el.style.filter = "alpha(opacity=" + newVal * 100 + ")";
el.style.opacity = newVal;
} else {
prop = this.toCamelCase(prop);
el.style[prop] = newVal;
}
} else {
var view = DOC.defaultView;
if (view && view.getComputedStyle) {
return view.getComputedStyle(el, '')[prop] || null;
} else {
if (prop === 'opacity') {
if (el['filters'].length <= 0) {
el.style.filter = 'alpha(opacity = 100)';
}
var opacity = el['filters']('alpha').opacity;
return isNaN(opacity) ? 1 : (opacity ? opacity / 100 : 0);
}
return el.currentStyle[prop] || null;
}
}
}
},
getPosition: function(obj) {
if (!obj) return;
var curLeft = 0;
var curTop = 0;
do {
curLeft += obj.offsetLeft;
curTop += obj.offsetTop;
} while (obj = obj.offsetParent);
return {
top: curTop,
left: curLeft
};
},
getMousePosition: function(event, relativeTo) {
var x = event.pageX;
var y = event.pageY;
if (isNull(x) && !isNull(event.clientX)) {
var xScroll = (DOCEL && DOCEL.scrollLeft || BODY && BODY.scrollLeft || 0);
var xClient = (DOCEL && DOCEL.clientLeft || BODY && BODY.clientLeft || 0);
var yScroll = (DOCEL && DOCEL.scrollTop || BODY && BODY.scrollTop || 0);
var yClient = (DOCEL && DOCEL.clientTop || BODY && BODY.clientTop || 0);
x = event.clientX + xScroll - xClient;
y = event.clientY + yScroll - yClient;
}
if (!isNullOrUndefined(relativeTo)) {
var tar = (typeof relativeTo === 'object') ? relativeTo : event.target;
var tarPos = this.getPosition(tar);
x = x - tarPos.left;
y = y - tarPos.top;
}
return {
x: x,
y: y
};
},
getScrollPosition: function() {
if (!isUndefined(WIN.pageYOffset)) {
return WIN.pageYOffset;
}
return DOCEL.scrollTop;
},
docHeight: function () {
return Math.max(
Math.max(BODY.scrollHeight, DOCEL.scrollHeight),
Math.max(BODY.offsetHeight, DOCEL.offsetHeight),
Math.max(BODY.clientHeight, DOCEL.clientHeight)
);
},
docWidth: function () {
return Math.max(BODY.clientWidth, DOCEL.clientWidth);
},
viewportHeight: function () {
if (!isUndefined(WIN.innerHeight)) {
return WIN.innerHeight;
} else if (!isUndefined(DOCEL)
&& !isUndefined(DOCEL.clientHeight)
&& DOCEL.clientHeight) { //ie6
return DOCEL.clientHeight;
}
return BODY.clientHeight;
},
viewportWidth: function () {
if (!isUndefined(WIN.innerWidth)) {
return WIN.innerWidth;
} else if (!isUndefined(DOCEL)
&& !isUndefined(DOCEL.clientWidth)
&& DOCEL.clientWidth) { //ie6
return DOCEL.clientWidth;
}
return BODY.clientWidth;
},
attr: function (ele, attr, newVal) {
newVal = newVal || null;
if (newVal) {
ele.setAttribute(attr, newVal);
} else {
var attrs = ele.attributes,
attrsLen = attrs.length,
result = ele.getAttribute(attr) || ele[attr] || null;
if (!result) {
while (attrsLen--) {
if (attr[attrsLen].nodeName === attr)
result = attr[i].nodeValue;
}
}
return result;
}
},
template: function(template, obj){
var cache = {};
var strCache = template;
var matches = 0;
template.replace(RXP.template, function(tmpl, val) { // #{oKey}
cache[tmpl] = val;
});
for (var key in cache) {
strCache = strCache.replace(new RegExp(key, 'g'), obj[cache[key]]);
}
return strCache;
},
html: function(obj, str, coerce, coercePar) {
coerse = coerce || false;
if (coerce) {
var temp = obj.ownerDocument.createElement('DIV');
temp.innerHTML = '<'+ coercePar +'>'+ str +'</'+ coercePar +'>';
this.swap(temp.firstChild.firstChild, obj);
} else {
obj.innerHTML = str;
}
},
encodeHTML: function (str) {
return str.replace(RXP.amp, '&')
.replace(RXP.lt, '<')
.replace(RXP.gt, '>')
.replace(RXP.quote, '"')
.replace(RXP.apos, ''');
},
stripHTML: function (str) {
return str.replace(/<.*?>/g,'');
},
text: function (obj, txt) {
if (isUndefined(obj)) return;
if (txt) {
if (!isUndefined(obj.innerText)) {
obj.innerText = txt;
}
obj.textContent = txt;
return;
}
return obj.innerText || obj.textContent || obj.text;
},
plural: function(count, singular, plural) {
return count === 1 ? singular : plural;
},
trim: function (str) {
return str.replace(/^\s+|\s+$/g);
},
prepend: function (newNode, node) {
node.insertBefore(this.toNode(newNode), node.childNodes[0]);
},
append: function (newNode, node) {
node.appendChild(this.toNode(newNode));
},
before: function (newNode, node) {
//if (node.parentNode === BODY) {
//this.prepend(this.toNode(newNode), BODY);
//return;
//}
node.parentNode.insertBefore(this.toNode(newNode), node);
},
after: function (newNode, node) {
node.parentNode.insertBefore(this.toNode(newNode), node.nextSibling);
},
swap: function (a, b) {
a.parentNode.replaceChild(b, a);
},
remove: function (ele, recursive) {
if (!ele) return false;
recursive = recursive || true;
ele = this.toArray(ele);
var i = ele.length;
while (i--) {
if (!isUndefined(ele[i].parentNode)) {
if (recursive) {
this.destroy(ele[i]);
continue;
}
ele[i].parentNode.removeChild(ele[i]);
}
}
},
destroy: function(el) {
if (isUndefined(el)) return;
var trash = this.create('DIV');
trash.appendChild(el);
trash.innerHTML = '';
},
toNode: function(text) {
if (!isString(text)) return text;
return this.create(text);
},
create: function (tag) {
return DOC.createElement(tag.toUpperCase());
},
frag: function(str) {
var frag = DOC.createDocumentFragment();
var temp = this.create('DIV');
temp.innerHTML = str;
while (temp.firstChild) {
frag.appendChild(temp.firstChild);
}
return frag;
},
// TODO: Execution Queue
// Cookies
createCookie: function (name, value, days, domain) {
var expires = '';
var cookie;
domain = domain || WIN.location.host;
if (days) {
var date = new Date();
date.setTime(date.getTime() + (days * 24 * 60 * 60 * 1000));
expires = '; expires=' + date.toGMTString();
}
cookie = name + '=' + value + expires + ';';
if (domain) {
cookie += ' domain=.'+ domain +' ;';
}
if (path) {
cookie += 'path='+ path;
}
DOC.cookie = cookie;
},
eraseCookie: function (name) {
this.createCookie(name, '', -1);
},
readCookie: function (c_name) {
if (DOC.cookie.length) {
var c_start = DOC.cookie.indexOf(c_name + "=");
if (c_start !== -1) {
c_start = c_start + c_name.length + 1;
var c_end = DOC.cookie.indexOf(";", c_start);
if (c_end === -1) {
c_end = DOC.cookie.length;
}
return unescape(DOC.cookie.substring(c_start, c_end));
}
}
return null;
},
// Math
getMax: function (array) {
var m = Math;
return m.max.apply(m, array);
},
getMin: function (array) {
var m = Math;
return m.min.apply(m, array);
},
getRandom: function(min, max) {
var m = Math;
if (min) {
return m.floor(m.random() * (max - min + 1)) + min;
} else {
return m.round(m.random()); // 1 or 0
}
},
inArray: function(obj, arr) {
var i = arr.length;
while (i--) {
if (arr[i] === obj) {
return true;
}
}
return false;
},
isDescendant: function(p, c) {
var node = c.parentNode;
while (!isNull(node)) {
if (node === p) {
return true;
}
node = node.parentNode;
}
return false;
},
sort: function(options) {
options = this.setDefaults({
arr: [],
type: 'alphabetical',
order: 'desc',
property: null,
method: null
}, options);
var $this = this;
var method;
switch(options.type) {
case 'alphabetical':
method = function(a, b) {
var A = a.toLowerCase();
var B = b.toLowerCase();
if (options.order === 'asc') {
if (A < B) { return -1; }
else if (A > B) { return 1; }
else { return 0; }
} else {
if (A > B) { return -1; }
else if (A < B) { return 1; }
else { return 0; }
}
};
break;
case 'numerical':
if (options.order === 'asc') {
method = function(a, b) { return a - b; };
} else {
method = function(a, b) { return b - a; };
}
break;
case 'random':
method = function() {
return Math.round(Math.random()) - 0.5;
};
break;
}
return options.arr.sort(method);
},
animate: function (el, options) {
var $this = this;
options = this.setDefaults({
property: 'width',
from: $this.style(el, options.property),
to: '0px',
duration: 200,
easing: function(pos) {
return (-Math.cos(pos * Math.PI) / 2) + 0.5;
},
- callback: $this.noop
+ callback: noop
}, options);
var fromNum = parseFloat(options.from);
var fromUnit = getUnit(options.from);
var toNum = parseFloat(options.to);
var toUnit = getUnit(options.to) || fromUnit;
var interval;
var start = +new Date();
var finish = start + options.duration;
function interpolate(source, target, pos) {
return (source + (target - source) * pos).toFixed(3);
}
function getUnit(prop){
return prop.toString().replace(/^[\-\d\.]+/,'') || '';
}
interval = setInterval(function() {
var time = +new Date();
var pos = time > finish ? 1 : (time-start) / options.duration;
var interpolation = interpolate(fromNum, toNum, options.easing(pos));
$this.style(el, options.property, interpolation + toUnit);
if (time > finish) {
clearInterval(interval);
options.callback();
}
}, 10);
},
fadeIn: function(el, duration, callback) {
this.fade(el, duration, 1, callback);
},
fadeOut: function(el, duration, callback) {
this.fade(el, duration, 0, callback);
},
fade: function(el, duration, to, callback) {
- callback = callback || this.noop;
+ callback = callback || noop;
this.animate(el, {
property: 'opacity',
to: to,
duration: duration,
callback: callback
});
},
// Ajax
getUrlVars: function () {
var vars = [];
var hash;
var hashes = WIN.location.href.slice(WIN.location.href.indexOf('?') + 1).split('&');
var hashlen = hashes.length;
for (var i = 0; i < hashlen; ++i) {
hash = hashes[i].split('=');
vars.push(hash[0]);
vars[hash[0]] = hash[1];
}
return vars;
},
serialize: function(obj) {
var viableNodes = ['input', 'select', 'textarea'];
var viableNodesLen = viableNodes.length;
var rawChildren = [];
var formChildren = [];
var returnObject = {};
var nodeList = [];
for (var i = 0; i < viableNodesLen; ++i) {
nodeList = obj.getElementsByTagName(viableNodes[i]);
var nodeListLen = nodeList.length;
for (var j = 0; j < nodeListLen; ++j) {
rawChildren.push(nodeList[j]);
}
}
// build list of viable form elements
var rawChildrenLen = rawChildren.length;
for (var k = 0; k < rawChildrenLen; ++k) {
var currentNode = rawChildren[k];
switch(rawChildren[k].nodeName.toLowerCase()) {
case 'input':
switch(currentNode.type) {
case 'text':
case 'hidden':
case 'password':
formChildren.push(currentNode);
break;
case 'radio':
case 'checkbox':
if (currentNode.checked) {
formChildren.push(currentNode);
}
break;
}
break;
case 'select':
case 'textarea':
formChildren.push(currentNode);
break;
}
}
//build object of the name-value pairs
var formChildrenLen = formChildren.length;
for (var m = 0; m < formChildrenLen; ++m) {
var currentChild = formChildren[m];
if (!returnObject.hasOwnProperty(currentChild.name)) {
returnObject[currentChild.name] = currentChild.value;
} else {
if (typeof returnObject[currentChild.name] === 'string') {
returnObject[currentChild.name] = [returnObject[currentChild.name], currentChild.value.toString()];
} else {
returnObject[currentChild.name].push(currentChild.value.toString());
}
}
}
return returnObject;
},
- formatParams: function (obj) {
- if (isNull(obj)) {return '';}
- var q = [];
- var encode = encodeURIComponent;
- for (var prop in obj) {
- if (obj.hasOwnProperty(prop)) {
- q.push( encode(prop) +'='+ encode(obj[prop]) );
- }
- }
- return q.join('&');
- },
+ formatParams: formatParams,
setDefaults: function(defaults, options) {
if (!options) {
options = defaults;
} else {
for (var index in defaults) {
if (isUndefined(options[index])) {
options[index] = defaults[index];
}
}
}
return options;
},
- parse: function(str, type) {
- if (str === '') return;
- type = type || 'json';
- var result;
- switch (type.toLowerCase()) {
- case 'xml':
- if (WIN.DOMParser) {
- var parser = new DOMParser();
- return parser.parseFromString(str, 'text/xml');
- } else { // ie
- var xmlDoc = new ActiveXObject('Microsoft.XMLDOM');
- xmlDoc.async = 'false';
- xmlDoc.loadXML(str);
- return xmlDoc;
- }
- break;
- case 'json':
- if (JSON.parse) {
- return JSON.parse(str);
- }
- var string = '(?:\"' + RXP.oneChar + '*\")';
- var jsonToken = new RegExp(
- '(?:false|true|null|[\\{\\}\\[\\]]'
- + '|' + RXP.number
- + '|' + string
- + ')', 'g');
- var escapes = {
- '"': '"',
- '/': '/',
- '\\': '\\',
- 'b': '\b',
- 'f': '\f',
- 'n': '\n',
- 'r': '\r',
- 't': '\t'
- };
- function unescapeOne(_, ch, hex) {
- return ch ? escapes[ch] : String.fromCharCode(parseInt(hex, 16));
- }
-
- var toks = str.match(jsonToken);
- var tok = toks[0];
- var topLevelPrimitive = false;
- if ('{' === tok) {
- result = {};
- } else if ('[' === tok) {
- result = [];
- } else {
- result = [];
- topLevelPrimitive = true;
- }
- var key;
- var stack = [result];
- for (var i = 1 - topLevelPrimitive, n = toks.length; i < n; ++i) {
- tok = toks[i];
- var cont;
- switch (tok.charCodeAt(0)) {
- case 0x22: // '"'
- tok = tok.substring(1, tok.length - 1);
- if (tok.indexOf('\\') !== -1) {
- tok = tok.replace(RXP.jsonEscapeSeq, unescapeOne);
- }
- cont = stack[0];
- if (!key) {
- if (cont instanceof Array) {
- key = cont.length;
- } else {
- key = tok || ''; // Use as key for next value seen.
- break;
- }
- }
- cont[key] = tok;
- key = void 0;
- break;
- case 0x5b: // '['
- cont = stack[0];
- stack.unshift(cont[key || cont.length] = []);
- key = void 0;
- break;
- case 0x5d: // ']'
- stack.shift();
- break;
- case 0x66: // 'f'
- cont = stack[0];
- cont[key || cont.length] = false;
- key = void 0;
- break;
- case 0x6e: // 'n'
- cont = stack[0];
- cont[key || cont.length] = null;
- key = void 0;
- break;
- case 0x74: // 't'
- cont = stack[0];
- cont[key || cont.length] = true;
- key = void 0;
- break;
- case 0x7b: // '{'
- cont = stack[0];
- stack.unshift(cont[key || cont.length] = {});
- key = void 0;
- break;
- case 0x7d: // '}'
- stack.shift();
- break;
- default: // sign or digit
- cont = stack[0];
- cont[key || cont.length] = +(tok);
- key = void 0;
- break;
- }
- }
- if (topLevelPrimitive) {
- if (stack.length !== 1) { throw new Error(); }
- result = result[0];
- } else {
- if (stack.length) { throw new Error(); }
- }
- break;
- }
- return result;
- },
+ parse: parse,
addScript: function(url, id) {
- var $this = this;
var script = this.create('script');
script.type = 'text/javascript';
script.src = url || '#';
script.id = id || 'awesome-script'; // id to remove
- this.append(script, $this.getTag('head')[0]);
+ this.append(script, this.getTag('head')[0]);
return true;
},
ajax: function(options) {
- var $this = this;
options = this.setDefaults({
url: null,
data: null, // key:val
dataType: null,
type: 'post',
disguise: false,
requestId: null,
- beforeSend: $this.noop,
- sendPrepared: $this.noop,
- afterSend: $this.noop,
- complete: $this.noop,
- failure: $this.noop
+ beforeSend: noop,
+ sendPrepared: noop,
+ afterSend: noop,
+ complete: noop,
+ failure: noop
}, options);
var MSxml = 'Msxml2.XMLHTTP';
// init
switch (options.type.toUpperCase()) {
case 'POST':
- this.postRequest(options);
+ postRequest(options);
break;
case 'JSONP':
this.addScript(options.url, options.requestId || 'awesome-jsonp');
break;
default:
- this.getRequest(options);
- }
- },
- openRequest: function(options, method) {
- var req = this.getHttpRequest();
- if (isNull(req)) return;
- var $this = this;
- var d = new Date();
- var aborted = 'abort';
-
- req.open(method, options.url, true);
-
- if (method === 'POST') {
- req.setRequestHeader('Content-Type', 'application/x-www-form-urlencoded');
- }
- if (!options.disguise) {
- req.setRequestHeader('X-Requested-With', 'XMLHttpRequest');
+ getRequest(options);
}
- req.setRequestHeader('X-Request-Id', d.getTime());
-
- req.onreadystatechange = function(e) {
- var data = '';
-
- switch (req.readyState) {
- case 0:
- options.beforeSend();
- break;
- case 1:
- options.sendPrepared();
- break;
- case 2:
- options.afterSend();
- break;
- case 4:
-
- if (!isNull(options.dataType)) {
- try {
- data = $this.parse(req.responseText, options.dataType);
- } catch (erD) { data = aborted; }
- } else {
- try {
- data = req.responseText;
- } catch (erT) { data = aborted; }
- }
-
- if (data !== aborted && req.status >= 200 && req.status < 300) {
- options.complete(data);
- } else if (data !== aborted && req.status === 0) { // file:/// ajax
- options.complete(data);
- } else {
- options.failure(data);
- }
- break;
- }
- };
- return req;
- },
- postRequest: function(options) {
- var req = this.openRequest(options, 'POST');
- req.send(this.formatParams(options.data));
- return req;
- },
- getRequest: function(options) {
- var req = this.openRequest(options, 'GET');
- req.send('');
- return req;
- },
- getHttpRequest: function() {
- if (typeof XMLHttpRequest !== 'undefined')
- return new XMLHttpRequest();
- try {
- return new ActiveXObject(MSxml +'.6.0');
- } catch(e1) {}
- try {
- return new ActiveXObject(MSxml +'.3.0');
- } catch(e2) {}
- try {
- return new ActiveXObject(MSxml);
- } catch(e3) {}
- try {
- return new ActiveXObject('Microsoft.XMLHTTP');
- } catch(e4) {}
}
};
}(window, document));
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.