Added first version of IR importer/exporter
authorMoritz Kroll <Moritz.Kroll@gmx.de>
Wed, 4 Feb 2009 09:42:57 +0000 (09:42 +0000)
committerMoritz Kroll <Moritz.Kroll@gmx.de>
Wed, 4 Feb 2009 09:42:57 +0000 (09:42 +0000)
[r25422]

28 files changed:
include/libfirm/firm.h
include/libfirm/irio.h [new file with mode: 0644]
ir/ir/irio.c [new file with mode: 0644]
scripts/gen_ir_io.py [new file with mode: 0755]
scripts/ir_spec.py [new file with mode: 0755]
scripts/jinja2/__init__.py [new file with mode: 0644]
scripts/jinja2/_ipysupport.py [new file with mode: 0644]
scripts/jinja2/_speedups.c [new file with mode: 0644]
scripts/jinja2/bccache.py [new file with mode: 0644]
scripts/jinja2/compiler.py [new file with mode: 0644]
scripts/jinja2/constants.py [new file with mode: 0644]
scripts/jinja2/debug.py [new file with mode: 0644]
scripts/jinja2/defaults.py [new file with mode: 0644]
scripts/jinja2/environment.py [new file with mode: 0644]
scripts/jinja2/exceptions.py [new file with mode: 0644]
scripts/jinja2/ext.py [new file with mode: 0644]
scripts/jinja2/filters.py [new file with mode: 0644]
scripts/jinja2/lexer.py [new file with mode: 0644]
scripts/jinja2/loaders.py [new file with mode: 0644]
scripts/jinja2/nodes.py [new file with mode: 0644]
scripts/jinja2/optimizer.py [new file with mode: 0644]
scripts/jinja2/parser.py [new file with mode: 0644]
scripts/jinja2/runtime.py [new file with mode: 0644]
scripts/jinja2/sandbox.py [new file with mode: 0644]
scripts/jinja2/tests.py [new file with mode: 0644]
scripts/jinja2/utils.py [new file with mode: 0644]
scripts/jinja2/visitor.py [new file with mode: 0644]
vc2005/libfirm.vcproj

index e7c78da..823e2cf 100644 (file)
@@ -116,6 +116,7 @@ extern "C" {
 #include "firm_ycomp.h"    /* ycomp debugging support */
 
 #include "irdump.h"
+#include "irio.h"
 #include "irprintf.h"
 #include "irvrfy.h"
 
diff --git a/include/libfirm/irio.h b/include/libfirm/irio.h
new file mode 100644 (file)
index 0000000..89ed8b8
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
+ *
+ * This file is part of libFirm.
+ *
+ * This file may be distributed and/or modified under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation and appearing in the file LICENSE.GPL included in the
+ * packaging of this file.
+ *
+ * Licensees holding valid libFirm Professional Edition licenses may use
+ * this file in accordance with the libFirm Commercial License Agreement
+ * provided with the Software.
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/**
+ * @file
+ * @brief   Import/export textual representation of firm.
+ * @author  Moritz Kroll
+ * @version $Id$
+ */
+#ifndef FIRM_IR_IRIO_H
+#define FIRM_IR_IRIO_H
+
+#include <stdio.h>
+
+#include "firm_types.h"
+
+/**
+ * Exports the given ir graph to the given file in a textual form.
+ *
+ * @param irg       the ir graph
+ * @param filename  the name of the resulting file
+ *
+ * Exports the type graph used by the given graph and the graph itself.
+ */
+void ir_export_irg(ir_graph *irg, const char *filename);
+
+/**
+ * Imports the data stored in the given file.
+ *
+ * @param filename  the name of the file
+ *
+ * Imports any type graphs and ir graphs contained in the file.
+ */
+void ir_import(const char *filename);
+
+#endif
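
A minimal usage sketch of the new API (not part of this patch; the file name is hypothetical): ir_export_irg() writes a graph together with the types it uses, and ir_import() reads such a file back into the current irp.

	#include <libfirm/firm.h>   /* includes irio.h as of this commit */

	static void roundtrip(ir_graph *irg)
	{
		/* export the type graph used by irg and the graph itself */
		ir_export_irg(irg, "main.ir");

		/* later: re-create all type graphs and irgs found in the file */
		ir_import("main.ir");
	}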
diff --git a/ir/ir/irio.c b/ir/ir/irio.c
new file mode 100644 (file)
index 0000000..99ff27f
--- /dev/null
@@ -0,0 +1,1025 @@
+/*
+ * Copyright (C) 1995-2009 University of Karlsruhe.  All rights reserved.
+ *
+ * This file is part of libFirm.
+ *
+ * This file may be distributed and/or modified under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation and appearing in the file LICENSE.GPL included in the
+ * packaging of this file.
+ *
+ * Licensees holding valid libFirm Professional Edition licenses may use
+ * this file in accordance with the libFirm Commercial License Agreement
+ * provided with the Software.
+ *
+ * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
+ * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE.
+ */
+
+/**
+ * @file
+ * @brief   Import/export textual representation of firm.
+ * @author  Moritz Kroll
+ * @version $Id$
+ */
+#include "config.h"
+
+#include <string.h>
+
+#include "irio.h"
+
+#include "irprog.h"
+#include "irgraph_t.h"
+#include "ircons.h"
+#include "irgmod.h"
+#include "irflag_t.h"
+#include "irgwalk.h"
+#include "tv.h"
+#include "array.h"
+#include "error.h"
+#include "adt/set.h"
+
+#define LEXERROR ((unsigned) ~0)
+
+typedef struct io_env
+{
+       FILE *file;
+       set *idset;               /**< id_entry set, which maps from file ids to new Firm elements */
+       int ignoreblocks;
+       int line, col;
+       ir_type **fixedtypes;
+} io_env_t;
+
+typedef enum typetag_t
+{
+       tt_iro,
+       tt_tpo,
+       tt_align,
+       tt_allocation,
+       tt_peculiarity,
+       tt_pin_state,
+       tt_type_state,
+       tt_variability,
+       tt_visibility,
+       tt_volatility
+} typetag_t;
+
+typedef struct lex_entry
+{
+       const char *str;
+       typetag_t   typetag;
+       unsigned    code;
+} lex_entry;
+
+typedef struct id_entry
+{
+       long id;
+       void *elem;
+} id_entry;
+
+/** A set of lex_entry elements. */
+static set *lexset;
+
+
+static unsigned hash(const char *str, int len)
+{
+       return str[0] * 27893 ^ str[len-1] * 81 ^ str[len >> 1];
+}
+
+static int lex_cmp(const void *elt, const void *key, size_t size)
+{
+       const lex_entry *entry = (const lex_entry *) elt;
+       const lex_entry *keyentry = (const lex_entry *) key;
+       (void) size;
+       return strcmp(entry->str, keyentry->str);
+}
+
+static int id_cmp(const void *elt, const void *key, size_t size)
+{
+       const id_entry *entry = (const id_entry *) elt;
+       const id_entry *keyentry = (const id_entry *) key;
+       (void) size;
+       return entry->id - keyentry->id;
+}
+
+/** Initializes the lexer. May be called more than once without problems. */
+static void init_lexer(void)
+{
+       lex_entry key;
+
+       /* Only initialize once */
+       if(lexset != NULL) return;
+
+       lexset = new_set(lex_cmp, 32);
+
+#define INSERT(s, tt, cod)                                       \
+       key.str = (s);                                               \
+       key.typetag = (tt);                                          \
+       key.code = (cod);                                            \
+       set_insert(lexset, &key, sizeof(key), hash(s, sizeof(s)-1))
+
+#define INSERTENUM(tt, e) INSERT(#e, tt, e)
+
+       INSERT("primitive", tt_tpo, tpo_primitive);
+       INSERT("method", tt_tpo, tpo_method);
+       INSERT("array", tt_tpo, tpo_array);
+       INSERT("struct", tt_tpo, tpo_struct);
+       INSERT("Unknown", tt_tpo, tpo_unknown);
+
+#include "gen_irio_lex.inl"
+
+       INSERTENUM(tt_align, align_non_aligned);
+       INSERTENUM(tt_align, align_is_aligned);
+
+       INSERTENUM(tt_allocation, allocation_automatic);
+       INSERTENUM(tt_allocation, allocation_parameter);
+       INSERTENUM(tt_allocation, allocation_dynamic);
+       INSERTENUM(tt_allocation, allocation_static);
+
+       INSERTENUM(tt_pin_state, op_pin_state_floats);
+       INSERTENUM(tt_pin_state, op_pin_state_pinned);
+       INSERTENUM(tt_pin_state, op_pin_state_exc_pinned);
+       INSERTENUM(tt_pin_state, op_pin_state_mem_pinned);
+
+       INSERTENUM(tt_type_state, layout_undefined);
+       INSERTENUM(tt_type_state, layout_fixed);
+
+       INSERTENUM(tt_variability, variability_uninitialized);
+       INSERTENUM(tt_variability, variability_initialized);
+       INSERTENUM(tt_variability, variability_part_constant);
+       INSERTENUM(tt_variability, variability_constant);
+
+       INSERTENUM(tt_visibility, visibility_local);
+       INSERTENUM(tt_visibility, visibility_external_visible);
+       INSERTENUM(tt_visibility, visibility_external_allocated);
+
+       INSERTENUM(tt_volatility, volatility_non_volatile);
+       INSERTENUM(tt_volatility, volatility_is_volatile);
+
+       INSERTENUM(tt_peculiarity, peculiarity_description);
+       INSERTENUM(tt_peculiarity, peculiarity_inherited);
+       INSERTENUM(tt_peculiarity, peculiarity_existent);
+
+#undef INSERTENUM
+#undef INSERT
+}
+
+/** Returns the enum value matching the given string and tag, or LEXERROR if none was found. */
+static unsigned lex(const char *str, typetag_t typetag)
+{
+       lex_entry key, *entry;
+
+       key.str = str;
+
+       entry = set_find(lexset, &key, sizeof(key), hash(str, strlen(str)));
+       if (entry && entry->typetag == typetag) {
+               return entry->code;
+       }
+       return LEXERROR;
+}
+
+static void *get_id(io_env_t *env, long id)
+{
+       id_entry key, *entry;
+       key.id = id;
+
+       entry = set_find(env->idset, &key, sizeof(key), (unsigned) id);
+       return entry ? entry->elem : NULL;
+}
+
+static void set_id(io_env_t *env, long id, void *elem)
+{
+       id_entry key;
+       key.id = id;
+       key.elem = elem;
+       set_insert(env->idset, &key, sizeof(key), (unsigned) id);
+}
+
+static void write_mode(io_env_t *env, ir_mode *mode)
+{
+       fputs(get_mode_name(mode), env->file);
+       fputc(' ', env->file);
+}
+
+static void write_pinned(io_env_t *env, ir_node *irn)
+{
+       fputs(get_op_pin_state_name(get_irn_pinned(irn)), env->file);
+       fputc(' ', env->file);
+}
+
+static void write_volatility(io_env_t *env, ir_node *irn)
+{
+       ir_volatility vol;
+
+       if(is_Load(irn)) vol = get_Load_volatility(irn);
+       else if(is_Store(irn)) vol = get_Store_volatility(irn);
+       else assert(0 && "Invalid optype for write_volatility");
+
+       fputs(get_volatility_name(vol), env->file);
+       fputc(' ', env->file);
+}
+
+static void write_align(io_env_t *env, ir_node *irn)
+{
+       ir_align align;
+
+       if(is_Load(irn)) align = get_Load_align(irn);
+       else if(is_Store(irn)) align = get_Store_align(irn);
+       else assert(0 && "Invalid optype for write_align");
+
+       fputs(get_align_name(align), env->file);
+       fputc(' ', env->file);
+}
+
+static void export_type(io_env_t *env, ir_type *tp)
+{
+       FILE *f = env->file;
+       int i;
+       fprintf(f, "\ttype %ld %s \"%s\" %u %u %s %s ",
+                       get_type_nr(tp),
+                       get_type_tpop_name(tp),
+                       get_type_name(tp),
+                       get_type_size_bytes(tp),
+                       get_type_alignment_bytes(tp),
+                       get_type_state_name(get_type_state(tp)),
+                       get_visibility_name(get_type_visibility(tp)));
+
+       switch(get_type_tpop_code(tp))
+       {
+               case tpo_array:
+               {
+                       int n = get_array_n_dimensions(tp);
+                       fprintf(f, "%i %ld ", n, get_type_nr(get_array_element_type(tp)));
+                       for(i = 0; i < n; i++)
+                       {
+                               ir_node *lower = get_array_lower_bound(tp, i);
+                               ir_node *upper = get_array_upper_bound(tp, i);
+
+                               if(is_Const(lower)) fprintf(f, "%ld ", get_tarval_long(get_Const_tarval(lower)));
+                               else panic("Lower array bound is not constant");
+
+                               if(is_Const(upper)) fprintf(f, "%ld ", get_tarval_long(get_Const_tarval(upper)));
+                               else panic("Upper array bound is not constant");
+                       }
+                       break;
+               }
+
+               case tpo_method:
+               {
+                       int nparams = get_method_n_params(tp);
+                       int nresults = get_method_n_ress(tp);
+                       fprintf(f, "%i %i ", nparams, nresults);
+                       for(i = 0; i < nparams; i++)
+                               fprintf(f, "%ld ", get_type_nr(get_method_param_type(tp, i)));
+                       for(i = 0; i < nresults; i++)
+                               fprintf(f, "%ld ", get_type_nr(get_method_res_type(tp, i)));
+                       break;
+               }
+
+               case tpo_primitive:
+               {
+                       write_mode(env, get_type_mode(tp));
+                       break;
+               }
+
+               case tpo_struct:
+                       break;
+
+               case tpo_class:
+                       // TODO: inheritance stuff not supported yet
+                       printf("Inheritance of classes not supported yet!\n");
+                       break;
+
+               case tpo_unknown:
+                       break;
+
+               default:
+                       printf("export_type: Unknown type code \"%s\".\n", get_type_tpop_name(tp));
+                       break;
+       }
+       fputc('\n', f);
+}
+
+static void export_entity(io_env_t *env, ir_entity *ent)
+{
+       ir_type *owner = get_entity_owner(ent);
+       fprintf(env->file, "\tentity %ld \"%s\" %ld %ld %d %d %s %s %s %s %s\n",
+                       get_entity_nr(ent),
+                       get_entity_name(ent),
+                       get_type_nr(get_entity_type(ent)),
+                       get_type_nr(owner),
+                       get_entity_offset(ent),
+                       (int) get_entity_offset_bits_remainder(ent),
+                       get_allocation_name(get_entity_allocation(ent)),
+                       get_visibility_name(get_entity_visibility(ent)),
+                       get_variability_name(get_entity_variability(ent)),
+                       get_peculiarity_name(get_entity_peculiarity(ent)),
+                       get_volatility_name(get_entity_volatility(ent)));
+
+       // TODO: inheritance stuff for class entities not supported yet
+       if(is_Class_type(owner) && owner != get_glob_type())
+               printf("Inheritance of class entities not supported yet!\n");
+}
+
+static void export_type_or_ent(type_or_ent tore, void *ctx)
+{
+       io_env_t *env = (io_env_t *) ctx;
+
+       switch(get_kind(tore.ent))
+       {
+               case k_entity:
+                       export_entity(env, tore.ent);
+                       break;
+
+               case k_type:
+                       export_type(env, tore.typ);
+                       break;
+
+               default:
+                       printf("export_type_or_ent: Unknown type or entity.\n");
+                       break;
+       }
+}
+
+static void export_node(ir_node *irn, void *ctx)
+{
+       io_env_t *env = (io_env_t *) ctx;
+       int i, n;
+       unsigned opcode = get_irn_opcode(irn);
+       char buf[1024];
+
+       if(env->ignoreblocks && opcode == iro_Block) return;
+
+       n = get_irn_arity(irn);
+
+       fprintf(env->file, "\n\t%s %ld [ ", get_irn_opname(irn), get_irn_node_nr(irn));
+
+       for(i = -1; i < n; i++)
+       {
+               ir_node *pred = get_irn_n(irn, i);
+               if(!pred)
+                       fputs("-1 ", env->file);
+               else
+                       fprintf(env->file, "%ld ", get_irn_node_nr(pred));
+       }
+
+       fprintf(env->file, "] { ");
+
+       switch(opcode)
+       {
+               #include "gen_irio_export.inl"
+       }
+       fputc('}', env->file);
+}
+
+/** Exports the given irg to the given file. */
+void ir_export_irg(ir_graph *irg, const char *filename)
+{
+       io_env_t env;
+
+       env.file = fopen(filename, "wt");
+       if(!env.file)
+       {
+               perror(filename);
+               return;
+       }
+
+       fputs("typegraph {\n", env.file);
+
+       type_walk_irg(irg, NULL, export_type_or_ent, &env);
+
+       fprintf(env.file, "}\n\nirg %ld {", get_entity_nr(get_irg_entity(irg)));
+
+       env.ignoreblocks = 0;
+       irg_block_walk_graph(irg, NULL, export_node, &env);
+
+       env.ignoreblocks = 1;
+       irg_walk_anchors(irg, NULL, export_node, &env);
+
+       fputs("\n}\n", env.file);
+
+       fclose(env.file);
+}
+
+static int read_c(io_env_t *env)
+{
+       int ch = fgetc(env->file);
+       switch(ch)
+       {
+               case '\t':
+                       env->col += 4;
+                       break;
+
+               case '\n':
+                       env->col = 0;
+                       env->line++;
+                       break;
+
+               default:
+                       env->col++;
+                       break;
+       }
+       return ch;
+}
+
+/** Returns the first non-whitespace character or EOF. */
+static int skip_ws(io_env_t *env)
+{
+       while(1)
+       {
+               int ch = read_c(env);
+               switch(ch)
+               {
+                       case ' ':
+                       case '\t':
+                       case '\n':
+                       case '\r':
+                               break;
+
+                       default:
+                               return ch;
+               }
+       }
+}
+
+static void skip_to(io_env_t *env, char to_ch)
+{
+       int ch;
+       do
+       {
+               ch = read_c(env);
+       }
+       while(ch != to_ch && ch != EOF);
+}
+
+static int expect_char(io_env_t *env, char ch)
+{
+       int curch = skip_ws(env);
+       if(curch != ch)
+       {
+               printf("Unexpected char '%c', expected '%c' in line %i:%i\n", curch, ch, env->line, env->col);
+               return 0;
+       }
+       return 1;
+}
+
+#define EXPECT(c) if(expect_char(env, (c))) {} else return 0
+#define EXPECT_OR_EXIT(c) if(expect_char(env, (c))) {} else exit(1)
+
+inline static const char *read_str_to(io_env_t *env, char *buf, size_t bufsize)
+{
+       size_t i;
+       for(i = 0; i < bufsize - 1; i++)
+       {
+               int ch = read_c(env);
+               if(ch == EOF) break;
+               switch(ch)
+               {
+                       case ' ':
+                       case '\t':
+                       case '\n':
+                       case '\r':
+                               if(i != 0)
+                                       goto endofword;
+                               i--;    // skip whitespace
+                               break;
+
+                       default:
+                               buf[i] = ch;
+                               break;
+               }
+       }
+endofword:
+       buf[i] = 0;
+       return buf;
+}
+
+static const char *read_str(io_env_t *env)
+{
+       static char buf[1024];
+       return read_str_to(env, buf, sizeof(buf));
+}
+
+static const char *read_qstr_to(io_env_t *env, char *buf, size_t bufsize)
+{
+       size_t i;
+       EXPECT_OR_EXIT('\"');
+       for(i = 0; i < bufsize - 1; i++)
+       {
+               int ch = read_c(env);
+               if(ch == EOF)
+               {
+                       printf("Unexpected end of quoted string!\n");
+                       exit(1);
+               }
+               if(ch == '\"') break;
+
+               buf[i] = ch;
+       }
+       if(i == bufsize - 1)
+       {
+               printf("Quoted string too long!\n");
+               exit(1);
+       }
+       buf[i] = 0;
+       return buf;
+}
+
+static long read_long2(io_env_t *env, char **endptr)
+{
+       static char buf[1024];
+       return strtol(read_str_to(env, buf, sizeof(buf)), endptr, 0);
+}
+
+static long read_long(io_env_t *env)
+{
+       return read_long2(env, NULL);
+}
+
+static ir_node *get_node_or_null(io_env_t *env, long nodenr)
+{
+       ir_node *node = (ir_node *) get_id(env, nodenr);
+       if(node && node->kind != k_ir_node)
+       {
+               panic("Irn ID %ld collides with something else in line %i:%i\n", nodenr, env->line, env->col);
+       }
+       return node;
+}
+
+static ir_node *get_node(io_env_t *env, long nodenr)
+{
+       ir_node *node = get_node_or_null(env, nodenr);
+       if(!node)
+               panic("Unknown node: %ld in line %i:%i\n", nodenr, env->line, env->col);
+
+       return node;
+}
+
+static ir_node *get_node_or_dummy(io_env_t *env, long nodenr)
+{
+       ir_node *node = get_node_or_null(env, nodenr);
+       if(!node)
+       {
+               node = new_Dummy(mode_X);
+               set_id(env, nodenr, node);
+       }
+       return node;
+}
+
+static ir_type *get_type(io_env_t *env, long typenr)
+{
+       ir_type *type = (ir_type *) get_id(env, typenr);
+       if(!type)
+       {
+               panic("Unknown type: %ld in line %i:%i\n", typenr, env->line, env->col);
+       }
+       else if(type->kind != k_type)
+       {
+               panic("Type ID %ld collides with something else in line %i:%i\n", typenr, env->line, env->col);
+       }
+       return type;
+}
+
+static ir_type *read_type(io_env_t *env)
+{
+       return get_type(env, read_long(env));
+}
+
+static ir_entity *get_entity(io_env_t *env, long entnr)
+{
+       ir_entity *entity = (ir_entity *) get_id(env, entnr);
+       if(!entity)
+       {
+               printf("Unknown entity: %ld in line %i:%i\n", entnr, env->line, env->col);
+               exit(1);
+       }
+       else if(entity->kind != k_entity)
+       {
+               panic("Entity ID %ld collides with something else in line %i:%i\n", entnr, env->line, env->col);
+       }
+       return entity;
+}
+
+static ir_entity *read_entity(io_env_t *env)
+{
+       return get_entity(env, read_long(env));
+}
+
+static ir_mode *read_mode(io_env_t *env)
+{
+       static char buf[128];
+       int i, n;
+
+       read_str_to(env, buf, sizeof(buf));
+
+       n = get_irp_n_modes();
+       for(i = 0; i < n; i++)
+       {
+               ir_mode *mode = get_irp_mode(i);
+               if(!strcmp(buf, get_mode_name(mode)))
+                       return mode;
+       }
+
+       printf("Unknown mode \"%s\" in line %i:%i\n", buf, env->line, env->col);
+       return mode_ANY;
+}
+
+static const char *get_typetag_name(typetag_t typetag)
+{
+       switch(typetag)
+       {
+               case tt_iro:         return "opcode";
+               case tt_tpo:         return "type";
+               case tt_align:       return "align";
+               case tt_allocation:  return "allocation";
+               case tt_peculiarity: return "peculiarity";
+               case tt_pin_state:   return "pin state";
+               case tt_type_state:  return "type state";
+               case tt_variability: return "variability";
+               case tt_visibility:  return "visibility";
+               case tt_volatility:  return "volatility";
+               default: return "<UNKNOWN>";
+       }
+}
+
+static unsigned read_enum(io_env_t *env, typetag_t typetag)
+{
+       static char buf[128];
+       unsigned code = lex(read_str_to(env, buf, sizeof(buf)), typetag);
+       if(code != LEXERROR) return code;
+
+       printf("Invalid %s: \"%s\" in %i:%i\n", get_typetag_name(typetag), buf, env->line, env->col);
+       return 0;
+}
+
+#define read_align(env)       ((ir_align)       read_enum(env, tt_align))
+#define read_allocation(env)  ((ir_allocation)  read_enum(env, tt_allocation))
+#define read_peculiarity(env) ((ir_peculiarity) read_enum(env, tt_peculiarity))
+#define read_pinned(env)      ((op_pin_state)   read_enum(env, tt_pin_state))
+#define read_type_state(env)  ((ir_type_state)  read_enum(env, tt_type_state))
+#define read_variability(env) ((ir_variability) read_enum(env, tt_variability))
+#define read_visibility(env)  ((ir_visibility)  read_enum(env, tt_visibility))
+#define read_volatility(env)  ((ir_volatility)  read_enum(env, tt_volatility))
+
+static tarval *read_tv(io_env_t *env)
+{
+       static char buf[128];
+       ir_mode *tvmode = read_mode(env);
+       read_str_to(env, buf, sizeof(buf));
+       return new_tarval_from_str(buf, strlen(buf), tvmode);
+}
+
+/** Reads a type description and remembers it by its id. */
+static void import_type(io_env_t *env)
+{
+       char           buf[1024];
+       int            i;
+       ir_type       *type;
+       long           typenr = read_long(env);
+       const char    *tpop   = read_str(env);
+       const char    *name   = read_qstr_to(env, buf, sizeof(buf));
+       unsigned       size   = (unsigned) read_long(env);
+       unsigned       align  = (unsigned) read_long(env);
+       ir_type_state  state  = read_type_state(env);
+       ir_visibility  vis    = read_visibility(env);
+
+       ident         *id     = new_id_from_str(name);
+
+       switch(lex(tpop, tt_tpo))
+       {
+               case tpo_primitive:
+               {
+                       ir_mode *mode = read_mode(env);
+                       type = new_type_primitive(id, mode);
+                       break;
+               }
+
+               case tpo_method:
+               {
+                       int nparams  = (int) read_long(env);
+                       int nresults = (int) read_long(env);
+
+                       type = new_type_method(id, nparams, nresults);
+
+                       for(i = 0; i < nparams; i++)
+                       {
+                               long     typenr = read_long(env);
+                               ir_type *paramtype = get_type(env, typenr);
+
+                               set_method_param_type(type, i, paramtype);
+                       }
+                       for(i = 0; i < nresults; i++)
+                       {
+                               long typenr = read_long(env);
+                               ir_type *restype = get_type(env, typenr);
+
+                               set_method_res_type(type, i, restype);
+                       }
+                       break;
+               }
+
+               case tpo_array:
+               {
+                       int ndims = (int) read_long(env);
+                       long elemtypenr = read_long(env);
+                       ir_type *elemtype = get_type(env, elemtypenr);
+
+                       type = new_type_array(id, ndims, elemtype);
+                       for(i = 0; i < ndims; i++)
+                       {
+                               long lowerbound = read_long(env);
+                               long upperbound = read_long(env);
+                               set_array_bounds_int(type, i, lowerbound, upperbound);
+                       }
+                       set_type_size_bytes(type, size);
+                       break;
+               }
+
+               case tpo_class:
+                       type = new_type_class(id);
+                       set_type_size_bytes(type, size);
+                       break;
+
+               case tpo_struct:
+                       type = new_type_struct(id);
+                       set_type_size_bytes(type, size);
+                       break;
+
+               case tpo_union:
+                       type = new_type_union(id);
+                       set_type_size_bytes(type, size);
+                       break;
+
+               case tpo_unknown:
+                       return;   // ignore unknown type
+
+               default:
+                       if(typenr != 0)  // ignore global type
+                               printf("Unknown type kind: \"%s\" in line %i:%i\n", tpop, env->line, env->col);
+                       skip_to(env, '\n');
+                       return;
+       }
+
+       set_type_alignment_bytes(type, align);
+       set_type_visibility(type, vis);
+
+       if(state == layout_fixed)
+               ARR_APP1(ir_type *, env->fixedtypes, type);
+
+       set_id(env, typenr, type);
+       printf("Insert type %s %ld\n", name, typenr);
+}
+
+/** Reads an entity description and remembers it by its id. */
+static void import_entity(io_env_t *env)
+{
+       char          buf[1024];
+       long          entnr       = read_long(env);
+       const char   *name        = read_qstr_to(env, buf, sizeof(buf));
+       long          typenr      = read_long(env);
+       long          ownertypenr = read_long(env);
+
+       ir_type   *type      = get_type(env, typenr);
+       ir_type   *ownertype = !ownertypenr ? get_glob_type() : get_type(env, ownertypenr);
+       ir_entity *entity    = new_entity(ownertype, new_id_from_str(name), type);
+
+       set_entity_offset     (entity, (int) read_long(env));
+       set_entity_offset_bits_remainder(entity, (unsigned char) read_long(env));
+       set_entity_allocation (entity, read_allocation(env));
+       set_entity_visibility (entity, read_visibility(env));
+       set_entity_variability(entity, read_variability(env));
+       set_entity_peculiarity(entity, read_peculiarity(env));
+       set_entity_volatility (entity, read_volatility(env));
+
+       set_id(env, entnr, entity);
+       printf("Insert entity %s %ld\n", name, entnr);
+}
+
+/** Parses the whole type graph. */
+static int parse_typegraph(io_env_t *env)
+{
+       const char *kind;
+       long curfpos;
+
+       EXPECT('{');
+
+       curfpos = ftell(env->file);
+
+       // parse all types first
+       while(1)
+       {
+               kind = read_str(env);
+               if(kind[0] == '}' && !kind[1]) break;
+
+               if(!strcmp(kind, "type"))
+                       import_type(env);
+               else
+                       skip_to(env, '\n');
+       }
+
+       // now parse rest
+       fseek(env->file, curfpos, SEEK_SET);
+       while(1)
+       {
+               kind = read_str(env);
+               if(kind[0] == '}' && !kind[1]) break;
+
+               if(!strcmp(kind, "type"))
+                       skip_to(env, '\n');
+               else if(!strcmp(kind, "entity"))
+                       import_entity(env);
+               else
+               {
+                       printf("Type graph element not supported yet: \"%s\"\n", kind);
+                       skip_to(env, '\n');
+               }
+       }
+       return 1;
+}
+
+static int read_node_header(io_env_t *env, long *nodenr, long **preds, const char **nodename)
+{
+       int numpreds;
+       *nodename = read_str(env);
+       if((*nodename)[0] == '}' && !(*nodename)[1]) return -1;  // end-of-graph
+
+       *nodenr = read_long(env);
+
+       ARR_RESIZE(long, *preds, 0);   /* preds is an array of long node ids */
+
+       EXPECT('[');
+       for(numpreds = 0; !feof(env->file); numpreds++)
+       {
+               char *endptr;
+               ARR_APP1(long, *preds, read_long2(env, &endptr));
+               if(*endptr == ']') break;
+       }
+       return numpreds;
+}
+
+/** Parses an IRG. */
+static int parse_graph(io_env_t *env)
+{
+       long       *preds = NEW_ARR_F(long, 16);
+       ir_node   **prednodes = NEW_ARR_F(ir_node *, 16);
+       int         i, numpreds, ret = 1;
+       long        nodenr;
+       const char *nodename;
+       ir_node    *node, *newnode;
+
+       current_ir_graph = new_ir_graph(get_entity(env, read_long(env)), 0);
+
+       EXPECT('{');
+
+       while(1)
+       {
+               numpreds = read_node_header(env, &nodenr, &preds, &nodename);
+               if(numpreds == -1) break;  // end-of-graph
+               if(!numpreds)
+               {
+                       printf("Node %s %ld is missing predecessors!", nodename, nodenr);
+                       ret = 0;
+                       break;
+               }
+
+               ARR_RESIZE(ir_node *, prednodes, numpreds);
+               for(i = 0; i < numpreds - 1; i++)
+                       prednodes[i] = get_node_or_dummy(env, preds[i + 1]);
+
+               node = get_node_or_null(env, nodenr);
+               newnode = NULL;
+
+               EXPECT('{');
+
+               switch(lex(nodename, tt_iro))
+               {
+                       case iro_End:
+                       {
+                               ir_node *newendblock = get_node(env, preds[0]);
+                               newnode = get_irg_end(current_ir_graph);
+                               exchange(get_nodes_block(newnode), newendblock);
+                               break;
+                       }
+
+                       case iro_Start:
+                       {
+                               ir_node *newstartblock = get_node(env, preds[0]);
+                               newnode = get_irg_start(current_ir_graph);
+                               exchange(get_nodes_block(newnode), newstartblock);
+                               break;
+                       }
+
+                       case iro_Block:
+                       {
+                               if(preds[0] != nodenr)
+                               {
+                                       printf("Invalid block: preds[0] != nodenr (%ld != %ld)\n",
+                                               preds[0], nodenr);
+                                       ret = 0;
+                                       goto endloop;
+                               }
+
+                               newnode = new_Block(numpreds - 1, prednodes);
+                               break;
+                       }
+
+                       case iro_Anchor:
+                               newnode = current_ir_graph->anchor;
+                               for(i = 0; i < numpreds - 1; i++)
+                                       set_irn_n(newnode, i, prednodes[i]);
+                               set_irn_n(newnode, -1, get_node(env, preds[0]));
+                               break;
+
+                       case iro_SymConst:
+                       {
+                               long entnr = read_long(env);
+                               union symconst_symbol sym;
+                               sym.entity_p = get_entity(env, entnr);
+                               newnode = new_SymConst(mode_P, sym, symconst_addr_ent);
+                               break;
+                       }
+
+                       #include "gen_irio_import.inl"
+
+                       default:
+                               goto notsupported;
+               }
+
+               EXPECT('}');
+
+               if(!newnode)
+               {
+notsupported:
+                       printf("Node type not supported yet: %s in line %i:%i\n", nodename, env->line, env->col);
+                       assert(0 && "Invalid node type");
+               }
+
+               if(node)
+                       exchange(node, newnode);
+               /* Always update hash entry to avoid more uses of id nodes */
+               set_id(env, nodenr, newnode);
+               printf("Insert %s %ld\n", nodename, nodenr);
+       }
+
+endloop:
+       DEL_ARR_F(preds);
+       DEL_ARR_F(prednodes);
+
+       return ret;
+}
+
+/** Imports a previously exported textual representation of a (possibly partial) irp. */
+void ir_import(const char *filename)
+{
+       int oldoptimize = get_optimize();
+       firm_verification_t oldver = get_node_verification_mode();
+       io_env_t ioenv;
+       io_env_t *env = &ioenv;
+       int i, n;
+
+       init_lexer();
+
+       memset(env, 0, sizeof(*env));
+       env->idset = new_set(id_cmp, 128);
+       env->fixedtypes = NEW_ARR_F(ir_type *, 0);
+
+       env->file = fopen(filename, "rt");
+       if(!env->file)
+       {
+               perror(filename);
+               exit(1);
+       }
+
+       set_optimize(0);
+       do_node_verification(FIRM_VERIFICATION_OFF);
+
+       while(1)
+       {
+               const char *str = read_str(env);
+               if(!*str) break;
+               if(!strcmp(str, "typegraph"))
+               {
+                       if(!parse_typegraph(env)) break;
+               }
+               else if(!strcmp(str, "irg"))
+               {
+                       if(!parse_graph(env)) break;
+               }
+       }
+
+       n = ARR_LEN(env->fixedtypes);
+       for(i = 0; i < n; i++)
+               set_type_state(env->fixedtypes[i], layout_fixed);
+
+       DEL_ARR_F(env->fixedtypes);
+
+       del_set(env->idset);
+
+       irp_finalize_cons();
+
+       do_node_verification(oldver);
+       set_optimize(oldoptimize);
+
+       fclose(env->file);
+}
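
For orientation, the export routines above emit a textual form with the following overall shape (a schematic sketch assembled from the fprintf format strings; the <...> parts are placeholders, not literal syntax):

	typegraph {
		type <nr> <tpop> "<name>" <size> <align> <layout state> <visibility> <kind-specific fields>
		entity <nr> "<name>" <type nr> <owner type nr> <offset> <bit offset> <allocation> <visibility> <variability> <peculiarity> <volatility>
	}

	irg <entity nr> {
		<OpName> <node nr> [ <block nr> <pred nrs ...> ] { <attributes> }
		...
	}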
diff --git a/scripts/gen_ir_io.py b/scripts/gen_ir_io.py
new file mode 100755 (executable)
index 0000000..6ace69f
--- /dev/null
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+import sys
+from jinja2 import Environment, Template
+import ir_spec
+
+def format_args(arglist):
+       #argstrings = map(lambda arg : arg["name"], arglist)
+       #return ", ".join(argstrings)
+       s = ", ".join(arglist)
+       if len(s) == 0:
+               return ""
+       return ", " + s
+
+def format_ifnset(string, node, key):
+       if key in node:
+               return ""
+       return string
+
+def format_block(node):
+       if node.get("knownBlock"):
+               return ""
+       else:
+               return ", get_node(env, preds[0])"
+
+env = Environment()
+env.filters['args']   = format_args
+env.filters['ifnset'] = format_ifnset
+env.filters['block']  = format_block
+
+def get_io_type(type, attrname, nodename):
+       if type == "tarval*":
+               importcmd = "tarval *%s = read_tv(env);" % attrname
+               exportcmd = """
+                       write_mode(env, get_tarval_mode(%(val)s));
+                       tarval_snprintf(buf, sizeof(buf), %(val)s);
+                       fprintf(env->file, "%%s ", buf);"""
+       elif type == "ir_mode*":
+               importcmd = "ir_mode *%s = read_mode(env);" % attrname
+               exportcmd = "write_mode(env, %(val)s);"
+       elif type == "ir_entity*":
+               importcmd = "ir_entity *%s = read_entity(env);" % attrname
+               exportcmd = """fprintf(env->file, "%%ld ", get_entity_nr(%(val)s));"""
+       elif type == "ir_type*":
+               importcmd = "ir_type *%s = read_type(env);" % attrname
+               exportcmd = """fprintf(env->file, "%%ld ", get_type_nr(%(val)s));"""
+       elif type == "long" and nodename == "Proj":
+               importcmd = "long %s = read_long(env);" % attrname
+               exportcmd = """fprintf(env->file, "%%ld ", %(val)s);"""
+       elif type == "pn_Cmp" or type == "ir_where_alloc":
+               importcmd = "%s %s = (%s) read_long(env);" % (type, attrname, type)
+               exportcmd = """fprintf(env->file, "%%ld ", (long) %(val)s);"""
+       elif type == "cons_flags" and nodename == "Store":
+               importcmd = """ir_cons_flags %s = read_pinned(env)
+                               | read_volatility(env)
+                               | read_align(env);""" % attrname
+               exportcmd = """write_pinned(env, irn);
+                       write_volatility(env, irn);
+                       write_align(env, irn);"""
+       elif type == "cons_flags" and nodename == "Load":
+               importcmd = """ir_cons_flags %s = read_pinned(env)
+                               | read_volatility(env)
+                               | read_align(env);""" % attrname
+               exportcmd = """write_pinned(env, irn);
+                       write_volatility(env, irn);
+                       write_align(env, irn);"""
+       else:
+               print "UNKNOWN TYPE: %s" % type
+               importcmd = """// BAD: %s %s
+                       %s %s = (%s) 0;""" % (type, attrname, type, attrname, type)
+               exportcmd = "// BAD: %s" % type
+       return (importcmd, exportcmd)
+
+"""    if type == "ir_type*":
+               java_type    = "firm.Type"
+               wrap_type    = "Pointer"
+               to_wrapper   = "%s.ptr"
+               from_wrapper = "firm.Type.createWrapper(%s)"
+       elif type == "ir_mode*":
+               java_type    = "firm.Mode"
+               wrap_type    = "Pointer"
+               to_wrapper   = "%s.ptr"
+               from_wrapper = "new firm.Mode(%s)"
+       elif type == "tarval*":
+               java_type    = "firm.TargetValue"
+               wrap_type    = "Pointer"
+               to_wrapper   = "%s.ptr"
+               from_wrapper = "new firm.TargetValue(%s)"
+       elif type == "pn_Cmp":
+               java_type    = "int"
+               wrap_type    = "int"
+               to_wrapper   = "%s"
+               from_wrapper = "%s"
+       elif type == "long":
+               java_type    = "int"
+               wrap_type    = "com.sun.jna.NativeLong"
+               to_wrapper   = "new com.sun.jna.NativeLong(%s)"
+               from_wrapper = "%s.intValue()"
+       elif type == "cons_flags":
+               java_type    = "firm.bindings.binding_ircons.ir_cons_flags"
+               wrap_type    = "int"
+               to_wrapper   = "%s.val"
+               from_wrapper = "firm.bindings.binding_ircons.ir_cons_flags.getEnum(%s)"
+       elif type == "ir_where_alloc":
+               java_type    = "firm.bindings.binding_ircons.ir_where_alloc"
+               wrap_type    = "int"
+               to_wrapper   = "%s.val"
+               from_wrapper = "firm.bindings.binding_ircons.ir_where_alloc.getEnum(%s)"
+       elif type == "ir_entity*":
+               java_type    = "firm.Entity"
+               wrap_type    = "Pointer"
+               to_wrapper   = "%s.ptr"
+               from_wrapper = "new firm.Entity(%s)"
+       else:
+               print "UNKNOWN TYPE"
+               java_type    = "BAD"
+               wrap_type    = "BAD"
+               to_wrapper   = "BAD"
+               from_wrapper = "BAD"
+       return (java_type,wrap_type,to_wrapper,from_wrapper)"""
+
+def prepare_attr(nodename, attr):
+       (importcmd,exportcmd) = get_io_type(attr["type"], attr["name"], nodename)
+       attr["importcmd"] = importcmd
+       attr["exportcmd"] = exportcmd % {"val": "get_%s_%s(irn)" % (nodename, attr["name"])}
+
+def preprocess_node(nodename, node):
+       if "is_a" in node:
+               parent = ir_spec.nodes[node["is_a"]]
+               node["ins"] = parent["ins"]
+               if "outs" in parent:
+                       node["outs"] = parent["outs"]
+       if "ins" not in node:
+               node["ins"] = []
+       if "outs" in node:
+               node["mode"] = "mode_T"
+       if "arity" not in node:
+               node["arity"] = len(node["ins"])
+       if "attrs" not in node:
+               node["attrs"] = []
+       if "constructor_args" not in node:
+               node["constructor_args"] = []
+
+       # construct node arguments
+       arguments = [ ]
+       i = 0
+       for input in node["ins"]:
+               arguments.append("prednodes[%i]" % i)
+               i += 1
+
+       if node["arity"] == "variable" or node["arity"] == "dynamic":
+               arguments.append("numpreds - %i" % (i + 1))
+               arguments.append("prednodes + %i" % i)
+
+       if "mode" not in node:
+               arguments.append("mode")
+
+       for attr in node["attrs"]:
+               prepare_attr(nodename, attr)
+               arguments.append(attr["name"])
+
+       for arg in node["constructor_args"]:
+               prepare_attr(nodename, arg)
+               arguments.append(arg["name"])
+
+       node["arguments"] = arguments
+
+export_attrs_template = env.from_string('''
+       case iro_{{nodename}}:
+               {{"write_mode(env, get_irn_mode(irn));"|ifnset(node,"mode")}}
+               {% for attr in node.attrs %}{{attr.exportcmd}}
+               {% endfor %}
+               {% for attr in node.constructor_args %}{{attr.exportcmd}}
+               {% endfor %}break;''')
+
+import_attrs_template = env.from_string('''
+       case iro_{{nodename}}:
+       {
+               {{"ir_mode *mode = read_mode(env);"|ifnset(node,"mode")}}
+               {% for attr in node.attrs %}{{attr.importcmd}}
+               {% endfor %}
+               {% for attr in node.constructor_args %}{{attr.importcmd}}
+               {% endfor %}newnode = new_r_{{nodename}}(current_ir_graph{{node|block}}{{node["arguments"]|args}});
+               break;
+       }
+''')
+
+def main(argv):
+       """the main function"""
+
+       if len(argv) < 2:
+               print "usage: %s destdirectory" % argv[0]
+               sys.exit(1)
+
+       gendir = argv[1]
+
+       file = open(gendir + "/gen_irio_export.inl", "w");
+       for nodename, node in ir_spec.nodes.iteritems():
+               preprocess_node(nodename, node)
+               if not "abstract" in node:
+                       file.write(export_attrs_template.render(vars()))
+       file.write("\n")
+       file.close()
+
+       file = open(gendir + "/gen_irio_import.inl", "w");
+       for nodename, node in ir_spec.nodes.iteritems():
+               if not "abstract" in node and nodename != "Start" and nodename != "End" and nodename != "Anchor" and nodename != "SymConst" and nodename != "Block":
+                       file.write(import_attrs_template.render(vars()))
+       # TODO: SymConst
+       file.write("\n")
+       file.close()
+
+       file = open(gendir + "/gen_irio_lex.inl", "w");
+       for nodename, node in ir_spec.nodes.iteritems():
+               if not "abstract" in node:
+                       file.write("\tINSERT(\"" + nodename + "\", tt_iro, iro_" + nodename + ");\n");
+       file.close()
+
+if __name__ == "__main__":
+       main(sys.argv)
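
As a concrete sample of the generated output, the last loop above writes one INSERT line per non-abstract node into gen_irio_lex.inl, e.g. (excerpt):

	INSERT("Add", tt_iro, iro_Add);
	INSERT("Block", tt_iro, iro_Block);
	INSERT("Load", tt_iro, iro_Load);
	...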
diff --git a/scripts/ir_spec.py b/scripts/ir_spec.py
new file mode 100755 (executable)
index 0000000..d070782
--- /dev/null
@@ -0,0 +1,333 @@
+nodes = dict(
+Start = dict(
+       mode       = "mode_T",
+       op_flags   = "cfopcode",
+       state      = "pinned",
+       knownBlock = True,
+       noconstr   = True,
+),
+
+End = dict(
+       mode       = "mode_X",
+       op_flags   = "cfopcode",
+       state      = "pinned",
+       arity      = "dynamic",
+       knownBlock = True,
+       noconstr   = True,
+),
+
+Phi = dict(
+       noconstr = True,
+       state    = "pinned",
+       arity    = "variable",
+),
+
+Jmp = dict(
+       mode     = "mode_X",
+       op_flags = "cfopcode",
+       state    = "pinned",
+       ins      = [],
+),
+
+IJmp = dict(
+       mode     = "mode_X",
+       op_flags = "cfopcode",
+       state    = "pinned",
+       ins      = [ "target" ],
+),
+
+Const = dict(
+       mode       = "",
+       knownBlock = True,
+       attrs      = [
+               dict(
+                       type = "tarval*",
+                       name = "tarval",
+               )
+       ],
+),
+
+Block = dict(
+       mode   = "mode_BB",
+       knownBlock = True,
+       noconstr   = True,
+       arity      = "variable",
+       java_add   = '''
+       public void addPred(Node node) {
+               binding_cons.add_immBlock_pred(ptr, node.ptr);
+       }
+
+       public void mature() {
+               binding_cons.mature_immBlock(ptr);
+       }
+
+       @Override
+       public Block getBlock() {
+               return null;
+       }
+
+       public boolean blockVisited() {
+               return 0 != binding.Block_block_visited(ptr);
+       }
+
+       public void markBlockVisited() {
+               binding.mark_Block_block_visited(ptr);
+       }''',
+),
+
+SymConst = dict(
+       mode       = "mode_P",
+       knownBlock = True,
+       noconstr   = True,
+       attrs      = [
+               dict(
+                       type = "ir_entity*",
+                       name = "entity"
+               )
+       ],
+),
+
+# SymConst
+
+Call = dict(
+       ins      = [ "mem", "ptr" ],
+       arity    = "variable",
+       outs     = [ "M_regular", "X_regular", "X_except", "T_result", "M_except", "P_value_res_base" ],
+       attrs    = [
+               dict(
+                       type = "ir_type*",
+                       name = "type"
+               )
+       ]
+),
+
+binop = dict(
+       abstract = True,
+       ins      = [ "left", "right" ]
+),
+
+Add = dict(
+       is_a     = "binop"
+),
+
+Carry = dict(
+       is_a     = "binop"
+),
+
+Sub = dict(
+       is_a     = "binop"
+),
+
+Borrow = dict(
+       is_a     = "binop"
+),
+
+Mul = dict(
+       is_a     = "binop"
+),
+
+Mulh = dict(
+       is_a     = "binop"
+),
+
+Abs = dict(
+       is_a     = "unop"
+),
+
+And = dict(
+       is_a     = "binop"
+),
+
+Or = dict(
+       is_a     = "binop"
+),
+
+Eor = dict(
+       is_a     = "binop"
+),
+
+Not = dict(
+       is_a     = "unop"
+),
+
+Shl = dict(
+       is_a     = "binop"
+),
+
+Shr = dict(
+       is_a     = "binop"
+),
+
+Shrs = dict(
+       is_a     = "binop"
+),
+
+Rotl = dict(
+       is_a     = "binop"
+),
+
+Load = dict(
+       ins      = [ "mem", "ptr" ],
+       outs     = [ "M", "X_regular", "X_except", "res" ],
+       attrs    = [
+               dict(
+                       type = "ir_mode*",
+                       name = "mode",
+                       java_name = "load_mode"
+               ),
+       ],
+       constructor_args = [
+               dict(
+                       type = "cons_flags",
+                       name = "flags",
+               ),
+       ],
+),
+
+Store = dict(
+       ins      = [ "mem", "ptr", "value" ],
+       outs     = [ "M", "X_regular", "X_except" ],
+       constructor_args = [
+               dict(
+                       type = "cons_flags",
+                       name = "flags",
+               ),
+       ],
+),
+
+Anchor = dict(
+       mode       = "mode_ANY",
+       ins        = [ "end_block", "start_block", "end", "start",
+                      "end_reg", "end_except", "initial_exec",
+                                  "frame", "tls", "initial_mem", "args",
+                                  "bad", "no_mem" ],
+       knownBlock = True,
+       noconstr   = True
+),
+
+NoMem = dict(
+       mode       = "mode_M",
+       knownBlock = True,
+),
+
+Bad = dict(
+       mode       = "mode_Bad",
+       knownBlock = True,
+),
+
+Pin = dict(
+       ins      = [ "op" ],
+       mode     = "get_irn_mode(op);"
+),
+
+Proj = dict(
+       ins      = [ "pred" ],
+       attrs    = [
+               dict(
+                       type = "long",
+                       name = "proj"
+               )
+       ]
+),
+
+Sel = dict(
+       ins    = [ "mem", "ptr" ],
+       arity  = "variable",
+       mode   = "mode_P",
+       attrs    = [
+               dict(
+                       type = "ir_entity*",
+                       name = "entity"
+               )
+       ]
+),
+
+Sync = dict(
+       mode     = "mode_M",
+       arity    = "dynamic"
+),
+
+Tuple = dict(
+       arity    = "variable",
+       mode     = "mode_T",
+),
+
+Unknown = dict(
+       knownBlock = True
+),
+
+Confirm = dict(
+       ins      = [ "value", "bound" ],
+       block    = "get_nodes_block(value)",
+       mode     = "get_irn_mode(value)",
+       attrs    = [
+               dict(
+                       name = "cmp",
+                       type = "pn_Cmp"
+               ),
+       ],
+),
+
+Return = dict(
+       ins      = [ "mem" ],
+       arity    = "variable",
+       mode     = "mode_X"
+),
+
+unop = dict(
+       abstract = True,
+       ins      = [ "op" ]
+),
+
+Minus = dict(
+       is_a     = "unop"
+),
+
+Mux = dict(
+       ins      = [ "sel", "false", "true" ]
+),
+
+Cond = dict(
+       ins      = [ "selector" ],
+       outs     = [ "false", "true" ],
+),
+
+Cmp = dict(
+       is_a     = "binop",
+       outs     = [ "False", "Eq", "Lt", "Le", "Gt", "Ge", "Lg", "Leg", "Uo", "Ue", "Ul", "Ule", "Ug", "Uge", "Ne", "True" ],
+),
+
+Conv = dict(
+       is_a     = "unop"
+),
+
+Alloc = dict(
+       ins   = [ "mem", "size" ],
+       outs  = [ "M", "X_regular", "X_except", "res" ],
+       attrs = [
+               dict(
+                       name = "type",
+                       type = "ir_type*"
+               ),
+               dict(
+                       name = "where",
+                       type = "ir_where_alloc"
+               )
+       ]
+),
+
+Free = dict(
+       ins   = [ "mem", "ptr", "size" ],
+       mode  = "mode_M",
+       attrs = [
+               dict(
+                       name = "type",
+                       type = "ir_type*"
+               ),
+               dict(
+                       name = "where",
+                       type = "ir_where_alloc"
+               )
+       ]
+),
+)
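
To illustrate how the node spec and the two templates fit together: for the Add entry (is_a = "binop", hence ins = left/right, no fixed mode, block not known), the generator would emit roughly the following cases into gen_irio_export.inl and gen_irio_import.inl (a sketch with whitespace tidied; the constructor call is exactly what the import template produces):

	case iro_Add:
		write_mode(env, get_irn_mode(irn));
		break;

	case iro_Add:
	{
		ir_mode *mode = read_mode(env);
		newnode = new_r_Add(current_ir_graph, get_node(env, preds[0]),
		                    prednodes[0], prednodes[1], mode);
		break;
	}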
diff --git a/scripts/jinja2/__init__.py b/scripts/jinja2/__init__.py
new file mode 100644 (file)
index 0000000..95b2d5b
--- /dev/null
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2
+    ~~~~~~
+
+    Jinja2 is a template engine written in pure Python.  It provides a
+    Django inspired non-XML syntax but supports inline expressions and
+    an optional sandboxed environment.
+
+    Nutshell
+    --------
+
+    Here a small example of a Jinja2 template::
+
+        {% extends 'base.html' %}
+        {% block title %}Memberlist{% endblock %}
+        {% block content %}
+          <ul>
+          {% for user in users %}
+            <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+          {% endfor %}
+          </ul>
+        {% endblock %}
+
+
+    :copyright: 2008 by Armin Ronacher, Christoph Hack.
+    :license: BSD, see LICENSE for more details.
+"""
+__docformat__ = 'restructuredtext en'
+try:
+    __version__ = __import__('pkg_resources') \
+        .get_distribution('Jinja2').version
+except:
+    __version__ = 'unknown'
+
+# high level interface
+from jinja2.environment import Environment, Template
+
+# loaders
+from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
+     DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader
+
+# bytecode caches
+from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
+     MemcachedBytecodeCache
+
+# undefined types
+from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined
+
+# exceptions
+from jinja2.exceptions import TemplateError, UndefinedError, \
+     TemplateNotFound, TemplateSyntaxError, TemplateAssertionError
+
+# decorators and public utilities
+from jinja2.filters import environmentfilter, contextfilter
+from jinja2.utils import Markup, escape, clear_caches, \
+     environmentfunction, contextfunction, is_undefined
+
+__all__ = [
+    'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
+    'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
+    'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
+    'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
+    'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
+    'TemplateSyntaxError', 'TemplateAssertionError', 'environmentfilter',
+    'contextfilter', 'Markup', 'escape', 'environmentfunction',
+    'contextfunction', 'clear_caches', 'is_undefined'
+]
diff --git a/scripts/jinja2/_ipysupport.py b/scripts/jinja2/_ipysupport.py
new file mode 100644 (file)
index 0000000..22ae823
--- /dev/null
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2._ipysupport
+    ~~~~~~~~~~~~~~~~~~
+
+    IronPython support library.  This library exports functionality from
+    the CLR to Python that is normally available in the standard library.
+
+    :copyright: Copyright 2008 by Armin Ronacher.
+    :license: BSD.
+"""
+from System import DateTime
+from System.IO import Path, File, FileInfo
+
+
+epoch = DateTime(1970, 1, 1)
+
+
+class _PathModule(object):
+    """A minimal path module."""
+
+    sep = str(Path.DirectorySeparatorChar)
+    altsep = str(Path.AltDirectorySeparatorChar)
+    pardir = '..'
+
+    def join(self, path, *args):
+        args = list(args[::-1])
+        while args:
+            path = Path.Combine(path, args.pop())
+        return path
+
+    def isfile(self, filename):
+        return File.Exists(filename)
+
+    def getmtime(self, filename):
+        info = FileInfo(filename)
+        return int((info.LastAccessTimeUtc - epoch).TotalSeconds)
+
+
+path = _PathModule()
diff --git a/scripts/jinja2/_speedups.c b/scripts/jinja2/_speedups.c
new file mode 100644 (file)
index 0000000..40bb8c0
--- /dev/null
@@ -0,0 +1,221 @@
+/**
+ * jinja2._speedups
+ * ~~~~~~~~~~~~~~~~
+ *
+ * This module implements functions for automatic escaping in C for better
+ * performance.  Additionally it defines a `tb_set_next` function to patch the
+ * debug traceback.  If the speedups module is not compiled a ctypes
+ * implementation of `tb_set_next` and Python implementations of the other
+ * functions are used.
+ *
+ * :copyright: 2008 by Armin Ronacher, Mickaël Guérin.
+ * :license: BSD.
+ */
+
+#include <Python.h>
+
+#define ESCAPED_CHARS_TABLE_SIZE 63
+#define UNICHR(x) (((PyUnicodeObject*)PyUnicode_DecodeASCII(x, strlen(x), NULL))->str);
+
+#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
+typedef int Py_ssize_t;
+#define PY_SSIZE_T_MAX INT_MAX
+#define PY_SSIZE_T_MIN INT_MIN
+#endif
+
+
+static PyObject* markup;
+static Py_ssize_t escaped_chars_delta_len[ESCAPED_CHARS_TABLE_SIZE];
+static Py_UNICODE *escaped_chars_repl[ESCAPED_CHARS_TABLE_SIZE];
+
+static int
+init_constants(void)
+{
+       PyObject *module;
+       /* mapping of characters to replace */
+       escaped_chars_repl['"'] = UNICHR("&#34;");
+       escaped_chars_repl['\''] = UNICHR("&#39;");
+       escaped_chars_repl['&'] = UNICHR("&amp;");
+       escaped_chars_repl['<'] = UNICHR("&lt;");
+       escaped_chars_repl['>'] = UNICHR("&gt;");
+
+       /* lengths of those characters when replaced - 1 */
+       memset(escaped_chars_delta_len, 0, sizeof (escaped_chars_delta_len));
+       escaped_chars_delta_len['"'] = escaped_chars_delta_len['\''] = \
+               escaped_chars_delta_len['&'] = 4;
+       escaped_chars_delta_len['<'] = escaped_chars_delta_len['>'] = 3;
+
+       /* import markup type so that we can mark the return value */
+       module = PyImport_ImportModule("jinja2.utils");
+       if (!module)
+               return 0;
+       markup = PyObject_GetAttrString(module, "Markup");
+       Py_DECREF(module);
+
+       return 1;
+}
+
+static PyObject*
+escape_unicode(PyUnicodeObject *in)
+{
+       PyUnicodeObject *out;
+       Py_UNICODE *inp = in->str;
+       const Py_UNICODE *inp_end = in->str + in->length;
+       Py_UNICODE *next_escp;
+       Py_UNICODE *outp;
+       Py_ssize_t delta=0, erepl=0, delta_len=0;
+
+       /* First we need to figure out how long the escaped string will be */
+       while (*(inp) || inp < inp_end) {
+               if (*inp < ESCAPED_CHARS_TABLE_SIZE && escaped_chars_delta_len[*inp]) {
+                       delta += escaped_chars_delta_len[*inp];
+                       ++erepl;
+               }
+               ++inp;
+       }
+
+       /* Do we need to escape anything at all? */
+       if (!erepl) {
+               Py_INCREF(in);
+               return (PyObject*)in;
+       }
+
+       out = (PyUnicodeObject*)PyUnicode_FromUnicode(NULL, in->length + delta);
+       if (!out)
+               return NULL;
+
+       outp = out->str;
+       inp = in->str;
+       while (erepl-- > 0) {
+               /* look for the next substitution */
+               next_escp = inp;
+               while (next_escp < inp_end) {
+                       if (*next_escp < ESCAPED_CHARS_TABLE_SIZE &&
+                           (delta_len = escaped_chars_delta_len[*next_escp])) {
+                               ++delta_len;
+                               break;
+                       }
+                       ++next_escp;
+               }
+
+               if (next_escp > inp) {
+                       /* copy unescaped chars between inp and next_escp */
+                       Py_UNICODE_COPY(outp, inp, next_escp-inp);
+                       outp += next_escp - inp;
+               }
+
+               /* escape 'next_escp' */
+               Py_UNICODE_COPY(outp, escaped_chars_repl[*next_escp], delta_len);
+               outp += delta_len;
+
+               inp = next_escp + 1;
+       }
+       if (inp < inp_end)
+               Py_UNICODE_COPY(outp, inp, in->length - (inp - in->str));
+
+       return (PyObject*)out;
+}
+
+
+static PyObject*
+escape(PyObject *self, PyObject *text)
+{
+       PyObject *s = NULL, *rv = NULL, *html;
+
+       /* we don't have to escape integers, bools or floats */
+       if (PyInt_CheckExact(text) || PyLong_CheckExact(text) ||
+           PyFloat_CheckExact(text) || PyBool_Check(text) ||
+           text == Py_None)
+               return PyObject_CallFunctionObjArgs(markup, text, NULL);
+
+       /* if the object has an __html__ method that performs the escaping */
+       html = PyObject_GetAttrString(text, "__html__");
+       if (html) {
+               rv = PyObject_CallObject(html, NULL);
+               Py_DECREF(html);
+               return rv;
+       }
+
+       /* otherwise make the object unicode if it isn't, then escape */
+       PyErr_Clear();
+       if (!PyUnicode_Check(text)) {
+               PyObject *unicode = PyObject_Unicode(text);
+               if (!unicode)
+                       return NULL;
+               s = escape_unicode((PyUnicodeObject*)unicode);
+               Py_DECREF(unicode);
+       }
+       else
+               s = escape_unicode((PyUnicodeObject*)text);
+
+       /* convert the unicode string into a markup object. */
+       rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
+       Py_DECREF(s);
+       return rv;
+}
+
+
+static PyObject*
+soft_unicode(PyObject *self, PyObject *s)
+{
+       if (!PyUnicode_Check(s))
+               return PyObject_Unicode(s);
+       Py_INCREF(s);
+       return s;
+}
+
+
+static PyObject*
+tb_set_next(PyObject *self, PyObject *args)
+{
+       PyTracebackObject *tb, *old;
+       PyObject *next;
+
+       if (!PyArg_ParseTuple(args, "O!O:tb_set_next", &PyTraceBack_Type, &tb, &next))
+               return NULL;
+       if (next == Py_None)
+               next = NULL;
+       else if (!PyTraceBack_Check(next)) {
+               PyErr_SetString(PyExc_TypeError,
+                               "tb_set_next arg 2 must be traceback or None");
+               return NULL;
+       }
+       else
+               Py_INCREF(next);
+
+       old = tb->tb_next;
+       tb->tb_next = (PyTracebackObject*)next;
+       Py_XDECREF(old);
+
+       Py_INCREF(Py_None);
+       return Py_None;
+}
+
+
+static PyMethodDef module_methods[] = {
+       {"escape", (PyCFunction)escape, METH_O,
+        "escape(s) -> markup\n\n"
+        "Convert the characters &, <, >, ', and \" in string s to HTML-safe\n"
+        "sequences.  Use this if you need to display text that might contain\n"
+        "such characters in HTML.  Marks return value as markup string."},
+       {"soft_unicode", (PyCFunction)soft_unicode, METH_O,
+        "soft_unicode(object) -> string\n\n"
+         "Make a string unicode if it isn't already.  That way a markup\n"
+         "string is not converted back to unicode."},
+       {"tb_set_next", (PyCFunction)tb_set_next, METH_VARARGS,
+        "Set the tb_next member of a traceback object."},
+       {NULL, NULL, 0, NULL}           /* Sentinel */
+};
+
+
+#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */
+#define PyMODINIT_FUNC void
+#endif
+PyMODINIT_FUNC
+init_speedups(void)
+{
+       if (!init_constants())
+               return;
+
+       Py_InitModule3("jinja2._speedups", module_methods, "");
+}
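The comment at the top of this C module notes that pure-Python fallbacks are used when the extension is not compiled.  As a rough illustration of the escaping semantics implemented above (a simplified sketch, not the actual jinja2.utils fallback, which additionally wraps the result in a Markup string):

    def escape_sketch(s):
        # objects that know how to escape themselves are trusted as-is,
        # mirroring the __html__ check in escape() above
        if hasattr(s, '__html__'):
            return s.__html__()
        # otherwise coerce to unicode and replace the five characters from
        # the table built in init_constants()
        s = unicode(s)
        return (s.replace(u'&', u'&amp;')
                 .replace(u'<', u'&lt;')
                 .replace(u'>', u'&gt;')
                 .replace(u"'", u'&#39;')
                 .replace(u'"', u'&#34;'))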
diff --git a/scripts/jinja2/bccache.py b/scripts/jinja2/bccache.py
new file mode 100644 (file)
index 0000000..2c57616
--- /dev/null
@@ -0,0 +1,280 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.bccache
+    ~~~~~~~~~~~~~~
+
+    This module implements the bytecode cache system Jinja is optionally
+    using.  This is useful if you have very complex template situations and
+    the compilation of all those templates slows down your application too
+    much.
+
+    Situations where this is useful are often forking web applications that
+    are initialized on the first request.
+
+    :copyright: Copyright 2008 by Armin Ronacher.
+    :license: BSD.
+"""
+from os import path, listdir
+import marshal
+import tempfile
+import cPickle as pickle
+import fnmatch
+from cStringIO import StringIO
+try:
+    from hashlib import sha1
+except ImportError:
+    from sha import new as sha1
+from jinja2.utils import open_if_exists
+
+
+bc_version = 1
+bc_magic = 'j2' + pickle.dumps(bc_version, 2)
+
+
+class Bucket(object):
+    """Buckets are used to store the bytecode for one template.  It's created
+    and initialized by the bytecode cache and passed to the loading functions.
+
+    The buckets get an internal checksum from the cache assigned and use this
+    to automatically reject outdated cache material.  Individual bytecode
+    cache subclasses don't have to care about cache invalidation.
+    """
+
+    def __init__(self, environment, key, checksum):
+        self.environment = environment
+        self.key = key
+        self.checksum = checksum
+        self.reset()
+
+    def reset(self):
+        """Resets the bucket (unloads the bytecode)."""
+        self.code = None
+
+    def load_bytecode(self, f):
+        """Loads bytecode from a file or file like object."""
+        # make sure the magic header is correct
+        magic = f.read(len(bc_magic))
+        if magic != bc_magic:
+            self.reset()
+            return
+        # the source code of the file changed, we need to reload
+        checksum = pickle.load(f)
+        if self.checksum != checksum:
+            self.reset()
+            return
+        # now load the code.  Because marshal is not able to load
+        # from arbitrary streams we have to work around that
+        if isinstance(f, file):
+            self.code = marshal.load(f)
+        else:
+            self.code = marshal.loads(f.read())
+
+    def write_bytecode(self, f):
+        """Dump the bytecode into the file or file like object passed."""
+        if self.code is None:
+            raise TypeError('can\'t write empty bucket')
+        f.write(bc_magic)
+        pickle.dump(self.checksum, f, 2)
+        if isinstance(f, file):
+            marshal.dump(self.code, f)
+        else:
+            f.write(marshal.dumps(self.code))
+
+    def bytecode_from_string(self, string):
+        """Load bytecode from a string."""
+        self.load_bytecode(StringIO(string))
+
+    def bytecode_to_string(self):
+        """Return the bytecode as string."""
+        out = StringIO()
+        self.write_bytecode(out)
+        return out.getvalue()
+
+
+class BytecodeCache(object):
+    """To implement your own bytecode cache you have to subclass this class
+    and override :meth:`load_bytecode` and :meth:`dump_bytecode`.  Both of
+    these methods are passed a :class:`~jinja2.bccache.Bucket`.
+
+    A very basic bytecode cache that saves the bytecode on the file system::
+
+        from os import path
+
+        class MyCache(BytecodeCache):
+
+            def __init__(self, directory):
+                self.directory = directory
+
+            def load_bytecode(self, bucket):
+                filename = path.join(self.directory, bucket.key)
+                if path.exists(filename):
+                    with file(filename, 'rb') as f:
+                        bucket.load_bytecode(f)
+
+            def dump_bytecode(self, bucket):
+                filename = path.join(self.directory, bucket.key)
+                with file(filename, 'wb') as f:
+                    bucket.write_bytecode(f)
+
+    A more advanced version of a filesystem based bytecode cache is part of
+    Jinja2.
+    """
+
+    def load_bytecode(self, bucket):
+        """Subclasses have to override this method to load bytecode into a
+        bucket.  If it is not able to find code in the cache for the
+        bucket, it must not do anything.
+        """
+        raise NotImplementedError()
+
+    def dump_bytecode(self, bucket):
+        """Subclasses have to override this method to write the bytecode
+        from a bucket back to the cache.  If it is unable to do so it must not
+        fail silently but raise an exception.
+        """
+        raise NotImplementedError()
+
+    def clear(self):
+        """Clears the cache.  This method is not used by Jinja2 but should be
+        implemented to allow applications to clear the bytecode cache used
+        by a particular environment.
+        """
+
+    def get_cache_key(self, name, filename=None):
+        """Returns the unique hash key for this template name."""
+        hash = sha1(name.encode('utf-8'))
+        if filename is not None:
+            if isinstance(filename, unicode):
+                filename = filename.encode('utf-8')
+            hash.update('|' + filename)
+        return hash.hexdigest()
+
+    def get_source_checksum(self, source):
+        """Returns a checksum for the source."""
+        return sha1(source.encode('utf-8')).hexdigest()
+
+    def get_bucket(self, environment, name, filename, source):
+        """Return a cache bucket for the given template.  All arguments are
+        mandatory but filename may be `None`.
+        """
+        key = self.get_cache_key(name, filename)
+        checksum = self.get_source_checksum(source)
+        bucket = Bucket(environment, key, checksum)
+        self.load_bytecode(bucket)
+        return bucket
+
+    def set_bucket(self, bucket):
+        """Put the bucket into the cache."""
+        self.dump_bytecode(bucket)
+
+
+class FileSystemBytecodeCache(BytecodeCache):
+    """A bytecode cache that stores bytecode on the filesystem.  It accepts
+    two arguments: The directory where the cache items are stored and a
+    pattern string that is used to build the filename.
+
+    If no directory is specified the system temporary items folder is used.
+
+    The pattern can be used to have multiple separate caches operate on the
+    same directory.  The default pattern is ``'__jinja2_%s.cache'``.  ``%s``
+    is replaced with the cache key.
+
+    >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')
+
+    This bytecode cache supports clearing of the cache using the clear method.
+    """
+
+    def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
+        if directory is None:
+            directory = tempfile.gettempdir()
+        self.directory = directory
+        self.pattern = pattern
+
+    def _get_cache_filename(self, bucket):
+        return path.join(self.directory, self.pattern % bucket.key)
+
+    def load_bytecode(self, bucket):
+        f = open_if_exists(self._get_cache_filename(bucket), 'rb')
+        if f is not None:
+            try:
+                bucket.load_bytecode(f)
+            finally:
+                f.close()
+
+    def dump_bytecode(self, bucket):
+        f = file(self._get_cache_filename(bucket), 'wb')
+        try:
+            bucket.write_bytecode(f)
+        finally:
+            f.close()
+
+    def clear(self):
+        # imported lazily here because google app-engine doesn't support
+        # write access on the file system and the function does not exist
+        # normally.
+        from os import remove
+        files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
+        for filename in files:
+            try:
+                remove(path.join(self.directory, filename))
+            except OSError:
+                pass
+
+
+class MemcachedBytecodeCache(BytecodeCache):
+    """This class implements a bytecode cache that uses a memcache cache for
+    storing the information.  It does not enforce a specific memcache library
+    (tummy's memcache or cmemcache) but will accept any class that provides
+    the minimal interface required.
+
+    Libraries compatible with this class:
+
+    -   `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache
+    -   `python-memcached <http://www.tummy.com/Community/software/python-memcached/>`_
+    -   `cmemcache <http://gijsbert.org/cmemcache/>`_
+
+    (Unfortunately the django cache interface is not compatible because it
+    does not support storing binary data, only unicode.  You can however pass
+    the underlying cache client to the bytecode cache which is available
+    as `django.core.cache.cache._client`.)
+
+    The minimal interface for the client passed to the constructor is this:
+
+    .. class:: MinimalClientInterface
+
+        .. method:: set(key, value[, timeout])
+
+            Stores the bytecode in the cache.  `value` is a string and
+            `timeout` the timeout of the key.  If timeout is not provided
+            a default timeout or no timeout should be assumed, if it's
+            provided it's an integer with the number of seconds the cache
+            item should exist.
+
+        .. method:: get(key)
+
+            Returns the value for the cache key.  If the item does not
+            exist in the cache the return value must be `None`.
+
+    The other arguments to the constructor are the prefix for all keys that
+    is added before the actual cache key and the timeout for the bytecode in
+    the cache system.  We recommend a high (or no) timeout.
+
+    This bytecode cache does not support clearing of used items in the cache.
+    The clear method is a no-operation function.
+    """
+
+    def __init__(self, client, prefix='jinja2/bytecode/', timeout=None):
+        self.client = client
+        self.prefix = prefix
+        self.timeout = timeout
+
+    def load_bytecode(self, bucket):
+        code = self.client.get(self.prefix + bucket.key)
+        if code is not None:
+            bucket.bytecode_from_string(code)
+
+    def dump_bytecode(self, bucket):
+        args = (self.prefix + bucket.key, bucket.bytecode_to_string())
+        if self.timeout is not None:
+            args += (self.timeout,)
+        self.client.set(*args)
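The docstring of MemcachedBytecodeCache above only spells out the minimal client interface.  A small sketch of how it can be satisfied (the dict-backed client below is hypothetical and purely illustrative; it also assumes this Jinja2 version's Environment constructor accepts a `bytecode_cache` argument, as its bytecode cache documentation describes):

    from jinja2 import Environment
    from jinja2.bccache import MemcachedBytecodeCache

    class DictBackedClient(object):
        """In-memory stand-in providing the minimal set/get interface."""
        def __init__(self):
            self._store = {}

        def get(self, key):
            return self._store.get(key)

        def set(self, key, value, timeout=None):
            self._store[key] = value

    cache = MemcachedBytecodeCache(DictBackedClient(), prefix='jinja2/bytecode/')
    env = Environment(bytecode_cache=cache)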
diff --git a/scripts/jinja2/compiler.py b/scripts/jinja2/compiler.py
new file mode 100644 (file)
index 0000000..5074a34
--- /dev/null
@@ -0,0 +1,1443 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.compiler
+    ~~~~~~~~~~~~~~~
+
+    Compiles nodes into python code.
+
+    :copyright: Copyright 2008 by Armin Ronacher.
+    :license: BSD.
+"""
+from cStringIO import StringIO
+from itertools import chain
+from jinja2 import nodes
+from jinja2.visitor import NodeVisitor, NodeTransformer
+from jinja2.exceptions import TemplateAssertionError
+from jinja2.utils import Markup, concat, escape, is_python_keyword
+
+
+operators = {
+    'eq':       '==',
+    'ne':       '!=',
+    'gt':       '>',
+    'gteq':     '>=',
+    'lt':       '<',
+    'lteq':     '<=',
+    'in':       'in',
+    'notin':    'not in'
+}
+
+try:
+    exec '(0 if 0 else 0)'
+except SyntaxError:
+    have_condexpr = False
+else:
+    have_condexpr = True
+
+
+def generate(node, environment, name, filename, stream=None):
+    """Generate the python source for a node tree."""
+    if not isinstance(node, nodes.Template):
+        raise TypeError('Can\'t compile non template nodes')
+    generator = CodeGenerator(environment, name, filename, stream)
+    generator.visit(node)
+    if stream is None:
+        return generator.stream.getvalue()
+
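A minimal sketch of how generate() is typically driven (assumed usage, not part of this file): parse a template into a node tree with Environment.parse() and emit the Python source for it.

    from jinja2 import Environment
    from jinja2.compiler import generate

    env = Environment()
    ast = env.parse(u'Hello {{ name }}!')
    print generate(ast, env, 'example', 'example')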
+
+def has_safe_repr(value):
+    """Does the node have a safe representation?"""
+    if value is None or value is NotImplemented or value is Ellipsis:
+        return True
+    if isinstance(value, (bool, int, long, float, complex, basestring,
+                          xrange, Markup)):
+        return True
+    if isinstance(value, (tuple, list, set, frozenset)):
+        for item in value:
+            if not has_safe_repr(item):
+                return False
+        return True
+    elif isinstance(value, dict):
+        for key, value in value.iteritems():
+            if not has_safe_repr(key):
+                return False
+            if not has_safe_repr(value):
+                return False
+        return True
+    return False
+
+
+def find_undeclared(nodes, names):
+    """Check if the names passed are accessed undeclared.  The return value
+    is a set of all the undeclared names from the sequence of names found.
+    """
+    visitor = UndeclaredNameVisitor(names)
+    try:
+        for node in nodes:
+            visitor.visit(node)
+    except VisitorExit:
+        pass
+    return visitor.undeclared
+
+
+class Identifiers(object):
+    """Tracks the status of identifiers in frames."""
+
+    def __init__(self):
+        # variables that are known to be declared (probably from outer
+        # frames or because they are special for the frame)
+        self.declared = set()
+
+        # undeclared variables from outer scopes
+        self.outer_undeclared = set()
+
+        # names that are accessed without being explicitly declared by
+        # this one or any of the outer scopes.  Names can appear both in
+        # declared and undeclared.
+        self.undeclared = set()
+
+        # names that are declared locally
+        self.declared_locally = set()
+
+        # names that are declared by parameters
+        self.declared_parameter = set()
+
+    def add_special(self, name):
+        """Register a special name like `loop`."""
+        self.undeclared.discard(name)
+        self.declared.add(name)
+
+    def is_declared(self, name, local_only=False):
+        """Check if a name is declared in this or an outer scope."""
+        if name in self.declared_locally or name in self.declared_parameter:
+            return True
+        if local_only:
+            return False
+        return name in self.declared
+
+    def find_shadowed(self, extra=()):
+        """Find all the shadowed names.  extra is an iterable of variables
+        that may be defined with `add_special` which may occur scoped.
+        """
+        return (self.declared | self.outer_undeclared) & \
+               (self.declared_locally | self.declared_parameter) | \
+               set(x for x in extra if self.is_declared(x))
+
+
+class Frame(object):
+    """Holds compile time information for us."""
+
+    def __init__(self, parent=None):
+        self.identifiers = Identifiers()
+
+        # a toplevel frame is the root + soft frames such as if conditions.
+        self.toplevel = False
+
+        # the root frame is basically just the outermost frame, so no if
+        # conditions.  This information is used to optimize inheritance
+        # situations.
+        self.rootlevel = False
+
+        # in some dynamic inheritance situations the compiler needs to add
+        # write tests around output statements.
+        self.require_output_check = parent and parent.require_output_check
+
+        # inside some tags we are using a buffer rather than yield statements.
+        # this for example affects {% filter %} or {% macro %}.  If a frame
+        # is buffered this variable points to the name of the list used as
+        # buffer.
+        self.buffer = None
+
+        # the name of the block we're in, otherwise None.
+        self.block = parent and parent.block or None
+
+        # the parent of this frame
+        self.parent = parent
+
+        if parent is not None:
+            self.identifiers.declared.update(
+                parent.identifiers.declared |
+                parent.identifiers.declared_locally |
+                parent.identifiers.declared_parameter |
+                parent.identifiers.undeclared
+            )
+            self.identifiers.outer_undeclared.update(
+                parent.identifiers.undeclared -
+                self.identifiers.declared
+            )
+            self.buffer = parent.buffer
+
+    def copy(self):
+        """Create a copy of the current one."""
+        rv = object.__new__(self.__class__)
+        rv.__dict__.update(self.__dict__)
+        rv.identifiers = object.__new__(self.identifiers.__class__)
+        rv.identifiers.__dict__.update(self.identifiers.__dict__)
+        return rv
+
+    def inspect(self, nodes, hard_scope=False):
+        """Walk the node and check for identifiers.  If the scope is hard (eg:
+        enforce on a python level) overrides from outer scopes are tracked
+        differently.
+        """
+        visitor = FrameIdentifierVisitor(self.identifiers, hard_scope)
+        for node in nodes:
+            visitor.visit(node)
+
+    def inner(self):
+        """Return an inner frame."""
+        return Frame(self)
+
+    def soft(self):
+        """Return a soft frame.  A soft frame may not be modified as
+        standalone thing as it shares the resources with the frame it
+        was created of, but it's not a rootlevel frame any longer.
+        """
+        rv = self.copy()
+        rv.rootlevel = False
+        return rv
+
+    __copy__ = copy
+
+
+class VisitorExit(RuntimeError):
+    """Exception used by the `UndeclaredNameVisitor` to signal a stop."""
+
+
+class DependencyFinderVisitor(NodeVisitor):
+    """A visitor that collects filter and test calls."""
+
+    def __init__(self):
+        self.filters = set()
+        self.tests = set()
+
+    def visit_Filter(self, node):
+        self.generic_visit(node)
+        self.filters.add(node.name)
+
+    def visit_Test(self, node):
+        self.generic_visit(node)
+        self.tests.add(node.name)
+
+    def visit_Block(self, node):
+        """Stop visiting at blocks."""
+
+
+class UndeclaredNameVisitor(NodeVisitor):
+    """A visitor that checks if a name is accessed without being
+    declared.  This is different from the frame visitor as it will
+    not stop at closure frames.
+    """
+
+    def __init__(self, names):
+        self.names = set(names)
+        self.undeclared = set()
+
+    def visit_Name(self, node):
+        if node.ctx == 'load' and node.name in self.names:
+            self.undeclared.add(node.name)
+            if self.undeclared == self.names:
+                raise VisitorExit()
+        else:
+            self.names.discard(node.name)
+
+    def visit_Block(self, node):
+        """Stop visiting a blocks."""
+
+
+class FrameIdentifierVisitor(NodeVisitor):
+    """A visitor for `Frame.inspect`."""
+
+    def __init__(self, identifiers, hard_scope):
+        self.identifiers = identifiers
+        self.hard_scope = hard_scope
+
+    def visit_Name(self, node):
+        """All assignments to names go through this function."""
+        if node.ctx == 'store':
+            self.identifiers.declared_locally.add(node.name)
+        elif node.ctx == 'param':
+            self.identifiers.declared_parameter.add(node.name)
+        elif node.ctx == 'load' and not \
+             self.identifiers.is_declared(node.name, self.hard_scope):
+            self.identifiers.undeclared.add(node.name)
+
+    def visit_Macro(self, node):
+        self.identifiers.declared_locally.add(node.name)
+
+    def visit_Import(self, node):
+        self.generic_visit(node)
+        self.identifiers.declared_locally.add(node.target)
+
+    def visit_FromImport(self, node):
+        self.generic_visit(node)
+        for name in node.names:
+            if isinstance(name, tuple):
+                self.identifiers.declared_locally.add(name[1])
+            else:
+                self.identifiers.declared_locally.add(name)
+
+    def visit_Assign(self, node):
+        """Visit assignments in the correct order."""
+        self.visit(node.node)
+        self.visit(node.target)
+
+    def visit_For(self, node):
+        """Visiting stops at for blocks.  However the block sequence
+        is visited as part of the outer scope.
+        """
+        self.visit(node.iter)
+
+    def visit_CallBlock(self, node):
+        for child in node.iter_child_nodes(exclude=('body',)):
+            self.visit(child)
+
+    def visit_FilterBlock(self, node):
+        self.visit(node.filter)
+
+    def visit_Block(self, node):
+        """Stop visiting at blocks."""
+
+
+class CompilerExit(Exception):
+    """Raised if the compiler encountered a situation where it just
+    doesn't make sense to further process the code.  Any block that
+    raises such an exception is not further processed.
+    """
+
+
+class CodeGenerator(NodeVisitor):
+
+    def __init__(self, environment, name, filename, stream=None):
+        if stream is None:
+            stream = StringIO()
+        self.environment = environment
+        self.name = name
+        self.filename = filename
+        self.stream = stream
+
+        # aliases for imports
+        self.import_aliases = {}
+
+        # a registry for all blocks.  Because blocks are moved out
+        # into the global python scope they are registered here
+        self.blocks = {}
+
+        # the number of extends statements so far
+        self.extends_so_far = 0
+
+        # some templates have a rootlevel extends.  In this case we
+        # can safely assume that we're a child template and do some
+        # more optimizations.
+        self.has_known_extends = False
+
+        # the current line number
+        self.code_lineno = 1
+
+        # registry of all filters and tests (global, not block local)
+        self.tests = {}
+        self.filters = {}
+
+        # the debug information
+        self.debug_info = []
+        self._write_debug_info = None
+
+        # the number of new lines before the next write()
+        self._new_lines = 0
+
+        # the line number of the last written statement
+        self._last_line = 0
+
+        # true if nothing was written so far.
+        self._first_write = True
+
+        # used by the `temporary_identifier` method to get new
+        # unique, temporary identifier
+        self._last_identifier = 0
+
+        # the current indentation
+        self._indentation = 0
+
+    # -- Various compilation helpers
+
+    def fail(self, msg, lineno):
+        """Fail with a `TemplateAssertionError`."""
+        raise TemplateAssertionError(msg, lineno, self.name, self.filename)
+
+    def temporary_identifier(self):
+        """Get a new unique identifier."""
+        self._last_identifier += 1
+        return 't_%d' % self._last_identifier
+
+    def buffer(self, frame):
+        """Enable buffering for the frame from that point onwards."""
+        frame.buffer = self.temporary_identifier()
+        self.writeline('%s = []' % frame.buffer)
+
+    def return_buffer_contents(self, frame):
+        """Return the buffer contents of the frame."""
+        if self.environment.autoescape:
+            self.writeline('return Markup(concat(%s))' % frame.buffer)
+        else:
+            self.writeline('return concat(%s)' % frame.buffer)
+
+    def indent(self):
+        """Indent by one."""
+        self._indentation += 1
+
+    def outdent(self, step=1):
+        """Outdent by step."""
+        self._indentation -= step
+
+    def start_write(self, frame, node=None):
+        """Yield or write into the frame buffer."""
+        if frame.buffer is None:
+            self.writeline('yield ', node)
+        else:
+            self.writeline('%s.append(' % frame.buffer, node)
+
+    def end_write(self, frame):
+        """End the writing process started by `start_write`."""
+        if frame.buffer is not None:
+            self.write(')')
+
+    def simple_write(self, s, frame, node=None):
+        """Simple shortcut for start_write + write + end_write."""
+        self.start_write(frame, node)
+        self.write(s)
+        self.end_write(frame)
+
+    def blockvisit(self, nodes, frame):
+        """Visit a list of nodes as block in a frame.  If the current frame
+        is no buffer a dummy ``if 0: yield None`` is written automatically
+        unless the force_generator parameter is set to False.
+        """
+        if frame.buffer is None:
+            self.writeline('if 0: yield None')
+        else:
+            self.writeline('pass')
+        try:
+            for node in nodes:
+                self.visit(node, frame)
+        except CompilerExit:
+            pass
+
+    def write(self, x):
+        """Write a string into the output stream."""
+        if self._new_lines:
+            if not self._first_write:
+                self.stream.write('\n' * self._new_lines)
+                self.code_lineno += self._new_lines
+                if self._write_debug_info is not None:
+                    self.debug_info.append((self._write_debug_info,
+                                            self.code_lineno))
+                    self._write_debug_info = None
+            self._first_write = False
+            self.stream.write('    ' * self._indentation)
+            self._new_lines = 0
+        self.stream.write(x)
+
+    def writeline(self, x, node=None, extra=0):
+        """Combination of newline and write."""
+        self.newline(node, extra)
+        self.write(x)
+
+    def newline(self, node=None, extra=0):
+        """Add one or more newlines before the next write."""
+        self._new_lines = max(self._new_lines, 1 + extra)
+        if node is not None and node.lineno != self._last_line:
+            self._write_debug_info = node.lineno
+            self._last_line = node.lineno
+
+    def signature(self, node, frame, extra_kwargs=None):
+        """Writes a function call to the stream for the current node.
+        A leading comma is added automatically.  The extra keyword
+        arguments may not include python keywords otherwise a syntax
+        error could occur.  The extra keyword arguments should be given
+        as python dict.
+        """
+        # if any of the given keyword arguments is a python keyword
+        # we have to make sure that no invalid call is created.
+        kwarg_workaround = False
+        for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
+            if is_python_keyword(kwarg):
+                kwarg_workaround = True
+                break
+
+        for arg in node.args:
+            self.write(', ')
+            self.visit(arg, frame)
+
+        if not kwarg_workaround:
+            for kwarg in node.kwargs:
+                self.write(', ')
+                self.visit(kwarg, frame)
+            if extra_kwargs is not None:
+                for key, value in extra_kwargs.iteritems():
+                    self.write(', %s=%s' % (key, value))
+        if node.dyn_args:
+            self.write(', *')
+            self.visit(node.dyn_args, frame)
+
+        if kwarg_workaround:
+            if node.dyn_kwargs is not None:
+                self.write(', **dict({')
+            else:
+                self.write(', **{')
+            for kwarg in node.kwargs:
+                self.write('%r: ' % kwarg.key)
+                self.visit(kwarg.value, frame)
+                self.write(', ')
+            if extra_kwargs is not None:
+                for key, value in extra_kwargs.iteritems():
+                    self.write('%r: %s, ' % (key, value))
+            if node.dyn_kwargs is not None:
+                self.write('}, **')
+                self.visit(node.dyn_kwargs, frame)
+                self.write(')')
+            else:
+                self.write('}')
+
+        elif node.dyn_kwargs is not None:
+            self.write(', **')
+            self.visit(node.dyn_kwargs, frame)
+
+    def pull_locals(self, frame):
+        """Pull all the references identifiers into the local scope."""
+        for name in frame.identifiers.undeclared:
+            self.writeline('l_%s = context.resolve(%r)' % (name, name))
+
+    def pull_dependencies(self, nodes):
+        """Pull all the dependencies."""
+        visitor = DependencyFinderVisitor()
+        for node in nodes:
+            visitor.visit(node)
+        for dependency in 'filters', 'tests':
+            mapping = getattr(self, dependency)
+            for name in getattr(visitor, dependency):
+                if name not in mapping:
+                    mapping[name] = self.temporary_identifier()
+                self.writeline('%s = environment.%s[%r]' %
+                               (mapping[name], dependency, name))
+
+    def push_scope(self, frame, extra_vars=()):
+        """This function returns all the shadowed variables in a dict
+        in the form name: alias and will write the required assignments
+        into the current scope.  No indentation takes place.
+
+        This also predefines locally declared variables from the loop
+        body by assigning them to `missing`, because under some
+        circumstances they may be referenced before they are assigned.
+
+        `extra_vars` is passed to `Identifiers.find_shadowed`.
+        """
+        aliases = {}
+        for name in frame.identifiers.find_shadowed(extra_vars):
+            aliases[name] = ident = self.temporary_identifier()
+            self.writeline('%s = l_%s' % (ident, name))
+        to_declare = set()
+        for name in frame.identifiers.declared_locally:
+            if name not in aliases:
+                to_declare.add('l_' + name)
+        if to_declare:
+            self.writeline(' = '.join(to_declare) + ' = missing')
+        return aliases
+
+    def pop_scope(self, aliases, frame):
+        """Restore all aliases and delete unused variables."""
+        for name, alias in aliases.iteritems():
+            self.writeline('l_%s = %s' % (name, alias))
+        to_delete = set()
+        for name in frame.identifiers.declared_locally:
+            if name not in aliases:
+                to_delete.add('l_' + name)
+        if to_delete:
+            self.writeline('del ' + ', '.join(to_delete))
+
+    def function_scoping(self, node, frame, children=None,
+                         find_special=True):
+        """In Jinja a few statements require the help of anonymous
+        functions.  Those are currently macros and call blocks and in
+        the future also recursive loops.  As there is currently a
+        technical limitation that doesn't allow reading and writing a
+        variable in a scope where the initial value is coming from an
+        outer scope, this function fails with a common error message.
+        Additionally the frame passed is modified so that the arguments
+        are collected and callers are looked up.
+
+        This will return the modified frame.
+        """
+        # we have to iterate twice over it, make sure that works
+        if children is None:
+            children = node.iter_child_nodes()
+        children = list(children)
+        func_frame = frame.inner()
+        func_frame.inspect(children, hard_scope=True)
+
+        # variables that are undeclared (accessed before declaration) and
+        # declared locally *and* part of an outside scope raise a template
+        # assertion error. Reason: we can't generate reasonable code from
+        # it without aliasing all the variables.  XXX: alias them ^^
+        overriden_closure_vars = (
+            func_frame.identifiers.undeclared &
+            func_frame.identifiers.declared &
+            (func_frame.identifiers.declared_locally |
+             func_frame.identifiers.declared_parameter)
+        )
+        if overriden_closure_vars:
+            self.fail('It\'s not possible to set and access variables '
+                      'derived from an outer scope! (affects: %s)' %
+                      ', '.join(sorted(overriden_closure_vars)), node.lineno)
+
+        # remove variables from a closure from the frame's undeclared
+        # identifiers.
+        func_frame.identifiers.undeclared -= (
+            func_frame.identifiers.undeclared &
+            func_frame.identifiers.declared
+        )
+
+        # no special variables for this scope, abort early
+        if not find_special:
+            return func_frame
+
+        func_frame.accesses_kwargs = False
+        func_frame.accesses_varargs = False
+        func_frame.accesses_caller = False
+        func_frame.arguments = args = ['l_' + x.name for x in node.args]
+
+        undeclared = find_undeclared(children, ('caller', 'kwargs', 'varargs'))
+
+        if 'caller' in undeclared:
+            func_frame.accesses_caller = True
+            func_frame.identifiers.add_special('caller')
+            args.append('l_caller')
+        if 'kwargs' in undeclared:
+            func_frame.accesses_kwargs = True
+            func_frame.identifiers.add_special('kwargs')
+            args.append('l_kwargs')
+        if 'varargs' in undeclared:
+            func_frame.accesses_varargs = True
+            func_frame.identifiers.add_special('varargs')
+            args.append('l_varargs')
+        return func_frame
+
+    def macro_body(self, node, frame, children=None):
+        """Dump the function def of a macro or call block."""
+        frame = self.function_scoping(node, frame, children)
+        # macros are delayed, they never require output checks
+        frame.require_output_check = False
+        args = frame.arguments
+        self.writeline('def macro(%s):' % ', '.join(args), node)
+        self.indent()
+        self.buffer(frame)
+        self.pull_locals(frame)
+        self.blockvisit(node.body, frame)
+        self.return_buffer_contents(frame)
+        self.outdent()
+        return frame
+
+    def macro_def(self, node, frame):
+        """Dump the macro definition for the def created by macro_body."""
+        arg_tuple = ', '.join(repr(x.name) for x in node.args)
+        name = getattr(node, 'name', None)
+        if len(node.args) == 1:
+            arg_tuple += ','
+        self.write('Macro(environment, macro, %r, (%s), (' %
+                   (name, arg_tuple))
+        for arg in node.defaults:
+            self.visit(arg, frame)
+            self.write(', ')
+        self.write('), %r, %r, %r)' % (
+            bool(frame.accesses_kwargs),
+            bool(frame.accesses_varargs),
+            bool(frame.accesses_caller)
+        ))
+
+    def position(self, node):
+        """Return a human readable position for the node."""
+        rv = 'line %d' % node.lineno
+        if self.name is not None:
+            rv += ' in ' + repr(self.name)
+        return rv
+
+    # -- Statement Visitors
+
+    def visit_Template(self, node, frame=None):
+        assert frame is None, 'no root frame allowed'
+        from jinja2.runtime import __all__ as exported
+        self.writeline('from __future__ import division')
+        self.writeline('from jinja2.runtime import ' + ', '.join(exported))
+
+        # do we have an extends tag at all?  If not, we can save some
+        # overhead by just not processing any inheritance code.
+        have_extends = node.find(nodes.Extends) is not None
+
+        # find all blocks
+        for block in node.find_all(nodes.Block):
+            if block.name in self.blocks:
+                self.fail('block %r defined twice' % block.name, block.lineno)
+            self.blocks[block.name] = block
+
+        # find all imports and import them
+        for import_ in node.find_all(nodes.ImportedName):
+            if import_.importname not in self.import_aliases:
+                imp = import_.importname
+                self.import_aliases[imp] = alias = self.temporary_identifier()
+                if '.' in imp:
+                    module, obj = imp.rsplit('.', 1)
+                    self.writeline('from %s import %s as %s' %
+                                   (module, obj, alias))
+                else:
+                    self.writeline('import %s as %s' % (imp, alias))
+
+        # add the load name
+        self.writeline('name = %r' % self.name)
+
+        # generate the root render function.
+        self.writeline('def root(context, environment=environment):', extra=1)
+
+        # process the root
+        frame = Frame()
+        frame.inspect(node.body)
+        frame.toplevel = frame.rootlevel = True
+        frame.require_output_check = have_extends and not self.has_known_extends
+        self.indent()
+        if have_extends:
+            self.writeline('parent_template = None')
+        if 'self' in find_undeclared(node.body, ('self',)):
+            frame.identifiers.add_special('self')
+            self.writeline('l_self = TemplateReference(context)')
+        self.pull_locals(frame)
+        self.pull_dependencies(node.body)
+        self.blockvisit(node.body, frame)
+        self.outdent()
+
+        # make sure that the parent root is called.
+        if have_extends:
+            if not self.has_known_extends:
+                self.indent()
+                self.writeline('if parent_template is not None:')
+            self.indent()
+            self.writeline('for event in parent_template.'
+                           'root_render_func(context):')
+            self.indent()
+            self.writeline('yield event')
+            self.outdent(2 + (not self.has_known_extends))
+
+        # at this point we now have the blocks collected and can visit them too.
+        for name, block in self.blocks.iteritems():
+            block_frame = Frame()
+            block_frame.inspect(block.body)
+            block_frame.block = name
+            self.writeline('def block_%s(context, environment=environment):'
+                           % name, block, 1)
+            self.indent()
+            undeclared = find_undeclared(block.body, ('self', 'super'))
+            if 'self' in undeclared:
+                block_frame.identifiers.add_special('self')
+                self.writeline('l_self = TemplateReference(context)')
+            if 'super' in undeclared:
+                block_frame.identifiers.add_special('super')
+                self.writeline('l_super = context.super(%r, '
+                               'block_%s)' % (name, name))
+            self.pull_locals(block_frame)
+            self.pull_dependencies(block.body)
+            self.blockvisit(block.body, block_frame)
+            self.outdent()
+
+        self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
+                                                   for x in self.blocks),
+                       extra=1)
+
+        # add a function that returns the debug info
+        self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
+                                                    in self.debug_info))
+
+    def visit_Block(self, node, frame):
+        """Call a block and register it for the template."""
+        level = 1
+        if frame.toplevel:
+            # if we know that we are a child template, there is no need to
+            # check if we are one
+            if self.has_known_extends:
+                return
+            if self.extends_so_far > 0:
+                self.writeline('if parent_template is None:')
+                self.indent()
+                level += 1
+        self.writeline('for event in context.blocks[%r][0](context):' %
+                       node.name, node)
+        self.indent()
+        self.simple_write('event', frame)
+        self.outdent(level)
+
+    def visit_Extends(self, node, frame):
+        """Calls the extender."""
+        if not frame.toplevel:
+            self.fail('cannot use extend from a non top-level scope',
+                      node.lineno)
+
+        # if the number of extends statements in general is zero so
+        # far, we don't have to add a check if something extended
+        # the template before this one.
+        if self.extends_so_far > 0:
+
+            # if we have a known extends we just add a template runtime
+            # error into the generated code.  We could catch that at compile
+            # time too, but it's preferable not to confuse users by throwing the
+            # same error at different times just "because we can".
+            if not self.has_known_extends:
+                self.writeline('if parent_template is not None:')
+                self.indent()
+            self.writeline('raise TemplateRuntimeError(%r)' %
+                           'extended multiple times')
+            self.outdent()
+
+            # if we have a known extends already we don't need that code here
+            # as we know that the template execution will end here.
+            if self.has_known_extends:
+                raise CompilerExit()
+
+        self.writeline('parent_template = environment.get_template(', node)
+        self.visit(node.template, frame)
+        self.write(', %r)' % self.name)
+        self.writeline('for name, parent_block in parent_template.'
+                       'blocks.iteritems():')
+        self.indent()
+        self.writeline('context.blocks.setdefault(name, []).'
+                       'append(parent_block)')
+        self.outdent()
+
+        # if this extends statement was in the root level we can take
+        # advantage of that information and simplify the generated code
+        # in the top level from this point onwards
+        if frame.rootlevel:
+            self.has_known_extends = True
+
+        # and now we have one more
+        self.extends_so_far += 1
+
+    def visit_Include(self, node, frame):
+        """Handles includes."""
+        if node.with_context:
+            self.writeline('template = environment.get_template(', node)
+            self.visit(node.template, frame)
+            self.write(', %r)' % self.name)
+            self.writeline('for event in template.root_render_func('
+                           'template.new_context(context.parent, True, '
+                           'locals())):')
+        else:
+            self.writeline('for event in environment.get_template(', node)
+            self.visit(node.template, frame)
+            self.write(', %r).module._body_stream:' %
+                       self.name)
+        self.indent()
+        self.simple_write('event', frame)
+        self.outdent()
+
+    def visit_Import(self, node, frame):
+        """Visit regular imports."""
+        self.writeline('l_%s = ' % node.target, node)
+        if frame.toplevel:
+            self.write('context.vars[%r] = ' % node.target)
+        self.write('environment.get_template(')
+        self.visit(node.template, frame)
+        self.write(', %r).' % self.name)
+        if node.with_context:
+            self.write('make_module(context.parent, True, locals())')
+        else:
+            self.write('module')
+        if frame.toplevel and not node.target.startswith('_'):
+            self.writeline('context.exported_vars.discard(%r)' % node.target)
+
+    def visit_FromImport(self, node, frame):
+        """Visit named imports."""
+        self.newline(node)
+        self.write('included_template = environment.get_template(')
+        self.visit(node.template, frame)
+        self.write(', %r).' % self.name)
+        if node.with_context:
+            self.write('make_module(context.parent, True)')
+        else:
+            self.write('module')
+
+        var_names = []
+        discarded_names = []
+        for name in node.names:
+            if isinstance(name, tuple):
+                name, alias = name
+            else:
+                alias = name
+            self.writeline('l_%s = getattr(included_template, '
+                           '%r, missing)' % (alias, name))
+            self.writeline('if l_%s is missing:' % alias)
+            self.indent()
+            self.writeline('l_%s = environment.undefined(%r %% '
+                           'included_template.__name__, '
+                           'name=%r)' %
+                           (alias, 'the template %%r (imported on %s) does '
+                           'not export the requested name %s' % (
+                                self.position(node),
+                                repr(name)
+                           ), name))
+            self.outdent()
+            if frame.toplevel:
+                var_names.append(alias)
+                if not alias.startswith('_'):
+                    discarded_names.append(alias)
+
+        if var_names:
+            if len(var_names) == 1:
+                name = var_names[0]
+                self.writeline('context.vars[%r] = l_%s' % (name, name))
+            else:
+                self.writeline('context.vars.update({%s})' % ', '.join(
+                    '%r: l_%s' % (name, name) for name in var_names
+                ))
+        if discarded_names:
+            if len(discarded_names) == 1:
+                self.writeline('context.exported_vars.discard(%r)' %
+                               discarded_names[0])
+            else:
+                self.writeline('context.exported_vars.difference_'
+                               'update((%s))' % ', '.join(map(repr, discarded_names)))
+
+    def visit_For(self, node, frame):
+        # when calculating the nodes for the inner frame we have to exclude
+        # the iterator contents from it
+        children = node.iter_child_nodes(exclude=('iter',))
+        if node.recursive:
+            loop_frame = self.function_scoping(node, frame, children,
+                                               find_special=False)
+        else:
+            loop_frame = frame.inner()
+            loop_frame.inspect(children)
+
+        # try to figure out if we have an extended loop.  An extended loop
+        # is necessary if the loop is in recursive mode or if the special loop
+        # variable is accessed in the body.
+        extended_loop = node.recursive or 'loop' in \
+                        find_undeclared(node.iter_child_nodes(
+                            only=('body',)), ('loop',))
+
+        # if we don't have a recursive loop we have to find the shadowed
+        # variables at that point.  Because loops can be nested but the loop
+        # variable is a special one we have to enforce aliasing for it.
+        if not node.recursive:
+            aliases = self.push_scope(loop_frame, ('loop',))
+
+        # otherwise we set up a buffer and add a function def
+        else:
+            self.writeline('def loop(reciter, loop_render_func):', node)
+            self.indent()
+            self.buffer(loop_frame)
+            aliases = {}
+
+        # make sure the loop variable is a special one and raise a template
+        # assertion error if a loop tries to write to loop
+        if extended_loop:
+            loop_frame.identifiers.add_special('loop')
+        for name in node.find_all(nodes.Name):
+            if name.ctx == 'store' and name.name == 'loop':
+                self.fail('Can\'t assign to special loop variable '
+                          'in for-loop target', name.lineno)
+
+        self.pull_locals(loop_frame)
+        if node.else_:
+            iteration_indicator = self.temporary_identifier()
+            self.writeline('%s = 1' % iteration_indicator)
+
+        # Create a fake parent loop if the else or test section of a
+        # loop is accessing the special loop variable and no parent loop
+        # exists.
+        if 'loop' not in aliases and 'loop' in find_undeclared(
+           node.iter_child_nodes(only=('else_', 'test')), ('loop',)):
+            self.writeline("l_loop = environment.undefined(%r, name='loop')" %
+                ("'loop' is undefined. the filter section of a loop as well "
+                 "as the else block doesn't have access to the special 'loop'"
+                 " variable of the current loop.  Because there is no parent "
+                 "loop it's undefined.  Happened in loop on %s" %
+                 self.position(node)))
+
+        self.writeline('for ', node)
+        self.visit(node.target, loop_frame)
+        self.write(extended_loop and ', l_loop in LoopContext(' or ' in ')
+
+        # if we have an extended loop and a node test, we filter in the
+        # "outer frame".
+        if extended_loop and node.test is not None:
+            self.write('(')
+            self.visit(node.target, loop_frame)
+            self.write(' for ')
+            self.visit(node.target, loop_frame)
+            self.write(' in ')
+            if node.recursive:
+                self.write('reciter')
+            else:
+                self.visit(node.iter, loop_frame)
+            self.write(' if (')
+            test_frame = loop_frame.copy()
+            self.visit(node.test, test_frame)
+            self.write('))')
+
+        elif node.recursive:
+            self.write('reciter')
+        else:
+            self.visit(node.iter, loop_frame)
+
+        if node.recursive:
+            self.write(', recurse=loop_render_func):')
+        else:
+            self.write(extended_loop and '):' or ':')
+
+        # tests in not extended loops become a continue
+        if not extended_loop and node.test is not None:
+            self.indent()
+            self.writeline('if not ')
+            self.visit(node.test, loop_frame)
+            self.write(':')
+            self.indent()
+            self.writeline('continue')
+            self.outdent(2)
+
+        self.indent()
+        self.blockvisit(node.body, loop_frame)
+        if node.else_:
+            self.writeline('%s = 0' % iteration_indicator)
+        self.outdent()
+
+        if node.else_:
+            self.writeline('if %s:' % iteration_indicator)
+            self.indent()
+            self.blockvisit(node.else_, loop_frame)
+            self.outdent()
+
+        # reset the aliases if there are any.
+        self.pop_scope(aliases, loop_frame)
+
+        # if the node was recursive we have to return the buffer contents
+        # and start the iteration code
+        if node.recursive:
+            self.return_buffer_contents(loop_frame)
+            self.outdent()
+            self.start_write(frame, node)
+            self.write('loop(')
+            self.visit(node.iter, frame)
+            self.write(', loop)')
+            self.end_write(frame)
+
+    def visit_If(self, node, frame):
+        if_frame = frame.soft()
+        self.writeline('if ', node)
+        self.visit(node.test, if_frame)
+        self.write(':')
+        self.indent()
+        self.blockvisit(node.body, if_frame)
+        self.outdent()
+        if node.else_:
+            self.writeline('else:')
+            self.indent()
+            self.blockvisit(node.else_, if_frame)
+            self.outdent()
+
+    def visit_Macro(self, node, frame):
+        macro_frame = self.macro_body(node, frame)
+        self.newline()
+        if frame.toplevel:
+            if not node.name.startswith('_'):
+                self.write('context.exported_vars.add(%r)' % node.name)
+            self.writeline('context.vars[%r] = ' % node.name)
+        self.write('l_%s = ' % node.name)
+        self.macro_def(node, macro_frame)
+
+    def visit_CallBlock(self, node, frame):
+        children = node.iter_child_nodes(exclude=('call',))
+        call_frame = self.macro_body(node, frame, children)
+        self.writeline('caller = ')
+        self.macro_def(node, call_frame)
+        self.start_write(frame, node)
+        self.visit_Call(node.call, call_frame, forward_caller=True)
+        self.end_write(frame)
+
+    def visit_FilterBlock(self, node, frame):
+        filter_frame = frame.inner()
+        filter_frame.inspect(node.iter_child_nodes())
+        aliases = self.push_scope(filter_frame)
+        self.pull_locals(filter_frame)
+        self.buffer(filter_frame)
+        self.blockvisit(node.body, filter_frame)
+        self.start_write(frame, node)
+        self.visit_Filter(node.filter, filter_frame)
+        self.end_write(frame)
+        self.pop_scope(aliases, filter_frame)
+
+    def visit_ExprStmt(self, node, frame):
+        self.newline(node)
+        self.visit(node.node, frame)
+
+    def visit_Output(self, node, frame):
+        # if we have a known extends statement, we don't output anything
+        # if we are in a require_output_check section
+        if self.has_known_extends and frame.require_output_check:
+            return
+
+        if self.environment.finalize:
+            finalize = lambda x: unicode(self.environment.finalize(x))
+        else:
+            finalize = unicode
+
+        self.newline(node)
+
+        # if we are inside a frame that requires output checking, we do so
+        outdent_later = False
+        if frame.require_output_check:
+            self.writeline('if parent_template is None:')
+            self.indent()
+            outdent_later = True
+
+        # try to evaluate as many chunks as possible into a static
+        # string at compile time.
+        body = []
+        for child in node.nodes:
+            try:
+                const = child.as_const()
+            except nodes.Impossible:
+                body.append(child)
+                continue
+            try:
+                if self.environment.autoescape:
+                    if hasattr(const, '__html__'):
+                        const = const.__html__()
+                    else:
+                        const = escape(const)
+                const = finalize(const)
+            except:
+                # if something goes wrong here we evaluate the node
+                # at runtime for easier debugging
+                body.append(child)
+                continue
+            if body and isinstance(body[-1], list):
+                body[-1].append(const)
+            else:
+                body.append([const])
+
+        # if we have less than 3 nodes or a buffer we yield or extend/append
+        if len(body) < 3 or frame.buffer is not None:
+            if frame.buffer is not None:
+                # for one item we append, for more we extend
+                if len(body) == 1:
+                    self.writeline('%s.append(' % frame.buffer)
+                else:
+                    self.writeline('%s.extend((' % frame.buffer)
+                self.indent()
+            for item in body:
+                if isinstance(item, list):
+                    val = repr(concat(item))
+                    if frame.buffer is None:
+                        self.writeline('yield ' + val)
+                    else:
+                        self.writeline(val + ', ')
+                else:
+                    if frame.buffer is None:
+                        self.writeline('yield ', item)
+                    else:
+                        self.newline(item)
+                    close = 1
+                    if self.environment.autoescape:
+                        self.write('escape(')
+                    else:
+                        self.write('unicode(')
+                    if self.environment.finalize is not None:
+                        self.write('environment.finalize(')
+                        close += 1
+                    self.visit(item, frame)
+                    self.write(')' * close)
+                    if frame.buffer is not None:
+                        self.write(', ')
+            if frame.buffer is not None:
+                # close the open parentheses
+                self.outdent()
+                self.writeline(len(body) == 1 and ')' or '))')
+
+        # otherwise we create a format string as this is faster in that case
+        else:
+            format = []
+            arguments = []
+            for item in body:
+                if isinstance(item, list):
+                    format.append(concat(item).replace('%', '%%'))
+                else:
+                    format.append('%s')
+                    arguments.append(item)
+            self.writeline('yield ')
+            self.write(repr(concat(format)) + ' % (')
+            idx = -1
+            self.indent()
+            for argument in arguments:
+                self.newline(argument)
+                close = 0
+                if self.environment.autoescape:
+                    self.write('escape(')
+                    close += 1
+                if self.environment.finalize is not None:
+                    self.write('environment.finalize(')
+                    close += 1
+                self.visit(argument, frame)
+                self.write(')' * close + ', ')
+            self.outdent()
+            self.writeline(')')
+
+        if outdent_later:
+            self.outdent()
+
+    def visit_Assign(self, node, frame):
+        self.newline(node)
+        # toplevel assignments however go into the local namespace and
+        # the current template's context.  We create a copy of the frame
+        # here and add a set so that the Name visitor can add the assigned
+        # names here.
+        if frame.toplevel:
+            assignment_frame = frame.copy()
+            assignment_frame.assigned_names = set()
+        else:
+            assignment_frame = frame
+        self.visit(node.target, assignment_frame)
+        self.write(' = ')
+        self.visit(node.node, frame)
+
+        # make sure toplevel assignments are added to the context.
+        if frame.toplevel:
+            public_names = [x for x in assignment_frame.assigned_names
+                            if not x.startswith('_')]
+            if len(assignment_frame.assigned_names) == 1:
+                name = iter(assignment_frame.assigned_names).next()
+                self.writeline('context.vars[%r] = l_%s' % (name, name))
+            else:
+                self.writeline('context.vars.update({')
+                for idx, name in enumerate(assignment_frame.assigned_names):
+                    if idx:
+                        self.write(', ')
+                    self.write('%r: l_%s' % (name, name))
+                self.write('})')
+            if public_names:
+                if len(public_names) == 1:
+                    self.writeline('context.exported_vars.add(%r)' %
+                                   public_names[0])
+                else:
+                    self.writeline('context.exported_vars.update((%s))' %
+                                   ', '.join(map(repr, public_names)))
+
+    # -- Expression Visitors
+
+    def visit_Name(self, node, frame):
+        if node.ctx == 'store' and frame.toplevel:
+            frame.assigned_names.add(node.name)
+        self.write('l_' + node.name)
+
+    def visit_Const(self, node, frame):
+        val = node.value
+        if isinstance(val, float):
+            self.write(str(val))
+        else:
+            self.write(repr(val))
+
+    def visit_TemplateData(self, node, frame):
+        self.write(repr(node.as_const()))
+
+    def visit_Tuple(self, node, frame):
+        self.write('(')
+        idx = -1
+        for idx, item in enumerate(node.items):
+            if idx:
+                self.write(', ')
+            self.visit(item, frame)
+        self.write(idx == 0 and ',)' or ')')
+
+    def visit_List(self, node, frame):
+        self.write('[')
+        for idx, item in enumerate(node.items):
+            if idx:
+                self.write(', ')
+            self.visit(item, frame)
+        self.write(']')
+
+    def visit_Dict(self, node, frame):
+        self.write('{')
+        for idx, item in enumerate(node.items):
+            if idx:
+                self.write(', ')
+            self.visit(item.key, frame)
+            self.write(': ')
+            self.visit(item.value, frame)
+        self.write('}')
+
+    def binop(operator):
+        def visitor(self, node, frame):
+            self.write('(')
+            self.visit(node.left, frame)
+            self.write(' %s ' % operator)
+            self.visit(node.right, frame)
+            self.write(')')
+        return visitor
+
+    def uaop(operator):
+        def visitor(self, node, frame):
+            self.write('(' + operator)
+            self.visit(node.node, frame)
+            self.write(')')
+        return visitor
+
+    visit_Add = binop('+')
+    visit_Sub = binop('-')
+    visit_Mul = binop('*')
+    visit_Div = binop('/')
+    visit_FloorDiv = binop('//')
+    visit_Pow = binop('**')
+    visit_Mod = binop('%')
+    visit_And = binop('and')
+    visit_Or = binop('or')
+    visit_Pos = uaop('+')
+    visit_Neg = uaop('-')
+    visit_Not = uaop('not ')
+    del binop, uaop
+
+    def visit_Concat(self, node, frame):
+        self.write('%s((' % (self.environment.autoescape and
+                             'markup_join' or 'unicode_join'))
+        for arg in node.nodes:
+            self.visit(arg, frame)
+            self.write(', ')
+        self.write('))')
+
+    def visit_Compare(self, node, frame):
+        self.visit(node.expr, frame)
+        for op in node.ops:
+            self.visit(op, frame)
+
+    def visit_Operand(self, node, frame):
+        self.write(' %s ' % operators[node.op])
+        self.visit(node.expr, frame)
+
+    def visit_Getattr(self, node, frame):
+        self.write('environment.getattr(')
+        self.visit(node.node, frame)
+        self.write(', %r)' % node.attr)
+
+    def visit_Getitem(self, node, frame):
+        # slices bypass the environment getitem method.
+        if isinstance(node.arg, nodes.Slice):
+            self.visit(node.node, frame)
+            self.write('[')
+            self.visit(node.arg, frame)
+            self.write(']')
+        else:
+            self.write('environment.getitem(')
+            self.visit(node.node, frame)
+            self.write(', ')
+            self.visit(node.arg, frame)
+            self.write(')')
+
+    def visit_Slice(self, node, frame):
+        if node.start is not None:
+            self.visit(node.start, frame)
+        self.write(':')
+        if node.stop is not None:
+            self.visit(node.stop, frame)
+        if node.step is not None:
+            self.write(':')
+            self.visit(node.step, frame)
+
+    def visit_Filter(self, node, frame):
+        self.write(self.filters[node.name] + '(')
+        func = self.environment.filters.get(node.name)
+        if func is None:
+            self.fail('no filter named %r' % node.name, node.lineno)
+        if getattr(func, 'contextfilter', False):
+            self.write('context, ')
+        elif getattr(func, 'environmentfilter', False):
+            self.write('environment, ')
+
+        # if the filter node is None we are inside a filter block
+        # and want to write to the current buffer
+        if node.node is not None:
+            self.visit(node.node, frame)
+        elif self.environment.autoescape:
+            self.write('Markup(concat(%s))' % frame.buffer)
+        else:
+            self.write('concat(%s)' % frame.buffer)
+        self.signature(node, frame)
+        self.write(')')
+
+    def visit_Test(self, node, frame):
+        self.write(self.tests[node.name] + '(')
+        if node.name not in self.environment.tests:
+            self.fail('no test named %r' % node.name, node.lineno)
+        self.visit(node.node, frame)
+        self.signature(node, frame)
+        self.write(')')
+
+    def visit_CondExpr(self, node, frame):
+        def write_expr2():
+            if node.expr2 is not None:
+                return self.visit(node.expr2, frame)
+            self.write('environment.undefined(%r)' % ('the inline if-'
+                       'expression on %s evaluated to false and '
+                       'no else section was defined.' % self.position(node)))
+
+        if not have_condexpr:
+            self.write('((')
+            self.visit(node.test, frame)
+            self.write(') and (')
+            self.visit(node.expr1, frame)
+            self.write(',) or (')
+            write_expr2()
+            self.write(',))[0]')
+        else:
+            self.write('(')
+            self.visit(node.expr1, frame)
+            self.write(' if ')
+            self.visit(node.test, frame)
+            self.write(' else ')
+            write_expr2()
+            self.write(')')
+
+    def visit_Call(self, node, frame, forward_caller=False):
+        if self.environment.sandboxed:
+            self.write('environment.call(context, ')
+        else:
+            self.write('context.call(')
+        self.visit(node.node, frame)
+        extra_kwargs = forward_caller and {'caller': 'caller'} or None
+        self.signature(node, frame, extra_kwargs)
+        self.write(')')
+
+    def visit_Keyword(self, node, frame):
+        self.write(node.key + '=')
+        self.visit(node.value, frame)
+
+    # -- Unused nodes for extensions
+
+    def visit_MarkSafe(self, node, frame):
+        self.write('Markup(')
+        self.visit(node.expr, frame)
+        self.write(')')
+
+    def visit_EnvironmentAttribute(self, node, frame):
+        self.write('environment.' + node.name)
+
+    def visit_ExtensionAttribute(self, node, frame):
+        self.write('environment.extensions[%r].%s' % (node.identifier, node.name))
+
+    def visit_ImportedName(self, node, frame):
+        self.write(self.import_aliases[node.importname])
+
+    def visit_InternalName(self, node, frame):
+        self.write(node.name)
+
+    def visit_ContextReference(self, node, frame):
+        self.write('context')
+
+    def visit_Continue(self, node, frame):
+        self.writeline('continue', node)
+
+    def visit_Break(self, node, frame):
+        self.writeline('break', node)
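The visitor methods above only emit Python source text; the easiest way to inspect what they produce is to ask the environment for the generated source instead of a code object, via the `raw=True` flag of `Environment.compile()` added later in this commit. A minimal sketch, assuming the vendored package is importable as `jinja2`:

    # illustrative sketch, not part of the imported sources
    from jinja2 import Environment

    env = Environment()
    source = env.compile('{% for x in seq %}{{ x }}{% endfor %}', raw=True)
    # prints a generated module with a root() generator and l_-prefixed names
    print source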
diff --git a/scripts/jinja2/constants.py b/scripts/jinja2/constants.py
new file mode 100644 (file)
index 0000000..c471e79
--- /dev/null
@@ -0,0 +1,290 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja.constants
+    ~~~~~~~~~~~~~~~
+
+    Various constants.
+
+    :copyright: 2007 by Armin Ronacher.
+    :license: BSD, see LICENSE for more details.
+"""
+
+
+#: list of lorem ipsum words used by the lipsum() helper function
+LOREM_IPSUM_WORDS = u'''\
+a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
+auctor augue bibendum blandit class commodo condimentum congue consectetuer
+consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
+diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
+elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
+faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
+hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
+justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
+luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
+mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
+nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
+penatibus per pharetra phasellus placerat platea porta porttitor posuere
+potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
+ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
+sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
+tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
+ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
+viverra volutpat vulputate'''
+
+
+#: a dict of all html entities + apos
+HTML_ENTITIES = {
+    'AElig': 198,
+    'Aacute': 193,
+    'Acirc': 194,
+    'Agrave': 192,
+    'Alpha': 913,
+    'Aring': 197,
+    'Atilde': 195,
+    'Auml': 196,
+    'Beta': 914,
+    'Ccedil': 199,
+    'Chi': 935,
+    'Dagger': 8225,
+    'Delta': 916,
+    'ETH': 208,
+    'Eacute': 201,
+    'Ecirc': 202,
+    'Egrave': 200,
+    'Epsilon': 917,
+    'Eta': 919,
+    'Euml': 203,
+    'Gamma': 915,
+    'Iacute': 205,
+    'Icirc': 206,
+    'Igrave': 204,
+    'Iota': 921,
+    'Iuml': 207,
+    'Kappa': 922,
+    'Lambda': 923,
+    'Mu': 924,
+    'Ntilde': 209,
+    'Nu': 925,
+    'OElig': 338,
+    'Oacute': 211,
+    'Ocirc': 212,
+    'Ograve': 210,
+    'Omega': 937,
+    'Omicron': 927,
+    'Oslash': 216,
+    'Otilde': 213,
+    'Ouml': 214,
+    'Phi': 934,
+    'Pi': 928,
+    'Prime': 8243,
+    'Psi': 936,
+    'Rho': 929,
+    'Scaron': 352,
+    'Sigma': 931,
+    'THORN': 222,
+    'Tau': 932,
+    'Theta': 920,
+    'Uacute': 218,
+    'Ucirc': 219,
+    'Ugrave': 217,
+    'Upsilon': 933,
+    'Uuml': 220,
+    'Xi': 926,
+    'Yacute': 221,
+    'Yuml': 376,
+    'Zeta': 918,
+    'aacute': 225,
+    'acirc': 226,
+    'acute': 180,
+    'aelig': 230,
+    'agrave': 224,
+    'alefsym': 8501,
+    'alpha': 945,
+    'amp': 38,
+    'and': 8743,
+    'ang': 8736,
+    'apos': 39,
+    'aring': 229,
+    'asymp': 8776,
+    'atilde': 227,
+    'auml': 228,
+    'bdquo': 8222,
+    'beta': 946,
+    'brvbar': 166,
+    'bull': 8226,
+    'cap': 8745,
+    'ccedil': 231,
+    'cedil': 184,
+    'cent': 162,
+    'chi': 967,
+    'circ': 710,
+    'clubs': 9827,
+    'cong': 8773,
+    'copy': 169,
+    'crarr': 8629,
+    'cup': 8746,
+    'curren': 164,
+    'dArr': 8659,
+    'dagger': 8224,
+    'darr': 8595,
+    'deg': 176,
+    'delta': 948,
+    'diams': 9830,
+    'divide': 247,
+    'eacute': 233,
+    'ecirc': 234,
+    'egrave': 232,
+    'empty': 8709,
+    'emsp': 8195,
+    'ensp': 8194,
+    'epsilon': 949,
+    'equiv': 8801,
+    'eta': 951,
+    'eth': 240,
+    'euml': 235,
+    'euro': 8364,
+    'exist': 8707,
+    'fnof': 402,
+    'forall': 8704,
+    'frac12': 189,
+    'frac14': 188,
+    'frac34': 190,
+    'frasl': 8260,
+    'gamma': 947,
+    'ge': 8805,
+    'gt': 62,
+    'hArr': 8660,
+    'harr': 8596,
+    'hearts': 9829,
+    'hellip': 8230,
+    'iacute': 237,
+    'icirc': 238,
+    'iexcl': 161,
+    'igrave': 236,
+    'image': 8465,
+    'infin': 8734,
+    'int': 8747,
+    'iota': 953,
+    'iquest': 191,
+    'isin': 8712,
+    'iuml': 239,
+    'kappa': 954,
+    'lArr': 8656,
+    'lambda': 955,
+    'lang': 9001,
+    'laquo': 171,
+    'larr': 8592,
+    'lceil': 8968,
+    'ldquo': 8220,
+    'le': 8804,
+    'lfloor': 8970,
+    'lowast': 8727,
+    'loz': 9674,
+    'lrm': 8206,
+    'lsaquo': 8249,
+    'lsquo': 8216,
+    'lt': 60,
+    'macr': 175,
+    'mdash': 8212,
+    'micro': 181,
+    'middot': 183,
+    'minus': 8722,
+    'mu': 956,
+    'nabla': 8711,
+    'nbsp': 160,
+    'ndash': 8211,
+    'ne': 8800,
+    'ni': 8715,
+    'not': 172,
+    'notin': 8713,
+    'nsub': 8836,
+    'ntilde': 241,
+    'nu': 957,
+    'oacute': 243,
+    'ocirc': 244,
+    'oelig': 339,
+    'ograve': 242,
+    'oline': 8254,
+    'omega': 969,
+    'omicron': 959,
+    'oplus': 8853,
+    'or': 8744,
+    'ordf': 170,
+    'ordm': 186,
+    'oslash': 248,
+    'otilde': 245,
+    'otimes': 8855,
+    'ouml': 246,
+    'para': 182,
+    'part': 8706,
+    'permil': 8240,
+    'perp': 8869,
+    'phi': 966,
+    'pi': 960,
+    'piv': 982,
+    'plusmn': 177,
+    'pound': 163,
+    'prime': 8242,
+    'prod': 8719,
+    'prop': 8733,
+    'psi': 968,
+    'quot': 34,
+    'rArr': 8658,
+    'radic': 8730,
+    'rang': 9002,
+    'raquo': 187,
+    'rarr': 8594,
+    'rceil': 8969,
+    'rdquo': 8221,
+    'real': 8476,
+    'reg': 174,
+    'rfloor': 8971,
+    'rho': 961,
+    'rlm': 8207,
+    'rsaquo': 8250,
+    'rsquo': 8217,
+    'sbquo': 8218,
+    'scaron': 353,
+    'sdot': 8901,
+    'sect': 167,
+    'shy': 173,
+    'sigma': 963,
+    'sigmaf': 962,
+    'sim': 8764,
+    'spades': 9824,
+    'sub': 8834,
+    'sube': 8838,
+    'sum': 8721,
+    'sup': 8835,
+    'sup1': 185,
+    'sup2': 178,
+    'sup3': 179,
+    'supe': 8839,
+    'szlig': 223,
+    'tau': 964,
+    'there4': 8756,
+    'theta': 952,
+    'thetasym': 977,
+    'thinsp': 8201,
+    'thorn': 254,
+    'tilde': 732,
+    'times': 215,
+    'trade': 8482,
+    'uArr': 8657,
+    'uacute': 250,
+    'uarr': 8593,
+    'ucirc': 251,
+    'ugrave': 249,
+    'uml': 168,
+    'upsih': 978,
+    'upsilon': 965,
+    'uuml': 252,
+    'weierp': 8472,
+    'xi': 958,
+    'yacute': 253,
+    'yen': 165,
+    'yuml': 255,
+    'zeta': 950,
+    'zwj': 8205,
+    'zwnj': 8204
+}
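HTML_ENTITIES maps entity names (plus `apos`) to Unicode code points. A small sketch of how such a table can resolve an entity reference; the helper name here is made up for illustration, the real consumers live elsewhere in the package:

    # illustrative sketch only
    from jinja2.constants import HTML_ENTITIES

    def resolve_entity(name):
        """Return the character for an entity name, or None if unknown."""
        codepoint = HTML_ENTITIES.get(name)
        if codepoint is None:
            return None
        return unichr(codepoint)

    assert resolve_entity('amp') == u'&'
    assert resolve_entity('nbsp') == u'\xa0'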
diff --git a/scripts/jinja2/debug.py b/scripts/jinja2/debug.py
new file mode 100644 (file)
index 0000000..53dac4d
--- /dev/null
@@ -0,0 +1,173 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.debug
+    ~~~~~~~~~~~~
+
+    Implements the debug interface for Jinja.  This module does some pretty
+    ugly stuff with the Python traceback system in order to achieve tracebacks
+    with correct line numbers, locals and contents.
+
+    :copyright: Copyright 2008 by Armin Ronacher.
+    :license: BSD.
+"""
+import sys
+from jinja2.utils import CodeType
+
+
+def translate_exception(exc_info):
+    """If passed an exc_info it will automatically rewrite the exceptions
+    all the way down to the correct line numbers and frames.
+    """
+    result_tb = prev_tb = None
+    initial_tb = tb = exc_info[2].tb_next
+
+    while tb is not None:
+        template = tb.tb_frame.f_globals.get('__jinja_template__')
+        if template is not None:
+            lineno = template.get_corresponding_lineno(tb.tb_lineno)
+            tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
+                               lineno, prev_tb)[2]
+        if result_tb is None:
+            result_tb = tb
+        prev_tb = tb
+        tb = tb.tb_next
+
+    return exc_info[:2] + (result_tb or initial_tb,)
+
+
+def fake_exc_info(exc_info, filename, lineno, tb_back=None):
+    """Helper for `translate_exception`."""
+    exc_type, exc_value, tb = exc_info
+
+    # figure the real context out
+    real_locals = tb.tb_frame.f_locals.copy()
+    ctx = real_locals.get('context')
+    if ctx:
+        locals = ctx.get_all()
+    else:
+        locals = {}
+    for name, value in real_locals.iteritems():
+        if name.startswith('l_'):
+            locals[name[2:]] = value
+
+    # if there is a local called __jinja_exception__, we get
+    # rid of it to not break the debug functionality.
+    locals.pop('__jinja_exception__', None)
+
+    # assemble the fake globals we need
+    globals = {
+        '__name__':             filename,
+        '__file__':             filename,
+        '__jinja_exception__':  exc_info[:2]
+    }
+
+    # and fake the exception
+    code = compile('\n' * (lineno - 1) + 'raise __jinja_exception__[0], ' +
+                   '__jinja_exception__[1]', filename, 'exec')
+
+    # if it's possible, change the name of the code.  This won't work
+    # on some python environments such as google appengine
+    try:
+        function = tb.tb_frame.f_code.co_name
+        if function == 'root':
+            location = 'top-level template code'
+        elif function.startswith('block_'):
+            location = 'block "%s"' % function[6:]
+        else:
+            location = 'template'
+        code = CodeType(0, code.co_nlocals, code.co_stacksize,
+                        code.co_flags, code.co_code, code.co_consts,
+                        code.co_names, code.co_varnames, filename,
+                        location, code.co_firstlineno,
+                        code.co_lnotab, (), ())
+    except:
+        pass
+
+    # execute the code and catch the new traceback
+    try:
+        exec code in globals, locals
+    except:
+        exc_info = sys.exc_info()
+        new_tb = exc_info[2].tb_next
+
+    # now we can patch the exc info accordingly
+    if tb_set_next is not None:
+        if tb_back is not None:
+            tb_set_next(tb_back, new_tb)
+        if tb is not None:
+            tb_set_next(new_tb, tb.tb_next)
+
+    # return without this frame
+    return exc_info[:2] + (new_tb,)
+
+
+def _init_ugly_crap():
+    """This function implements a few ugly things so that we can patch the
+    traceback objects.  The function returned allows resetting `tb_next` on
+    any python traceback object.
+    """
+    import ctypes
+    from types import TracebackType
+
+    # figure out the size of _Py_ssize_t
+    if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
+        _Py_ssize_t = ctypes.c_int64
+    else:
+        _Py_ssize_t = ctypes.c_int
+
+    # regular python
+    class _PyObject(ctypes.Structure):
+        pass
+    _PyObject._fields_ = [
+        ('ob_refcnt', _Py_ssize_t),
+        ('ob_type', ctypes.POINTER(_PyObject))
+    ]
+
+    # python with trace
+    if object.__basicsize__ != ctypes.sizeof(_PyObject):
+        class _PyObject(ctypes.Structure):
+            pass
+        _PyObject._fields_ = [
+            ('_ob_next', ctypes.POINTER(_PyObject)),
+            ('_ob_prev', ctypes.POINTER(_PyObject)),
+            ('ob_refcnt', _Py_ssize_t),
+            ('ob_type', ctypes.POINTER(_PyObject))
+        ]
+
+    class _Traceback(_PyObject):
+        pass
+    _Traceback._fields_ = [
+        ('tb_next', ctypes.POINTER(_Traceback)),
+        ('tb_frame', ctypes.POINTER(_PyObject)),
+        ('tb_lasti', ctypes.c_int),
+        ('tb_lineno', ctypes.c_int)
+    ]
+
+    def tb_set_next(tb, next):
+        """Set the tb_next attribute of a traceback object."""
+        if not (isinstance(tb, TracebackType) and
+                (next is None or isinstance(next, TracebackType))):
+            raise TypeError('tb_set_next arguments must be traceback objects')
+        obj = _Traceback.from_address(id(tb))
+        if tb.tb_next is not None:
+            old = _Traceback.from_address(id(tb.tb_next))
+            old.ob_refcnt -= 1
+        if next is None:
+            obj.tb_next = ctypes.POINTER(_Traceback)()
+        else:
+            next = _Traceback.from_address(id(next))
+            next.ob_refcnt += 1
+            obj.tb_next = ctypes.pointer(next)
+
+    return tb_set_next
+
+
+# try to get a tb_set_next implementation
+try:
+    from jinja2._speedups import tb_set_next
+except ImportError:
+    try:
+        tb_set_next = _init_ugly_crap()
+    except:
+        tb_set_next = None
+del _init_ugly_crap
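`translate_exception()` is what `Template.render()` and `Template.generate()` (added below in environment.py) call from their `except` clauses. A hedged sketch of that pattern, using the Python 2 three-argument raise:

    # illustrative sketch mirroring the use in environment.py
    import sys
    from jinja2.debug import translate_exception

    def reraise_with_template_lines():
        """Call from an except block around template rendering."""
        exc_type, exc_value, tb = translate_exception(sys.exc_info())
        raise exc_type, exc_value, tb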
diff --git a/scripts/jinja2/defaults.py b/scripts/jinja2/defaults.py
new file mode 100644 (file)
index 0000000..3e24e7d
--- /dev/null
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.defaults
+    ~~~~~~~~~~~~~~~
+
+    Jinja default filters and tags.
+
+    :copyright: 2007-2008 by Armin Ronacher.
+    :license: BSD, see LICENSE for more details.
+"""
+from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner
+
+
+# defaults for the parser / lexer
+BLOCK_START_STRING = '{%'
+BLOCK_END_STRING = '%}'
+VARIABLE_START_STRING = '{{'
+VARIABLE_END_STRING = '}}'
+COMMENT_START_STRING = '{#'
+COMMENT_END_STRING = '#}'
+LINE_STATEMENT_PREFIX = None
+TRIM_BLOCKS = False
+NEWLINE_SEQUENCE = '\n'
+
+
+# default filters, tests and namespace
+from jinja2.filters import FILTERS as DEFAULT_FILTERS
+from jinja2.tests import TESTS as DEFAULT_TESTS
+DEFAULT_NAMESPACE = {
+    'range':        xrange,
+    'dict':         lambda **kw: kw,
+    'lipsum':       generate_lorem_ipsum,
+    'cycler':       Cycler,
+    'joiner':       Joiner
+}
+
+
+# export all constants
+__all__ = tuple(x for x in locals() if x.isupper())
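These module-level constants are the fallbacks picked up by the `Environment` constructor in the next file, and `DEFAULT_NAMESPACE` supplies the globals available in every template. A hedged sketch of overriding the delimiters and using one of the default globals:

    # illustrative sketch, not part of the imported sources
    from jinja2 import Environment

    env = Environment(block_start_string='<%', block_end_string='%>',
                      variable_start_string='<<', variable_end_string='>>')
    out = env.from_string('<% for i in range(3) %><< i >><% endfor %>').render()
    assert out == u'012'   # range comes from DEFAULT_NAMESPACE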
diff --git a/scripts/jinja2/environment.py b/scripts/jinja2/environment.py
new file mode 100644 (file)
index 0000000..4a9c9d1
--- /dev/null
@@ -0,0 +1,848 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.environment
+    ~~~~~~~~~~~~~~~~~~
+
+    Provides a class that holds runtime and parsing time options.
+
+    :copyright: 2008 by Armin Ronacher.
+    :license: BSD, see LICENSE for more details.
+"""
+import sys
+from jinja2 import nodes
+from jinja2.defaults import *
+from jinja2.lexer import get_lexer, TokenStream
+from jinja2.parser import Parser
+from jinja2.optimizer import optimize
+from jinja2.compiler import generate
+from jinja2.runtime import Undefined, Context
+from jinja2.exceptions import TemplateSyntaxError
+from jinja2.utils import import_string, LRUCache, Markup, missing, \
+     concat, consume
+
+
+# for direct template usage we have up to ten living environments
+_spontaneous_environments = LRUCache(10)
+
+
+def get_spontaneous_environment(*args):
+    """Return a new spontaneous environment.  A spontaneous environment is an
+    unnamed and inaccessible (in theory) environment that is used for
+    templates generated from a string and not from the file system.
+    """
+    try:
+        env = _spontaneous_environments.get(args)
+    except TypeError:
+        return Environment(*args)
+    if env is not None:
+        return env
+    _spontaneous_environments[args] = env = Environment(*args)
+    env.shared = True
+    return env
+
+
+def create_cache(size):
+    """Return the cache class for the given size."""
+    if size == 0:
+        return None
+    if size < 0:
+        return {}
+    return LRUCache(size)
+
+
+def copy_cache(cache):
+    """Create an empty copy of the given cache."""
+    if cache is None:
+        return None
+    elif type(cache) is dict:
+        return {}
+    return LRUCache(cache.capacity)
+
+
+def load_extensions(environment, extensions):
+    """Load the extensions from the list and bind it to the environment.
+    Returns a dict of instanciated environments.
+    """
+    result = {}
+    for extension in extensions:
+        if isinstance(extension, basestring):
+            extension = import_string(extension)
+        result[extension.identifier] = extension(environment)
+    return result
+
+
+def _environment_sanity_check(environment):
+    """Perform a sanity check on the environment."""
+    assert issubclass(environment.undefined, Undefined), 'undefined must ' \
+           'be a subclass of undefined because filters depend on it.'
+    assert environment.block_start_string != \
+           environment.variable_start_string != \
+           environment.comment_start_string, 'block, variable and comment ' \
+           'start strings must be different'
+    assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
+           'newline_sequence set to unknown line ending string.'
+    return environment
+
+
+class Environment(object):
+    r"""The core component of Jinja is the `Environment`.  It contains
+    important shared variables like configuration, filters, tests,
+    globals and others.  Instances of this class may be modified if
+    they are not shared and if no template was loaded so far.
+    Modifications on environments after the first template was loaded
+    will lead to surprising effects and undefined behavior.
+
+    Here the possible initialization parameters:
+
+        `block_start_string`
+            The string marking the begin of a block.  Defaults to ``'{%'``.
+
+        `block_end_string`
+            The string marking the end of a block.  Defaults to ``'%}'``.
+
+        `variable_start_string`
+            The string marking the begin of a print statement.
+            Defaults to ``'{{'``.
+
+        `variable_end_string`
+            The string marking the end of a print statement.  Defaults to
+            ``'}}'``.
+
+        `comment_start_string`
+            The string marking the begin of a comment.  Defaults to ``'{#'``.
+
+        `comment_end_string`
+            The string marking the end of a comment.  Defaults to ``'#}'``.
+
+        `line_statement_prefix`
+            If given and a string, this will be used as prefix for line based
+            statements.  See also :ref:`line-statements`.
+
+        `trim_blocks`
+            If this is set to ``True`` the first newline after a block is
+            removed (block, not variable tag!).  Defaults to `False`.
+
+        `newline_sequence`
+            The sequence that starts a newline.  Must be one of ``'\r'``,
+            ``'\n'`` or ``'\r\n'``.  The default is ``'\n'`` which is a
+            useful default for Linux and OS X systems as well as web
+            applications.
+
+        `extensions`
+            List of Jinja extensions to use.  This can either be import paths
+            as strings or extension classes.  For more information have a
+            look at :ref:`the extensions documentation <jinja-extensions>`.
+
+        `optimized`
+            should the optimizer be enabled?  Default is `True`.
+
+        `undefined`
+            :class:`Undefined` or a subclass of it that is used to represent
+            undefined values in the template.
+
+        `finalize`
+            A callable that finalizes the variable.  Per default no finalizing
+            is applied.
+
+        `autoescape`
+            If set to true the XML/HTML autoescaping feature is enabled.
+            For more details about auto escaping see
+            :class:`~jinja2.utils.Markup`.
+
+        `loader`
+            The template loader for this environment.
+
+        `cache_size`
+            The size of the cache.  Per default this is ``50`` which means
+            that if more than 50 templates are loaded the loader will clean
+            out the least recently used template.  If the cache size is set to
+            ``0`` templates are recompiled all the time, if the cache size is
+            ``-1`` the cache will not be cleaned.
+
+        `auto_reload`
+            Some loaders load templates from locations where the template
+            sources may change (ie: file system or database).  If
+            `auto_reload` is set to `True` (default) every time a template is
+            requested the loader checks if the source changed and if yes, it
+            will reload the template.  For higher performance it's possible to
+            disable that.
+
+        `bytecode_cache`
+            If set to a bytecode cache object, this object will provide a
+            cache for the internal Jinja bytecode so that templates don't
+            have to be parsed if they were not changed.
+
+            See :ref:`bytecode-cache` for more information.
+    """
+
+    #: if this environment is sandboxed.  Modifying this variable won't make
+    #: the environment sandboxed though.  For a real sandboxed environment
+    #: have a look at jinja2.sandbox
+    sandboxed = False
+
+    #: True if the environment is just an overlay
+    overlay = False
+
+    #: the environment this environment is linked to if it is an overlay
+    linked_to = None
+
+    #: shared environments have this set to `True`.  A shared environment
+    #: must not be modified
+    shared = False
+
+    def __init__(self,
+                 block_start_string=BLOCK_START_STRING,
+                 block_end_string=BLOCK_END_STRING,
+                 variable_start_string=VARIABLE_START_STRING,
+                 variable_end_string=VARIABLE_END_STRING,
+                 comment_start_string=COMMENT_START_STRING,
+                 comment_end_string=COMMENT_END_STRING,
+                 line_statement_prefix=LINE_STATEMENT_PREFIX,
+                 trim_blocks=TRIM_BLOCKS,
+                 newline_sequence=NEWLINE_SEQUENCE,
+                 extensions=(),
+                 optimized=True,
+                 undefined=Undefined,
+                 finalize=None,
+                 autoescape=False,
+                 loader=None,
+                 cache_size=50,
+                 auto_reload=True,
+                 bytecode_cache=None):
+        # !!Important notice!!
+        #   The constructor accepts quite a few arguments that should be
+        #   passed by keyword rather than position.  However it's important to
+        #   not change the order of arguments because it's used at least
+        #   internally in those cases:
+        #       -   spontaneous environments (i18n extension and Template)
+        #       -   unittests
+        #   If parameter changes are required only add parameters at the end
+        #   and don't change the arguments (or the defaults!) of the arguments
+        #   existing already.
+
+        # lexer / parser information
+        self.block_start_string = block_start_string
+        self.block_end_string = block_end_string
+        self.variable_start_string = variable_start_string
+        self.variable_end_string = variable_end_string
+        self.comment_start_string = comment_start_string
+        self.comment_end_string = comment_end_string
+        self.line_statement_prefix = line_statement_prefix
+        self.trim_blocks = trim_blocks
+        self.newline_sequence = newline_sequence
+
+        # runtime information
+        self.undefined = undefined
+        self.optimized = optimized
+        self.finalize = finalize
+        self.autoescape = autoescape
+
+        # defaults
+        self.filters = DEFAULT_FILTERS.copy()
+        self.tests = DEFAULT_TESTS.copy()
+        self.globals = DEFAULT_NAMESPACE.copy()
+
+        # set the loader provided
+        self.loader = loader
+        self.bytecode_cache = None
+        self.cache = create_cache(cache_size)
+        self.bytecode_cache = bytecode_cache
+        self.auto_reload = auto_reload
+
+        # load extensions
+        self.extensions = load_extensions(self, extensions)
+
+        _environment_sanity_check(self)
+
+    def extend(self, **attributes):
+        """Add the items to the instance of the environment if they do not exist
+        yet.  This is used by :ref:`extensions <writing-extensions>` to register
+        callbacks and configuration values without breaking inheritance.
+        """
+        for key, value in attributes.iteritems():
+            if not hasattr(self, key):
+                setattr(self, key, value)
+
+    def overlay(self, block_start_string=missing, block_end_string=missing,
+                variable_start_string=missing, variable_end_string=missing,
+                comment_start_string=missing, comment_end_string=missing,
+                line_statement_prefix=missing, trim_blocks=missing,
+                extensions=missing, optimized=missing, undefined=missing,
+                finalize=missing, autoescape=missing, loader=missing,
+                cache_size=missing, auto_reload=missing,
+                bytecode_cache=missing):
+        """Create a new overlay environment that shares all the data with the
+        current environment except of cache and the overriden attributes.
+        Extensions cannot be removed for a overlayed environment.  A overlayed
+        environment automatically gets all the extensions of the environment it
+        is linked to plus optional extra extensions.
+
+        Creating overlays should happen after the initial environment was set
+        up completely.  Not all attributes are truly linked, some are just
+        copied over so modifications on the original environment may not shine
+        through.
+        """
+        args = dict(locals())
+        del args['self'], args['cache_size'], args['extensions']
+
+        rv = object.__new__(self.__class__)
+        rv.__dict__.update(self.__dict__)
+        rv.overlay = True
+        rv.linked_to = self
+
+        for key, value in args.iteritems():
+            if value is not missing:
+                setattr(rv, key, value)
+
+        if cache_size is not missing:
+            rv.cache = create_cache(cache_size)
+        else:
+            rv.cache = copy_cache(self.cache)
+
+        rv.extensions = {}
+        for key, value in self.extensions.iteritems():
+            rv.extensions[key] = value.bind(rv)
+        if extensions is not missing:
+            rv.extensions.update(load_extensions(rv, extensions))
+
+        return _environment_sanity_check(rv)
+
+    lexer = property(get_lexer, doc="The lexer for this environment.")
+
+    def getitem(self, obj, argument):
+        """Get an item or attribute of an object but prefer the item."""
+        try:
+            return obj[argument]
+        except (TypeError, LookupError):
+            if isinstance(argument, basestring):
+                try:
+                    attr = str(argument)
+                except:
+                    pass
+                else:
+                    try:
+                        return getattr(obj, attr)
+                    except AttributeError:
+                        pass
+            return self.undefined(obj=obj, name=argument)
+
+    def getattr(self, obj, attribute):
+        """Get an item or attribute of an object but prefer the attribute.
+        Unlike :meth:`getitem` the attribute *must* be a bytestring.
+        """
+        try:
+            return getattr(obj, attribute)
+        except AttributeError:
+            pass
+        try:
+            return obj[attribute]
+        except (TypeError, LookupError, AttributeError):
+            return self.undefined(obj=obj, name=attribute)
+
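The two methods above implement the template lookup rules: subscript syntax prefers the item, attribute syntax prefers the attribute, and both fall back to the configured undefined type instead of raising. A short sketch under that assumption:

    # illustrative sketch only
    from jinja2 import Environment

    env = Environment()
    data = {'items': 42}
    assert env.getitem(data, 'items') == 42         # the dict item wins
    assert callable(env.getattr(data, 'items'))     # the dict.items method wins
    assert isinstance(env.getitem(data, 'missing'), env.undefined)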
+    def parse(self, source, name=None, filename=None):
+        """Parse the sourcecode and return the abstract syntax tree.  This
+        tree of nodes is used by the compiler to convert the template into
+        executable source- or bytecode.  This is useful for debugging or to
+        extract information from templates.
+
+        If you are :ref:`developing Jinja2 extensions <writing-extensions>`
+        this gives you a good overview of the node tree generated.
+        """
+        if isinstance(filename, unicode):
+            filename = filename.encode('utf-8')
+        try:
+            return Parser(self, source, name, filename).parse()
+        except TemplateSyntaxError, e:
+            e.source = source
+            raise e
+
+    def lex(self, source, name=None, filename=None):
+        """Lex the given sourcecode and return a generator that yields
+        tokens as tuples in the form ``(lineno, token_type, value)``.
+        This can be useful for :ref:`extension development <writing-extensions>`
+        and debugging templates.
+
+        This does not perform preprocessing.  If you want the preprocessing
+        of the extensions to be applied you have to filter source through
+        the :meth:`preprocess` method.
+        """
+        source = unicode(source)
+        try:
+            return self.lexer.tokeniter(source, name, filename)
+        except TemplateSyntaxError, e:
+            e.source = source
+            raise e
+
+    def preprocess(self, source, name=None, filename=None):
+        """Preprocesses the source with all extensions.  This is automatically
+        called for all parsing and compiling methods but *not* for :meth:`lex`
+        because there you usually only want the actual source tokenized.
+        """
+        return reduce(lambda s, e: e.preprocess(s, name, filename),
+                      self.extensions.itervalues(), unicode(source))
+
+    def _tokenize(self, source, name, filename=None, state=None):
+        """Called by the parser to do the preprocessing and filtering
+        for all the extensions.  Returns a :class:`~jinja2.lexer.TokenStream`.
+        """
+        source = self.preprocess(source, name, filename)
+        stream = self.lexer.tokenize(source, name, filename, state)
+        for ext in self.extensions.itervalues():
+            stream = ext.filter_stream(stream)
+            if not isinstance(stream, TokenStream):
+                stream = TokenStream(stream, name, filename)
+        return stream
+
+    def compile(self, source, name=None, filename=None, raw=False):
+        """Compile a node or template source code.  The `name` parameter is
+        the load name of the template after it was joined using
+        :meth:`join_path` if necessary, not the filename on the file system.
+        The `filename` parameter is the estimated filename of the template on
+        the file system.  If the template came from a database or memory this
+        can be omitted.
+
+        The return value of this method is a python code object.  If the `raw`
+        parameter is `True` the return value will be a string with python
+        code equivalent to the bytecode returned otherwise.  This method is
+        mainly used internally.
+        """
+        if isinstance(source, basestring):
+            source = self.parse(source, name, filename)
+        if self.optimized:
+            source = optimize(source, self)
+        source = generate(source, self, name, filename)
+        if raw:
+            return source
+        if filename is None:
+            filename = '<template>'
+        elif isinstance(filename, unicode):
+            filename = filename.encode('utf-8')
+        return compile(source, filename, 'exec')
+
+    def compile_expression(self, source, undefined_to_none=True):
+        """A handy helper method that returns a callable that accepts keyword
+        arguments that appear as variables in the expression.  If called it
+        returns the result of the expression.
+
+        This is useful if applications want to use the same rules as Jinja
+        in template "configuration files" or similar situations.
+
+        Example usage:
+
+        >>> env = Environment()
+        >>> expr = env.compile_expression('foo == 42')
+        >>> expr(foo=23)
+        False
+        >>> expr(foo=42)
+        True
+
+        Per default the return value is converted to `None` if the
+        expression returns an undefined value.  This can be changed
+        by setting `undefined_to_none` to `False`.
+
+        >>> env.compile_expression('var')() is None
+        True
+        >>> env.compile_expression('var', undefined_to_none=False)()
+        Undefined
+
+        **new in Jinja 2.1**
+        """
+        parser = Parser(self, source, state='variable')
+        try:
+            expr = parser.parse_expression()
+            if not parser.stream.eos:
+                raise TemplateSyntaxError('chunk after expression',
+                                          parser.stream.current.lineno,
+                                          None, None)
+        except TemplateSyntaxError, e:
+            e.source = source
+            raise e
+        body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
+        template = self.from_string(nodes.Template(body, lineno=1))
+        return TemplateExpression(template, undefined_to_none)
+
+    def join_path(self, template, parent):
+        """Join a template with the parent.  By default all the lookups are
+        relative to the loader root so this method returns the `template`
+        parameter unchanged, but if the paths should be relative to the
+        parent template, this function can be used to calculate the real
+        template name.
+
+        Subclasses may override this method and implement template path
+        joining here.
+        """
+        return template
+
+    def get_template(self, name, parent=None, globals=None):
+        """Load a template from the loader.  If a loader is configured this
+        method ask the loader for the template and returns a :class:`Template`.
+        If the `parent` parameter is not `None`, :meth:`join_path` is called
+        to get the real template name before loading.
+
+        The `globals` parameter can be used to provide template wide globals.
+        These variables are available in the context at render time.
+
+        If the template does not exist a :exc:`TemplateNotFound` exception is
+        raised.
+        """
+        if self.loader is None:
+            raise TypeError('no loader for this environment specified')
+        if parent is not None:
+            name = self.join_path(name, parent)
+
+        if self.cache is not None:
+            template = self.cache.get(name)
+            if template is not None and (not self.auto_reload or \
+                                         template.is_up_to_date):
+                return template
+
+        template = self.loader.load(self, name, self.make_globals(globals))
+        if self.cache is not None:
+            self.cache[name] = template
+        return template
+
+    def from_string(self, source, globals=None, template_class=None):
+        """Load a template from a string.  This parses the source given and
+        returns a :class:`Template` object.
+        """
+        globals = self.make_globals(globals)
+        cls = template_class or self.template_class
+        return cls.from_code(self, self.compile(source), globals, None)
+
+    def make_globals(self, d):
+        """Return a dict for the globals."""
+        if not d:
+            return self.globals
+        return dict(self.globals, **d)
+
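As the Template docstring below notes, a template can be obtained either from an explicit environment or directly from the Template constructor, which routes through a shared spontaneous environment. A small sketch of both paths:

    # illustrative sketch, not part of the imported sources
    from jinja2 import Environment, Template

    env = Environment(trim_blocks=True)
    t1 = env.from_string('Hello {{ name }}!')
    t2 = Template('Hello {{ name }}!')      # spontaneous environment
    assert t1.render(name='World') == t2.render(name='World')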
+
+class Template(object):
+    """The central template object.  This class represents a compiled template
+    and is used to evaluate it.
+
+    Normally the template object is generated from an :class:`Environment` but
+    it also has a constructor that makes it possible to create a template
+    instance directly using the constructor.  It takes the same arguments as
+    the environment constructor but it's not possible to specify a loader.
+
+    Every template object has a few methods and members that are guaranteed
+    to exist.  However it's important that a template object should be
+    considered immutable.  Modifications on the object are not supported.
+
+    Template objects created from the constructor rather than an environment
+    do have an `environment` attribute that points to a temporary environment
+    that is probably shared with other templates created with the constructor
+    and compatible settings.
+
+    >>> template = Template('Hello {{ name }}!')
+    >>> template.render(name='John Doe')
+    u'Hello John Doe!'
+
+    >>> stream = template.stream(name='John Doe')
+    >>> stream.next()
+    u'Hello John Doe!'
+    >>> stream.next()
+    Traceback (most recent call last):
+        ...
+    StopIteration
+    """
+
+    def __new__(cls, source,
+                block_start_string=BLOCK_START_STRING,
+                block_end_string=BLOCK_END_STRING,
+                variable_start_string=VARIABLE_START_STRING,
+                variable_end_string=VARIABLE_END_STRING,
+                comment_start_string=COMMENT_START_STRING,
+                comment_end_string=COMMENT_END_STRING,
+                line_statement_prefix=LINE_STATEMENT_PREFIX,
+                trim_blocks=TRIM_BLOCKS,
+                newline_sequence=NEWLINE_SEQUENCE,
+                extensions=(),
+                optimized=True,
+                undefined=Undefined,
+                finalize=None,
+                autoescape=False):
+        env = get_spontaneous_environment(
+            block_start_string, block_end_string, variable_start_string,
+            variable_end_string, comment_start_string, comment_end_string,
+            line_statement_prefix, trim_blocks, newline_sequence,
+            frozenset(extensions), optimized, undefined, finalize,
+            autoescape, None, 0, False, None)
+        return env.from_string(source, template_class=cls)
+
+    @classmethod
+    def from_code(cls, environment, code, globals, uptodate=None):
+        """Creates a template object from compiled code and the globals.  This
+        is used by the loaders and environment to create a template object.
+        """
+        t = object.__new__(cls)
+        namespace = {
+            'environment':          environment,
+            '__jinja_template__':   t
+        }
+        exec code in namespace
+        t.environment = environment
+        t.globals = globals
+        t.name = namespace['name']
+        t.filename = code.co_filename
+        t.blocks = namespace['blocks']
+
+        # render function and module
+        t.root_render_func = namespace['root']
+        t._module = None
+
+        # debug and loader helpers
+        t._debug_info = namespace['debug_info']
+        t._uptodate = uptodate
+
+        return t
+
+    def render(self, *args, **kwargs):
+        """This method accepts the same arguments as the `dict` constructor:
+        A dict, a dict subclass or some keyword arguments.  If no arguments
+        are given the context will be empty.  These two calls do the same::
+
+            template.render(knights='that say nih')
+            template.render({'knights': 'that say nih'})
+
+        This will return the rendered template as unicode string.
+        """
+        vars = dict(*args, **kwargs)
+        try:
+            return concat(self.root_render_func(self.new_context(vars)))
+        except:
+            from jinja2.debug import translate_exception
+            exc_type, exc_value, tb = translate_exception(sys.exc_info())
+            raise exc_type, exc_value, tb
+
+    def stream(self, *args, **kwargs):
+        """Works exactly like :meth:`generate` but returns a
+        :class:`TemplateStream`.
+        """
+        return TemplateStream(self.generate(*args, **kwargs))
+
+    def generate(self, *args, **kwargs):
+        """For very large templates it can be useful to not render the whole
+        template at once but evaluate each statement after another and yield
+        piece for piece.  This method basically does exactly that and returns
+        a generator that yields one item after another as unicode strings.
+
+        It accepts the same arguments as :meth:`render`.
+        """
+        vars = dict(*args, **kwargs)
+        try:
+            for event in self.root_render_func(self.new_context(vars)):
+                yield event
+        except:
+            from jinja2.debug import translate_exception
+            exc_type, exc_value, tb = translate_exception(sys.exc_info())
+            raise exc_type, exc_value, tb
+
+    def new_context(self, vars=None, shared=False, locals=None):
+        """Create a new :class:`Context` for this template.  The vars
+        provided will be passed to the template.  Per default the globals
+        are added to the context.  If shared is set to `True` the data
+        is passed as is to the context without adding the globals.
+
+        `locals` can be a dict of local variables for internal usage.
+        """
+        if vars is None:
+            vars = {}
+        if shared:
+            parent = vars
+        else:
+            parent = dict(self.globals, **vars)
+        if locals:
+            # if the parent is shared a copy should be created because
+            # we don't want to modify the dict passed
+            if shared:
+                parent = dict(parent)
+            for key, value in locals.iteritems():
+                if key[:2] == 'l_' and value is not missing:
+                    parent[key[2:]] = value
+        return Context(self.environment, parent, self.name, self.blocks)
+
+    def make_module(self, vars=None, shared=False, locals=None):
+        """This method works like the :attr:`module` attribute when called
+        without arguments but it will evaluate the template every call
+        rather than caching the template.  It's also possible to provide
+        a dict which is then used as context.  The arguments are the same
+        as for the :meth:`new_context` method.
+        """
+        return TemplateModule(self, self.new_context(vars, shared, locals))
+
+    @property
+    def module(self):
+        """The template as module.  This is used for imports in the
+        template runtime but is also useful if one wants to access
+        exported template variables from the Python layer:
+
+        >>> t = Template('{% macro foo() %}42{% endmacro %}23')
+        >>> unicode(t.module)
+        u'23'
+        >>> t.module.foo()
+        u'42'
+        """
+        if self._module is not None:
+            return self._module
+        self._module = rv = self.make_module()
+        return rv
+
+    def get_corresponding_lineno(self, lineno):
+        """Return the source line number of a line number in the
+        generated bytecode as they are not in sync.
+        """
+        for template_line, code_line in reversed(self.debug_info):
+            if code_line <= lineno:
+                return template_line
+        return 1
+
+    @property
+    def is_up_to_date(self):
+        """If this variable is `False` there is a newer version available."""
+        if self._uptodate is None:
+            return True
+        return self._uptodate()
+
+    @property
+    def debug_info(self):
+        """The debug info mapping."""
+        return [tuple(map(int, x.split('='))) for x in
+                self._debug_info.split('&')]
+
+    def __repr__(self):
+        if self.name is None:
+            name = 'memory:%x' % id(self)
+        else:
+            name = repr(self.name)
+        return '<%s %s>' % (self.__class__.__name__, name)
+
+
+class TemplateModule(object):
+    """Represents an imported template.  All the exported names of the
+    template are available as attributes on this object.  Additionally
+    converting it to a unicode or byte string renders the contents.
+    """
+
+    def __init__(self, template, context):
+        self._body_stream = list(template.root_render_func(context))
+        self.__dict__.update(context.get_exported())
+        self.__name__ = template.name
+
+    __unicode__ = lambda x: concat(x._body_stream)
+    __html__ = lambda x: Markup(concat(x._body_stream))
+
+    def __str__(self):
+        return unicode(self).encode('utf-8')
+
+    def __repr__(self):
+        if self.__name__ is None:
+            name = 'memory:%x' % id(self)
+        else:
+            name = repr(self.__name__)
+        return '<%s %s>' % (self.__class__.__name__, name)
+
+
+class TemplateExpression(object):
+    """The :meth:`jinja2.Environment.compile_expression` method returns an
+    instance of this object.  It encapsulates the expression-like access
+    to the template with an expression it wraps.
+    """
+
+    def __init__(self, template, undefined_to_none):
+        self._template = template
+        self._undefined_to_none = undefined_to_none
+
+    def __call__(self, *args, **kwargs):
+        context = self._template.new_context(dict(*args, **kwargs))
+        consume(self._template.root_render_func(context))
+        rv = context.vars['result']
+        if self._undefined_to_none and isinstance(rv, Undefined):
+            rv = None
+        return rv
+
+
+class TemplateStream(object):
+    """A template stream works pretty much like an ordinary python generator
+    but it can buffer multiple items to reduce the number of total iterations.
+    Per default the output is unbuffered which means that for every unbuffered
+    instruction in the template one unicode string is yielded.
+
+    If buffering is enabled with a buffer size of 5, five items are combined
+    into a new unicode string.  This is mainly useful if you are streaming
+    big templates to a client via WSGI which flushes after each iteration.
+    """
+
+    def __init__(self, gen):
+        self._gen = gen
+        self.disable_buffering()
+
+    def dump(self, fp, encoding=None, errors='strict'):
+        """Dump the complete stream into a file or file-like object.
+        Per default unicode strings are written; if you want to encode
+        before writing, specify an `encoding`.
+
+        Example usage::
+
+            Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
+        """
+        close = False
+        if isinstance(fp, basestring):
+            fp = file(fp, 'w')
+            close = True
+        try:
+            if encoding is not None:
+                iterable = (x.encode(encoding, errors) for x in self)
+            else:
+                iterable = self
+            if hasattr(fp, 'writelines'):
+                fp.writelines(iterable)
+            else:
+                for item in iterable:
+                    fp.write(item)
+        finally:
+            if close:
+                fp.close()
+
+    def disable_buffering(self):
+        """Disable the output buffering."""
+        self._next = self._gen.next
+        self.buffered = False
+
+    def enable_buffering(self, size=5):
+        """Enable buffering.  Buffer `size` items before yielding them."""
+        if size <= 1:
+            raise ValueError('buffer size too small')
+
+        def generator(next):
+            buf = []
+            c_size = 0
+            push = buf.append
+
+            while 1:
+                try:
+                    while c_size < size:
+                        c = next()
+                        push(c)
+                        if c:
+                            c_size += 1
+                except StopIteration:
+                    if not c_size:
+                        return
+                yield concat(buf)
+                del buf[:]
+                c_size = 0
+
+        self.buffered = True
+        self._next = generator(self._gen.next).next
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        return self._next()
+
+
+# hook in default template class.  if anyone reads this comment: ignore that
+# it's possible to use custom templates ;-)
+Environment.template_class = Template
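[Editor's note, not part of the patch: a brief usage sketch of the Template API
defined above (render(), stream() with buffering, and the module property),
assuming the vendored package is importable as `jinja2`, e.g. with scripts/ on
the Python path.]

    from jinja2 import Template

    # render() returns the whole result as one unicode string.
    t = Template(u'Hello {{ name }}!')
    assert t.render(name=u'World') == u'Hello World!'

    # stream() wraps generate() in a TemplateStream; with buffering enabled,
    # up to five render events are concatenated per iteration step.
    s = Template(u'{% for i in items %}{{ i }} {% endfor %}').stream(items=range(3))
    s.enable_buffering(size=5)
    assert u''.join(s) == u'0 1 2 '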
diff --git a/scripts/jinja2/exceptions.py b/scripts/jinja2/exceptions.py
new file mode 100644 (file)
index 0000000..5bfca66
--- /dev/null
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.exceptions
+    ~~~~~~~~~~~~~~~~~
+
+    Jinja exceptions.
+
+    :copyright: 2008 by Armin Ronacher.
+    :license: BSD, see LICENSE for more details.
+"""
+
+
+class TemplateError(Exception):
+    """Baseclass for all template errors."""
+
+
+class TemplateNotFound(IOError, LookupError, TemplateError):
+    """Raised if a template does not exist."""
+
+    def __init__(self, name):
+        IOError.__init__(self, name)
+        self.name = name
+
+
+class TemplateSyntaxError(TemplateError):
+    """Raised to tell the user that there is a problem with the template."""
+
+    def __init__(self, message, lineno, name=None, filename=None):
+        if not isinstance(message, unicode):
+            message = message.decode('utf-8', 'replace')
+        TemplateError.__init__(self, message.encode('utf-8'))
+        self.lineno = lineno
+        self.name = name
+        self.filename = filename
+        self.source = None
+        self.message = message
+
+    def __unicode__(self):
+        location = 'line %d' % self.lineno
+        name = self.filename or self.name
+        if name:
+            location = 'File "%s", %s' % (name, location)
+        lines = [self.message, '  ' + location]
+
+        # if the source is set, add the line to the output
+        if self.source is not None:
+            try:
+                line = self.source.splitlines()[self.lineno - 1]
+            except IndexError:
+                line = None
+            if line:
+                lines.append('    ' + line.strip())
+
+        return u'\n'.join(lines)
+
+    def __str__(self):
+        return unicode(self).encode('utf-8')
+
+
+class TemplateAssertionError(TemplateSyntaxError):
+    """Like a template syntax error, but covers cases where something in the
+    template caused an error at compile time that wasn't necessarily caused
+    by a syntax error.  However it's a direct subclass of
+    :exc:`TemplateSyntaxError` and has the same attributes.
+    """
+
+
+class TemplateRuntimeError(TemplateError):
+    """A generic runtime error in the template engine.  Under some situations
+    Jinja may raise this exception.
+    """
+
+
+class UndefinedError(TemplateRuntimeError):
+    """Raised if a template tries to operate on :class:`Undefined`."""
+
+
+class SecurityError(TemplateRuntimeError):
+    """Raised if a template tries to do something insecure if the
+    sandbox is enabled.
+    """
+
+
+class FilterArgumentError(TemplateRuntimeError):
+    """This error is raised if a filter was called with inappropriate
+    arguments.
+    """
diff --git a/scripts/jinja2/ext.py b/scripts/jinja2/ext.py
new file mode 100644 (file)
index 0000000..353f265
--- /dev/null
@@ -0,0 +1,456 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.ext
+    ~~~~~~~~~~
+
+    Jinja extensions allow adding custom tags similar to the way Django custom
+    tags work.  By default two example extensions exist: an i18n and a cache
+    extension.
+
+    :copyright: Copyright 2008 by Armin Ronacher.
+    :license: BSD.
+"""
+from collections import deque
+from jinja2 import nodes
+from jinja2.defaults import *
+from jinja2.environment import get_spontaneous_environment
+from jinja2.runtime import Undefined, concat
+from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
+from jinja2.utils import contextfunction, import_string, Markup
+
+
+# the only real useful gettext functions for a Jinja template.  Note
+# that ugettext must be assigned to gettext as Jinja doesn't support
+# non unicode strings.
+GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
+
+
+class ExtensionRegistry(type):
+    """Gives the extension an unique identifier."""
+
+    def __new__(cls, name, bases, d):
+        rv = type.__new__(cls, name, bases, d)
+        rv.identifier = rv.__module__ + '.' + rv.__name__
+        return rv
+
+
+class Extension(object):
+    """Extensions can be used to add extra functionality to the Jinja template
+    system at the parser level.  Custom extensions are bound to an environment
+    but may not store environment specific data on `self`.  The reason for
+    this is that an extension can be bound to another environment (for
+    overlays) by creating a copy and reassigning the `environment` attribute.
+
+    As extensions are created by the environment they cannot accept any
+    arguments for configuration.  One may want to work around that by using
+    a factory function, but that is not possible as extensions are identified
+    by their import name.  The correct way to configure the extension is
+    storing the configuration values on the environment.  Because this way the
+    environment ends up acting as central configuration storage the
+    attributes may clash which is why extensions have to ensure that the names
+    they choose for configuration are not too generic.  ``prefix`` for example
+    is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
+    name as it includes the name of the extension (fragment cache).
+    """
+    __metaclass__ = ExtensionRegistry
+
+    #: if this extension parses this is the list of tags it's listening to.
+    tags = set()
+
+    def __init__(self, environment):
+        self.environment = environment
+
+    def bind(self, environment):
+        """Create a copy of this extension bound to another environment."""
+        rv = object.__new__(self.__class__)
+        rv.__dict__.update(self.__dict__)
+        rv.environment = environment
+        return rv
+
+    def preprocess(self, source, name, filename=None):
+        """This method is called before the actual lexing and can be used to
+        preprocess the source.  The `filename` is optional.  The return value
+        must be the preprocessed source.
+        """
+        return source
+
+    def filter_stream(self, stream):
+        """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
+        to filter tokens returned.  This method has to return an iterable of
+        :class:`~jinja2.lexer.Token`\s, but it doesn't have to return a
+        :class:`~jinja2.lexer.TokenStream`.
+
+        In the `ext` folder of the Jinja2 source distribution there is a file
+        called `inlinegettext.py` which implements a filter that utilizes this
+        method.
+        """
+        return stream
+
+    def parse(self, parser):
+        """If any of the :attr:`tags` matched this method is called with the
+        parser as first argument.  The token the parser stream is pointing at
+        is the name token that matched.  This method has to return one or a
+        list of multiple nodes.
+        """
+        raise NotImplementedError()
+
+    def attr(self, name, lineno=None):
+        """Return an attribute node for the current extension.  This is useful
+        to pass constants on extensions to generated template code::
+
+            self.attr('_my_attribute', lineno=lineno)
+        """
+        return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
+
+    def call_method(self, name, args=None, kwargs=None, dyn_args=None,
+                    dyn_kwargs=None, lineno=None):
+        """Call a method of the extension.  This is a shortcut for
+        :meth:`attr` + :class:`jinja2.nodes.Call`.
+        """
+        if args is None:
+            args = []
+        if kwargs is None:
+            kwargs = []
+        return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
+                          dyn_args, dyn_kwargs, lineno=lineno)
+
+
+@contextfunction
+def _gettext_alias(context, string):
+    return context.resolve('gettext')(string)
+
+
+class InternationalizationExtension(Extension):
+    """This extension adds gettext support to Jinja2."""
+    tags = set(['trans'])
+
+    # TODO: the i18n extension is currently reevaluating values in a few
+    # situations.  Take this example:
+    #   {% trans count=something() %}{{ count }} foo{% pluralize
+    #     %}{{ count }} foos{% endtrans %}
+    # something is called twice here.  One time for the gettext value and
+    # the other time for the n-parameter of the ngettext function.
+
+    def __init__(self, environment):
+        Extension.__init__(self, environment)
+        environment.globals['_'] = _gettext_alias
+        environment.extend(
+            install_gettext_translations=self._install,
+            install_null_translations=self._install_null,
+            uninstall_gettext_translations=self._uninstall,
+            extract_translations=self._extract
+        )
+
+    def _install(self, translations):
+        gettext = getattr(translations, 'ugettext', None)
+        if gettext is None:
+            gettext = translations.gettext
+        ngettext = getattr(translations, 'ungettext', None)
+        if ngettext is None:
+            ngettext = translations.ngettext
+        self.environment.globals.update(gettext=gettext, ngettext=ngettext)
+
+    def _install_null(self):
+        self.environment.globals.update(
+            gettext=lambda x: x,
+            ngettext=lambda s, p, n: (n != 1 and (p,) or (s,))[0]
+        )
+
+    def _uninstall(self, translations):
+        for key in 'gettext', 'ngettext':
+            self.environment.globals.pop(key, None)
+
+    def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
+        if isinstance(source, basestring):
+            source = self.environment.parse(source)
+        return extract_from_ast(source, gettext_functions)
+
+    def parse(self, parser):
+        """Parse a translatable tag."""
+        lineno = parser.stream.next().lineno
+
+        # find all the variables referenced.  Additionally a variable can be
+        # defined in the body of the trans block too, but this is checked at
+    # a later stage.
+        plural_expr = None
+        variables = {}
+        while parser.stream.current.type is not 'block_end':
+            if variables:
+                parser.stream.expect('comma')
+
+            # skip colon for python compatibility
+            if parser.stream.skip_if('colon'):
+                break
+
+            name = parser.stream.expect('name')
+            if name.value in variables:
+                parser.fail('translatable variable %r defined twice.' %
+                            name.value, name.lineno,
+                            exc=TemplateAssertionError)
+
+            # expressions
+            if parser.stream.current.type is 'assign':
+                parser.stream.next()
+                variables[name.value] = var = parser.parse_expression()
+            else:
+                variables[name.value] = var = nodes.Name(name.value, 'load')
+            if plural_expr is None:
+                plural_expr = var
+
+        parser.stream.expect('block_end')
+
+        plural = plural_names = None
+        have_plural = False
+        referenced = set()
+
+        # now parse until endtrans or pluralize
+        singular_names, singular = self._parse_block(parser, True)
+        if singular_names:
+            referenced.update(singular_names)
+            if plural_expr is None:
+                plural_expr = nodes.Name(singular_names[0], 'load')
+
+        # if we have a pluralize block, we parse that too
+        if parser.stream.current.test('name:pluralize'):
+            have_plural = True
+            parser.stream.next()
+            if parser.stream.current.type is not 'block_end':
+                name = parser.stream.expect('name')
+                if name.value not in variables:
+                    parser.fail('unknown variable %r for pluralization' %
+                                name.value, name.lineno,
+                                exc=TemplateAssertionError)
+                plural_expr = variables[name.value]
+            parser.stream.expect('block_end')
+            plural_names, plural = self._parse_block(parser, False)
+            parser.stream.next()
+            referenced.update(plural_names)
+        else:
+            parser.stream.next()
+
+        # register free names as simple name expressions
+        for var in referenced:
+            if var not in variables:
+                variables[var] = nodes.Name(var, 'load')
+
+        # no variables referenced?  no need to escape
+        if not referenced:
+            singular = singular.replace('%%', '%')
+            if plural:
+                plural = plural.replace('%%', '%')
+
+        if not have_plural:
+            plural_expr = None
+        elif plural_expr is None:
+            parser.fail('pluralize without variables', lineno)
+
+        if variables:
+            variables = nodes.Dict([nodes.Pair(nodes.Const(x, lineno=lineno), y)
+                                    for x, y in variables.items()])
+        else:
+            variables = None
+
+        node = self._make_node(singular, plural, variables, plural_expr)
+        node.set_lineno(lineno)
+        return node
+
+    def _parse_block(self, parser, allow_pluralize):
+        """Parse until the next block tag with a given name."""
+        referenced = []
+        buf = []
+        while 1:
+            if parser.stream.current.type is 'data':
+                buf.append(parser.stream.current.value.replace('%', '%%'))
+                parser.stream.next()
+            elif parser.stream.current.type is 'variable_begin':
+                parser.stream.next()
+                name = parser.stream.expect('name').value
+                referenced.append(name)
+                buf.append('%%(%s)s' % name)
+                parser.stream.expect('variable_end')
+            elif parser.stream.current.type is 'block_begin':
+                parser.stream.next()
+                if parser.stream.current.test('name:endtrans'):
+                    break
+                elif parser.stream.current.test('name:pluralize'):
+                    if allow_pluralize:
+                        break
+                    parser.fail('a translatable section can have only one '
+                                'pluralize section')
+                parser.fail('control structures in translatable sections are '
+                            'not allowed')
+            elif parser.stream.eos:
+                parser.fail('unclosed translation block')
+            else:
+                assert False, 'internal parser error'
+
+        return referenced, concat(buf)
+
+    def _make_node(self, singular, plural, variables, plural_expr):
+        """Generates a useful node from the data provided."""
+        # singular only:
+        if plural_expr is None:
+            gettext = nodes.Name('gettext', 'load')
+            node = nodes.Call(gettext, [nodes.Const(singular)],
+                              [], None, None)
+
+        # singular and plural
+        else:
+            ngettext = nodes.Name('ngettext', 'load')
+            node = nodes.Call(ngettext, [
+                nodes.Const(singular),
+                nodes.Const(plural),
+                plural_expr
+            ], [], None, None)
+
+        # mark the return value as safe if we are in an
+        # environment with autoescaping turned on
+        if self.environment.autoescape:
+            node = nodes.MarkSafe(node)
+
+        if variables:
+            node = nodes.Mod(node, variables)
+        return nodes.Output([node])
+
+
+class ExprStmtExtension(Extension):
+    """Adds a `do` tag to Jinja2 that works like the print statement just
+    that it doesn't print the return value.
+    """
+    tags = set(['do'])
+
+    def parse(self, parser):
+        node = nodes.ExprStmt(lineno=parser.stream.next().lineno)
+        node.node = parser.parse_tuple()
+        return node
+
+
+class LoopControlExtension(Extension):
+    """Adds break and continue to the template engine."""
+    tags = set(['break', 'continue'])
+
+    def parse(self, parser):
+        token = parser.stream.next()
+        if token.value == 'break':
+            return nodes.Break(lineno=token.lineno)
+        return nodes.Continue(lineno=token.lineno)
+
+
+def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
+                     babel_style=True):
+    """Extract localizable strings from the given template node.  Per
+    default this function returns matches in Babel style, which means non-string
+    parameters as well as keyword arguments are returned as `None`.  This
+    allows Babel to figure out what you really meant if you are using
+    gettext functions that allow keyword arguments for placeholder expansion.
+    If you don't want that behavior, set the `babel_style` parameter to `False`,
+    which causes only strings to be returned and parameters to always be stored
+    in tuples.  As a consequence invalid gettext calls (calls without a single
+    string parameter or string parameters after non-string parameters) are
+    skipped.
+
+    This example explains the behavior:
+
+    >>> from jinja2 import Environment
+    >>> env = Environment()
+    >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
+    >>> list(extract_from_ast(node))
+    [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
+    >>> list(extract_from_ast(node, babel_style=False))
+    [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
+
+    For every string found this function yields a ``(lineno, function,
+    message)`` tuple, where:
+
+    * ``lineno`` is the number of the line on which the string was found,
+    * ``function`` is the name of the ``gettext`` function used (if the
+      string was extracted from embedded Python code), and
+    *  ``message`` is the string itself (a ``unicode`` object, or a tuple
+       of ``unicode`` objects for functions with multiple string arguments).
+    """
+    for node in node.find_all(nodes.Call):
+        if not isinstance(node.node, nodes.Name) or \
+           node.node.name not in gettext_functions:
+            continue
+
+        strings = []
+        for arg in node.args:
+            if isinstance(arg, nodes.Const) and \
+               isinstance(arg.value, basestring):
+                strings.append(arg.value)
+            else:
+                strings.append(None)
+
+        for arg in node.kwargs:
+            strings.append(None)
+        if node.dyn_args is not None:
+            strings.append(None)
+        if node.dyn_kwargs is not None:
+            strings.append(None)
+
+        if not babel_style:
+            strings = tuple(x for x in strings if x is not None)
+            if not strings:
+                continue
+        else:
+            if len(strings) == 1:
+                strings = strings[0]
+            else:
+                strings = tuple(strings)
+        yield node.lineno, node.node.name, strings
+
+
+def babel_extract(fileobj, keywords, comment_tags, options):
+    """Babel extraction method for Jinja templates.
+
+    :param fileobj: the file-like object the messages should be extracted from
+    :param keywords: a list of keywords (i.e. function names) that should be
+                     recognized as translation functions
+    :param comment_tags: a list of translator tags to search for and include
+                         in the results.  (Unused)
+    :param options: a dictionary of additional options (optional)
+    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
+             (comments will be empty currently)
+    """
+    extensions = set()
+    for extension in options.get('extensions', '').split(','):
+        extension = extension.strip()
+        if not extension:
+            continue
+        extensions.add(import_string(extension))
+    if InternationalizationExtension not in extensions:
+        extensions.add(InternationalizationExtension)
+
+    environment = get_spontaneous_environment(
+        options.get('block_start_string', BLOCK_START_STRING),
+        options.get('block_end_string', BLOCK_END_STRING),
+        options.get('variable_start_string', VARIABLE_START_STRING),
+        options.get('variable_end_string', VARIABLE_END_STRING),
+        options.get('comment_start_string', COMMENT_START_STRING),
+        options.get('comment_end_string', COMMENT_END_STRING),
+        options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
+        str(options.get('trim_blocks', TRIM_BLOCKS)).lower() in \
+            ('1', 'on', 'yes', 'true'),
+        NEWLINE_SEQUENCE, frozenset(extensions),
+        # fill with defaults so that environments are shared
+        # with other spontaneous environments.  The rest of the
+        # arguments are optimizer, undefined, finalize, autoescape,
+        # loader, cache size, auto reloading setting and the
+        # bytecode cache
+        True, Undefined, None, False, None, 0, False, None
+    )
+
+    source = fileobj.read().decode(options.get('encoding', 'utf-8'))
+    try:
+        node = environment.parse(source)
+    except TemplateSyntaxError, e:
+        # skip templates with syntax errors
+        return
+    for lineno, func, message in extract_from_ast(node, keywords):
+        yield lineno, func, message, []
+
+
+#: nicer import names
+i18n = InternationalizationExtension
+do = ExprStmtExtension
+loopcontrols = LoopControlExtension
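[Editor's note, not part of the patch: a hedged sketch showing how the bundled
extensions are enabled via the import names above and what the `do` and
`break` tags add; the template text is made up for illustration.]

    from jinja2 import Environment

    env = Environment(extensions=['jinja2.ext.do', 'jinja2.ext.loopcontrols'])

    # `do` evaluates an expression without emitting output; `break` leaves the loop.
    tmpl = env.from_string(
        u'{% set seen = [] %}'
        u'{% for x in values %}'
        u'{% if x > 2 %}{% break %}{% endif %}'
        u'{% do seen.append(x) %}'
        u'{% endfor %}'
        u'{{ seen|join(",") }}')
    assert tmpl.render(values=[1, 2, 3, 4]) == u'1,2'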
diff --git a/scripts/jinja2/filters.py b/scripts/jinja2/filters.py
new file mode 100644 (file)
index 0000000..afa7667
--- /dev/null
@@ -0,0 +1,715 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.filters
+    ~~~~~~~~~~~~~~
+
+    Bundled jinja filters.
+
+    :copyright: 2008 by Armin Ronacher, Christoph Hack.
+    :license: BSD, see LICENSE for more details.
+"""
+import re
+import math
+from random import choice
+from operator import itemgetter
+from itertools import imap, groupby
+from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode
+from jinja2.runtime import Undefined
+from jinja2.exceptions import FilterArgumentError, SecurityError
+
+
+_word_re = re.compile(r'\w+')
+
+
+def contextfilter(f):
+    """Decorator for marking context dependent filters. The current
+    :class:`Context` will be passed as first argument.
+    """
+    if getattr(f, 'environmentfilter', False):
+        raise TypeError('filter already marked as environment filter')
+    f.contextfilter = True
+    return f
+
+
+def environmentfilter(f):
+    """Decorator for marking evironment dependent filters.  The current
+    :class:`Environment` is passed to the filter as first argument.
+    """
+    if getattr(f, 'contextfilter', False):
+        raise TypeError('filter already marked as context filter')
+    f.environmentfilter = True
+    return f
+
+
+def do_forceescape(value):
+    """Enforce HTML escaping.  This will probably double escape variables."""
+    if hasattr(value, '__html__'):
+        value = value.__html__()
+    return escape(unicode(value))
+
+
+@environmentfilter
+def do_replace(environment, s, old, new, count=None):
+    """Return a copy of the value with all occurrences of a substring
+    replaced with a new one. The first argument is the substring
+    that should be replaced, the second is the replacement string.
+    If the optional third argument ``count`` is given, only the first
+    ``count`` occurrences are replaced:
+
+    .. sourcecode:: jinja
+
+        {{ "Hello World"|replace("Hello", "Goodbye") }}
+            -> Goodbye World
+
+        {{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
+            -> d'oh, d'oh, aaargh
+    """
+    if count is None:
+        count = -1
+    if not environment.autoescape:
+        return unicode(s).replace(unicode(old), unicode(new), count)
+    if (hasattr(old, '__html__') or hasattr(new, '__html__')) and \
+       not hasattr(s, '__html__'):
+        s = escape(s)
+    else:
+        s = soft_unicode(s)
+    return s.replace(soft_unicode(old), soft_unicode(new), count)
+
+
+def do_upper(s):
+    """Convert a value to uppercase."""
+    return soft_unicode(s).upper()
+
+
+def do_lower(s):
+    """Convert a value to lowercase."""
+    return soft_unicode(s).lower()
+
+
+@environmentfilter
+def do_xmlattr(_environment, d, autospace=True):
+    """Create an SGML/XML attribute string based on the items in a dict.
+    All values that are neither `none` nor `undefined` are automatically
+    escaped:
+
+    .. sourcecode:: html+jinja
+
+        <ul{{ {'class': 'my_list', 'missing': none,
+                'id': 'list-%d'|format(variable)}|xmlattr }}>
+        ...
+        </ul>
+
+    Results in something like this:
+
+    .. sourcecode:: html
+
+        <ul class="my_list" id="list-42">
+        ...
+        </ul>
+
+    As you can see it automatically prepends a space in front of the item
+    if the filter returned something unless the second parameter is false.
+    """
+    rv = u' '.join(
+        u'%s="%s"' % (escape(key), escape(value))
+        for key, value in d.iteritems()
+        if value is not None and not isinstance(value, Undefined)
+    )
+    if autospace and rv:
+        rv = u' ' + rv
+    if _environment.autoescape:
+        rv = Markup(rv)
+    return rv
+
+
+def do_capitalize(s):
+    """Capitalize a value. The first character will be uppercase, all others
+    lowercase.
+    """
+    return soft_unicode(s).capitalize()
+
+
+def do_title(s):
+    """Return a titlecased version of the value. I.e. words will start with
+    uppercase letters, all remaining characters are lowercase.
+    """
+    return soft_unicode(s).title()
+
+
+def do_dictsort(value, case_sensitive=False, by='key'):
+    """Sort a dict and yield (key, value) pairs. Because python dicts are
+    unsorted you may want to use this function to order them by either
+    key or value:
+
+    .. sourcecode:: jinja
+
+        {% for item in mydict|dictsort %}
+            sort the dict by key, case insensitive
+
+        {% for item in mydict|dictsort(true) %}
+            sort the dict by key, case sensitive
+
+        {% for item in mydict|dictsort(false, 'value') %}
+            sort the dict by value, case insensitive
+    """
+    if by == 'key':
+        pos = 0
+    elif by == 'value':
+        pos = 1
+    else:
+        raise FilterArgumentError('You can only sort by either '
+                                  '"key" or "value"')
+    def sort_func(item):
+        value = item[pos]
+        if isinstance(value, basestring) and not case_sensitive:
+            value = value.lower()
+        return value
+
+    return sorted(value.items(), key=sort_func)
+
+
+def do_sort(value, case_sensitive=False):
+    """Sort an iterable.  If the iterable is made of strings the second
+    parameter can be used to control the case sensitivity of the
+    comparison, which is disabled by default.
+
+    .. sourcecode:: jinja
+
+        {% for item in iterable|sort %}
+            ...
+        {% endfor %}
+    """
+    if not case_sensitive:
+        def sort_func(item):
+            if isinstance(item, basestring):
+                item = item.lower()
+            return item
+    else:
+        sort_func = None
+    return sorted(value, key=sort_func)
+
+
+def do_default(value, default_value=u'', boolean=False):
+    """If the value is undefined it will return the passed default value,
+    otherwise the value of the variable:
+
+    .. sourcecode:: jinja
+
+        {{ my_variable|default('my_variable is not defined') }}
+
+    This will output the value of ``my_variable`` if the variable was
+    defined, otherwise ``'my_variable is not defined'``. If you want
+    to use default with variables that evaluate to false you have to
+    set the second parameter to `true`:
+
+    .. sourcecode:: jinja
+
+        {{ ''|default('the string was empty', true) }}
+    """
+    if (boolean and not value) or isinstance(value, Undefined):
+        return default_value
+    return value
+
+
+@environmentfilter
+def do_join(environment, value, d=u''):
+    """Return a string which is the concatenation of the strings in the
+    sequence. The separator between elements is an empty string per
+    default, you can define it with the optional parameter:
+
+    .. sourcecode:: jinja
+
+        {{ [1, 2, 3]|join('|') }}
+            -> 1|2|3
+
+        {{ [1, 2, 3]|join }}
+            -> 123
+    """
+    # no automatic escaping?  joining is a lot easier then
+    if not environment.autoescape:
+        return unicode(d).join(imap(unicode, value))
+
+    # if the delimiter doesn't have an html representation we check
+    # if any of the items has.  If yes we do a coercion to Markup
+    if not hasattr(d, '__html__'):
+        value = list(value)
+        do_escape = False
+        for idx, item in enumerate(value):
+            if hasattr(item, '__html__'):
+                do_escape = True
+            else:
+                value[idx] = unicode(item)
+        if do_escape:
+            d = escape(d)
+        else:
+            d = unicode(d)
+        return d.join(value)
+
+    # no html involved, do normal joining
+    return soft_unicode(d).join(imap(soft_unicode, value))
+
+
+def do_center(value, width=80):
+    """Centers the value in a field of a given width."""
+    return unicode(value).center(width)
+
+
+@environmentfilter
+def do_first(environment, seq):
+    """Return the first item of a sequence."""
+    try:
+        return iter(seq).next()
+    except StopIteration:
+        return environment.undefined('No first item, sequence was empty.')
+
+
+@environmentfilter
+def do_last(environment, seq):
+    """Return the last item of a sequence."""
+    try:
+        return iter(reversed(seq)).next()
+    except StopIteration:
+        return environment.undefined('No last item, sequence was empty.')
+
+
+@environmentfilter
+def do_random(environment, seq):
+    """Return a random item from the sequence."""
+    try:
+        return choice(seq)
+    except IndexError:
+        return environment.undefined('No random item, sequence was empty.')
+
+
+def do_filesizeformat(value, binary=False):
+    """Format the value like a 'human-readable' file size (i.e. 13 KB,
+    4.1 MB, 102 bytes, etc).  Per default decimal prefixes are used (mega,
+    giga etc.); if the second parameter is set to `True`, binary
+    prefixes (mebi, gibi) are used.
+    """
+    bytes = float(value)
+    base = binary and 1024 or 1000
+    middle = binary and 'i' or ''
+    if bytes < base:
+        return "%d Byte%s" % (bytes, bytes != 1 and 's' or '')
+    elif bytes < base * base:
+        return "%.1f K%sB" % (bytes / base, middle)
+    elif bytes < base * base * base:
+        return "%.1f M%sB" % (bytes / (base * base), middle)
+    return "%.1f G%sB" % (bytes / (base * base * base), middle)
+
+
+def do_pprint(value, verbose=False):
+    """Pretty print a variable. Useful for debugging.
+
+    With Jinja 1.2 onwards you can pass it a parameter.  If this parameter
+    is truthy the output will be more verbose (this requires `pretty`)
+    """
+    return pformat(value, verbose=verbose)
+
+
+@environmentfilter
+def do_urlize(environment, value, trim_url_limit=None, nofollow=False):
+    """Converts URLs in plain text into clickable links.
+
+    If you pass the filter an additional integer it will shorten the urls
+    to that number. Also a third argument exists that makes the urls
+    "nofollow":
+
+    .. sourcecode:: jinja
+
+        {{ mytext|urlize(40, true) }}
+            links are shortened to 40 chars and defined with rel="nofollow"
+    """
+    rv = urlize(value, trim_url_limit, nofollow)
+    if environment.autoescape:
+        rv = Markup(rv)
+    return rv
+
+
+def do_indent(s, width=4, indentfirst=False):
+    """Return a copy of the passed string, each line indented by
+    4 spaces. The first line is not indented. If you want to
+    change the number of spaces or indent the first line too
+    you can pass additional parameters to the filter:
+
+    .. sourcecode:: jinja
+
+        {{ mytext|indent(2, true) }}
+            indent by two spaces and indent the first line too.
+    """
+    indention = u' ' * width
+    rv = (u'\n' + indention).join(s.splitlines())
+    if indentfirst:
+        rv = indention + rv
+    return rv
+
+
+def do_truncate(s, length=255, killwords=False, end='...'):
+    """Return a truncated copy of the string. The length is specified
+    with the first parameter which defaults to ``255``. If the second
+    parameter is ``true`` the filter will cut the text at length. Otherwise
+    it will try to save the last word. If the text was in fact
+    truncated it will append an ellipsis sign (``"..."``). If you want a
+    different ellipsis sign than ``"..."`` you can specify it using the
+    third parameter.
+
+    .. sourcecode:: jinja
+
+        {{ mytext|truncate(300, false, '&raquo;') }}
+            truncate mytext to 300 chars, don't split up words, use a
+            right pointing double arrow as ellipsis sign.
+    """
+    if len(s) <= length:
+        return s
+    elif killwords:
+        return s[:length] + end
+    words = s.split(' ')
+    result = []
+    m = 0
+    for word in words:
+        m += len(word) + 1
+        if m > length:
+            break
+        result.append(word)
+    result.append(end)
+    return u' '.join(result)
+
+
+def do_wordwrap(s, width=79, break_long_words=True):
+    """
+    Return a copy of the string passed to the filter wrapped after
+    ``79`` characters.  You can override this default using the first
+    parameter.  If you set the second parameter to `false` Jinja will not
+    split words apart if they are longer than `width`.
+    """
+    import textwrap
+    return u'\n'.join(textwrap.wrap(s, width=width, expand_tabs=False,
+                                   replace_whitespace=False,
+                                   break_long_words=break_long_words))
+
+
+def do_wordcount(s):
+    """Count the words in that string."""
+    return len(_word_re.findall(s))
+
+
+def do_int(value, default=0):
+    """Convert the value into an integer. If the
+    conversion doesn't work it will return ``0``. You can
+    override this default using the first parameter.
+    """
+    try:
+        return int(value)
+    except (TypeError, ValueError):
+        # this quirk is necessary so that "42.23"|int gives 42.
+        try:
+            return int(float(value))
+        except (TypeError, ValueError):
+            return default
+
+
+def do_float(value, default=0.0):
+    """Convert the value into a floating point number. If the
+    conversion doesn't work it will return ``0.0``. You can
+    override this default using the first parameter.
+    """
+    try:
+        return float(value)
+    except (TypeError, ValueError):
+        return default
+
+
+def do_format(value, *args, **kwargs):
+    """
+    Apply python string formatting on an object:
+
+    .. sourcecode:: jinja
+
+        {{ "%s - %s"|format("Hello?", "Foo!") }}
+            -> Hello? - Foo!
+    """
+    if args and kwargs:
+        raise FilterArgumentError('can\'t handle positional and keyword '
+                                  'arguments at the same time')
+    return soft_unicode(value) % (kwargs or args)
+
+
+def do_trim(value):
+    """Strip leading and trailing whitespace."""
+    return soft_unicode(value).strip()
+
+
+def do_striptags(value):
+    """Strip SGML/XML tags and replace adjacent whitespace by one space.
+    """
+    if hasattr(value, '__html__'):
+        value = value.__html__()
+    return Markup(unicode(value)).striptags()
+
+
+def do_slice(value, slices, fill_with=None):
+    """Slice an iterator and return a list of lists containing
+    those items. Useful if you want to create a div containing
+    three div tags that represent columns:
+
+    .. sourcecode:: html+jinja
+
+        <div class="columwrapper">
+          {%- for column in items|slice(3) %}
+            <ul class="column-{{ loop.index }}">
+            {%- for item in column %}
+              <li>{{ item }}</li>
+            {%- endfor %}
+            </ul>
+          {%- endfor %}
+        </div>
+
+    If you pass it a second argument it's used to fill missing
+    values on the last iteration.
+    """
+    seq = list(value)
+    length = len(seq)
+    items_per_slice = length // slices
+    slices_with_extra = length % slices
+    offset = 0
+    for slice_number in xrange(slices):
+        start = offset + slice_number * items_per_slice
+        if slice_number < slices_with_extra:
+            offset += 1
+        end = offset + (slice_number + 1) * items_per_slice
+        tmp = seq[start:end]
+        if fill_with is not None and slice_number >= slices_with_extra:
+            tmp.append(fill_with)
+        yield tmp
+
+
+def do_batch(value, linecount, fill_with=None):
+    """
+    A filter that batches items. It works pretty much like `slice`
+    just the other way round. It returns a list of lists with the
+    given number of items. If you provide a second parameter this
+    is used to fill missing items. See this example:
+
+    .. sourcecode:: html+jinja
+
+        <table>
+        {%- for row in items|batch(3, '&nbsp;') %}
+          <tr>
+          {%- for column in row %}
+            <tr>{{ column }}</td>
+          {%- endfor %}
+          </tr>
+        {%- endfor %}
+        </table>
+    """
+    result = []
+    tmp = []
+    for item in value:
+        if len(tmp) == linecount:
+            yield tmp
+            tmp = []
+        tmp.append(item)
+    if tmp:
+        if fill_with is not None and len(tmp) < linecount:
+            tmp += [fill_with] * (linecount - len(tmp))
+        yield tmp
+
+
+def do_round(value, precision=0, method='common'):
+    """Round the number to a given precision. The first
+    parameter specifies the precision (default is ``0``), the
+    second the rounding method:
+
+    - ``'common'`` rounds either up or down
+    - ``'ceil'`` always rounds up
+    - ``'floor'`` always rounds down
+
+    If you don't specify a method ``'common'`` is used.
+
+    .. sourcecode:: jinja
+
+        {{ 42.55|round }}
+            -> 43
+        {{ 42.55|round(1, 'floor') }}
+            -> 42.5
+    """
+    if not method in ('common', 'ceil', 'floor'):
+        raise FilterArgumentError('method must be common, ceil or floor')
+    if precision < 0:
+        raise FilterArgumentError('precision must be a positive integer '
+                                  'or zero.')
+    if method == 'common':
+        return round(value, precision)
+    func = getattr(math, method)
+    if precision:
+        return func(value * (10 ** precision)) / (10 ** precision)
+    else:
+        return func(value)
+
+
+def do_sort(value, reverse=False):
+    """Sort a sequence. Per default it sorts ascending, if you pass it
+    true as first argument it will reverse the sorting.
+    """
+    return sorted(value, reverse=reverse)
+
+
+@environmentfilter
+def do_groupby(environment, value, attribute):
+    """Group a sequence of objects by a common attribute.
+
+    If you for example have a list of dicts or objects that represent persons
+    with `gender`, `first_name` and `last_name` attributes and you want to
+    group all users by genders you can do something like the following
+    snippet:
+
+    .. sourcecode:: html+jinja
+
+        <ul>
+        {% for group in persons|groupby('gender') %}
+            <li>{{ group.grouper }}<ul>
+            {% for person in group.list %}
+                <li>{{ person.first_name }} {{ person.last_name }}</li>
+            {% endfor %}</ul></li>
+        {% endfor %}
+        </ul>
+
+    Additionally it's possible to use tuple unpacking for the grouper and
+    list:
+
+    .. sourcecode:: html+jinja
+
+        <ul>
+        {% for grouper, list in persons|groupby('gender') %}
+            ...
+        {% endfor %}
+        </ul>
+
+    As you can see the item we're grouping by is stored in the `grouper`
+    attribute and the `list` contains all the objects that have this grouper
+    in common.
+    """
+    expr = lambda x: environment.getitem(x, attribute)
+    return sorted(map(_GroupTuple, groupby(sorted(value, key=expr), expr)))
+
+
+class _GroupTuple(tuple):
+    __slots__ = ()
+    grouper = property(itemgetter(0))
+    list = property(itemgetter(1))
+
+    def __new__(cls, (key, value)):
+        return tuple.__new__(cls, (key, list(value)))
+
+
+def do_list(value):
+    """Convert the value into a list.  If it was a string the returned list
+    will be a list of characters.
+    """
+    return list(value)
+
+
+def do_mark_safe(value):
+    """Mark the value as safe which means that in an environment with automatic
+    escaping enabled this variable will not be escaped.
+    """
+    return Markup(value)
+
+
+def do_mark_unsafe(value):
+    """Mark a value as unsafe.  This is the reverse operation for :func:`safe`."""
+    return unicode(value)
+
+
+def do_reverse(value):
+    """Reverse the object or return an iterator the iterates over it the other
+    way round.
+    """
+    if isinstance(value, basestring):
+        return value[::-1]
+    try:
+        return reversed(value)
+    except TypeError:
+        try:
+            rv = list(value)
+            rv.reverse()
+            return rv
+        except TypeError:
+            raise FilterArgumentError('argument must be iterable')
+
+
+@environmentfilter
+def do_attr(environment, obj, name):
+    """Get an attribute of an object.  ``foo|attr("bar")`` works like
+    ``foo["bar"]`` just that always an attribute is returned and items are not
+    looked up.
+
+    See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
+    """
+    try:
+        name = str(name)
+    except UnicodeError:
+        pass
+    else:
+        try:
+            value = getattr(obj, name)
+        except AttributeError:
+            pass
+        else:
+            if environment.sandboxed and not \
+               environment.is_safe_attribute(obj, name, value):
+                return environment.unsafe_undefined(obj, name)
+            return value
+    return environment.undefined(obj=obj, name=name)
+
+
+FILTERS = {
+    'attr':                 do_attr,
+    'replace':              do_replace,
+    'upper':                do_upper,
+    'lower':                do_lower,
+    'escape':               escape,
+    'e':                    escape,
+    'forceescape':          do_forceescape,
+    'capitalize':           do_capitalize,
+    'title':                do_title,
+    'default':              do_default,
+    'd':                    do_default,
+    'join':                 do_join,
+    'count':                len,
+    'dictsort':             do_dictsort,
+    'sort':                 do_sort,
+    'length':               len,
+    'reverse':              do_reverse,
+    'center':               do_center,
+    'indent':               do_indent,
+    'first':                do_first,
+    'last':                 do_last,
+    'random':               do_random,
+    'filesizeformat':       do_filesizeformat,
+    'pprint':               do_pprint,
+    'truncate':             do_truncate,
+    'wordwrap':             do_wordwrap,
+    'wordcount':            do_wordcount,
+    'int':                  do_int,
+    'float':                do_float,
+    'string':               soft_unicode,
+    'list':                 do_list,
+    'urlize':               do_urlize,
+    'format':               do_format,
+    'trim':                 do_trim,
+    'striptags':            do_striptags,
+    'slice':                do_slice,
+    'batch':                do_batch,
+    'sum':                  sum,
+    'abs':                  abs,
+    'round':                do_round,
+    'groupby':              do_groupby,
+    'safe':                 do_mark_safe,
+    'xmlattr':              do_xmlattr
+}
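[Editor's note, not part of the patch: the FILTERS table above holds the
built-in filters an Environment starts with; user-defined filters are
registered under a name in `env.filters`.  The `shout` filter below is a
hypothetical example.]

    from jinja2 import Environment

    env = Environment()
    env.filters['shout'] = lambda s: s.upper() + u'!'   # hypothetical custom filter

    out = env.from_string(u'{{ words|sort|join(", ")|shout }}').render(
        words=[u'banana', u'apple'])
    assert out == u'APPLE, BANANA!'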
diff --git a/scripts/jinja2/lexer.py b/scripts/jinja2/lexer.py
new file mode 100644 (file)
index 0000000..6b26983
--- /dev/null
@@ -0,0 +1,561 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.lexer
+    ~~~~~~~~~~~~
+
+    This module implements a Jinja / Python combination lexer. The
+    `Lexer` class provided by this module is used to do some preprocessing
+    for Jinja.
+
+    On the one hand it filters out invalid operators like the bitshift
+    operators we don't allow in templates. On the other hand it separates
+    template code and python code in expressions.
+
+    :copyright: 2007-2008 by Armin Ronacher.
+    :license: BSD, see LICENSE for more details.
+"""
+import re
+from operator import itemgetter
+from collections import deque
+from jinja2.exceptions import TemplateSyntaxError
+from jinja2.utils import LRUCache
+
+
+# cache for the lexers. Exists in order to be able to have multiple
+# environments with the same lexer
+_lexer_cache = LRUCache(50)
+
+# static regular expressions
+whitespace_re = re.compile(r'\s+', re.U)
+string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
+                       r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
+integer_re = re.compile(r'\d+')
+name_re = re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b')
+float_re = re.compile(r'(?<!\.)\d+\.\d+')
+newline_re = re.compile(r'(\r\n|\r|\n)')
+
+# bind operators to token types
+operators = {
+    '+':            'add',
+    '-':            'sub',
+    '/':            'div',
+    '//':           'floordiv',
+    '*':            'mul',
+    '%':            'mod',
+    '**':           'pow',
+    '~':            'tilde',
+    '[':            'lbracket',
+    ']':            'rbracket',
+    '(':            'lparen',
+    ')':            'rparen',
+    '{':            'lbrace',
+    '}':            'rbrace',
+    '==':           'eq',
+    '!=':           'ne',
+    '>':            'gt',
+    '>=':           'gteq',
+    '<':            'lt',
+    '<=':           'lteq',
+    '=':            'assign',
+    '.':            'dot',
+    ':':            'colon',
+    '|':            'pipe',
+    ',':            'comma',
+    ';':            'semicolon'
+}
+
+reverse_operators = dict([(v, k) for k, v in operators.iteritems()])
+assert len(operators) == len(reverse_operators), 'operators dropped'
+operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
+                         sorted(operators, key=lambda x: -len(x))))
+
+
+def count_newlines(value):
+    """Count the number of newline characters in the string.  This is
+    useful for extensions that filter a stream.
+    """
+    return len(newline_re.findall(value))
+
+
+class Failure(object):
+    """Class that raises a `TemplateSyntaxError` if called.
+    Used by the `Lexer` to specify known errors.
+    """
+
+    def __init__(self, message, cls=TemplateSyntaxError):
+        self.message = message
+        self.error_class = cls
+
+    def __call__(self, lineno, filename):
+        raise self.error_class(self.message, lineno, filename)
+
+
+class Token(tuple):
+    """Token class."""
+    __slots__ = ()
+    lineno, type, value = (property(itemgetter(x)) for x in range(3))
+
+    def __new__(cls, lineno, type, value):
+        return tuple.__new__(cls, (lineno, intern(str(type)), value))
+
+    def __str__(self):
+        if self.type in reverse_operators:
+            return reverse_operators[self.type]
+        elif self.type is 'name':
+            return self.value
+        return self.type
+
+    def test(self, expr):
+        """Test a token against a token expression.  This can either be a
+        token type or ``'token_type:token_value'``.  This can only test
+        against string values and types.
+        """
+        # here we do a regular string equality check as test_any is usually
+        # passed an iterable of not interned strings.
+        if self.type == expr:
+            return True
+        elif ':' in expr:
+            return expr.split(':', 1) == [self.type, self.value]
+        return False
+
+    def test_any(self, *iterable):
+        """Test against multiple token expressions."""
+        for expr in iterable:
+            if self.test(expr):
+                return True
+        return False
+
+    def __repr__(self):
+        return 'Token(%r, %r, %r)' % (
+            self.lineno,
+            self.type,
+            self.value
+        )
+
+
+class TokenStreamIterator(object):
+    """The iterator for tokenstreams.  Iterate over the stream
+    until the eof token is reached.
+    """
+
+    def __init__(self, stream):
+        self.stream = stream
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        token = self.stream.current
+        if token.type == 'eof':
+            self.stream.close()
+            raise StopIteration()
+        self.stream.next()
+        return token
+
+
+class TokenStream(object):
+    """A token stream is an iterable that yields :class:`Token`\s.  The
+    parser however does not iterate over it but calls :meth:`next` to go
+    one token ahead.  The current active token is stored as :attr:`current`.
+    """
+
+    def __init__(self, generator, name, filename):
+        self._next = iter(generator).next
+        self._pushed = deque()
+        self.name = name
+        self.filename = filename
+        self.closed = False
+        self.current = Token(1, 'initial', '')
+        self.next()
+
+    def __iter__(self):
+        return TokenStreamIterator(self)
+
+    def __nonzero__(self):
+        """Are we at the end of the stream?"""
+        return bool(self._pushed) or self.current.type != 'eof'
+
+    eos = property(lambda x: not x.__nonzero__(), doc=__nonzero__.__doc__)
+
+    def push(self, token):
+        """Push a token back to the stream."""
+        self._pushed.append(token)
+
+    def look(self):
+        """Look at the next token."""
+        old_token = self.next()
+        result = self.current
+        self.push(result)
+        self.current = old_token
+        return result
+
+    def skip(self, n=1):
+        """Got n tokens ahead."""
+        for x in xrange(n):
+            self.next()
+
+    def next_if(self, expr):
+        """Perform the token test and return the token if it matched.
+        Otherwise the return value is `None`.
+        """
+        if self.current.test(expr):
+            return self.next()
+
+    def skip_if(self, expr):
+        """Like :meth:`next_if` but only returns `True` or `False`."""
+        return self.next_if(expr) is not None
+
+    def next(self):
+        """Go one token ahead and return the old one"""
+        rv = self.current
+        if self._pushed:
+            self.current = self._pushed.popleft()
+        elif self.current.type is not 'eof':
+            try:
+                self.current = self._next()
+            except StopIteration:
+                self.close()
+        return rv
+
+    def close(self):
+        """Close the stream."""
+        self.current = Token(self.current.lineno, 'eof', '')
+        self._next = None
+        self.closed = True
+
+    def expect(self, expr):
+        """Expect a given token type and return it.  This accepts the same
+        argument as :meth:`jinja2.lexer.Token.test`.
+        """
+        if not self.current.test(expr):
+            if ':' in expr:
+                expr = expr.split(':')[1]
+            if self.current.type is 'eof':
+                raise TemplateSyntaxError('unexpected end of template, '
+                                          'expected %r.' % expr,
+                                          self.current.lineno,
+                                          self.name, self.filename)
+            raise TemplateSyntaxError("expected token %r, got %r" %
+                                      (expr, str(self.current)),
+                                      self.current.lineno,
+                                      self.name, self.filename)
+        try:
+            return self.current
+        finally:
+            self.next()
+
+
+def get_lexer(environment):
+    """Return a lexer which is probably cached."""
+    key = (environment.block_start_string,
+           environment.block_end_string,
+           environment.variable_start_string,
+           environment.variable_end_string,
+           environment.comment_start_string,
+           environment.comment_end_string,
+           environment.line_statement_prefix,
+           environment.trim_blocks,
+           environment.newline_sequence)
+    lexer = _lexer_cache.get(key)
+    if lexer is None:
+        lexer = Lexer(environment)
+        _lexer_cache[key] = lexer
+    return lexer
+
+
+class Lexer(object):
+    """Class that implements a lexer for a given environment. Automatically
+    created by the environment class, usually you don't have to do that.
+
+    Note that the lexer is not automatically bound to an environment.
+    Multiple environments can share the same lexer.
+    """
+
+    def __init__(self, environment):
+        # shortcuts
+        c = lambda x: re.compile(x, re.M | re.S)
+        e = re.escape
+
+        # lexing rules for tags
+        tag_rules = [
+            (whitespace_re, 'whitespace', None),
+            (float_re, 'float', None),
+            (integer_re, 'integer', None),
+            (name_re, 'name', None),
+            (string_re, 'string', None),
+            (operator_re, 'operator', None)
+        ]
+
+        # assemble the root lexing rule.  because "|" is ungreedy
+        # we have to sort by length so that the lexer continues working
+        # as expected when we have parsing rules like <% for blocks and
+        # <%= for variables (if someone wants asp-like syntax).
+        # variables are just part of the rules if variable processing
+        # is required.
+        root_tag_rules = [
+            ('comment',     environment.comment_start_string),
+            ('block',       environment.block_start_string),
+            ('variable',    environment.variable_start_string)
+        ]
+        root_tag_rules.sort(key=lambda x: -len(x[1]))
+
+        # now escape the rules.  This is done here so that the escape
+        # signs don't count for the lengths of the tags.
+        root_tag_rules = [(a, e(b)) for a, b in root_tag_rules]
+
+        # if we have a line statement prefix we need an extra rule for
+        # that.  We add this rule *after* all the others.
+        if environment.line_statement_prefix is not None:
+            prefix = e(environment.line_statement_prefix)
+            root_tag_rules.insert(0, ('linestatement', '^\s*' + prefix))
+
+        # block suffix if trimming is enabled
+        block_suffix_re = environment.trim_blocks and '\\n?' or ''
+
+        self.newline_sequence = environment.newline_sequence
+
+        # global lexing rules
+        self.rules = {
+            'root': [
+                # directives
+                (c('(.*?)(?:%s)' % '|'.join(
+                    ['(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*%s)' % (
+                        e(environment.block_start_string),
+                        e(environment.block_start_string),
+                        e(environment.block_end_string)
+                    )] + [
+                        '(?P<%s_begin>\s*%s\-|%s)' % (n, r, r)
+                        for n, r in root_tag_rules
+                    ])), ('data', '#bygroup'), '#bygroup'),
+                # data
+                (c('.+'), 'data', None)
+            ],
+            # comments
+            'comment_begin': [
+                (c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
+                    e(environment.comment_end_string),
+                    e(environment.comment_end_string),
+                    block_suffix_re
+                )), ('comment', 'comment_end'), '#pop'),
+                (c('(.)'), (Failure('Missing end of comment tag'),), None)
+            ],
+            # blocks
+            'block_begin': [
+                (c('(?:\-%s\s*|%s)%s' % (
+                    e(environment.block_end_string),
+                    e(environment.block_end_string),
+                    block_suffix_re
+                )), 'block_end', '#pop'),
+            ] + tag_rules,
+            # variables
+            'variable_begin': [
+                (c('\-%s\s*|%s' % (
+                    e(environment.variable_end_string),
+                    e(environment.variable_end_string)
+                )), 'variable_end', '#pop')
+            ] + tag_rules,
+            # raw block
+            'raw_begin': [
+                (c('(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
+                    e(environment.block_start_string),
+                    e(environment.block_start_string),
+                    e(environment.block_end_string),
+                    e(environment.block_end_string),
+                    block_suffix_re
+                )), ('data', 'raw_end'), '#pop'),
+                (c('(.)'), (Failure('Missing end of raw directive'),), None)
+            ],
+            # line statements
+            'linestatement_begin': [
+                (c(r'\s*(\n|$)'), 'linestatement_end', '#pop')
+            ] + tag_rules
+        }
+
+    def _normalize_newlines(self, value):
+        """Called for strings and template data to normlize it to unicode."""
+        return newline_re.sub(self.newline_sequence, value)
+
+    def tokenize(self, source, name=None, filename=None, state=None):
+        """Calls tokeniter + tokenize and wraps it in a token stream.
+        """
+        stream = self.tokeniter(source, name, filename, state)
+        return TokenStream(self.wrap(stream, name, filename), name, filename)
+
+    def wrap(self, stream, name=None, filename=None):
+        """This is called with the stream as returned by `tokenize` and wraps
+        every token in a :class:`Token` and converts the value.
+        """
+        for lineno, token, value in stream:
+            if token in ('comment_begin', 'comment', 'comment_end',
+                         'whitespace'):
+                continue
+            elif token == 'linestatement_begin':
+                token = 'block_begin'
+            elif token == 'linestatement_end':
+                token = 'block_end'
+            # we are not interested in those tokens in the parser
+            elif token in ('raw_begin', 'raw_end'):
+                continue
+            elif token == 'data':
+                value = self._normalize_newlines(value)
+            elif token == 'keyword':
+                token = value
+            elif token == 'name':
+                value = str(value)
+            elif token == 'string':
+                # try to unescape string
+                try:
+                    value = self._normalize_newlines(value[1:-1]) \
+                        .encode('ascii', 'backslashreplace') \
+                        .decode('unicode-escape')
+                except Exception, e:
+                    msg = str(e).split(':')[-1].strip()
+                    raise TemplateSyntaxError(msg, lineno, name, filename)
+                # if we can express it as bytestring (ascii only)
+                # we do that for support of semi broken APIs
+                # as datetime.datetime.strftime
+                try:
+                    value = str(value)
+                except UnicodeError:
+                    pass
+            elif token == 'integer':
+                value = int(value)
+            elif token == 'float':
+                value = float(value)
+            elif token == 'operator':
+                token = operators[value]
+            yield Token(lineno, token, value)
+
+    def tokeniter(self, source, name, filename=None, state=None):
+        """This method tokenizes the text and returns the tokens in a
+        generator.  Use this method if you just want to tokenize a template.
+        """
+        source = '\n'.join(unicode(source).splitlines())
+        pos = 0
+        lineno = 1
+        stack = ['root']
+        if state is not None and state != 'root':
+            assert state in ('variable', 'block'), 'invalid state'
+            stack.append(state + '_begin')
+        else:
+            state = 'root'
+        statetokens = self.rules[stack[-1]]
+        source_length = len(source)
+
+        balancing_stack = []
+
+        while 1:
+            # tokenizer loop
+            for regex, tokens, new_state in statetokens:
+                m = regex.match(source, pos)
+                # if no match we try again with the next rule
+                if m is None:
+                    continue
+
+                # we only match blocks and variables if braces / parentheses
+                # are balanced. continue parsing with the lower rule which
+                # is the operator rule. do this only if the end tags look
+                # like operators
+                if balancing_stack and \
+                   tokens in ('variable_end', 'block_end',
+                              'linestatement_end'):
+                    continue
+
+                # tuples support more options
+                if isinstance(tokens, tuple):
+                    for idx, token in enumerate(tokens):
+                        # failure group
+                        if token.__class__ is Failure:
+                            raise token(lineno, filename)
+                        # bygroup is a bit more complex, in that case we
+                        # yield for the current token the first named
+                        # group that matched
+                        elif token == '#bygroup':
+                            for key, value in m.groupdict().iteritems():
+                                if value is not None:
+                                    yield lineno, key, value
+                                    lineno += value.count('\n')
+                                    break
+                            else:
+                                raise RuntimeError('%r wanted to resolve '
+                                                   'the token dynamically'
+                                                   ' but no group matched'
+                                                   % regex)
+                        # normal group
+                        else:
+                            data = m.group(idx + 1)
+                            if data:
+                                yield lineno, token, data
+                            lineno += data.count('\n')
+
+                # strings as token are just yielded as is.
+                else:
+                    data = m.group()
+                    # update brace/parentheses balance
+                    if tokens == 'operator':
+                        if data == '{':
+                            balancing_stack.append('}')
+                        elif data == '(':
+                            balancing_stack.append(')')
+                        elif data == '[':
+                            balancing_stack.append(']')
+                        elif data in ('}', ')', ']'):
+                            if not balancing_stack:
+                                raise TemplateSyntaxError('unexpected "%s"' %
+                                                          data, lineno, name,
+                                                          filename)
+                            expected_op = balancing_stack.pop()
+                            if expected_op != data:
+                                raise TemplateSyntaxError('unexpected "%s", '
+                                                          'expected "%s"' %
+                                                          (data, expected_op),
+                                                          lineno, name,
+                                                          filename)
+                    # yield items
+                    yield lineno, tokens, data
+                    lineno += data.count('\n')
+
+                # fetch new position into new variable so that we can check
+                # if there is an internal parsing error which would result
+                # in an infinite loop
+                pos2 = m.end()
+
+                # handle state changes
+                if new_state is not None:
+                    # remove the uppermost state
+                    if new_state == '#pop':
+                        stack.pop()
+                    # resolve the new state by group checking
+                    elif new_state == '#bygroup':
+                        for key, value in m.groupdict().iteritems():
+                            if value is not None:
+                                stack.append(key)
+                                break
+                        else:
+                            raise RuntimeError('%r wanted to resolve the '
+                                               'new state dynamically but'
+                                               ' no group matched' %
+                                               regex)
+                    # direct state name given
+                    else:
+                        stack.append(new_state)
+                    statetokens = self.rules[stack[-1]]
+                # we are still at the same position and no stack change.
+                # this means a loop without break condition, avoid that and
+                # raise error
+                elif pos2 == pos:
+                    raise RuntimeError('%r yielded empty string without '
+                                       'stack change' % regex)
+                # advance to the new position and start again
+                pos = pos2
+                break
+            # if loop terminated without break we haven't found a single match
+            # either we are at the end of the file or we have a problem
+            else:
+                # end of text
+                if pos >= source_length:
+                    return
+                # something went wrong
+                raise TemplateSyntaxError('unexpected char %r at %d' %
+                                          (source[pos], pos), lineno,
+                                          name, filename)
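
For illustration, a rough sketch of how the lexer and token stream above can
be driven by hand (normally the parser does this internally; the top-level
Environment import from the bundled package is assumed):

    from jinja2 import Environment
    from jinja2.lexer import get_lexer

    env = Environment()
    lexer = get_lexer(env)          # cached per relevant environment settings
    stream = lexer.tokenize(u'Hello {{ name }}!', name='example')

    # `current` is the active token, next() advances and returns the old one
    while not stream.eos:
        tok = stream.next()
        print '%d %s %r' % (tok.lineno, tok.type, tok.value)
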
diff --git a/scripts/jinja2/loaders.py b/scripts/jinja2/loaders.py
new file mode 100644 (file)
index 0000000..c61cd84
--- /dev/null
@@ -0,0 +1,309 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.loaders
+    ~~~~~~~~~~~~~~
+
+    Jinja loader classes.
+
+    :copyright: 2008 by Armin Ronacher.
+    :license: BSD, see LICENSE for more details.
+"""
+from os import path
+try:
+    from hashlib import sha1
+except ImportError:
+    from sha import new as sha1
+from jinja2.exceptions import TemplateNotFound
+from jinja2.utils import LRUCache, open_if_exists
+
+
+def split_template_path(template):
+    """Split a path into segments and perform a sanity check.  If it detects
+    '..' in the path it will raise a `TemplateNotFound` error.
+    """
+    pieces = []
+    for piece in template.split('/'):
+        if path.sep in piece \
+           or (path.altsep and path.altsep in piece) or \
+           piece == path.pardir:
+            raise TemplateNotFound(template)
+        elif piece and piece != '.':
+            pieces.append(piece)
+    return pieces
+
+
+class BaseLoader(object):
+    """Baseclass for all loaders.  Subclass this and override `get_source` to
+    implement a custom loading mechanism.  The environment provides a
+    `get_template` method that calls the loader's `load` method to get the
+    :class:`Template` object.
+
+    A very basic example for a loader that looks up templates on the file
+    system could look like this::
+
+        from jinja2 import BaseLoader, TemplateNotFound
+        from os.path import join, exists, getmtime
+
+        class MyLoader(BaseLoader):
+
+            def __init__(self, path):
+                self.path = path
+
+            def get_source(self, environment, template):
+                path = join(self.path, template)
+                if not exists(path):
+                    raise TemplateNotFound(template)
+                mtime = getmtime(path)
+                with file(path) as f:
+                    source = f.read().decode('utf-8')
+                return source, path, lambda: mtime == getmtime(path)
+    """
+
+    def get_source(self, environment, template):
+        """Get the template source, filename and reload helper for a template.
+        It's passed the environment and template name and has to return a
+        tuple in the form ``(source, filename, uptodate)`` or raise a
+        `TemplateNotFound` error if it can't locate the template.
+
+        The source part of the returned tuple must be the source of the
+        template as a unicode string or an ASCII bytestring.  The filename should
+        be the name of the file on the filesystem if it was loaded from there,
+        otherwise `None`.  The filename is used by python for the tracebacks
+        if no loader extension is used.
+
+        The last item in the tuple is the `uptodate` function.  If auto
+        reloading is enabled it's always called to check if the template
+        changed.  No arguments are passed so the function must store the
+        old state somewhere (for example in a closure).  If it returns `False`
+        the template will be reloaded.
+        """
+        raise TemplateNotFound(template)
+
+    def load(self, environment, name, globals=None):
+        """Loads a template.  This method looks up the template in the cache
+        or loads one by calling :meth:`get_source`.  Subclasses should not
+        override this method as loaders working on collections of other
+        loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
+        will not call this method but `get_source` directly.
+        """
+        code = None
+        if globals is None:
+            globals = {}
+
+        # first we try to get the source for this template together
+        # with the filename and the uptodate function.
+        source, filename, uptodate = self.get_source(environment, name)
+
+        # try to load the code from the bytecode cache if there is a
+        # bytecode cache configured.
+        bcc = environment.bytecode_cache
+        if bcc is not None:
+            bucket = bcc.get_bucket(environment, name, filename, source)
+            code = bucket.code
+
+        # if we don't have code so far (not cached, no longer up to
+        # date) etc. we compile the template
+        if code is None:
+            code = environment.compile(source, name, filename)
+
+        # if the bytecode cache is available and the bucket doesn't
+        # have a code so far, we give the bucket the new code and put
+        # it back to the bytecode cache.
+        if bcc is not None and bucket.code is None:
+            bucket.code = code
+            bcc.set_bucket(bucket)
+
+        return environment.template_class.from_code(environment, code,
+                                                    globals, uptodate)
+
+
+class FileSystemLoader(BaseLoader):
+    """Loads templates from the file system.  This loader can find templates
+    in folders on the file system and is the preferred way to load them.
+
+    The loader takes the path to the templates as a string, or, if multiple
+    locations are wanted, a list of them which is then searched in the
+    given order:
+
+    >>> loader = FileSystemLoader('/path/to/templates')
+    >>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])
+
+    Per default the template encoding is ``'utf-8'`` which can be changed
+    by setting the `encoding` parameter to something else.
+    """
+
+    def __init__(self, searchpath, encoding='utf-8'):
+        if isinstance(searchpath, basestring):
+            searchpath = [searchpath]
+        self.searchpath = list(searchpath)
+        self.encoding = encoding
+
+    def get_source(self, environment, template):
+        pieces = split_template_path(template)
+        for searchpath in self.searchpath:
+            filename = path.join(searchpath, *pieces)
+            f = open_if_exists(filename)
+            if f is None:
+                continue
+            try:
+                contents = f.read().decode(self.encoding)
+            finally:
+                f.close()
+
+            mtime = path.getmtime(filename)
+            def uptodate():
+                try:
+                    return path.getmtime(filename) == mtime
+                except OSError:
+                    return False
+            return contents, filename, uptodate
+        raise TemplateNotFound(template)
+
+
+class PackageLoader(BaseLoader):
+    """Load templates from python eggs or packages.  It is constructed with
+    the name of the python package and the path to the templates in that
+    package:
+
+    >>> loader = PackageLoader('mypackage', 'views')
+
+    If the package path is not given, ``'templates'`` is assumed.
+
+    Per default the template encoding is ``'utf-8'`` which can be changed
+    by setting the `encoding` parameter to something else.  Due to the nature
+    of eggs it's only possible to reload templates if the package was loaded
+    from the file system and not a zip file.
+    """
+
+    def __init__(self, package_name, package_path='templates',
+                 encoding='utf-8'):
+        from pkg_resources import DefaultProvider, ResourceManager, \
+                                  get_provider
+        provider = get_provider(package_name)
+        self.encoding = encoding
+        self.manager = ResourceManager()
+        self.filesystem_bound = isinstance(provider, DefaultProvider)
+        self.provider = provider
+        self.package_path = package_path
+
+    def get_source(self, environment, template):
+        pieces = split_template_path(template)
+        p = '/'.join((self.package_path,) + tuple(pieces))
+        if not self.provider.has_resource(p):
+            raise TemplateNotFound(template)
+
+        filename = uptodate = None
+        if self.filesystem_bound:
+            filename = self.provider.get_resource_filename(self.manager, p)
+            mtime = path.getmtime(filename)
+            def uptodate():
+                try:
+                    return path.getmtime(filename) == mtime
+                except OSError:
+                    return False
+
+        source = self.provider.get_resource_string(self.manager, p)
+        return source.decode(self.encoding), filename, uptodate
+
+
+class DictLoader(BaseLoader):
+    """Loads a template from a python dict.  It's passed a dict of unicode
+    strings bound to template names.  This loader is useful for unittesting:
+
+    >>> loader = DictLoader({'index.html': 'source here'})
+
+    Because auto reloading is rarely useful this is disabled per default.
+    """
+
+    def __init__(self, mapping):
+        self.mapping = mapping
+
+    def get_source(self, environment, template):
+        if template in self.mapping:
+            source = self.mapping[template]
+            return source, None, lambda: source != self.mapping.get(template)
+        raise TemplateNotFound(template)
+
+
+class FunctionLoader(BaseLoader):
+    """A loader that is passed a function which does the loading.  The
+    function becomes the name of the template passed and has to return either
+    an unicode string with the template source, a tuple in the form ``(source,
+    filename, uptodatefunc)`` or `None` if the template does not exist.
+
+    >>> def load_template(name):
+    ...     if name == 'index.html':
+    ...         return '...'
+    ...
+    >>> loader = FunctionLoader(load_template)
+
+    The `uptodatefunc` is a function that is called if autoreload is enabled
+    and has to return `True` if the template is still up to date.  For more
+    details have a look at :meth:`BaseLoader.get_source` which has the same
+    return value.
+    """
+
+    def __init__(self, load_func):
+        self.load_func = load_func
+
+    def get_source(self, environment, template):
+        rv = self.load_func(template)
+        if rv is None:
+            raise TemplateNotFound(template)
+        elif isinstance(rv, basestring):
+            return rv, None, None
+        return rv
+
+
+class PrefixLoader(BaseLoader):
+    """A loader that is passed a dict of loaders where each loader is bound
+    to a prefix.  The prefix is delimited from the template by a slash per
+    default, which can be changed by setting the `delimiter` argument to
+    something else.
+
+    >>> loader = PrefixLoader({
+    ...     'app1':     PackageLoader('mypackage.app1'),
+    ...     'app2':     PackageLoader('mypackage.app2')
+    ... })
+
+    By loading ``'app1/index.html'`` the file from the app1 package is loaded,
+    by loading ``'app2/index.html'`` the file from the second.
+    """
+
+    def __init__(self, mapping, delimiter='/'):
+        self.mapping = mapping
+        self.delimiter = delimiter
+
+    def get_source(self, environment, template):
+        try:
+            prefix, template = template.split(self.delimiter, 1)
+            loader = self.mapping[prefix]
+        except (ValueError, KeyError):
+            raise TemplateNotFound(template)
+        return loader.get_source(environment, template)
+
+
+class ChoiceLoader(BaseLoader):
+    """This loader works like the `PrefixLoader` just that no prefix is
+    specified.  If a template could not be found by one loader the next one
+    is tried.
+
+    >>> loader = ChoiceLoader([
+    ...     FileSystemLoader('/path/to/user/templates'),
+    ...     PackageLoader('mypackage')
+    ... ])
+
+    This is useful if you want to allow users to override builtin templates
+    from a different location.
+    """
+
+    def __init__(self, loaders):
+        self.loaders = loaders
+
+    def get_source(self, environment, template):
+        for loader in self.loaders:
+            try:
+                return loader.get_source(environment, template)
+            except TemplateNotFound:
+                pass
+        raise TemplateNotFound(template)
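
For illustration, a rough sketch of combining the loaders above; the top-level
imports and the Environment(loader=...) constructor argument of the bundled
package are assumed:

    from jinja2 import Environment, ChoiceLoader, FileSystemLoader, DictLoader

    # templates on disk take precedence, the DictLoader serves as a
    # builtin fallback (handy for tests)
    loader = ChoiceLoader([
        FileSystemLoader('/path/to/user/templates'),
        DictLoader({'index.html': u'Hello {{ name }}!'}),
    ])
    env = Environment(loader=loader)
    print env.get_template('index.html').render(name='World')
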
diff --git a/scripts/jinja2/nodes.py b/scripts/jinja2/nodes.py
new file mode 100644 (file)
index 0000000..405622a
--- /dev/null
@@ -0,0 +1,784 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.nodes
+    ~~~~~~~~~~~~
+
+    This module implements additional nodes derived from the ast base node.
+
+    It also provides some node tree helper functions like `in_lineno` and
+    `get_nodes` used by the parser and translator in order to normalize
+    python and jinja nodes.
+
+    :copyright: 2008 by Armin Ronacher.
+    :license: BSD, see LICENSE for more details.
+"""
+import operator
+from itertools import chain, izip
+from collections import deque
+from jinja2.utils import Markup
+
+
+_binop_to_func = {
+    '*':        operator.mul,
+    '/':        operator.truediv,
+    '//':       operator.floordiv,
+    '**':       operator.pow,
+    '%':        operator.mod,
+    '+':        operator.add,
+    '-':        operator.sub
+}
+
+_uaop_to_func = {
+    'not':      operator.not_,
+    '+':        operator.pos,
+    '-':        operator.neg
+}
+
+_cmpop_to_func = {
+    'eq':       operator.eq,
+    'ne':       operator.ne,
+    'gt':       operator.gt,
+    'gteq':     operator.ge,
+    'lt':       operator.lt,
+    'lteq':     operator.le,
+    'in':       lambda a, b: a in b,
+    'notin':    lambda a, b: a not in b
+}
+
+
+class Impossible(Exception):
+    """Raised if the node could not perform a requested action."""
+
+
+class NodeType(type):
+    """A metaclass for nodes that handles the field and attribute
+    inheritance.  fields and attributes from the parent class are
+    automatically forwarded to the child."""
+
+    def __new__(cls, name, bases, d):
+        for attr in 'fields', 'attributes':
+            storage = []
+            storage.extend(getattr(bases[0], attr, ()))
+            storage.extend(d.get(attr, ()))
+            assert len(bases) == 1, 'multiple inheritance not allowed'
+            assert len(storage) == len(set(storage)), 'layout conflict'
+            d[attr] = tuple(storage)
+        d.setdefault('abstract', False)
+        return type.__new__(cls, name, bases, d)
+
+
+class Node(object):
+    """Baseclass for all Jinja2 nodes.  There are a number of nodes available
+    of different types.  There are four major types:
+
+    -   :class:`Stmt`: statements
+    -   :class:`Expr`: expressions
+    -   :class:`Helper`: helper nodes
+    -   :class:`Template`: the outermost wrapper node
+
+    All nodes have fields and attributes.  Fields may be other nodes, lists,
+    or arbitrary values.  Fields are passed to the constructor as regular
+    positional arguments, attributes as keyword arguments.  Each node has
+    two attributes: `lineno` (the line number of the node) and `environment`.
+    The `environment` attribute is set at the end of the parsing process for
+    all nodes automatically.
+    """
+    __metaclass__ = NodeType
+    fields = ()
+    attributes = ('lineno', 'environment')
+    abstract = True
+
+    def __init__(self, *fields, **attributes):
+        if self.abstract:
+            raise TypeError('abstract nodes are not instantiable')
+        if fields:
+            if len(fields) != len(self.fields):
+                if not self.fields:
+                    raise TypeError('%r takes 0 arguments' %
+                                    self.__class__.__name__)
+                raise TypeError('%r takes 0 or %d argument%s' % (
+                    self.__class__.__name__,
+                    len(self.fields),
+                    len(self.fields) != 1 and 's' or ''
+                ))
+            for name, arg in izip(self.fields, fields):
+                setattr(self, name, arg)
+        for attr in self.attributes:
+            setattr(self, attr, attributes.pop(attr, None))
+        if attributes:
+            raise TypeError('unknown attribute %r' %
+                            iter(attributes).next())
+
+    def iter_fields(self, exclude=None, only=None):
+        """This method iterates over all fields that are defined and yields
+        ``(key, value)`` tuples.  Per default all fields are returned, but
+        it's possible to limit that to some fields by providing the `only`
+        parameter or to exclude some using the `exclude` parameter.  Both
+        should be sets or tuples of field names.
+        """
+        for name in self.fields:
+            if (exclude is only is None) or \
+               (exclude is not None and name not in exclude) or \
+               (only is not None and name in only):
+                try:
+                    yield name, getattr(self, name)
+                except AttributeError:
+                    pass
+
+    def iter_child_nodes(self, exclude=None, only=None):
+        """Iterates over all direct child nodes of the node.  This iterates
+        over all fields and yields the values if they are nodes.  If the value
+        of a field is a list all the nodes in that list are returned.
+        """
+        for field, item in self.iter_fields(exclude, only):
+            if isinstance(item, list):
+                for n in item:
+                    if isinstance(n, Node):
+                        yield n
+            elif isinstance(item, Node):
+                yield item
+
+    def find(self, node_type):
+        """Find the first node of a given type.  If no such node exists the
+        return value is `None`.
+        """
+        for result in self.find_all(node_type):
+            return result
+
+    def find_all(self, node_type):
+        """Find all the nodes of a given type."""
+        for child in self.iter_child_nodes():
+            if isinstance(child, node_type):
+                yield child
+            for result in child.find_all(node_type):
+                yield result
+
+    def set_ctx(self, ctx):
+        """Reset the context of a node and all child nodes.  Per default the
+        parser will generate all nodes with a 'load' context as it's the
+        most common one.  This method is used in the parser to set assignment
+        targets and other nodes to a store context.
+        """
+        todo = deque([self])
+        while todo:
+            node = todo.popleft()
+            if 'ctx' in node.fields:
+                node.ctx = ctx
+            todo.extend(node.iter_child_nodes())
+        return self
+
+    def set_lineno(self, lineno, override=False):
+        """Set the line numbers of the node and children."""
+        todo = deque([self])
+        while todo:
+            node = todo.popleft()
+            if 'lineno' in node.attributes:
+                if node.lineno is None or override:
+                    node.lineno = lineno
+            todo.extend(node.iter_child_nodes())
+        return self
+
+    def set_environment(self, environment):
+        """Set the environment for all nodes."""
+        todo = deque([self])
+        while todo:
+            node = todo.popleft()
+            node.environment = environment
+            todo.extend(node.iter_child_nodes())
+        return self
+
+    def __eq__(self, other):
+        return type(self) is type(other) and \
+               tuple(self.iter_fields()) == tuple(other.iter_fields())
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __repr__(self):
+        return '%s(%s)' % (
+            self.__class__.__name__,
+            ', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
+                      arg in self.fields)
+        )
+
+
+class Stmt(Node):
+    """Base node for all statements."""
+    abstract = True
+
+
+class Helper(Node):
+    """Nodes that exist in a specific context only."""
+    abstract = True
+
+
+class Template(Node):
+    """Node that represents a template.  This must be the outermost node that
+    is passed to the compiler.
+    """
+    fields = ('body',)
+
+
+class Output(Stmt):
+    """A node that holds multiple expressions which are then printed out.
+    This is used both for the `print` statement and the regular template data.
+    """
+    fields = ('nodes',)
+
+
+class Extends(Stmt):
+    """Represents an extends statement."""
+    fields = ('template',)
+
+
+class For(Stmt):
+    """The for loop.  `target` is the target for the iteration (usually a
+    :class:`Name` or :class:`Tuple`), `iter` the iterable.  `body` is a list
+    of nodes that are used as loop-body, and `else_` a list of nodes for the
+    `else` block.  If no else node exists it has to be an empty list.
+
+    For filtered nodes an expression can be stored as `test`, otherwise `None`.
+    """
+    fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
+
+
+class If(Stmt):
+    """If `test` is true, `body` is rendered, else `else_`."""
+    fields = ('test', 'body', 'else_')
+
+
+class Macro(Stmt):
+    """A macro definition.  `name` is the name of the macro, `args` a list of
+    arguments and `defaults` a list of defaults if there are any.  `body` is
+    a list of nodes for the macro body.
+    """
+    fields = ('name', 'args', 'defaults', 'body')
+
+
+class CallBlock(Stmt):
+    """Like a macro without a name but a call instead.  `call` is called with
+    the unnamed macro as `caller` argument this node holds.
+    """
+    fields = ('call', 'args', 'defaults', 'body')
+
+
+class FilterBlock(Stmt):
+    """Node for filter sections."""
+    fields = ('body', 'filter')
+
+
+class Block(Stmt):
+    """A node that represents a block."""
+    fields = ('name', 'body')
+
+
+class Include(Stmt):
+    """A node that represents the include tag."""
+    fields = ('template', 'with_context')
+
+
+class Import(Stmt):
+    """A node that represents the import tag."""
+    fields = ('template', 'target', 'with_context')
+
+
+class FromImport(Stmt):
+    """A node that represents the from import tag.  It's important to not
+    pass unsafe names to the name attribute.  The compiler translates the
+    attribute lookups directly into getattr calls and does *not* use the
+    subscript callback of the interface.  As exported variables may not
+    start with double underscores (which the parser asserts) this is not a
+    problem for regular Jinja code, but if this node is used in an extension
+    extra care must be taken.
+
+    The list of names may contain tuples if aliases are wanted.
+    """
+    fields = ('template', 'names', 'with_context')
+
+
+class ExprStmt(Stmt):
+    """A statement that evaluates an expression and discards the result."""
+    fields = ('node',)
+
+
+class Assign(Stmt):
+    """Assigns an expression to a target."""
+    fields = ('target', 'node')
+
+
+class Expr(Node):
+    """Baseclass for all expressions."""
+    abstract = True
+
+    def as_const(self):
+        """Return the value of the expression as constant or raise
+        :exc:`Impossible` if this was not possible:
+
+        >>> Add(Const(23), Const(42)).as_const()
+        65
+        >>> Add(Const(23), Name('var', 'load')).as_const()
+        Traceback (most recent call last):
+          ...
+        Impossible
+
+        This requires the `environment` attribute of all nodes to be
+        set to the environment that created the nodes.
+        """
+        raise Impossible()
+
+    def can_assign(self):
+        """Check if it's possible to assign something to this node."""
+        return False
+
+
+class BinExpr(Expr):
+    """Baseclass for all binary expressions."""
+    fields = ('left', 'right')
+    operator = None
+    abstract = True
+
+    def as_const(self):
+        f = _binop_to_func[self.operator]
+        try:
+            return f(self.left.as_const(), self.right.as_const())
+        except:
+            raise Impossible()
+
+
+class UnaryExpr(Expr):
+    """Baseclass for all unary expressions."""
+    fields = ('node',)
+    operator = None
+    abstract = True
+
+    def as_const(self):
+        f = _uaop_to_func[self.operator]
+        try:
+            return f(self.node.as_const())
+        except:
+            raise Impossible()
+
+
+class Name(Expr):
+    """Looks up a name or stores a value in a name.
+    The `ctx` of the node can be one of the following values:
+
+    -   `store`: store a value in the name
+    -   `load`: load that name
+    -   `param`: like `store` but if the name was defined as function parameter.
+    """
+    fields = ('name', 'ctx')
+
+    def can_assign(self):
+        return self.name not in ('true', 'false', 'none',
+                                 'True', 'False', 'None')
+
+
+class Literal(Expr):
+    """Baseclass for literals."""
+    abstract = True
+
+
+class Const(Literal):
+    """All constant values.  The parser will return this node for simple
+    constants such as ``42`` or ``"foo"`` but it can be used to store more
+    complex values such as lists too.  Only constants with a safe
+    representation (objects where ``eval(repr(x)) == x`` is true) can be
+    stored this way.
+    """
+    fields = ('value',)
+
+    def as_const(self):
+        return self.value
+
+    @classmethod
+    def from_untrusted(cls, value, lineno=None, environment=None):
+        """Return a const object if the value is representable as
+        constant value in the generated code, otherwise it will raise
+        an `Impossible` exception.
+        """
+        from compiler import has_safe_repr
+        if not has_safe_repr(value):
+            raise Impossible()
+        return cls(value, lineno=lineno, environment=environment)
+
+
+class TemplateData(Literal):
+    """A constant template string."""
+    fields = ('data',)
+
+    def as_const(self):
+        if self.environment.autoescape:
+            return Markup(self.data)
+        return self.data
+
+
+class Tuple(Literal):
+    """For loop unpacking and some other things like multiple arguments
+    for subscripts.  As for :class:`Name`, `ctx` specifies whether the tuple
+    is used for loading the names or for storing them.
+    """
+    fields = ('items', 'ctx')
+
+    def as_const(self):
+        return tuple(x.as_const() for x in self.items)
+
+    def can_assign(self):
+        for item in self.items:
+            if not item.can_assign():
+                return False
+        return True
+
+
+class List(Literal):
+    """Any list literal such as ``[1, 2, 3]``"""
+    fields = ('items',)
+
+    def as_const(self):
+        return [x.as_const() for x in self.items]
+
+
+class Dict(Literal):
+    """Any dict literal such as ``{1: 2, 3: 4}``.  The items must be a list of
+    :class:`Pair` nodes.
+    """
+    fields = ('items',)
+
+    def as_const(self):
+        return dict(x.as_const() for x in self.items)
+
+
+class Pair(Helper):
+    """A key, value pair for dicts."""
+    fields = ('key', 'value')
+
+    def as_const(self):
+        return self.key.as_const(), self.value.as_const()
+
+
+class Keyword(Helper):
+    """A key, value pair for keyword arguments where key is a string."""
+    fields = ('key', 'value')
+
+    def as_const(self):
+        return self.key, self.value.as_const()
+
+
+class CondExpr(Expr):
+    """A conditional expression (inline if expression).  (``{{
+    foo if bar else baz }}``)
+    """
+    fields = ('test', 'expr1', 'expr2')
+
+    def as_const(self):
+        if self.test.as_const():
+            return self.expr1.as_const()
+
+        # if we evaluate to an undefined object, we better do that at runtime
+        if self.expr2 is None:
+            raise Impossible()
+
+        return self.expr2.as_const()
+
+
+class Filter(Expr):
+    """This node applies a filter on an expression.  `name` is the name of
+    the filter, the rest of the fields are the same as for :class:`Call`.
+
+    If the `node` of a filter is `None` the contents of the last buffer are
+    filtered.  Buffers are created by macros and filter blocks.
+    """
+    fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
+
+    def as_const(self, obj=None):
+        if self.node is obj is None:
+            raise Impossible()
+        filter = self.environment.filters.get(self.name)
+        if filter is None or getattr(filter, 'contextfilter', False):
+            raise Impossible()
+        if obj is None:
+            obj = self.node.as_const()
+        args = [x.as_const() for x in self.args]
+        if getattr(filter, 'environmentfilter', False):
+            args.insert(0, self.environment)
+        kwargs = dict(x.as_const() for x in self.kwargs)
+        if self.dyn_args is not None:
+            try:
+                args.extend(self.dyn_args.as_const())
+            except:
+                raise Impossible()
+        if self.dyn_kwargs is not None:
+            try:
+                kwargs.update(self.dyn_kwargs.as_const())
+            except:
+                raise Impossible()
+        try:
+            return filter(obj, *args, **kwargs)
+        except:
+            raise Impossible()
+
+
+class Test(Expr):
+    """Applies a test on an expression.  `name` is the name of the test, the
+    rest of the fields are the same as for :class:`Call`.
+    """
+    fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
+
+
+class Call(Expr):
+    """Calls an expression.  `args` is a list of arguments, `kwargs` a list
+    of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
+    and `dyn_kwargs` has to be either `None` or a node that is used as
+    node for dynamic positional (``*args``) or keyword (``**kwargs``)
+    arguments.
+    """
+    fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
+
+    def as_const(self):
+        obj = self.node.as_const()
+
+        # don't evaluate context functions
+        args = [x.as_const() for x in self.args]
+        if getattr(obj, 'contextfunction', False):
+            raise Impossible()
+        elif getattr(obj, 'environmentfunction', False):
+            args.insert(0, self.environment)
+
+        kwargs = dict(x.as_const() for x in self.kwargs)
+        if self.dyn_args is not None:
+            try:
+                args.extend(self.dyn_args.as_const())
+            except:
+                raise Impossible()
+        if self.dyn_kwargs is not None:
+            try:
+                kwargs.update(self.dyn_kwargs.as_const())
+            except:
+                raise Impossible()
+        try:
+            return obj(*args, **kwargs)
+        except:
+            raise Impossible()
+
+
+class Getitem(Expr):
+    """Get an attribute or item from an expression and prefer the item."""
+    fields = ('node', 'arg', 'ctx')
+
+    def as_const(self):
+        if self.ctx != 'load':
+            raise Impossible()
+        try:
+            return self.environment.getitem(self.node.as_const(),
+                                            self.arg.as_const())
+        except:
+            raise Impossible()
+
+    def can_assign(self):
+        return False
+
+
+class Getattr(Expr):
+    """Get an attribute or item from an expression that is a ascii-only
+    bytestring and prefer the attribute.
+    """
+    fields = ('node', 'attr', 'ctx')
+
+    def as_const(self):
+        if self.ctx != 'load':
+            raise Impossible()
+        try:
+            return self.environment.getattr(self.node.as_const(), self.attr)
+        except:
+            raise Impossible()
+
+    def can_assign(self):
+        return False
+
+
+class Slice(Expr):
+    """Represents a slice object.  This must only be used as argument for
+    :class:`Getitem`.
+    """
+    fields = ('start', 'stop', 'step')
+
+    def as_const(self):
+        def const(obj):
+            if obj is None:
+                return obj
+            return obj.as_const()
+        return slice(const(self.start), const(self.stop), const(self.step))
+
+
+class Concat(Expr):
+    """Concatenates the list of expressions provided after converting them to
+    unicode.
+    """
+    fields = ('nodes',)
+
+    def as_const(self):
+        return ''.join(unicode(x.as_const()) for x in self.nodes)
+
+
+class Compare(Expr):
+    """Compares an expression with some other expressions.  `ops` must be a
+    list of :class:`Operand`\s.
+    """
+    fields = ('expr', 'ops')
+
+    def as_const(self):
+        result = value = self.expr.as_const()
+        try:
+            for op in self.ops:
+                new_value = op.expr.as_const()
+                result = _cmpop_to_func[op.op](value, new_value)
+                value = new_value
+        except:
+            raise Impossible()
+        return result
+
+
+class Operand(Helper):
+    """Holds an operator and an expression."""
+    fields = ('op', 'expr')
+
+if __debug__:
+    Operand.__doc__ += '\nThe following operators are available: ' + \
+        ', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
+                  set(_uaop_to_func) | set(_cmpop_to_func)))
+
+
+class Mul(BinExpr):
+    """Multiplies the left with the right node."""
+    operator = '*'
+
+
+class Div(BinExpr):
+    """Divides the left by the right node."""
+    operator = '/'
+
+
+class FloorDiv(BinExpr):
+    """Divides the left by the right node and truncates conver the
+    result into an integer by truncating.
+    """
+    operator = '//'
+
+
+class Add(BinExpr):
+    """Add the left to the right node."""
+    operator = '+'
+
+
+class Sub(BinExpr):
+    """Substract the right from the left node."""
+    operator = '-'
+
+
+class Mod(BinExpr):
+    """Left modulo right."""
+    operator = '%'
+
+
+class Pow(BinExpr):
+    """Left to the power of right."""
+    operator = '**'
+
+
+class And(BinExpr):
+    """Short circuited AND."""
+    operator = 'and'
+
+    def as_const(self):
+        return self.left.as_const() and self.right.as_const()
+
+
+class Or(BinExpr):
+    """Short circuited OR."""
+    operator = 'or'
+
+    def as_const(self):
+        return self.left.as_const() or self.right.as_const()
+
+
+class Not(UnaryExpr):
+    """Negate the expression."""
+    operator = 'not'
+
+
+class Neg(UnaryExpr):
+    """Make the expression negative."""
+    operator = '-'
+
+
+class Pos(UnaryExpr):
+    """Make the expression positive (noop for most expressions)"""
+    operator = '+'
+
+
+# Helpers for extensions
+
+
+class EnvironmentAttribute(Expr):
+    """Loads an attribute from the environment object.  This is useful for
+    extensions that want to call a callback stored on the environment.
+    """
+    fields = ('name',)
+
+
+class ExtensionAttribute(Expr):
+    """Returns the attribute of an extension bound to the environment.
+    The identifier is the identifier of the :class:`Extension`.
+
+    This node is usually constructed by calling the
+    :meth:`~jinja2.ext.Extension.attr` method on an extension.
+    """
+    fields = ('identifier', 'name')
+
+
+class ImportedName(Expr):
+    """If created with an import name the import name is returned on node
+    access.  For example ``ImportedName('cgi.escape')`` returns the `escape`
+    function from the cgi module on evaluation.  Imports are optimized by the
+    compiler so there is no need to assign them to local variables.
+    """
+    fields = ('importname',)
+
+
+class InternalName(Expr):
+    """An internal name in the compiler.  You cannot create these nodes
+    yourself but the parser provides a
+    :meth:`~jinja2.parser.Parser.free_identifier` method that creates
+    a new identifier for you.  This identifier is not available from the
+    template and is not treated specially by the compiler.
+    """
+    fields = ('name',)
+
+    def __init__(self):
+        raise TypeError('Can\'t create internal names.  Use the '
+                        '`free_identifier` method on a parser.')
+
+
+class MarkSafe(Expr):
+    """Mark the wrapped expression as safe (wrap it as `Markup`)."""
+    fields = ('expr',)
+
+    def as_const(self):
+        return Markup(self.expr.as_const())
+
+
+class ContextReference(Expr):
+    """Returns the current template context."""
+
+
+class Continue(Stmt):
+    """Continue a loop."""
+
+
+class Break(Stmt):
+    """Break a loop."""
+
+
+# make sure nobody creates custom nodes
+def _failing_new(*args, **kwargs):
+    raise TypeError('can\'t create custom node types')
+NodeType.__new__ = staticmethod(_failing_new); del _failing_new
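
For illustration, a rough sketch of the as_const() behaviour described above,
using only node classes defined in this module:

    from jinja2 import nodes

    # constant subtrees can be evaluated at compile time
    expr = nodes.Mul(nodes.Const(6), nodes.Add(nodes.Const(3), nodes.Const(4)))
    print expr.as_const()                       # prints 42

    # anything involving a runtime name raises Impossible instead
    try:
        nodes.Add(nodes.Const(1), nodes.Name('var', 'load')).as_const()
    except nodes.Impossible:
        print 'not a constant expression'
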
diff --git a/scripts/jinja2/optimizer.py b/scripts/jinja2/optimizer.py
new file mode 100644 (file)
index 0000000..43065df
--- /dev/null
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.optimizer
+    ~~~~~~~~~~~~~~~~
+
+    The jinja optimizer currently tries to constant-fold a few expressions
+    and modifies the AST in place so that it is cheaper to evaluate.
+
+    Because the AST does not contain all the scoping information and the
+    compiler has to find that out, we cannot do all the optimizations we
+    want.  For example loop unrolling doesn't work because unrolled loops would
+    have a different scoping.
+
+    The solution would be a second syntax tree that has the scoping rules stored.
+
+    :copyright: Copyright 2008 by Christoph Hack, Armin Ronacher.
+    :license: BSD.
+"""
+from jinja2 import nodes
+from jinja2.visitor import NodeTransformer
+
+
+def optimize(node, environment):
+    """The context hint can be used to perform an static optimization
+    based on the context given."""
+    optimizer = Optimizer(environment)
+    return optimizer.visit(node)
+
+
+class Optimizer(NodeTransformer):
+
+    def __init__(self, environment):
+        self.environment = environment
+
+    def visit_If(self, node):
+        """Eliminate dead code."""
+        # do not optimize ifs that have a block inside so that it doesn't
+        # break super().
+        if node.find(nodes.Block) is not None:
+            return self.generic_visit(node)
+        try:
+            val = self.visit(node.test).as_const()
+        except nodes.Impossible:
+            return self.generic_visit(node)
+        if val:
+            body = node.body
+        else:
+            body = node.else_
+        result = []
+        for node in body:
+            result.extend(self.visit_list(node))
+        return result
+
+    def fold(self, node):
+        """Do constant folding."""
+        node = self.generic_visit(node)
+        try:
+            return nodes.Const.from_untrusted(node.as_const(),
+                                              lineno=node.lineno,
+                                              environment=self.environment)
+        except nodes.Impossible:
+            return node
+
+    visit_Add = visit_Sub = visit_Mul = visit_Div = visit_FloorDiv = \
+    visit_Pow = visit_Mod = visit_And = visit_Or = visit_Pos = visit_Neg = \
+    visit_Not = visit_Compare = visit_Getitem = visit_Getattr = visit_Call = \
+    visit_Filter = visit_Test = visit_CondExpr = fold
+    del fold
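
For illustration, a rough sketch of running the optimizer on a parsed
template; Environment.parse from the bundled environment module is assumed
to return the unoptimized AST:

    from jinja2 import Environment, nodes
    from jinja2.optimizer import optimize

    env = Environment()
    tree = env.parse(u'{{ 2 ** 10 }}{% if false %}dead code{% endif %}')
    tree = optimize(tree, env)

    # the power expression is folded into a Const node and the statically
    # false if block is dropped from the template body
    print tree.find(nodes.Const)                # prints Const(value=1024)
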
diff --git a/scripts/jinja2/parser.py b/scripts/jinja2/parser.py
new file mode 100644 (file)
index 0000000..d6f1b36
--- /dev/null
@@ -0,0 +1,774 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.parser
+    ~~~~~~~~~~~~~
+
+    Implements the template parser.
+
+    :copyright: 2008 by Armin Ronacher.
+    :license: BSD, see LICENSE for more details.
+"""
+from jinja2 import nodes
+from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
+
+
+_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
+                                 'macro', 'include', 'from', 'import',
+                                 'set'])
+_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
+
+
+class Parser(object):
+    """This is the central parsing class Jinja2 uses.  It's passed to
+    extensions and can be used to parse expressions or statements.
+    """
+
+    def __init__(self, environment, source, name=None, filename=None,
+                 state=None):
+        self.environment = environment
+        self.stream = environment._tokenize(source, name, filename, state)
+        self.name = name
+        self.filename = filename
+        self.closed = False
+        self.extensions = {}
+        for extension in environment.extensions.itervalues():
+            for tag in extension.tags:
+                self.extensions[tag] = extension.parse
+        self._last_identifier = 0
+
+    def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
+        """Convenience method that raises `exc` with the message, passed
+        line number or last line number as well as the current name and
+        filename.
+        """
+        if lineno is None:
+            lineno = self.stream.current.lineno
+        raise exc(msg, lineno, self.name, self.filename)
+
+    def is_tuple_end(self, extra_end_rules=None):
+        """Are we at the end of a tuple?"""
+        if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
+            return True
+        elif extra_end_rules is not None:
+            return self.stream.current.test_any(extra_end_rules)
+        return False
+
+    def free_identifier(self, lineno=None):
+        """Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
+        self._last_identifier += 1
+        rv = object.__new__(nodes.InternalName)
+        nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
+        return rv
+
+    def parse_statement(self):
+        """Parse a single statement."""
+        token = self.stream.current
+        if token.type != 'name':
+            self.fail('tag name expected', token.lineno)
+        if token.value in _statement_keywords:
+            return getattr(self, 'parse_' + self.stream.current.value)()
+        if token.value == 'call':
+            return self.parse_call_block()
+        if token.value == 'filter':
+            return self.parse_filter_block()
+        ext = self.extensions.get(token.value)
+        if ext is not None:
+            return ext(self)
+        self.fail('unknown tag %r' % token.value, token.lineno)
+
+    def parse_statements(self, end_tokens, drop_needle=False):
+        """Parse multiple statements into a list until one of the end tokens
+        is reached.  This is used to parse the body of statements as it also
+        parses template data if appropriate.  The parser checks first if the
+        current token is a colon and skips it if there is one.  Then it checks
+        for the block end and parses until one of the `end_tokens` is
+        reached.  Per default the active token in the stream at the end of
+        the call is the matched end token.  If this is not wanted `drop_needle`
+        can be set to `True` and the end token is removed.
+        """
+        # the first token may be a colon for python compatibility
+        self.stream.skip_if('colon')
+
+        # in the future it would be possible to add whole code sections
+        # by adding some sort of end of statement token and parsing those here.
+        self.stream.expect('block_end')
+        result = self.subparse(end_tokens)
+
+        if drop_needle:
+            self.stream.next()
+        return result
+
+    def parse_set(self):
+        """Parse an assign statement."""
+        lineno = self.stream.next().lineno
+        target = self.parse_assign_target()
+        self.stream.expect('assign')
+        expr = self.parse_tuple()
+        return nodes.Assign(target, expr, lineno=lineno)
+
+    def parse_for(self):
+        """Parse a for loop."""
+        lineno = self.stream.expect('name:for').lineno
+        target = self.parse_assign_target(extra_end_rules=('name:in',))
+        self.stream.expect('name:in')
+        iter = self.parse_tuple(with_condexpr=False,
+                                extra_end_rules=('name:recursive',))
+        test = None
+        if self.stream.skip_if('name:if'):
+            test = self.parse_expression()
+        recursive = self.stream.skip_if('name:recursive')
+        body = self.parse_statements(('name:endfor', 'name:else'))
+        if self.stream.next().value == 'endfor':
+            else_ = []
+        else:
+            else_ = self.parse_statements(('name:endfor',), drop_needle=True)
+        return nodes.For(target, iter, body, else_, test,
+                         recursive, lineno=lineno)
+
+    def parse_if(self):
+        """Parse an if construct."""
+        node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
+        while 1:
+            node.test = self.parse_tuple(with_condexpr=False)
+            node.body = self.parse_statements(('name:elif', 'name:else',
+                                               'name:endif'))
+            token = self.stream.next()
+            if token.test('name:elif'):
+                new_node = nodes.If(lineno=self.stream.current.lineno)
+                node.else_ = [new_node]
+                node = new_node
+                continue
+            elif token.test('name:else'):
+                node.else_ = self.parse_statements(('name:endif',),
+                                                   drop_needle=True)
+            else:
+                node.else_ = []
+            break
+        return result
+
+    def parse_block(self):
+        node = nodes.Block(lineno=self.stream.next().lineno)
+        node.name = self.stream.expect('name').value
+        node.body = self.parse_statements(('name:endblock',), drop_needle=True)
+        self.stream.skip_if('name:' + node.name)
+        return node
+
+    def parse_extends(self):
+        node = nodes.Extends(lineno=self.stream.next().lineno)
+        node.template = self.parse_expression()
+        return node
+
+    def parse_import_context(self, node, default):
+        if self.stream.current.test_any('name:with', 'name:without') and \
+           self.stream.look().test('name:context'):
+            node.with_context = self.stream.next().value == 'with'
+            self.stream.skip()
+        else:
+            node.with_context = default
+        return node
+
+    def parse_include(self):
+        node = nodes.Include(lineno=self.stream.next().lineno)
+        node.template = self.parse_expression()
+        return self.parse_import_context(node, True)
+
+    def parse_import(self):
+        node = nodes.Import(lineno=self.stream.next().lineno)
+        node.template = self.parse_expression()
+        self.stream.expect('name:as')
+        node.target = self.parse_assign_target(name_only=True).name
+        return self.parse_import_context(node, False)
+
+    def parse_from(self):
+        node = nodes.FromImport(lineno=self.stream.next().lineno)
+        node.template = self.parse_expression()
+        self.stream.expect('name:import')
+        node.names = []
+
+        def parse_context():
+            if self.stream.current.value in ('with', 'without') and \
+               self.stream.look().test('name:context'):
+                node.with_context = self.stream.next().value == 'with'
+                self.stream.skip()
+                return True
+            return False
+
+        while 1:
+            if node.names:
+                self.stream.expect('comma')
+            if self.stream.current.type is 'name':
+                if parse_context():
+                    break
+                target = self.parse_assign_target(name_only=True)
+                if target.name.startswith('_'):
+                    self.fail('names starting with an underline can not '
+                              'be imported', target.lineno,
+                              exc=TemplateAssertionError)
+                if self.stream.skip_if('name:as'):
+                    alias = self.parse_assign_target(name_only=True)
+                    node.names.append((target.name, alias.name))
+                else:
+                    node.names.append(target.name)
+                if parse_context() or self.stream.current.type is not 'comma':
+                    break
+            else:
+                break
+        if not hasattr(node, 'with_context'):
+            node.with_context = False
+            self.stream.skip_if('comma')
+        return node
+
+    def parse_signature(self, node):
+        node.args = args = []
+        node.defaults = defaults = []
+        self.stream.expect('lparen')
+        while self.stream.current.type is not 'rparen':
+            if args:
+                self.stream.expect('comma')
+            arg = self.parse_assign_target(name_only=True)
+            arg.set_ctx('param')
+            if self.stream.skip_if('assign'):
+                defaults.append(self.parse_expression())
+            args.append(arg)
+        self.stream.expect('rparen')
+
+    def parse_call_block(self):
+        node = nodes.CallBlock(lineno=self.stream.next().lineno)
+        if self.stream.current.type is 'lparen':
+            self.parse_signature(node)
+        else:
+            node.args = []
+            node.defaults = []
+
+        node.call = self.parse_expression()
+        if not isinstance(node.call, nodes.Call):
+            self.fail('expected call', node.lineno)
+        node.body = self.parse_statements(('name:endcall',), drop_needle=True)
+        return node
+
+    def parse_filter_block(self):
+        node = nodes.FilterBlock(lineno=self.stream.next().lineno)
+        node.filter = self.parse_filter(None, start_inline=True)
+        node.body = self.parse_statements(('name:endfilter',),
+                                          drop_needle=True)
+        return node
+
+    def parse_macro(self):
+        node = nodes.Macro(lineno=self.stream.next().lineno)
+        node.name = self.parse_assign_target(name_only=True).name
+        self.parse_signature(node)
+        node.body = self.parse_statements(('name:endmacro',),
+                                          drop_needle=True)
+        return node
+
+    def parse_print(self):
+        node = nodes.Output(lineno=self.stream.next().lineno)
+        node.nodes = []
+        while self.stream.current.type is not 'block_end':
+            if node.nodes:
+                self.stream.expect('comma')
+            node.nodes.append(self.parse_expression())
+        return node
+
+    def parse_assign_target(self, with_tuple=True, name_only=False,
+                            extra_end_rules=None):
+        """Parse an assignment target.  As Jinja2 allows assignments to
+        tuples, this function can parse all allowed assignment targets.  By
+        default assignments to tuples are parsed; that can be disabled
+        by setting `with_tuple` to `False`.  If only assignments to names are
+        wanted, `name_only` can be set to `True`.  The `extra_end_rules`
+        parameter is forwarded to the tuple parsing function.
+        """
+        if name_only:
+            token = self.stream.expect('name')
+            target = nodes.Name(token.value, 'store', lineno=token.lineno)
+        else:
+            if with_tuple:
+                target = self.parse_tuple(simplified=True,
+                                          extra_end_rules=extra_end_rules)
+            else:
+                target = self.parse_primary(with_postfix=False)
+            target.set_ctx('store')
+        if not target.can_assign():
+            self.fail('can\'t assign to %r' % target.__class__.
+                      __name__.lower(), target.lineno)
+        return target
+
+    def parse_expression(self, with_condexpr=True):
+        """Parse an expression.  Per default all expressions are parsed, if
+        the optional `with_condexpr` parameter is set to `False` conditional
+        expressions are not parsed.
+        """
+        if with_condexpr:
+            return self.parse_condexpr()
+        return self.parse_or()
+
+    def parse_condexpr(self):
+        lineno = self.stream.current.lineno
+        expr1 = self.parse_or()
+        while self.stream.skip_if('name:if'):
+            expr2 = self.parse_or()
+            if self.stream.skip_if('name:else'):
+                expr3 = self.parse_condexpr()
+            else:
+                expr3 = None
+            expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
+            lineno = self.stream.current.lineno
+        return expr1
+
+    def parse_or(self):
+        lineno = self.stream.current.lineno
+        left = self.parse_and()
+        while self.stream.skip_if('name:or'):
+            right = self.parse_and()
+            left = nodes.Or(left, right, lineno=lineno)
+            lineno = self.stream.current.lineno
+        return left
+
+    def parse_and(self):
+        lineno = self.stream.current.lineno
+        left = self.parse_compare()
+        while self.stream.skip_if('name:and'):
+            right = self.parse_compare()
+            left = nodes.And(left, right, lineno=lineno)
+            lineno = self.stream.current.lineno
+        return left
+
+    def parse_compare(self):
+        lineno = self.stream.current.lineno
+        expr = self.parse_add()
+        ops = []
+        while 1:
+            token_type = self.stream.current.type
+            if token_type in _compare_operators:
+                self.stream.next()
+                ops.append(nodes.Operand(token_type, self.parse_add()))
+            elif self.stream.skip_if('name:in'):
+                ops.append(nodes.Operand('in', self.parse_add()))
+            elif self.stream.current.test('name:not') and \
+                 self.stream.look().test('name:in'):
+                self.stream.skip(2)
+                ops.append(nodes.Operand('notin', self.parse_add()))
+            else:
+                break
+            lineno = self.stream.current.lineno
+        if not ops:
+            return expr
+        return nodes.Compare(expr, ops, lineno=lineno)
+
+    def parse_add(self):
+        lineno = self.stream.current.lineno
+        left = self.parse_sub()
+        while self.stream.current.type is 'add':
+            self.stream.next()
+            right = self.parse_sub()
+            left = nodes.Add(left, right, lineno=lineno)
+            lineno = self.stream.current.lineno
+        return left
+
+    def parse_sub(self):
+        lineno = self.stream.current.lineno
+        left = self.parse_concat()
+        while self.stream.current.type is 'sub':
+            self.stream.next()
+            right = self.parse_concat()
+            left = nodes.Sub(left, right, lineno=lineno)
+            lineno = self.stream.current.lineno
+        return left
+
+    def parse_concat(self):
+        lineno = self.stream.current.lineno
+        args = [self.parse_mul()]
+        while self.stream.current.type is 'tilde':
+            self.stream.next()
+            args.append(self.parse_mul())
+        if len(args) == 1:
+            return args[0]
+        return nodes.Concat(args, lineno=lineno)
+
+    def parse_mul(self):
+        lineno = self.stream.current.lineno
+        left = self.parse_div()
+        while self.stream.current.type is 'mul':
+            self.stream.next()
+            right = self.parse_div()
+            left = nodes.Mul(left, right, lineno=lineno)
+            lineno = self.stream.current.lineno
+        return left
+
+    def parse_div(self):
+        lineno = self.stream.current.lineno
+        left = self.parse_floordiv()
+        while self.stream.current.type is 'div':
+            self.stream.next()
+            right = self.parse_floordiv()
+            left = nodes.Div(left, right, lineno=lineno)
+            lineno = self.stream.current.lineno
+        return left
+
+    def parse_floordiv(self):
+        lineno = self.stream.current.lineno
+        left = self.parse_mod()
+        while self.stream.current.type is 'floordiv':
+            self.stream.next()
+            right = self.parse_mod()
+            left = nodes.FloorDiv(left, right, lineno=lineno)
+            lineno = self.stream.current.lineno
+        return left
+
+    def parse_mod(self):
+        lineno = self.stream.current.lineno
+        left = self.parse_pow()
+        while self.stream.current.type is 'mod':
+            self.stream.next()
+            right = self.parse_pow()
+            left = nodes.Mod(left, right, lineno=lineno)
+            lineno = self.stream.current.lineno
+        return left
+
+    def parse_pow(self):
+        lineno = self.stream.current.lineno
+        left = self.parse_unary()
+        while self.stream.current.type is 'pow':
+            self.stream.next()
+            right = self.parse_unary()
+            left = nodes.Pow(left, right, lineno=lineno)
+            lineno = self.stream.current.lineno
+        return left
+
+    def parse_unary(self):
+        token_type = self.stream.current.type
+        lineno = self.stream.current.lineno
+        if token_type is 'name' and self.stream.current.value == 'not':
+            self.stream.next()
+            node = self.parse_unary()
+            return nodes.Not(node, lineno=lineno)
+        if token_type is 'sub':
+            self.stream.next()
+            node = self.parse_unary()
+            return nodes.Neg(node, lineno=lineno)
+        if token_type is 'add':
+            self.stream.next()
+            node = self.parse_unary()
+            return nodes.Pos(node, lineno=lineno)
+        return self.parse_primary()
+
+    def parse_primary(self, with_postfix=True):
+        token = self.stream.current
+        if token.type is 'name':
+            if token.value in ('true', 'false', 'True', 'False'):
+                node = nodes.Const(token.value in ('true', 'True'),
+                                   lineno=token.lineno)
+            elif token.value in ('none', 'None'):
+                node = nodes.Const(None, lineno=token.lineno)
+            else:
+                node = nodes.Name(token.value, 'load', lineno=token.lineno)
+            self.stream.next()
+        elif token.type is 'string':
+            self.stream.next()
+            buf = [token.value]
+            lineno = token.lineno
+            while self.stream.current.type is 'string':
+                buf.append(self.stream.current.value)
+                self.stream.next()
+            node = nodes.Const(''.join(buf), lineno=lineno)
+        elif token.type in ('integer', 'float'):
+            self.stream.next()
+            node = nodes.Const(token.value, lineno=token.lineno)
+        elif token.type is 'lparen':
+            self.stream.next()
+            node = self.parse_tuple()
+            self.stream.expect('rparen')
+        elif token.type is 'lbracket':
+            node = self.parse_list()
+        elif token.type is 'lbrace':
+            node = self.parse_dict()
+        else:
+            self.fail("unexpected token '%s'" % (token,), token.lineno)
+        if with_postfix:
+            node = self.parse_postfix(node)
+        return node
+
+    def parse_tuple(self, simplified=False, with_condexpr=True,
+                    extra_end_rules=None):
+        """Works like `parse_expression` but if multiple expressions are
+        delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
+        This method may also return a plain expression instead of a tuple
+        if no commas were found.
+
+        The default parsing mode is a full tuple.  If `simplified` is `True`
+        only names and literals are parsed.  The `with_condexpr` parameter is
+        forwarded to :meth:`parse_expression`.
+
+        Because tuples do not require delimiters and may end in a bogus comma
+        an extra hint is needed that marks the end of a tuple.  For example
+        for loops support tuples between `for` and `in`.  In that case the
+        `extra_end_rules` is set to ``['name:in']``.
+        """
+        lineno = self.stream.current.lineno
+        if simplified:
+            parse = lambda: self.parse_primary(with_postfix=False)
+        elif with_condexpr:
+            parse = self.parse_expression
+        else:
+            parse = lambda: self.parse_expression(with_condexpr=False)
+        args = []
+        is_tuple = False
+        while 1:
+            if args:
+                self.stream.expect('comma')
+            if self.is_tuple_end(extra_end_rules):
+                break
+            args.append(parse())
+            if self.stream.current.type is 'comma':
+                is_tuple = True
+            else:
+                break
+            lineno = self.stream.current.lineno
+        if not is_tuple and args:
+            return args[0]
+        return nodes.Tuple(args, 'load', lineno=lineno)
+
+    def parse_list(self):
+        token = self.stream.expect('lbracket')
+        items = []
+        while self.stream.current.type is not 'rbracket':
+            if items:
+                self.stream.expect('comma')
+            if self.stream.current.type == 'rbracket':
+                break
+            items.append(self.parse_expression())
+        self.stream.expect('rbracket')
+        return nodes.List(items, lineno=token.lineno)
+
+    def parse_dict(self):
+        token = self.stream.expect('lbrace')
+        items = []
+        while self.stream.current.type is not 'rbrace':
+            if items:
+                self.stream.expect('comma')
+            if self.stream.current.type == 'rbrace':
+                break
+            key = self.parse_expression()
+            self.stream.expect('colon')
+            value = self.parse_expression()
+            items.append(nodes.Pair(key, value, lineno=key.lineno))
+        self.stream.expect('rbrace')
+        return nodes.Dict(items, lineno=token.lineno)
+
+    def parse_postfix(self, node):
+        while 1:
+            token_type = self.stream.current.type
+            if token_type is 'dot' or token_type is 'lbracket':
+                node = self.parse_subscript(node)
+            elif token_type is 'lparen':
+                node = self.parse_call(node)
+            elif token_type is 'pipe':
+                node = self.parse_filter(node)
+            elif token_type is 'name' and self.stream.current.value == 'is':
+                node = self.parse_test(node)
+            else:
+                break
+        return node
+
+    def parse_subscript(self, node):
+        token = self.stream.next()
+        if token.type is 'dot':
+            attr_token = self.stream.current
+            self.stream.next()
+            if attr_token.type is 'name':
+                return nodes.Getattr(node, attr_token.value, 'load',
+                                     lineno=token.lineno)
+            elif attr_token.type is not 'integer':
+                self.fail('expected name or number', attr_token.lineno)
+            arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
+            return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
+        if token.type is 'lbracket':
+            priority_on_attribute = False
+            args = []
+            while self.stream.current.type is not 'rbracket':
+                if args:
+                    self.stream.expect('comma')
+                args.append(self.parse_subscribed())
+            self.stream.expect('rbracket')
+            if len(args) == 1:
+                arg = args[0]
+            else:
+                arg = nodes.Tuple(args, 'load', lineno=token.lineno)
+            return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
+        self.fail('expected subscript expression', token.lineno)
+
+    def parse_subscribed(self):
+        lineno = self.stream.current.lineno
+
+        if self.stream.current.type is 'colon':
+            self.stream.next()
+            args = [None]
+        else:
+            node = self.parse_expression()
+            if self.stream.current.type is not 'colon':
+                return node
+            self.stream.next()
+            args = [node]
+
+        if self.stream.current.type is 'colon':
+            args.append(None)
+        elif self.stream.current.type not in ('rbracket', 'comma'):
+            args.append(self.parse_expression())
+        else:
+            args.append(None)
+
+        if self.stream.current.type is 'colon':
+            self.stream.next()
+            if self.stream.current.type not in ('rbracket', 'comma'):
+                args.append(self.parse_expression())
+            else:
+                args.append(None)
+        else:
+            args.append(None)
+
+        return nodes.Slice(lineno=lineno, *args)
+
+    def parse_call(self, node):
+        token = self.stream.expect('lparen')
+        args = []
+        kwargs = []
+        dyn_args = dyn_kwargs = None
+        require_comma = False
+
+        def ensure(expr):
+            if not expr:
+                self.fail('invalid syntax for function call expression',
+                          token.lineno)
+
+        while self.stream.current.type is not 'rparen':
+            if require_comma:
+                self.stream.expect('comma')
+                # support for trailing comma
+                if self.stream.current.type is 'rparen':
+                    break
+            if self.stream.current.type is 'mul':
+                ensure(dyn_args is None and dyn_kwargs is None)
+                self.stream.next()
+                dyn_args = self.parse_expression()
+            elif self.stream.current.type is 'pow':
+                ensure(dyn_kwargs is None)
+                self.stream.next()
+                dyn_kwargs = self.parse_expression()
+            else:
+                ensure(dyn_args is None and dyn_kwargs is None)
+                if self.stream.current.type is 'name' and \
+                    self.stream.look().type is 'assign':
+                    key = self.stream.current.value
+                    self.stream.skip(2)
+                    value = self.parse_expression()
+                    kwargs.append(nodes.Keyword(key, value,
+                                                lineno=value.lineno))
+                else:
+                    ensure(not kwargs)
+                    args.append(self.parse_expression())
+
+            require_comma = True
+        self.stream.expect('rparen')
+
+        if node is None:
+            return args, kwargs, dyn_args, dyn_kwargs
+        return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
+                          lineno=token.lineno)
+
+    def parse_filter(self, node, start_inline=False):
+        while self.stream.current.type == 'pipe' or start_inline:
+            if not start_inline:
+                self.stream.next()
+            token = self.stream.expect('name')
+            name = token.value
+            while self.stream.current.type is 'dot':
+                self.stream.next()
+                name += '.' + self.stream.expect('name').value
+            if self.stream.current.type is 'lparen':
+                args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
+            else:
+                args = []
+                kwargs = []
+                dyn_args = dyn_kwargs = None
+            node = nodes.Filter(node, name, args, kwargs, dyn_args,
+                                dyn_kwargs, lineno=token.lineno)
+            start_inline = False
+        return node
+
+    def parse_test(self, node):
+        token = self.stream.next()
+        if self.stream.current.test('name:not'):
+            self.stream.next()
+            negated = True
+        else:
+            negated = False
+        name = self.stream.expect('name').value
+        while self.stream.current.type is 'dot':
+            self.stream.next()
+            name += '.' + self.stream.expect('name').value
+        dyn_args = dyn_kwargs = None
+        kwargs = []
+        if self.stream.current.type is 'lparen':
+            args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
+        elif self.stream.current.type in ('name', 'string', 'integer',
+                                          'float', 'lparen', 'lbracket',
+                                          'lbrace') and not \
+             self.stream.current.test_any('name:else', 'name:or',
+                                          'name:and'):
+            if self.stream.current.test('name:is'):
+                self.fail('You cannot chain multiple tests with is')
+            args = [self.parse_expression()]
+        else:
+            args = []
+        node = nodes.Test(node, name, args, kwargs, dyn_args,
+                          dyn_kwargs, lineno=token.lineno)
+        if negated:
+            node = nodes.Not(node, lineno=token.lineno)
+        return node
+
+    def subparse(self, end_tokens=None):
+        body = []
+        data_buffer = []
+        add_data = data_buffer.append
+
+        def flush_data():
+            if data_buffer:
+                lineno = data_buffer[0].lineno
+                body.append(nodes.Output(data_buffer[:], lineno=lineno))
+                del data_buffer[:]
+
+        while self.stream:
+            token = self.stream.current
+            if token.type is 'data':
+                if token.value:
+                    add_data(nodes.TemplateData(token.value,
+                                                lineno=token.lineno))
+                self.stream.next()
+            elif token.type is 'variable_begin':
+                self.stream.next()
+                add_data(self.parse_tuple(with_condexpr=True))
+                self.stream.expect('variable_end')
+            elif token.type is 'block_begin':
+                flush_data()
+                self.stream.next()
+                if end_tokens is not None and \
+                   self.stream.current.test_any(*end_tokens):
+                    return body
+                rv = self.parse_statement()
+                if isinstance(rv, list):
+                    body.extend(rv)
+                else:
+                    body.append(rv)
+                self.stream.expect('block_end')
+            else:
+                raise AssertionError('internal parsing error')
+
+        flush_data()
+        return body
+
+    def parse(self):
+        """Parse the whole template into a `Template` node."""
+        result = nodes.Template(self.subparse(), lineno=1)
+        result.set_environment(self.environment)
+        return result
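+
+# Usage sketch (illustrative, assuming the bundled Environment class): the
+# parser is normally driven through Environment.parse(), which constructs a
+# Parser internally; extensions receive a Parser instance directly.
+#
+# >>> from jinja2 import Environment
+# >>> env = Environment()
+# >>> tree = env.parse(u'Hello {{ name }}!')                 # a nodes.Template
+# >>> tree = Parser(env, u'{% if x %}y{% endif %}').parse()  # equivalent, by hand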
diff --git a/scripts/jinja2/runtime.py b/scripts/jinja2/runtime.py
new file mode 100644 (file)
index 0000000..2ed3ac6
--- /dev/null
@@ -0,0 +1,484 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.runtime
+    ~~~~~~~~~~~~~~
+
+    Runtime helpers.
+
+    :copyright: Copyright 2008 by Armin Ronacher.
+    :license: BSD.
+"""
+import sys
+from itertools import chain, imap
+from jinja2.utils import Markup, partial, soft_unicode, escape, missing, \
+     concat, MethodType, FunctionType
+from jinja2.exceptions import UndefinedError, TemplateRuntimeError
+
+
+# these variables are exported to the template runtime
+__all__ = ['LoopContext', 'Context', 'TemplateReference', 'Macro', 'Markup',
+           'TemplateRuntimeError', 'missing', 'concat', 'escape',
+           'markup_join', 'unicode_join']
+
+
+#: the types we support for context functions
+_context_function_types = (FunctionType, MethodType)
+
+
+def markup_join(seq):
+    """Concatenation that escapes if necessary and converts to unicode."""
+    buf = []
+    iterator = imap(soft_unicode, seq)
+    for arg in iterator:
+        buf.append(arg)
+        if hasattr(arg, '__html__'):
+            return Markup(u'').join(chain(buf, iterator))
+    return concat(buf)
+
+
+def unicode_join(seq):
+    """Simple args to unicode conversion and concatenation."""
+    return concat(imap(unicode, seq))
+
+
+class Context(object):
+    """The template context holds the variables of a template.  It stores the
+    values passed to the template and also the names the template exports.
+    Creating instances is neither supported nor useful as it's created
+    automatically at various stages of the template evaluation and should not
+    be created by hand.
+
+    The context is immutable.  Modifications on :attr:`parent` **must not**
+    happen and modifications on :attr:`vars` are allowed from generated
+    template code only.  Template filters and global functions marked as
+    :func:`contextfunction`\s get the active context passed as first argument
+    and are allowed to access the context read-only.
+
+    The template context supports read only dict operations (`get`,
+    `keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
+    `__getitem__`, `__contains__`).  Additionally there is a :meth:`resolve`
+    method that doesn't fail with a `KeyError` but returns an
+    :class:`Undefined` object for missing variables.
+    """
+    __slots__ = ('parent', 'vars', 'environment', 'exported_vars', 'name',
+                 'blocks', '__weakref__')
+
+    def __init__(self, environment, parent, name, blocks):
+        self.parent = parent
+        self.vars = vars = {}
+        self.environment = environment
+        self.exported_vars = set()
+        self.name = name
+
+        # create the initial mapping of blocks.  Whenever template inheritance
+        # takes place the runtime will update this mapping with the new blocks
+        # from the template.
+        self.blocks = dict((k, [v]) for k, v in blocks.iteritems())
+
+    def super(self, name, current):
+        """Render a parent block."""
+        try:
+            blocks = self.blocks[name]
+            index = blocks.index(current) + 1
+            blocks[index]
+        except LookupError:
+            return self.environment.undefined('there is no parent block '
+                                              'called %r.' % name,
+                                              name='super')
+        return BlockReference(name, self, blocks, index)
+
+    def get(self, key, default=None):
+        """Returns an item from the template context, if it doesn't exist
+        `default` is returned.
+        """
+        try:
+            return self[key]
+        except KeyError:
+            return default
+
+    def resolve(self, key):
+        """Looks up a variable like `__getitem__` or `get` but returns an
+        :class:`Undefined` object with the name of the variable looked up.
+        """
+        if key in self.vars:
+            return self.vars[key]
+        if key in self.parent:
+            return self.parent[key]
+        return self.environment.undefined(name=key)
+
+    def get_exported(self):
+        """Get a new dict with the exported variables."""
+        return dict((k, self.vars[k]) for k in self.exported_vars)
+
+    def get_all(self):
+        """Return a copy of the complete context as dict including the
+        exported variables.
+        """
+        return dict(self.parent, **self.vars)
+
+    def call(__self, __obj, *args, **kwargs):
+        """Call the callable with the arguments and keyword arguments
+        provided but inject the active context or environment as first
+        argument if the callable is a :func:`contextfunction` or
+        :func:`environmentfunction`.
+        """
+        if __debug__:
+            __traceback_hide__ = True
+        if isinstance(__obj, _context_function_types):
+            if getattr(__obj, 'contextfunction', 0):
+                args = (__self,) + args
+            elif getattr(__obj, 'environmentfunction', 0):
+                args = (__self.environment,) + args
+        return __obj(*args, **kwargs)
+
+    def _all(meth):
+        proxy = lambda self: getattr(self.get_all(), meth)()
+        proxy.__doc__ = getattr(dict, meth).__doc__
+        proxy.__name__ = meth
+        return proxy
+
+    keys = _all('keys')
+    values = _all('values')
+    items = _all('items')
+    iterkeys = _all('iterkeys')
+    itervalues = _all('itervalues')
+    iteritems = _all('iteritems')
+    del _all
+
+    def __contains__(self, name):
+        return name in self.vars or name in self.parent
+
+    def __getitem__(self, key):
+        """Lookup a variable or raise `KeyError` if the variable is
+        undefined.
+        """
+        item = self.resolve(key)
+        if isinstance(item, Undefined):
+            raise KeyError(key)
+        return item
+
+    def __repr__(self):
+        return '<%s %s of %r>' % (
+            self.__class__.__name__,
+            repr(self.get_all()),
+            self.name
+        )
+
+
+# register the context as mapping if possible
+try:
+    from collections import Mapping
+    Mapping.register(Context)
+except ImportError:
+    pass
+
+
+class TemplateReference(object):
+    """The `self` in templates."""
+
+    def __init__(self, context):
+        self.__context = context
+
+    def __getitem__(self, name):
+        blocks = self.__context.blocks[name]
+        wrap = self.__context.environment.autoescape and \
+               Markup or (lambda x: x)
+        return BlockReference(name, self.__context, blocks, 0)
+
+    def __repr__(self):
+        return '<%s %r>' % (
+            self.__class__.__name__,
+            self.__context.name
+        )
+
+
+class BlockReference(object):
+    """One block on a template reference."""
+
+    def __init__(self, name, context, stack, depth):
+        self.name = name
+        self._context = context
+        self._stack = stack
+        self._depth = depth
+
+    @property
+    def super(self):
+        """Super the block."""
+        if self._depth + 1 >= len(self._stack):
+            return self._context.environment. \
+                undefined('there is no parent block called %r.' %
+                          self.name, name='super')
+        return BlockReference(self.name, self._context, self._stack,
+                              self._depth + 1)
+
+    def __call__(self):
+        rv = concat(self._stack[self._depth](self._context))
+        if self._context.environment.autoescape:
+            rv = Markup(rv)
+        return rv
+
+
+class LoopContext(object):
+    """A loop context for dynamic iteration."""
+
+    def __init__(self, iterable, recurse=None):
+        self._iterator = iter(iterable)
+        self._recurse = recurse
+        self.index0 = -1
+
+        # try to get the length of the iterable early.  This must be done
+        # here because there are some broken iterators around whose
+        # __len__ is the number of iterations left (I'm looking at you,
+        # listreverseiterator!).
+        try:
+            self._length = len(iterable)
+        except (TypeError, AttributeError):
+            self._length = None
+
+    def cycle(self, *args):
+        """Cycles among the arguments with the current loop index."""
+        if not args:
+            raise TypeError('no items for cycling given')
+        return args[self.index0 % len(args)]
+
+    first = property(lambda x: x.index0 == 0)
+    last = property(lambda x: x.index0 + 1 == x.length)
+    index = property(lambda x: x.index0 + 1)
+    revindex = property(lambda x: x.length - x.index0)
+    revindex0 = property(lambda x: x.length - x.index)
+
+    def __len__(self):
+        return self.length
+
+    def __iter__(self):
+        return LoopContextIterator(self)
+
+    def loop(self, iterable):
+        if self._recurse is None:
+            raise TypeError('Tried to call non recursive loop.  Maybe you '
+                            "forgot the 'recursive' modifier.")
+        return self._recurse(iterable, self._recurse)
+
+    # a nifty trick to enhance the error message if someone tried to call
+    # the loop without arguments or with too many arguments.
+    __call__ = loop; del loop
+
+    @property
+    def length(self):
+        if self._length is None:
+            # if it was not possible to get the length of the iterator when
+            # the loop context was created (i.e. iterating over a generator)
+            # we have to convert the iterable into a sequence and use the
+            # length of that.
+            iterable = tuple(self._iterator)
+            self._iterator = iter(iterable)
+            self._length = len(iterable) + self.index0 + 1
+        return self._length
+
+    def __repr__(self):
+        return '<%s %r/%r>' % (
+            self.__class__.__name__,
+            self.index,
+            self.length
+        )
+
+
+class LoopContextIterator(object):
+    """The iterator for a loop context."""
+    __slots__ = ('context',)
+
+    def __init__(self, context):
+        self.context = context
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        ctx = self.context
+        ctx.index0 += 1
+        return ctx._iterator.next(), ctx
+
+
+class Macro(object):
+    """Wraps a macro."""
+
+    def __init__(self, environment, func, name, arguments, defaults,
+                 catch_kwargs, catch_varargs, caller):
+        self._environment = environment
+        self._func = func
+        self._argument_count = len(arguments)
+        self.name = name
+        self.arguments = arguments
+        self.defaults = defaults
+        self.catch_kwargs = catch_kwargs
+        self.catch_varargs = catch_varargs
+        self.caller = caller
+
+    def __call__(self, *args, **kwargs):
+        arguments = []
+        for idx, name in enumerate(self.arguments):
+            try:
+                value = args[idx]
+            except:
+                try:
+                    value = kwargs.pop(name)
+                except:
+                    try:
+                        value = self.defaults[idx - self._argument_count]
+                    except:
+                        value = self._environment.undefined(
+                            'parameter %r was not provided' % name, name=name)
+            arguments.append(value)
+
+        # it's important that the order of these arguments does not change
+        # unless it is also changed in the compiler's `function_scoping` method.
+        # the order is caller, keyword arguments, positional arguments!
+        if self.caller:
+            caller = kwargs.pop('caller', None)
+            if caller is None:
+                caller = self._environment.undefined('No caller defined',
+                                                     name='caller')
+            arguments.append(caller)
+        if self.catch_kwargs:
+            arguments.append(kwargs)
+        elif kwargs:
+            raise TypeError('macro %r takes no keyword argument %r' %
+                            (self.name, iter(kwargs).next()))
+        if self.catch_varargs:
+            arguments.append(args[self._argument_count:])
+        elif len(args) > self._argument_count:
+            raise TypeError('macro %r takes not more than %d argument(s)' %
+                            (self.name, len(self.arguments)))
+        return self._func(*arguments)
+
+    def __repr__(self):
+        return '<%s %s>' % (
+            self.__class__.__name__,
+            self.name is None and 'anonymous' or repr(self.name)
+        )
+
+
+class Undefined(object):
+    """The default undefined type.  This undefined type can be printed and
+    iterated over, but every other access will raise an :exc:`UndefinedError`:
+
+    >>> foo = Undefined(name='foo')
+    >>> str(foo)
+    ''
+    >>> not foo
+    True
+    >>> foo + 42
+    Traceback (most recent call last):
+      ...
+    UndefinedError: 'foo' is undefined
+    """
+    __slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name',
+                 '_undefined_exception')
+
+    def __init__(self, hint=None, obj=None, name=None, exc=UndefinedError):
+        self._undefined_hint = hint
+        self._undefined_obj = obj
+        self._undefined_name = name
+        self._undefined_exception = exc
+
+    def _fail_with_undefined_error(self, *args, **kwargs):
+        """Regular callback function for undefined objects that raises an
+        `UndefinedError` on call.
+        """
+        if self._undefined_hint is None:
+            if self._undefined_obj is None:
+                hint = '%r is undefined' % self._undefined_name
+            elif not isinstance(self._undefined_name, basestring):
+                hint = '%r object has no element %r' % (
+                    self._undefined_obj.__class__.__name__,
+                    self._undefined_name
+                )
+            else:
+                hint = '%r object has no attribute %r' % (
+                    self._undefined_obj.__class__.__name__,
+                    self._undefined_name
+                )
+        else:
+            hint = self._undefined_hint
+        raise self._undefined_exception(hint)
+
+    __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
+    __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
+    __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
+    __getattr__ = __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = \
+    __int__ = __float__ = __complex__ = __pow__ = __rpow__ = \
+        _fail_with_undefined_error
+
+    def __str__(self):
+        return unicode(self).encode('utf-8')
+
+    def __unicode__(self):
+        return u''
+
+    def __len__(self):
+        return 0
+
+    def __iter__(self):
+        if 0:
+            yield None
+
+    def __nonzero__(self):
+        return False
+
+    def __repr__(self):
+        return 'Undefined'
+
+
+class DebugUndefined(Undefined):
+    """An undefined that returns the debug info when printed.
+
+    >>> foo = DebugUndefined(name='foo')
+    >>> str(foo)
+    '{{ foo }}'
+    >>> not foo
+    True
+    >>> foo + 42
+    Traceback (most recent call last):
+      ...
+    UndefinedError: 'foo' is undefined
+    """
+    __slots__ = ()
+
+    def __unicode__(self):
+        if self._undefined_hint is None:
+            if self._undefined_obj is None:
+                return u'{{ %s }}' % self._undefined_name
+            return '{{ no such element: %s[%r] }}' % (
+                self._undefined_obj.__class__.__name__,
+                self._undefined_name
+            )
+        return u'{{ undefined value printed: %s }}' % self._undefined_hint
+
+
+class StrictUndefined(Undefined):
+    """An undefined that barks on print and iteration as well as boolean
+    tests and all kinds of comparisons.  In other words: you can do nothing
+    with it except checking if it's defined using the `defined` test.
+
+    >>> foo = StrictUndefined(name='foo')
+    >>> str(foo)
+    Traceback (most recent call last):
+      ...
+    UndefinedError: 'foo' is undefined
+    >>> not foo
+    Traceback (most recent call last):
+      ...
+    UndefinedError: 'foo' is undefined
+    >>> foo + 42
+    Traceback (most recent call last):
+      ...
+    UndefinedError: 'foo' is undefined
+    """
+    __slots__ = ()
+    __iter__ = __unicode__ = __len__ = __nonzero__ = __eq__ = __ne__ = \
+        Undefined._fail_with_undefined_error
+
+
+# remove remaining slots attributes, after the metaclass did the magic they
+# are unneeded and irritating as they contain wrong data for the subclasses.
+del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
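+
+# Usage sketch (illustrative): the undefined class used at runtime is chosen
+# on the Environment via its `undefined` parameter; StrictUndefined turns
+# silent lookups of missing names into errors.
+#
+# >>> from jinja2 import Environment
+# >>> Environment().from_string(u'{{ missing_var }}').render()
+# u''
+# >>> env = Environment(undefined=StrictUndefined)
+# >>> env.from_string(u'{{ missing_var }}').render()
+# Traceback (most recent call last):
+#   ...
+# UndefinedError: 'missing_var' is undefined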
diff --git a/scripts/jinja2/sandbox.py b/scripts/jinja2/sandbox.py
new file mode 100644 (file)
index 0000000..7b28273
--- /dev/null
@@ -0,0 +1,250 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.sandbox
+    ~~~~~~~~~~~~~~
+
+    Adds a sandbox layer to Jinja as it was the default behavior in the old
+    Jinja 1 releases.  This sandbox is slightly different from Jinja 1 as the
+    default behavior is easier to use.
+
+    The behavior can be changed by subclassing the environment.
+
+    :copyright: Copyright 2008 by Armin Ronacher.
+    :license: BSD.
+"""
+import operator
+from jinja2.runtime import Undefined
+from jinja2.environment import Environment
+from jinja2.exceptions import SecurityError
+from jinja2.utils import FunctionType, MethodType, TracebackType, CodeType, \
+     FrameType, GeneratorType
+
+
+#: maximum number of items a range may produce
+MAX_RANGE = 100000
+
+#: attributes of function objects that are considered unsafe.
+UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
+                                  'func_defaults', 'func_globals'])
+
+#: unsafe method attributes.  function attributes are unsafe for methods too
+UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
+
+
+from collections import deque
+from sets import Set, ImmutableSet
+from UserDict import UserDict, DictMixin
+from UserList import UserList
+_mutable_set_types = (ImmutableSet, Set, set)
+_mutable_mapping_types = (UserDict, DictMixin, dict)
+_mutable_sequence_types = (UserList, list)
+
+#: register Python 2.6 abstract base classes
+try:
+    from collections import MutableSet, MutableMapping, MutableSequence
+    _mutable_set_types += (MutableSet,)
+    _mutable_mapping_types += (MutableMapping,)
+    _mutable_sequence_types += (MutableSequence,)
+except ImportError:
+    pass
+
+_mutable_spec = (
+    (_mutable_set_types, frozenset([
+        'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
+        'symmetric_difference_update', 'update'
+    ])),
+    (_mutable_mapping_types, frozenset([
+        'clear', 'pop', 'popitem', 'setdefault', 'update'
+    ])),
+    (_mutable_sequence_types, frozenset([
+        'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
+    ])),
+    (deque, frozenset([
+        'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
+        'popleft', 'remove', 'rotate'
+    ]))
+)
+
+
+def safe_range(*args):
+    """A range that can't generate ranges with a length of more than
+    MAX_RANGE items.
+    """
+    rng = xrange(*args)
+    if len(rng) > MAX_RANGE:
+        raise OverflowError('range too big, maximum size for range is %d' %
+                            MAX_RANGE)
+    return rng
+
+
+def unsafe(f):
+    """
+    Mark a function or method as unsafe::
+
+        @unsafe
+        def delete(self):
+            pass
+    """
+    f.unsafe_callable = True
+    return f
+
+
+def is_internal_attribute(obj, attr):
+    """Test if the attribute given is an internal python attribute.  For
+    example this function returns `True` for the `func_code` attribute of
+    python objects.  This is useful if the environment method
+    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
+
+    >>> from jinja2.sandbox import is_internal_attribute
+    >>> is_internal_attribute(lambda: None, "func_code")
+    True
+    >>> is_internal_attribute((lambda x:x).func_code, 'co_code')
+    True
+    >>> is_internal_attribute(str, "upper")
+    False
+    """
+    if isinstance(obj, FunctionType):
+        if attr in UNSAFE_FUNCTION_ATTRIBUTES:
+            return True
+    elif isinstance(obj, MethodType):
+        if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
+           attr in UNSAFE_METHOD_ATTRIBUTES:
+            return True
+    elif isinstance(obj, type):
+        if attr == 'mro':
+            return True
+    elif isinstance(obj, (CodeType, TracebackType, FrameType)):
+        return True
+    elif isinstance(obj, GeneratorType):
+        if attr == 'gi_frame':
+            return True
+    return attr.startswith('__')
+
+
+def modifies_known_mutable(obj, attr):
+    """This function checks if an attribute on a builtin mutable object
+    (list, dict, set or deque) would modify it if called.  It also supports
+    the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
+    with Python 2.6 onwards the abstract base classes `MutableSet`,
+    `MutableMapping`, and `MutableSequence`.
+
+    >>> modifies_known_mutable({}, "clear")
+    True
+    >>> modifies_known_mutable({}, "keys")
+    False
+    >>> modifies_known_mutable([], "append")
+    True
+    >>> modifies_known_mutable([], "index")
+    False
+
+    If called with an unsupported object (such as unicode) `False` is
+    returned.
+
+    >>> modifies_known_mutable("foo", "upper")
+    False
+    """
+    for typespec, unsafe in _mutable_spec:
+        if isinstance(obj, typespec):
+            return attr in unsafe
+    return False
+
+
+class SandboxedEnvironment(Environment):
+    """The sandboxed environment.  It works like the regular environment but
+    tells the compiler to generate sandboxed code.  Additionally subclasses of
+    this environment may override the methods that tell the runtime what
+    attributes or functions are safe to access.
+
+    If the template tries to access insecure code a :exc:`SecurityError` is
+    raised.  However, other exceptions may also occur during rendering, so
+    the caller has to ensure that all exceptions are caught.
+    """
+    sandboxed = True
+
+    def __init__(self, *args, **kwargs):
+        Environment.__init__(self, *args, **kwargs)
+        self.globals['range'] = safe_range
+
+    def is_safe_attribute(self, obj, attr, value):
+        """The sandboxed environment will call this method to check if the
+        attribute of an object is safe to access.  By default all attributes
+        starting with an underscore are considered private, as well as the
+        special attributes of internal python objects as returned by the
+        :func:`is_internal_attribute` function.
+        """
+        return not (attr.startswith('_') or is_internal_attribute(obj, attr))
+
+    def is_safe_callable(self, obj):
+        """Check if an object is safely callable.  Per default a function is
+        considered safe unless the `unsafe_callable` attribute exists and is
+        True.  Override this method to alter the behavior, but this won't
+        affect the `unsafe` decorator from this module.
+        """
+        return not (getattr(obj, 'unsafe_callable', False) or \
+                    getattr(obj, 'alters_data', False))
+
+    def getitem(self, obj, argument):
+        """Subscribe an object from sandboxed code."""
+        try:
+            return obj[argument]
+        except (TypeError, LookupError):
+            if isinstance(argument, basestring):
+                try:
+                    attr = str(argument)
+                except:
+                    pass
+                else:
+                    try:
+                        value = getattr(obj, attr)
+                    except AttributeError:
+                        pass
+                    else:
+                        if self.is_safe_attribute(obj, argument, value):
+                            return value
+                        return self.unsafe_undefined(obj, argument)
+        return self.undefined(obj=obj, name=argument)
+
+    def getattr(self, obj, attribute):
+        """Subscribe an object from sandboxed code and prefer the
+        attribute.  The attribute passed *must* be a bytestring.
+        """
+        try:
+            value = getattr(obj, attribute)
+        except AttributeError:
+            try:
+                return obj[attribute]
+            except (TypeError, LookupError):
+                pass
+        else:
+            if self.is_safe_attribute(obj, attribute, value):
+                return value
+            return self.unsafe_undefined(obj, attribute)
+        return self.undefined(obj=obj, name=attribute)
+
+    def unsafe_undefined(self, obj, attribute):
+        """Return an undefined object for unsafe attributes."""
+        return self.undefined('access to attribute %r of %r '
+                              'object is unsafe.' % (
+            attribute,
+            obj.__class__.__name__
+        ), name=attribute, obj=obj, exc=SecurityError)
+
+    def call(__self, __context, __obj, *args, **kwargs):
+        """Call an object from sandboxed code."""
+        # the double prefixes are to avoid double keyword argument
+        # errors when proxying the call.
+        if not __self.is_safe_callable(__obj):
+            raise SecurityError('%r is not safely callable' % (__obj,))
+        return __context.call(__obj, *args, **kwargs)
+
+
+class ImmutableSandboxedEnvironment(SandboxedEnvironment):
+    """Works exactly like the regular `SandboxedEnvironment` but does not
+    permit modifications on the builtin mutable objects `list`, `set`, and
+    `dict` by using the :func:`modifies_known_mutable` function.
+    """
+
+    def is_safe_attribute(self, obj, attr, value):
+        if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
+            return False
+        return not modifies_known_mutable(obj, attr)
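+
+# Usage sketch (illustrative): both sandboxes are used like the regular
+# Environment; harmless templates render as usual, while unsafe attribute
+# access is funneled through is_safe_attribute() and rejected.
+#
+# >>> SandboxedEnvironment().from_string(u'{{ text.upper() }}').render(text='hello')
+# u'HELLO'
+# >>> env = ImmutableSandboxedEnvironment()
+# >>> env.from_string(u'{{ [1, 2].append(3) }}').render()
+# Traceback (most recent call last):
+#   ...
+# SecurityError: access to attribute 'append' of 'list' object is unsafe.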
diff --git a/scripts/jinja2/tests.py b/scripts/jinja2/tests.py
new file mode 100644 (file)
index 0000000..6873b5a
--- /dev/null
@@ -0,0 +1,136 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.tests
+    ~~~~~~~~~~~~
+
+    Jinja test functions. Used with the "is" operator.
+
+    :copyright: 2007 by Armin Ronacher.
+    :license: BSD, see LICENSE for more details.
+"""
+import re
+from jinja2.runtime import Undefined
+
+
+number_re = re.compile(r'^-?\d+(\.\d+)?$')
+regex_type = type(number_re)
+
+
+def test_odd(value):
+    """Return true if the variable is odd."""
+    return value % 2 == 1
+
+
+def test_even(value):
+    """Return true if the variable is even."""
+    return value % 2 == 0
+
+
+def test_divisibleby(value, num):
+    """Check if a variable is divisible by a number."""
+    return value % num == 0
+
+
+def test_defined(value):
+    """Return true if the variable is defined:
+
+    .. sourcecode:: jinja
+
+        {% if variable is defined %}
+            value of variable: {{ variable }}
+        {% else %}
+            variable is not defined
+        {% endif %}
+
+    See the :func:`default` filter for a simple way to set undefined
+    variables.
+    """
+    return not isinstance(value, Undefined)
+
+
+def test_undefined(value):
+    """Like :func:`defined` but the other way round."""
+    return isinstance(value, Undefined)
+
+
+def test_none(value):
+    """Return true if the variable is none."""
+    return value is None
+
+
+def test_lower(value):
+    """Return true if the variable is lowercased."""
+    return unicode(value).islower()
+
+
+def test_upper(value):
+    """Return true if the variable is uppercased."""
+    return unicode(value).isupper()
+
+
+def test_string(value):
+    """Return true if the object is a string."""
+    return isinstance(value, basestring)
+
+
+def test_number(value):
+    """Return true if the variable is a number."""
+    return isinstance(value, (int, long, float, complex))
+
+
+def test_sequence(value):
+    """Return true if the variable is a sequence. Sequences are variables
+    that are iterable.
+    """
+    try:
+        len(value)
+        value.__getitem__
+    except:
+        return False
+    return True
+
+
+def test_sameas(value, other):
+    """Check if an object points to the same memory address than another
+    object:
+
+    .. sourcecode:: jinja
+
+        {% if foo.attribute is sameas false %}
+            the foo attribute really is the `False` singleton
+        {% endif %}
+    """
+    return value is other
+
+
+def test_iterable(value):
+    """Check if it's possible to iterate over an object."""
+    try:
+        iter(value)
+    except TypeError:
+        return False
+    return True
+
+
+def test_escaped(value):
+    """Check if the value is escaped."""
+    return hasattr(value, '__html__')
+
+
+TESTS = {
+    'odd':              test_odd,
+    'even':             test_even,
+    'divisibleby':      test_divisibleby,
+    'defined':          test_defined,
+    'undefined':        test_undefined,
+    'none':             test_none,
+    'lower':            test_lower,
+    'upper':            test_upper,
+    'string':           test_string,
+    'number':           test_number,
+    'sequence':         test_sequence,
+    'iterable':         test_iterable,
+    'callable':         callable,
+    'sameas':           test_sameas,
+    'escaped':          test_escaped
+}
diff --git a/scripts/jinja2/utils.py b/scripts/jinja2/utils.py
new file mode 100644 (file)
index 0000000..3f3d053
--- /dev/null
@@ -0,0 +1,748 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.utils
+    ~~~~~~~~~~~~
+
+    Utility functions.
+
+    :copyright: 2008 by Armin Ronacher.
+    :license: BSD, see LICENSE for more details.
+"""
+import re
+import sys
+import errno
+try:
+    from thread import allocate_lock
+except ImportError:
+    from dummy_thread import allocate_lock
+from collections import deque
+from itertools import imap
+
+
+_word_split_re = re.compile(r'(\s+)')
+_punctuation_re = re.compile(
+    '^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
+        '|'.join(imap(re.escape, ('(', '<', '&lt;'))),
+        '|'.join(imap(re.escape, ('.', ',', ')', '>', '\n', '&gt;')))
+    )
+)
+_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
+_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
+_entity_re = re.compile(r'&([^;]+);')
+_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
+_digits = '0123456789'
+
+# special singleton representing missing values for the runtime
+missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})()
+
+
+# concatenate a list of strings and convert them to unicode.
+# unfortunately there is a bug in python 2.4 and lower that causes
+# unicode.join to trash the traceback.
+_concat = u''.join
+try:
+    def _test_gen_bug():
+        raise TypeError(_test_gen_bug)
+        yield None
+    _concat(_test_gen_bug())
+except TypeError, _error:
+    if not _error.args or _error.args[0] is not _test_gen_bug:
+        def concat(gen):
+            try:
+                return _concat(list(gen))
+            except:
+                # this hack is needed so that the current frame
+                # does not show up in the traceback.
+                exc_type, exc_value, tb = sys.exc_info()
+                raise exc_type, exc_value, tb.tb_next
+    else:
+        concat = _concat
+    del _test_gen_bug, _error
+
+
+# ironpython without stdlib doesn't have keyword
+try:
+    from keyword import iskeyword as is_python_keyword
+except ImportError:
+    _py_identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9]*$')
+    def is_python_keyword(name):
+        if _py_identifier_re.search(name) is None:
+            return False
+        try:
+            exec name + " = 42"
+        except SyntaxError:
+            return True
+        return False
+
+
+# common types.  These do exist in the special types module too which however
+# does not exist in IronPython out of the box.
+class _C(object):
+    def method(self): pass
+def _func():
+    yield None
+FunctionType = type(_func)
+GeneratorType = type(_func())
+MethodType = type(_C.method)
+CodeType = type(_C.method.func_code)
+try:
+    raise TypeError()
+except TypeError:
+    _tb = sys.exc_info()[2]
+    TracebackType = type(_tb)
+    FrameType = type(_tb.tb_frame)
+del _C, _tb, _func
+
+
+def contextfunction(f):
+    """This decorator can be used to mark a function or method context callable.
+    A context callable is passed the active :class:`Context` as first argument when
+    called from the template.  This is useful if a function wants to get access
+    to the context or functions provided on the context object.  For example
+    a function that returns a sorted list of template variables the current
+    template exports could look like this::
+
+        @contextfunction
+        def get_exported_names(context):
+            return sorted(context.exported_vars)
+    """
+    f.contextfunction = True
+    return f
+
+
+def environmentfunction(f):
+    """This decorator can be used to mark a function or method as environment
+    callable.  This decorator works exactly like the :func:`contextfunction`
+    decorator just that the first argument is the active :class:`Environment`
+    and not context.
+    """
+    f.environmentfunction = True
+    return f
+
+
+def is_undefined(obj):
+    """Check if the object passed is undefined.  This does nothing more than
+    performing an instance check against :class:`Undefined` but looks nicer.
+    This can be used for custom filters or tests that want to react to
+    undefined variables.  For example a custom default filter can look like
+    this::
+
+        def default(var, default=''):
+            if is_undefined(var):
+                return default
+            return var
+    """
+    from jinja2.runtime import Undefined
+    return isinstance(obj, Undefined)
+
+
+def consume(iterable):
+    """Consumes an iterable without doing anything with it."""
+    for event in iterable:
+        pass
+
+
+def clear_caches():
+    """Jinja2 keeps internal caches for environments and lexers.  These are
+    used so that Jinja2 doesn't have to recreate environments and lexers all
+    the time.  Normally you don't have to care about that but if you are
+    measuring memory consumption you may want to clean the caches.
+    """
+    from jinja2.environment import _spontaneous_environments
+    from jinja2.lexer import _lexer_cache
+    _spontaneous_environments.clear()
+    _lexer_cache.clear()
+
+
+def import_string(import_name, silent=False):
+    """Imports an object based on a string.  This is useful if you want to
+    use import paths as endpoints or something similar.  An import path can
+    be specified either in dotted notation (``xml.sax.saxutils.escape``)
+    or with a colon as object delimiter (``xml.sax.saxutils:escape``).
+
+    If `silent` is True the return value will be `None` if the import
+    fails.
+
+    :return: imported object
+    """
+    try:
+        if ':' in import_name:
+            module, obj = import_name.split(':', 1)
+        elif '.' in import_name:
+            items = import_name.split('.')
+            module = '.'.join(items[:-1])
+            obj = items[-1]
+        else:
+            return __import__(import_name)
+        return getattr(__import__(module, None, None, [obj]), obj)
+    except (ImportError, AttributeError):
+        if not silent:
+            raise
+
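For illustration, a minimal usage sketch of import_string (assuming the bundled scripts/jinja2 package is on the Python path; the dotted and colon import paths below are arbitrary standard-library examples):

    from jinja2.utils import import_string

    # dotted notation: everything up to the last dot is the module
    path_join = import_string('os.path.join')
    # colon notation: module and attribute are separated explicitly
    xml_escape = import_string('xml.sax.saxutils:escape')
    # silent=True turns a failed import into None instead of an exception
    maybe = import_string('no.such:thing', silent=True)
    assert maybe is None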
+
+def open_if_exists(filename, mode='r'):
+    """Returns a file object for the filename if that file exists,
+    otherwise `None`.
+    """
+    try:
+        return file(filename, mode)
+    except IOError, e:
+        if e.errno not in (errno.ENOENT, errno.EISDIR):
+            raise
+
+
+def pformat(obj, verbose=False):
+    """Prettyprint an object.  Either use the `pretty` library or the
+    builtin `pprint`.
+    """
+    try:
+        from pretty import pretty
+        return pretty(obj, verbose=verbose)
+    except ImportError:
+        from pprint import pformat
+        return pformat(obj)
+
+
+def urlize(text, trim_url_limit=None, nofollow=False):
+    """Converts any URLs in text into clickable links. Works on http://,
+    https:// and www. links. Links can have trailing punctuation (periods,
+    commas, close-parens) and leading punctuation (opening parens) and
+    it'll still do the right thing.
+
+    If trim_url_limit is not None, the URLs in link text will be limited
+    to trim_url_limit characters.
+
+    If nofollow is True, the URLs in link text will get a rel="nofollow"
+    attribute.
+    """
+    trim_url = lambda x, limit=trim_url_limit: limit is not None \
+                         and (x[:limit] + (len(x) >=limit and '...'
+                         or '')) or x
+    words = _word_split_re.split(unicode(escape(text)))
+    nofollow_attr = nofollow and ' rel="nofollow"' or ''
+    for i, word in enumerate(words):
+        match = _punctuation_re.match(word)
+        if match:
+            lead, middle, trail = match.groups()
+            if middle.startswith('www.') or (
+                '@' not in middle and
+                not middle.startswith('http://') and
+                len(middle) > 0 and
+                middle[0] in _letters + _digits and (
+                    middle.endswith('.org') or
+                    middle.endswith('.net') or
+                    middle.endswith('.com')
+                )):
+                middle = '<a href="http://%s"%s>%s</a>' % (middle,
+                    nofollow_attr, trim_url(middle))
+            if middle.startswith('http://') or \
+               middle.startswith('https://'):
+                middle = '<a href="%s"%s>%s</a>' % (middle,
+                    nofollow_attr, trim_url(middle))
+            if '@' in middle and not middle.startswith('www.') and \
+               not ':' in middle and _simple_email_re.match(middle):
+                middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
+            if lead + middle + trail != word:
+                words[i] = lead + middle + trail
+    return u''.join(words)
+
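A hedged sketch of what urlize produces (only the rough output shape is asserted, since the exact markup follows the escaping and trimming logic above):

    from jinja2.utils import urlize

    text = u'Docs at http://example.com or mail me at mail@example.com'
    html = urlize(text, trim_url_limit=30, nofollow=True)
    # the http:// URL becomes an <a> link carrying rel="nofollow",
    # the e-mail address becomes a mailto: link
    assert u'rel="nofollow"' in html
    assert u'mailto:mail@example.com' in html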
+
+def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
+    """Generate some lorem ipsum for the template."""
+    from jinja2.constants import LOREM_IPSUM_WORDS
+    from random import choice, random, randrange
+    words = LOREM_IPSUM_WORDS.split()
+    result = []
+
+    for _ in xrange(n):
+        next_capitalized = True
+        last_comma = last_fullstop = 0
+        word = None
+        last = None
+        p = []
+
+        # each paragraph consists of 20 to 100 words.
+        for idx, _ in enumerate(xrange(randrange(min, max))):
+            while True:
+                word = choice(words)
+                if word != last:
+                    last = word
+                    break
+            if next_capitalized:
+                word = word.capitalize()
+                next_capitalized = False
+            # add commas
+            if idx - randrange(3, 8) > last_comma:
+                last_comma = idx
+                last_fullstop += 2
+                word += ','
+            # add end of sentences
+            if idx - randrange(10, 20) > last_fullstop:
+                last_comma = last_fullstop = idx
+                word += '.'
+                next_capitalized = True
+            p.append(word)
+
+        # ensure that the paragraph ends with a dot.
+        p = u' '.join(p)
+        if p.endswith(','):
+            p = p[:-1] + '.'
+        elif not p.endswith('.'):
+            p += '.'
+        result.append(p)
+
+    if not html:
+        return u'\n\n'.join(result)
+    return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result))
+
+
+class Markup(unicode):
+    r"""Marks a string as being safe for inclusion in HTML/XML output without
+    needing to be escaped.  This implements the `__html__` interface a couple
+    of frameworks and web applications use.  :class:`Markup` is a direct
+    subclass of `unicode` and provides all the methods of `unicode` just that
+    it escapes arguments passed and always returns `Markup`.
+
+    The `escape` function returns markup objects so that double escaping can't
+    happen.  If you want to use autoescaping in Jinja just enable the
+    autoescaping feature in the environment.
+
+    The constructor of the :class:`Markup` class can be used for three
+    different things:  When passed an unicode object it's assumed to be safe,
+    when passed an object with an HTML representation (has an `__html__`
+    method) that representation is used, otherwise the object passed is
+    converted into a unicode string and then assumed to be safe:
+
+    >>> Markup("Hello <em>World</em>!")
+    Markup(u'Hello <em>World</em>!')
+    >>> class Foo(object):
+    ...  def __html__(self):
+    ...   return '<a href="#">foo</a>'
+    ...
+    >>> Markup(Foo())
+    Markup(u'<a href="#">foo</a>')
+
+    If you want the passed object to always be treated as unsafe you can use the
+    :meth:`escape` classmethod to create a :class:`Markup` object:
+
+    >>> Markup.escape("Hello <em>World</em>!")
+    Markup(u'Hello &lt;em&gt;World&lt;/em&gt;!')
+
+    Operations on a markup string are markup aware which means that all
+    arguments are passed through the :func:`escape` function:
+
+    >>> em = Markup("<em>%s</em>")
+    >>> em % "foo & bar"
+    Markup(u'<em>foo &amp; bar</em>')
+    >>> strong = Markup("<strong>%(text)s</strong>")
+    >>> strong % {'text': '<blink>hacker here</blink>'}
+    Markup(u'<strong>&lt;blink&gt;hacker here&lt;/blink&gt;</strong>')
+    >>> Markup("<em>Hello</em> ") + "<foo>"
+    Markup(u'<em>Hello</em> &lt;foo&gt;')
+    """
+    __slots__ = ()
+
+    def __new__(cls, base=u'', encoding=None, errors='strict'):
+        if hasattr(base, '__html__'):
+            base = base.__html__()
+        if encoding is None:
+            return unicode.__new__(cls, base)
+        return unicode.__new__(cls, base, encoding, errors)
+
+    def __html__(self):
+        return self
+
+    def __add__(self, other):
+        if hasattr(other, '__html__') or isinstance(other, basestring):
+            return self.__class__(unicode(self) + unicode(escape(other)))
+        return NotImplemented
+
+    def __radd__(self, other):
+        if hasattr(other, '__html__') or isinstance(other, basestring):
+            return self.__class__(unicode(escape(other)) + unicode(self))
+        return NotImplemented
+
+    def __mul__(self, num):
+        if isinstance(num, (int, long)):
+            return self.__class__(unicode.__mul__(self, num))
+        return NotImplemented
+    __rmul__ = __mul__
+
+    def __mod__(self, arg):
+        if isinstance(arg, tuple):
+            arg = tuple(imap(_MarkupEscapeHelper, arg))
+        else:
+            arg = _MarkupEscapeHelper(arg)
+        return self.__class__(unicode.__mod__(self, arg))
+
+    def __repr__(self):
+        return '%s(%s)' % (
+            self.__class__.__name__,
+            unicode.__repr__(self)
+        )
+
+    def join(self, seq):
+        return self.__class__(unicode.join(self, imap(escape, seq)))
+    join.__doc__ = unicode.join.__doc__
+
+    def split(self, *args, **kwargs):
+        return map(self.__class__, unicode.split(self, *args, **kwargs))
+    split.__doc__ = unicode.split.__doc__
+
+    def rsplit(self, *args, **kwargs):
+        return map(self.__class__, unicode.rsplit(self, *args, **kwargs))
+    rsplit.__doc__ = unicode.rsplit.__doc__
+
+    def splitlines(self, *args, **kwargs):
+        return map(self.__class__, unicode.splitlines(self, *args, **kwargs))
+    splitlines.__doc__ = unicode.splitlines.__doc__
+
+    def unescape(self):
+        r"""Unescape markup again into an unicode string.  This also resolves
+        known HTML4 and XHTML entities:
+
+        >>> Markup("Main &raquo; <em>About</em>").unescape()
+        u'Main \xbb <em>About</em>'
+        """
+        from jinja2.constants import HTML_ENTITIES
+        def handle_match(m):
+            name = m.group(1)
+            if name in HTML_ENTITIES:
+                return unichr(HTML_ENTITIES[name])
+            try:
+                if name[:2] in ('#x', '#X'):
+                    return unichr(int(name[2:], 16))
+                elif name.startswith('#'):
+                    return unichr(int(name[1:]))
+            except ValueError:
+                pass
+            return u''
+        return _entity_re.sub(handle_match, unicode(self))
+
+    def striptags(self):
+        r"""Unescape markup into an unicode string and strip all tags.  This
+        also resolves known HTML4 and XHTML entities.  Whitespace is
+        normalized to one space:
+
+        >>> Markup("Main &raquo;  <em>About</em>").striptags()
+        u'Main \xbb About'
+        """
+        stripped = u' '.join(_striptags_re.sub('', self).split())
+        return Markup(stripped).unescape()
+
+    @classmethod
+    def escape(cls, s):
+        """Escape the string.  Works like :func:`escape` with the difference
+        that for subclasses of :class:`Markup` this function would return the
+        correct subclass.
+        """
+        rv = escape(s)
+        if rv.__class__ is not cls:
+            return cls(rv)
+        return rv
+
+    def make_wrapper(name):
+        orig = getattr(unicode, name)
+        def func(self, *args, **kwargs):
+            args = _escape_argspec(list(args), enumerate(args))
+            _escape_argspec(kwargs, kwargs.iteritems())
+            return self.__class__(orig(self, *args, **kwargs))
+        func.__name__ = orig.__name__
+        func.__doc__ = orig.__doc__
+        return func
+
+    for method in '__getitem__', '__getslice__', 'capitalize', \
+                  'title', 'lower', 'upper', 'replace', 'ljust', \
+                  'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
+                  'translate', 'expandtabs', 'swapcase', 'zfill':
+        locals()[method] = make_wrapper(method)
+
+    # new in python 2.5
+    if hasattr(unicode, 'partition'):
+        partition = make_wrapper('partition')
+        rpartition = make_wrapper('rpartition')
+
+    # new in python 2.6
+    if hasattr(unicode, 'format'):
+        format = make_wrapper('format')
+
+    del method, make_wrapper
+
+
+def _escape_argspec(obj, iterable):
+    """Helper for various string-wrapped functions."""
+    for key, value in iterable:
+        if hasattr(value, '__html__') or isinstance(value, basestring):
+            obj[key] = escape(value)
+    return obj
+
+
+class _MarkupEscapeHelper(object):
+    """Helper for Markup.__mod__"""
+
+    def __init__(self, obj):
+        self.obj = obj
+
+    __getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x])
+    __unicode__ = lambda s: unicode(escape(s.obj))
+    __str__ = lambda s: str(escape(s.obj))
+    __repr__ = lambda s: str(escape(repr(s.obj)))
+    __int__ = lambda s: int(s.obj)
+    __float__ = lambda s: float(s.obj)
+
+
+class LRUCache(object):
+    """A simple LRU Cache implementation."""
+
+    # this is fast for small capacities (something below 1000) but doesn't
+    # scale.  But as long as it's only used as storage for templates this
+    # won't do any harm.
+
+    def __init__(self, capacity):
+        self.capacity = capacity
+        self._mapping = {}
+        self._queue = deque()
+        self._postinit()
+
+    def _postinit(self):
+        # alias all queue methods for faster lookup
+        self._popleft = self._queue.popleft
+        self._pop = self._queue.pop
+        if hasattr(self._queue, 'remove'):
+            self._remove = self._queue.remove
+        self._wlock = allocate_lock()
+        self._append = self._queue.append
+
+    def _remove(self, obj):
+        """Python 2.4 compatibility."""
+        for idx, item in enumerate(self._queue):
+            if item == obj:
+                del self._queue[idx]
+                break
+
+    def __getstate__(self):
+        return {
+            'capacity':     self.capacity,
+            '_mapping':     self._mapping,
+            '_queue':       self._queue
+        }
+
+    def __setstate__(self, d):
+        self.__dict__.update(d)
+        self._postinit()
+
+    def __getnewargs__(self):
+        return (self.capacity,)
+
+    def copy(self):
+        """Return a shallow copy of the instance."""
+        rv = self.__class__(self.capacity)
+        rv._mapping.update(self._mapping)
+        rv._queue = deque(self._queue)
+        return rv
+
+    def get(self, key, default=None):
+        """Return an item from the cache dict or `default`"""
+        try:
+            return self[key]
+        except KeyError:
+            return default
+
+    def setdefault(self, key, default=None):
+        """Set `default` if the key is not in the cache otherwise
+        leave unchanged. Return the value of this key.
+        """
+        try:
+            return self[key]
+        except KeyError:
+            self[key] = default
+            return default
+
+    def clear(self):
+        """Clear the cache."""
+        self._wlock.acquire()
+        try:
+            self._mapping.clear()
+            self._queue.clear()
+        finally:
+            self._wlock.release()
+
+    def __contains__(self, key):
+        """Check if a key exists in this cache."""
+        return key in self._mapping
+
+    def __len__(self):
+        """Return the current size of the cache."""
+        return len(self._mapping)
+
+    def __repr__(self):
+        return '<%s %r>' % (
+            self.__class__.__name__,
+            self._mapping
+        )
+
+    def __getitem__(self, key):
+        """Get an item from the cache. Moves the item up so that it has the
+        highest priority then.
+
+        Raise a `KeyError` if it does not exist.
+        """
+        rv = self._mapping[key]
+        if self._queue[-1] != key:
+            self._remove(key)
+            self._append(key)
+        return rv
+
+    def __setitem__(self, key, value):
+        """Sets the value for an item. Moves the item up so that it
+        has the highest priority then.
+        """
+        self._wlock.acquire()
+        try:
+            if key in self._mapping:
+                self._remove(key)
+            elif len(self._mapping) == self.capacity:
+                del self._mapping[self._popleft()]
+            self._append(key)
+            self._mapping[key] = value
+        finally:
+            self._wlock.release()
+
+    def __delitem__(self, key):
+        """Remove an item from the cache dict.
+        Raise a `KeyError` if it does not exist.
+        """
+        self._wlock.acquire()
+        try:
+            del self._mapping[key]
+            self._remove(key)
+        finally:
+            self._wlock.release()
+
+    def items(self):
+        """Return a list of items."""
+        result = [(key, self._mapping[key]) for key in list(self._queue)]
+        result.reverse()
+        return result
+
+    def iteritems(self):
+        """Iterate over all items."""
+        return iter(self.items())
+
+    def values(self):
+        """Return a list of all values."""
+        return [x[1] for x in self.items()]
+
+    def itervalues(self):
+        """Iterate over all values."""
+        return iter(self.values())
+
+    def keys(self):
+        """Return a list of all keys ordered by most recent usage."""
+        return list(self)
+
+    def iterkeys(self):
+        """Iterate over all keys in the cache dict, ordered by
+        the most recent usage.
+        """
+        return reversed(tuple(self._queue))
+
+    __iter__ = iterkeys
+
+    def __reversed__(self):
+        """Iterate over the values in the cache dict, oldest items
+        coming first.
+        """
+        return iter(tuple(self._queue))
+
+    __copy__ = copy
+
+
+# register the LRU cache as mutable mapping if possible
+try:
+    from collections import MutableMapping
+    MutableMapping.register(LRUCache)
+except ImportError:
+    pass
+
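A minimal sketch of the eviction behaviour (capacity of two; the least recently used key is dropped first):

    from jinja2.utils import LRUCache

    cache = LRUCache(2)
    cache['a'] = 1
    cache['b'] = 2
    cache['a']        # touching 'a' makes it the most recently used key
    cache['c'] = 3    # capacity reached, so the least recently used key 'b' goes
    assert 'b' not in cache
    assert cache.keys() == ['c', 'a']   # most recently used first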
+
+class Cycler(object):
+    """A cycle helper for templates."""
+
+    def __init__(self, *items):
+        if not items:
+            raise RuntimeError('at least one item has to be provided')
+        self.items = items
+        self.reset()
+
+    def reset(self):
+        """Resets the cycle."""
+        self.pos = 0
+
+    @property
+    def current(self):
+        """Returns the current item."""
+        return self.items[self.pos]
+
+    def next(self):
+        """Goes one item ahead and returns it."""
+        rv = self.current
+        self.pos = (self.pos + 1) % len(self.items)
+        return rv
+
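A minimal sketch of the Cycler behaviour:

    from jinja2.utils import Cycler

    cycle = Cycler('odd', 'even')
    assert cycle.next() == 'odd'
    assert cycle.next() == 'even'
    assert cycle.next() == 'odd'   # wraps around
    cycle.reset()
    assert cycle.current == 'odd'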
+
+class Joiner(object):
+    """A joining helper for templates."""
+
+    def __init__(self, sep=u', '):
+        self.sep = sep
+        self.used = False
+
+    def __call__(self):
+        if not self.used:
+            self.used = True
+            return u''
+        return self.sep
+
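A minimal sketch of the Joiner helper, which returns an empty string on its first call and the separator on every later call:

    from jinja2.utils import Joiner

    sep = Joiner(u', ')
    parts = [sep() + name for name in (u'a', u'b', u'c')]
    assert u''.join(parts) == u'a, b, c'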
+
+# we have to import it down here as the speedups module imports the
+# markup type which is defined above.
+try:
+    from jinja2._speedups import escape, soft_unicode
+except ImportError:
+    def escape(s):
+        """Convert the characters &, <, >, ' and " in string s to HTML-safe
+        sequences.  Use this if you need to display text that might contain
+        such characters in HTML.  Marks return value as markup string.
+        """
+        if hasattr(s, '__html__'):
+            return s.__html__()
+        return Markup(unicode(s)
+            .replace('&', '&amp;')
+            .replace('>', '&gt;')
+            .replace('<', '&lt;')
+            .replace("'", '&#39;')
+            .replace('"', '&#34;')
+        )
+
+    def soft_unicode(s):
+        """Make a string unicode if it isn't already.  That way a markup
+        string is not converted back to unicode.
+        """
+        if not isinstance(s, unicode):
+            s = unicode(s)
+        return s
+
+
+# partials
+try:
+    from functools import partial
+except ImportError:
+    class partial(object):
+        def __init__(self, _func, *args, **kwargs):
+            self._func = _func
+            self._args = args
+            self._kwargs = kwargs
+        def __call__(self, *args, **kwargs):
+            kwargs.update(self._kwargs)
+            return self._func(*(self._args + args), **kwargs)
diff --git a/scripts/jinja2/visitor.py b/scripts/jinja2/visitor.py
new file mode 100644 (file)
index 0000000..ad11108
--- /dev/null
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.visitor
+    ~~~~~~~~~~~~~~
+
+    This module implements a visitor for the nodes.
+
+    :copyright: Copyright 2008 by Armin Ronacher.
+    :license: BSD.
+"""
+from jinja2.nodes import Node
+
+
+class NodeVisitor(object):
+    """Walks the abstract syntax tree and call visitor functions for every
+    node found.  The visitor functions may return values which will be
+    forwarded by the `visit` method.
+
+    By default the visitor functions for the nodes are ``'visit_'`` +
+    class name of the node.  So a `TryFinally` node visit function would
+    be `visit_TryFinally`.  This behavior can be changed by overriding
+    the `get_visitor` function.  If no visitor function exists for a node
+    (return value `None`) the `generic_visit` visitor is used instead.
+    """
+
+    def get_visitor(self, node):
+        """Return the visitor function for this node or `None` if no visitor
+        exists for this node.  In that case the generic visit function is
+        used instead.
+        """
+        method = 'visit_' + node.__class__.__name__
+        return getattr(self, method, None)
+
+    def visit(self, node, *args, **kwargs):
+        """Visit a node."""
+        f = self.get_visitor(node)
+        if f is not None:
+            return f(node, *args, **kwargs)
+        return self.generic_visit(node, *args, **kwargs)
+
+    def generic_visit(self, node, *args, **kwargs):
+        """Called if no explicit visitor function exists for a node."""
+        for node in node.iter_child_nodes():
+            self.visit(node, *args, **kwargs)
+
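A hedged sketch of the visit_<ClassName> dispatch described above; NameCollector is an illustrative subclass (not defined anywhere in jinja2), while Environment.parse and the Name node type come from the bundled package:

    from jinja2 import Environment
    from jinja2.visitor import NodeVisitor

    class NameCollector(NodeVisitor):
        """Collects the names of all Name nodes found in a template AST."""

        def __init__(self):
            self.names = []

        def visit_Name(self, node):
            # called automatically for every jinja2.nodes.Name instance
            self.names.append(node.name)

    ast = Environment().parse(u'{{ user.name }} and {{ count }}')
    collector = NameCollector()
    collector.visit(ast)
    assert 'user' in collector.names and 'count' in collector.names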
+
+class NodeTransformer(NodeVisitor):
+    """Walks the abstract syntax tree and allows modifications of nodes.
+
+    The `NodeTransformer` will walk the AST and use the return value of the
+    visitor functions to replace or remove the old node.  If the return
+    value of the visitor function is `None` the node will be removed
+    from the previous location otherwise it's replaced with the return
+    value.  The return value may be the original node in which case no
+    replacement takes place.
+    """
+
+    def generic_visit(self, node, *args, **kwargs):
+        for field, old_value in node.iter_fields():
+            if isinstance(old_value, list):
+                new_values = []
+                for value in old_value:
+                    if isinstance(value, Node):
+                        value = self.visit(value, *args, **kwargs)
+                        if value is None:
+                            continue
+                        elif not isinstance(value, Node):
+                            new_values.extend(value)
+                            continue
+                    new_values.append(value)
+                old_value[:] = new_values
+            elif isinstance(old_value, Node):
+                new_node = self.visit(old_value, *args, **kwargs)
+                if new_node is None:
+                    delattr(node, field)
+                else:
+                    setattr(node, field, new_node)
+        return node
+
+    def visit_list(self, node, *args, **kwargs):
+        """As transformers may return lists in some places this method
+        can be used to enforce a list as return value.
+        """
+        rv = self.visit(node, *args, **kwargs)
+        if not isinstance(rv, list):
+            rv = [rv]
+        return rv
diff --git a/vc2005/libfirm.vcproj b/vc2005/libfirm.vcproj
index 81b1297..5998576 100644 (file)
@@ -45,7 +45,7 @@
                                AdditionalOptions="/WL"
                                Optimization="2"
                                InlineFunctionExpansion="1"
-                               AdditionalIncludeDirectories="../../obstack;../win32;../ir/adt;../ir/ana;../ir/common;../ir/debug;../ir/ident;../ir/ir;../ir/opt;../ir/stat;../ir/tr;../ir/tv;../ir/arch;../ir/lower;../ir/net;../ir/be;../ir/libcore;../include/libfirm;../include/libfirm/adt;../../ipd/include"
+                               AdditionalIncludeDirectories="../../obstack;../win32;../ir/adt;../ir/ana;../ir/common;../ir/debug;../ir/ident;../ir/ir;../ir/opt;../ir/stat;../ir/tr;../ir/tv;../ir/arch;../ir/lower;../ir/net;../ir/be;../ir/libcore;../include/libfirm;../include/libfirm/adt;../../ipd/include;../ir"
                                PreprocessorDefinitions="NDEBUG;WIN32;_LIB;HAVE_CONFIG_H;_CRT_SECURE_NO_DEPRECATE;inline=_inline"
                                StringPooling="true"
                                RuntimeLibrary="0"
                                        </FileConfiguration>
                                </File>
                        </Filter>
-                       <Filter
-                               Name="arch"
-                               >
-                               <File
-                                       RelativePath="..\ir\arch\archop.c"
-                                       >
-                                       <FileConfiguration
-                                               Name="Release|Win32"
-                                               >
-                                               <Tool
-                                                       Name="VCCLCompilerTool"
-                                                       AdditionalIncludeDirectories=""
-                                                       PreprocessorDefinitions=""
-                                               />
-                                       </FileConfiguration>
-                                       <FileConfiguration
-                                               Name="Debug|Win32"
-                                               >
-                                               <Tool
-                                                       Name="VCCLCompilerTool"
-                                                       AdditionalIncludeDirectories=""
-                                                       PreprocessorDefinitions=""
-                                               />
-                                       </FileConfiguration>
-                                       <FileConfiguration
-                                               Name="DebugJTEST|Win32"
-                                               >
-                                               <Tool
-                                                       Name="VCCLCompilerTool"
-                                                       AdditionalIncludeDirectories=""
-                                                       PreprocessorDefinitions=""
-                                               />
-                                       </FileConfiguration>
-                               </File>
-                               <File
-                                       RelativePath="..\ir\arch\Makefile.in"
-                                       >
-                               </File>
-                       </Filter>
                        <Filter
                                Name="be"
                                >
                                        RelativePath="..\ir\be\beilpsched.h"
                                        >
                                </File>
+                               <File
+                                       RelativePath="..\ir\be\beinfo.c"
+                                       >
+                               </File>
+                               <File
+                                       RelativePath="..\ir\be\beinfo.h"
+                                       >
+                               </File>
                                <File
                                        RelativePath="..\ir\be\beinsn.c"
                                        >
                                        RelativePath="..\ir\be\bespilloptions.h"
                                        >
                                </File>
-                               <File
-                                       RelativePath="..\ir\be\bespillremat.c"
-                                       >
-                               </File>
-                               <File
-                                       RelativePath="..\ir\be\bespillremat.h"
-                                       >
-                               </File>
                                <File
                                        RelativePath="..\ir\be\bespillslots.c"
                                        >
                        <Filter
                                Name="ir"
                                >
+                               <File
+                                       RelativePath="..\ir\ir\gen_ir_cons.c.inl"
+                                       >
+                               </File>
+                               <File
+                                       RelativePath="..\ir\ir\gen_irio_export.inl"
+                                       >
+                               </File>
+                               <File
+                                       RelativePath="..\ir\ir\gen_irio_import.inl"
+                                       >
+                               </File>
+                               <File
+                                       RelativePath="..\ir\ir\gen_irio_lex.inl"
+                                       >
+                               </File>
                                <File
                                        RelativePath="..\ir\ir\instrument.c"
                                        >
                                        RelativePath="..\ir\ir\instrument.h"
                                        >
                                </File>
+                               <File
+                                       RelativePath="..\ir\ir\ir_spec.pl"
+                                       >
+                                       <FileConfiguration
+                                               Name="Release|Win32"
+                                               >
+                                               <Tool
+                                                       Name="VCCustomBuildTool"
+                                                       Description="Translate IR-Spec: $(InputPath)"
+                                                       CommandLine="..\scripts\gen_ir.pl $(InputPath) ..\ir\ir"
+                                                       AdditionalDependencies="..\scripts\gen_ir.pl"
+                                                       Outputs="..\ir\ir\gen_ir_cons.c.inl"
+                                               />
+                                       </FileConfiguration>
+                                       <FileConfiguration
+                                               Name="Debug|Win32"
+                                               >
+                                               <Tool
+                                                       Name="VCCustomBuildTool"
+                                                       Description="Translate IR-Spec: $(InputPath)"
+                                                       CommandLine="..\scripts\gen_ir.pl $(InputPath) ..\ir\ir"
+                                                       AdditionalDependencies="..\scripts\gen_ir.pl"
+                                                       Outputs="..\ir\ir\gen_ir_cons.c.inl"
+                                               />
+                                       </FileConfiguration>
+                                       <FileConfiguration
+                                               Name="DebugJTEST|Win32"
+                                               >
+                                               <Tool
+                                                       Name="VCCustomBuildTool"
+                                                       Description="Translate IR-Spec: $(InputPath)"
+                                                       CommandLine="..\scripts\gen_ir.pl $(InputPath) ..\ir\ir"
+                                                       AdditionalDependencies="..\scripts\gen_ir.pl"
+                                                       Outputs="..\ir\ir\gen_ir_cons.c.inl"
+                                               />
+                                       </FileConfiguration>
+                               </File>
                                <File
                                        RelativePath="..\ir\ir\irarch.c"
                                        >
                                                />
                                        </FileConfiguration>
                                </File>
+                               <File
+                                       RelativePath="..\ir\ir\irio.c"
+                                       >
+                               </File>
                                <File
                                        RelativePath="..\ir\ir\irmode.c"
                                        >
                                        RelativePath="..\ir\opt\opt_inline_t.h"
                                        >
                                </File>
+                               <File
+                                       RelativePath="..\ir\opt\opt_ldst.c"
+                                       >
+                               </File>
                                <File
                                        RelativePath="..\ir\opt\opt_osr.c"
                                        >
                                        RelativePath="..\ir\stat\stat_dmp.h"
                                        >
                                </File>
+                               <File
+                                       RelativePath="..\ir\stat\stat_liveness.c"
+                                       >
+                               </File>
                                <File
                                        RelativePath="..\ir\stat\stat_timing.c"
                                        >
                                RelativePath="..\include\libfirm\irhooks.h"
                                >
                        </File>
+                       <File
+                               RelativePath="..\include\libfirm\irio.h"
+                               >
+                       </File>
                        <File
                                RelativePath="..\include\libfirm\irlivechk.h"
                                >
                                </File>
                        </Filter>
                </Filter>
+               <Filter
+                       Name="scripts"
+                       >
+                       <File
+                               RelativePath="..\scripts\gen_ir.pl"
+                               >
+                       </File>
+                       <File
+                               RelativePath="..\scripts\gen_ir_io.py"
+                               >
+                               <FileConfiguration
+                                       Name="Release|Win32"
+                                       >
+                                       <Tool
+                                               Name="VCCustomBuildTool"
+                                               Description="Generating I/O code: $(InputPath)"
+                                               CommandLine="$(InputPath) ..\ir\ir"
+                                               AdditionalDependencies="ir_spec.py"
+                                               Outputs="..\ir\ir\gen_irio_import.inl;..\ir\ir\gen_irio_export.inl;..\ir\ir\gen_irio_lex.inl"
+                                       />
+                               </FileConfiguration>
+                               <FileConfiguration
+                                       Name="Debug|Win32"
+                                       >
+                                       <Tool
+                                               Name="VCCustomBuildTool"
+                                               Description="Generating I/O code: $(InputPath)"
+                                               CommandLine="$(InputPath) ..\ir\ir"
+                                               AdditionalDependencies="ir_spec.py"
+                                               Outputs="..\ir\ir\gen_irio_import.inl;..\ir\ir\gen_irio_export.inl;..\ir\ir\gen_irio_lex.inl"
+                                       />
+                               </FileConfiguration>
+                               <FileConfiguration
+                                       Name="DebugJTEST|Win32"
+                                       >
+                                       <Tool
+                                               Name="VCCustomBuildTool"
+                                               Description="Generating I/O code: $(InputPath)"
+                                               CommandLine="$(InputPath) ..\ir\ir"
+                                               AdditionalDependencies="ir_spec.py"
+                                               Outputs="..\ir\ir\gen_irio_import.inl;..\ir\ir\gen_irio_export.inl;..\ir\ir\gen_irio_lex.inl"
+                                       />
+                               </FileConfiguration>
+                       </File>
+                       <File
+                               RelativePath="..\scripts\ir_spec.py"
+                               >
+                       </File>
+               </Filter>
        </Files>
        <Globals>
        </Globals>