First commit

Pierre Hubert
2016-11-19 12:08:12 +01:00
commit 990540b2b9
4706 changed files with 931207 additions and 0 deletions

@@ -0,0 +1,171 @@
# Quick notes about binary CMap format (bcmap)
The format is designed to package some information from the CMap files located at external/cmap. Please note that, for size optimization reasons, the original information blocks can be changed (split or joined) and items in the blocks can be swapped.
The data is stored in binary format in network byte order (big-endian).
# Data primitives
The following primitives are used when encoding the file:
- byte (B) a byte, bits are numbered from 0 (least significant) to 7 (most significant)
- bytes block (B[n]) a sequence of n bytes
- unsigned number (UN) the number is encoded as a sequence of bytes; bit 7 is a flag to continue decoding with the next byte, bits 6-0 store the number information, e.g. the bytes 0x81 0x84 0x07 represent 16903 (0x4207). Limited to 32 bits. (See the sketch after this list.)
- signed number (SN) the number is encoded as a sequence of bytes, as UN, but shall be transformed before encoding: if n < 0, n shall be encoded as (-2*n-1) using UN encoding, otherwise n shall be encoded as (2*n) using UN encoding. So the lowest bit of the encoded number indicates the sign of the initial number
- unsigned fixed number (UB[n]) similar to UN, but it represents an unsigned number whose decoded value is stored in B[n]
- signed fixed number (SB[n]) similar to SN, but it represents a signed number whose decoded value is stored in B[n]
- string (S) the string is encoded as a sequence of bytes. First comes the length in characters encoded as UN, then the UTF-16 character codes, each encoded as UN.
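
The sketch below illustrates the UN and SN encodings (illustration only, not part of the format; the helper names are made up):

```js
// Encode an unsigned number (UN): 7 bits per byte, bit 7 set on every byte except the last.
function encodeUN(n) {
  var bytes = [n & 0x7f];
  n >>>= 7;
  while (n > 0) {
    bytes.unshift((n & 0x7f) | 0x80);
    n >>>= 7;
  }
  return bytes;
}

// Decode an unsigned number (UN) from the start of a byte array.
function decodeUN(bytes) {
  var n = 0, i = 0, b;
  do {
    b = bytes[i++];
    n = (n << 7) | (b & 0x7f);
  } while (b & 0x80);
  return n;
}

// Signed number (SN): the sign goes into the lowest bit before UN encoding.
function encodeSN(n) {
  return encodeUN(n < 0 ? -2 * n - 1 : 2 * n);
}
function decodeSN(bytes) {
  var n = decodeUN(bytes);
  return (n & 1) ? -(n >>> 1) - 1 : (n >>> 1);
}

// The example above: 0x81 0x84 0x07 represents 16903 (0x4207).
console.log(decodeUN([0x81, 0x84, 0x07])); // 16903
console.log(encodeUN(16903));              // [ 129, 132, 7 ], i.e. 0x81 0x84 0x07
```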
# File structure
The first byte is a header:
- bits 2-1 indicate the CMapType. Valid values are 1 and 2.
- bit 0 indicates the WMode. Valid values are 0 and 1.
Then records follow. Each record starts with a record header, encoded as B, where bits 7-5 indicate the record type (the other bits are described below):
- 0 codespacerange
- 1 notdefrange
- 2 cidchar
- 3 cidrange
- 4 bfchar
- 5 bfrange
- 6 reserved
- 7 metadata
## Metadata record
In the metadata record header, bits 4-0 contain the id of the metadata:
- 0 comment, the body of the record is the comment string encoded as S
- 1 UseCMap, the body of the record is the usecmap id string encoded as S
## Data records
The records that have types 0 to 5 have the following fields in the header:
- bit 4 indicates that the char or start/end entries are stored as a sequence in this block
- bits 3-0 contain the item data size in bytes minus 1 for this block (dataSize)
The number of entries, encoded as UN, follows the header. The item records then follow (see below).
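
As an illustration (not part of the format definition; the function names are made up), a data record header byte can be packed and unpacked like this:

```js
// Pack a data record header: bits 7-5 record type, bit 4 sequence flag,
// bits 3-0 data size in bytes minus 1 (dataSize).
function packRecordHeader(type, sequence, dataSize) {
  return (type << 5) | (sequence ? 0x10 : 0) | (dataSize & 0x0f);
}

function unpackRecordHeader(b) {
  return {
    type: b >> 5,           // 0..7, see the record type list above
    sequence: !!(b & 0x10),
    dataSize: b & 0x0f
  };
}

// Example: a cidrange (type 3) block with 2-byte items (dataSize = 1), no sequence flag.
packRecordHeader(3, false, 1).toString(16); // "61"
unpackRecordHeader(0x61);                   // { type: 3, sequence: false, dataSize: 1 }
```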
### codespacerange (0)
Represents the following CMap block:
n begincodespacerange
<start> <end>
endcodespacerange
First record format is:
- start as B[dataSize]
- endDelta as UB[dataSize], end is calculated as (start + endDelta)
Next record format is:
- startDelta as UB[dataSize], start = end + startDelta + 1
- endDelta as UB[dataSize], end = start + endDelta
### notdefrange (1)
Represents the following CMap block:
n beginnotdefrange
<start> <end> code
endnotdefrange
First record format is:
- start as B[dataSize]
- endDelta as UB[dataSize], end is calculated as (start + endDelta)
- code as UN
Next record format is:
- startDelta as UB[dataSize], start = end + startDelta + 1
- endDelta as UB[dataSize], end = start + endDelta
- code as UN
### cidchar (2)
Represents the following CMap block:
n begincidchar
<char> code
endcidchar
First record format is:
- char as B[dataSize]
- code as UN
Next record format is:
- if sequence = 0, charDelta as UB[dataSize], char = char + charDelta + 1
- if sequence = 1, char = char + 1
- codeDelta as SN, code = code + codeDelta + 1
### cidrange (3)
Represents the following CMap block:
n begincidrange
<start> <end> code
endcidrange
First record format is:
- start as B[dataSize]
- endDelta as UB[dataSize], end is calculated as (start + endDelta)
- code as UN
Next record format is:
- if sequence = 0, startDelta as UB[dataSize], start = end + startDelta + 1
- if sequence = 1, start = end + 1
- endDelta as UB[dataSize], end = start + endDelta
- code as UN
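
For example (an illustrative sketch with made-up values), two consecutive cidrange items with 2-byte codes (dataSize = 1) and the sequence flag cleared decode as follows; plain integers are used for brevity, while the real format stores the start/delta values as hex byte blocks:

```js
// CMap source:           Stored fields:
//   <8140> <817e> 633    start = 8140, endDelta = 003e, code = 633 (UN)
//   <8180> <81fc> 696    startDelta = 0001, endDelta = 007c, code = 696 (UN)
var start = 0x8140;
var end = start + 0x3e;   // end = start + endDelta        -> 0x817e
var code = 633;

start = end + 0x01 + 1;   // start = end + startDelta + 1  -> 0x8180
end = start + 0x7c;       // end = start + endDelta        -> 0x81fc
code = 696;               // code is stored as an absolute UN, not a delta
```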
### bfchar (4)
Represents the following CMap block:
n beginbfchar
<char> <code>
endbfchar
First record format is:
- char as B[ucs2Size], where ucs2Size = 2 (here and below)
- code as B[dataSize]
Next record format is:
- if sequence = 0, charDelta as UB[ucs2Size], char = char + charDelta + 1
- if sequence = 1, char = char + 1
- codeDelta as SB[dataSize], code = code + codeDelta + 1
### bfrange (5)
Represents the following CMap block:
n beginbfrange
<start> <end> <code>
endbfrange
First record format is:
- start as B[ucs2Size]
- endDelta as UB[ucs2Size], end is calculated as (start + endDelta)
- code as B[dataSize]
Next record format is:
- if sequence = 0, startDelta as UB[ucs2Size], start = end + startDelta + 1
- if sequence = 1, start = end + 1
- endDelta as UB[ucs2Size], end = start + endDelta
- code as B[dataSize]
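
Putting the pieces together, a minimal (hypothetical) file with CMapType = 1, WMode = 0 and the comment "Hi" would begin with the following bytes:

```js
// 0x02               file header: (CMapType << 1) | WMode = (1 << 1) | 0
// 0xE0               metadata record header: type 7 (metadata), id 0 (comment)
// 0x02 0x48 0x69     comment as S: length 2 (UN), then 'H' (0x48) and 'i' (0x69) as UN
var bcmapPrefix = [0x02, 0xE0, 0x02, 0x48, 0x69];
```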

@@ -0,0 +1,437 @@
/* Copyright 2014 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
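// compressCmap reads an Adobe CMap file from srcPath, optimizes its structure with
// optimizeCMap, serializes it into the binary bcmap format described above (built as
// a hex string, then written as raw bytes to destPath) and, when verify is set,
// re-parses the output with parseCMap below and compares it with the input data.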
var fs = require('fs');
var path = require('path');
var parseAdobeCMap = require('./parse.js').parseAdobeCMap;
var optimizeCMap = require('./optimize.js').optimizeCMap;
function compressCmap(srcPath, destPath, verify) {
var content = fs.readFileSync(srcPath).toString();
var inputData = parseAdobeCMap(content);
optimizeCMap(inputData);
var out = writeByte((inputData.type << 1) | inputData.wmode);
if (inputData.comment) {
out += writeByte(0xE0) + writeString(inputData.comment);
}
if (inputData.usecmap) {
out += writeByte(0xE1) + writeString(inputData.usecmap);
}
var i = 0;
while (i < inputData.body.length) {
var item = inputData.body[i++], subitems = item.items;
var first = item.items[0];
var sequence = item.sequence === true;
var flags = (item.type << 5) | (sequence ? 0x10 : 0);
var nextStart, nextCode;
switch (item.type) {
case 0:
out += writeByte(flags | getHexSize(first.start)) + writeNumber(subitems.length);
out += first.start + writeNumber(subHex(first.end, first.start));
nextStart = incHex(first.end);
for (var j = 1; j < subitems.length; j++) {
out += writeNumber(subHex(subitems[j].start, nextStart)) +
writeNumber(subHex(subitems[j].end, subitems[j].start));
nextStart = incHex(subitems[j].end);
}
break;
case 1:
out += writeByte(flags | getHexSize(first.start)) + writeNumber(subitems.length);
out += first.start + writeNumber(subHex(first.end, first.start)) + writeNumber(first.code);
nextStart = incHex(first.end);
for (var j = 1; j < subitems.length; j++) {
out += writeNumber(subHex(subitems[j].start, nextStart)) +
writeNumber(subHex(subitems[j].end, subitems[j].start)) +
writeNumber(subitems[j].code);
nextStart = incHex(subitems[j].end);
}
break;
case 2:
out += writeByte(flags | getHexSize(first.char)) + writeNumber(subitems.length);
out += first.char + writeNumber(first.code);
nextStart = incHex(first.char);
nextCode = first.code + 1;
for (var j = 1; j < subitems.length; j++) {
out += (sequence ? '' : writeNumber(subHex(subitems[j].char, nextStart))) +
writeSigned(subitems[j].code - nextCode);
nextStart = incHex(subitems[j].char);
nextCode = item.items[j].code + 1;
}
break;
case 3:
out += writeByte(flags | getHexSize(first.start)) + writeNumber(subitems.length);
out += first.start + writeNumber(subHex(first.end, first.start)) + writeNumber(first.code);
nextStart = incHex(first.end);
for (var j = 1; j < subitems.length; j++) {
out += (sequence ? '' : writeNumber(subHex(subitems[j].start, nextStart))) +
writeNumber(subHex(subitems[j].end, subitems[j].start)) +
writeNumber(subitems[j].code);
nextStart = incHex(subitems[j].end);
}
break;
case 4:
out += writeByte(flags | getHexSize(first.code)) + writeNumber(subitems.length);
out += first.char + first.code;
nextStart = incHex(first.char);
nextCode = incHex(first.code);
for (var j = 1; j < subitems.length; j++) {
out += (sequence ? '' : writeNumber(subHex(subitems[j].char, nextStart))) +
writeSigned(subHex(subitems[j].code, nextCode));
nextStart = incHex(subitems[j].char);
nextCode = incHex(subitems[j].code);
}
break;
case 5:
out += writeByte(flags | getHexSize(first.code)) + writeNumber(subitems.length);
out += first.start + writeNumber(subHex(first.end, first.start)) + first.code;
nextStart = incHex(first.end);
for (var j = 1; j < subitems.length; j++) {
out += (sequence ? '' : writeNumber(subHex(subitems[j].start, nextStart))) +
writeNumber(subHex(subitems[j].end, subitems[j].start)) +
subitems[j].code;
nextStart = incHex(subitems[j].end);
}
break;
}
}
fs.writeFileSync(destPath, new Buffer(out, 'hex'));
if (verify) {
var result2 = parseCMap(out);
var isGood = JSON.stringify(inputData) == JSON.stringify(result2);
if (!isGood) {
throw new Error('Extracted data does not match the expected result');
}
}
return {
orig: fs.statSync(srcPath).size,
packed: out.length >> 1
};
}
function parseCMap(binaryData) {
var reader = {
buffer: binaryData,
pos: 0,
end: binaryData.length,
readByte: function () {
if (this.pos >= this.end) {
return -1;
}
var d1 = fromHexDigit(this.buffer[this.pos]);
var d2 = fromHexDigit(this.buffer[this.pos + 1]);
this.pos += 2;
return (d1 << 4) | d2;
},
readNumber: function () {
var n = 0;
var last;
do {
var b = this.readByte();
last = !(b & 0x80);
n = (n << 7) | (b & 0x7F);
} while (!last);
return n;
},
readSigned: function () {
var n = this.readNumber();
return (n & 1) ? -(n >>> 1) - 1 : n >>> 1;
},
readHex: function (size) {
var lengthInChars = (size + 1) << 1;
var s = this.buffer.substr(this.pos, lengthInChars);
this.pos += lengthInChars;
return s;
},
readHexNumber: function (size) {
var lengthInChars = (size + 1) << 1;
var stack = [];
var last;
do {
var b = this.readByte();
last = !(b & 0x80);
stack.push(b & 0x7F);
} while (!last);
var s = '', buffer = 0, bufferSize = 0;
while (s.length < lengthInChars) {
while (bufferSize < 4 && stack.length > 0) {
buffer = (stack.pop() << bufferSize) | buffer;
bufferSize += 7;
}
s = toHexDigit(buffer & 15) + s;
buffer >>= 4;
bufferSize -= 4;
}
return s;
},
readHexSigned: function (size) {
var num = this.readHexNumber(size);
var sign = fromHexDigit(num[num.length - 1]) & 1 ? 15 : 0;
var c = 0;
var result = '';
for (var i = 0; i < num.length; i++) {
c = (c << 4) | fromHexDigit(num[i]);
result += toHexDigit(sign ? (c >> 1) ^ sign : (c >> 1));
c &= 1;
}
return result;
},
readString: function () {
var len = this.readNumber();
var s = '';
for (var i = 0; i < len; i++) {
s += String.fromCharCode(this.readNumber());
}
return s;
}
};
var header = reader.readByte();
var result = {
type: header >> 1,
wmode: header & 1,
comment: null,
usecmap: null,
body: []
};
var b;
while ((b = reader.readByte()) >= 0) {
var type = b >> 5;
if (type === 7) {
switch (b & 0x1F) {
case 0:
result.comment = reader.readString();
break;
case 1:
result.usecmap = reader.readString();
break;
}
continue;
}
var sequence = !!(b & 0x10);
var dataSize = b & 15;
var subitems = [];
var item = {
type: type,
items: subitems
};
if (sequence) {
item.sequence = true;
}
var ucs2DataSize = 1;
var subitemsCount = reader.readNumber();
var start, end, code, char;
switch (type) {
case 0:
start = reader.readHex(dataSize);
end = addHex(reader.readHexNumber(dataSize), start);
subitems.push({start: start, end: end});
for (var i = 1; i < subitemsCount; i++) {
start = addHex(reader.readHexNumber(dataSize), incHex(end));
end = addHex(reader.readHexNumber(dataSize), start);
subitems.push({start: start, end: end});
}
break;
case 1:
start = reader.readHex(dataSize);
end = addHex(reader.readHexNumber(dataSize), start);
code = reader.readNumber();
subitems.push({start: start, end: end, code: code});
for (var i = 1; i < subitemsCount; i++) {
start = addHex(reader.readHexNumber(dataSize), incHex(end));
end = addHex(reader.readHexNumber(dataSize), start);
code = reader.readNumber();
subitems.push({start: start, end: end, code: code});
}
break;
case 2:
char = reader.readHex(dataSize);
code = reader.readNumber();
subitems.push({char: char, code: code});
for (var i = 1; i < subitemsCount; i++) {
char = sequence ? incHex(char) : addHex(reader.readHexNumber(dataSize), incHex(char));
code = reader.readSigned() + (code + 1);
subitems.push({char: char, code: code});
}
break;
case 3:
start = reader.readHex(dataSize);
end = addHex(reader.readHexNumber(dataSize), start);
code = reader.readNumber();
subitems.push({start: start, end: end, code: code});
for (var i = 1; i < subitemsCount; i++) {
start = sequence ? incHex(end) : addHex(reader.readHexNumber(dataSize), incHex(end));
end = addHex(reader.readHexNumber(dataSize), start);
code = reader.readNumber();
subitems.push({start: start, end: end, code: code});
}
break;
case 4:
char = reader.readHex(ucs2DataSize);
code = reader.readHex(dataSize);
subitems.push({char: char, code: code});
for (var i = 1; i < subitemsCount; i++) {
char = sequence ? incHex(char) : addHex(reader.readHexNumber(ucs2DataSize), incHex(char));
code = addHex(reader.readHexSigned(dataSize), incHex(code));
subitems.push({char: char, code: code});
}
break;
case 5:
start = reader.readHex(ucs2DataSize);
end = addHex(reader.readHexNumber(ucs2DataSize), start);
code = reader.readHex(dataSize);
subitems.push({start: start, end: end, code: code});
for (var i = 1; i < subitemsCount; i++) {
start = sequence ? incHex(end) : addHex(reader.readHexNumber(ucs2DataSize), incHex(end));
end = addHex(reader.readHexNumber(ucs2DataSize), start);
code = reader.readHex(dataSize);
subitems.push({start: start, end: end, code: code});
}
break;
default:
throw new Error('Unknown type: ' + type);
}
result.body.push(item);
}
return result;
}
function toHexDigit(n) {
return n.toString(16);
}
function fromHexDigit(s) {
return parseInt(s, 16);
}
function getHexSize(s) {
return (s.length >> 1) - 1;
}
function writeByte(b) {
return toHexDigit((b >> 4) & 15) + toHexDigit(b & 15);
}
function writeNumber(n) {
if (typeof n === 'string') {
var s = '', buffer = 0, bufferSize = 0;
var i = n.length;
while (i > 0) {
--i;
buffer = (fromHexDigit(n[i]) << bufferSize) | buffer;
bufferSize += 4;
if (bufferSize >= 7) {
s = writeByte((buffer & 0x7f) | (s.length > 0 ? 0x80 : 0)) + s;
buffer >>>= 7;
bufferSize -= 7;
}
}
if (buffer > 0) {
s = writeByte((buffer & 0x7f) | (s.length > 0 ? 0x80 : 0)) + s;
}
while (s.indexOf('80') === 0) {
s = s.substr(2);
}
return s;
} else {
var s = writeByte(n & 0x7F);
n >>>= 7;
while (n > 0) {
s = writeByte((n & 0x7F) | 0x80) + s;
n >>>= 7;
}
return s;
}
}
function writeSigned(n) {
if (typeof n === 'string') {
var t = '';
var c = fromHexDigit(n[0]);
var neg = c >= 8;
c = neg ? (c ^ 15) : c;
for (var i = 1; i < n.length; i++) {
var d = fromHexDigit(n[i]);
c = (c << 4) | (neg ? (d ^ 15) : d);
t += toHexDigit(c >> 3);
c = c & 7;
}
t += toHexDigit((c << 1) | (neg ? 1 : 0));
return writeNumber(t);
}
return n < 0 ? writeNumber(-2 * n - 1) : writeNumber(2 * n);
}
function writeString(s) {
var t = writeNumber(s.length);
for (var i = 0; i < s.length; i++) {
t += writeNumber(s.charCodeAt(i));
}
return t;
}
function addHex(a, b) {
var c = 0, s = '';
for (var i = a.length - 1; i >= 0; i--) {
c += fromHexDigit(a[i]) + fromHexDigit(b[i]);
if (c >= 16) {
s = toHexDigit(c - 16) + s;
c = 1;
} else {
s = toHexDigit(c) + s;
c = 0;
}
}
return s;
}
function subHex(a, b) {
var c = 0, s = '';
for (var i = a.length - 1; i >= 0; i--) {
c += fromHexDigit(a[i]) - fromHexDigit(b[i]);
if (c < 0) {
s = toHexDigit(c + 16) + s;
c = -1;
} else {
s = toHexDigit(c) + s;
c = 0;
}
}
return s;
}
function incHex(a) {
var c = 1, s = '';
for (var i = a.length - 1; i >= 0; i--) {
c += fromHexDigit(a[i]);
if (c >= 16) {
s = toHexDigit(c - 16) + s;
c = 1;
} else {
s = toHexDigit(c) + s;
c = 0;
}
}
return s;
}
exports.compressCmaps = function (src, dest, verify) {
var files = fs.readdirSync(src).filter(function (fn) {
return fn.indexOf('.') < 0; // skip files that have an extension
});
files.forEach(function (fn) {
var srcPath = path.join(src, fn);
var destPath = path.join(dest, fn + '.bcmap');
var stats = compressCmap(srcPath, destPath, verify);
console.log('Compressing ' + fn + ': ' + stats.orig + ' vs ' + stats.packed +
' ' + (stats.packed / stats.orig * 100).toFixed(1) + '%');
});
};
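// Example invocation (a sketch; the directory names below are assumptions and are not
// part of this file):
//
//   var compressCmaps = require('./compress.js').compressCmaps;
//   // Compresses every extension-less CMap file in ../cmap into <name>.bcmap files
//   // in ../bcmaps, verifying each result by re-parsing it.
//   compressCmaps('../cmap', '../bcmaps', true);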

@@ -0,0 +1,211 @@
/* Copyright 2014 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
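// optimizeCMap rewrites data.body in place so that the binary writer in compress.js
// can emit fewer and shorter records: adjacent blocks of the same type are merged,
// blocks are split where the hex string lengths of their items change, long runs of
// single-char ranges are turned into char records, runs of consecutive ranges are
// marked as sequences, and non-sequence char/range blocks with widely spread codes
// are split into buckets of nearby codes.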
exports.optimizeCMap = function (data) {
var i = 1;
while (i < data.body.length) {
if (data.body[i - 1].type === data.body[i].type) {
data.body[i - 1].items = data.body[i - 1].items.concat(data.body[i].items);
data.body.splice(i, 1);
} else {
i++;
}
}
// split into groups with different lengths
var i = 0;
while (i < data.body.length) {
var item = data.body[i];
var keys = Object.keys(item.items[0]).filter(function (i) {
return typeof item.items[0][i] === 'string';
});
var j = 1;
while (j < item.items.length) {
var different = false;
for (var q = 0; q < keys.length && !different; q++) {
different = item.items[j - 1][keys[q]].length !== item.items[j][keys[q]].length;
}
if (different) {
break;
}
j++;
}
if (j < item.items.length) {
data.body.splice(i + 1, 0, {
type: item.type,
items: item.items.splice(j, item.items.length - j)
});
}
i++;
}
// find sequences of single char ranges
var i = 0;
while (i < data.body.length) {
var item = data.body[i];
if (item.type === 3 || item.type === 5) {
var j = 0;
while (j < item.items.length) {
var q = j;
while (j < item.items.length && item.items[j].start === item.items[j].end) {
j++;
}
if ((j - q) >= 9) {
if (j < item.items.length) {
data.body.splice(i + 1, 0, {
type: item.type,
items: item.items.splice(j, item.items.length - j)
});
}
if (q > 0) {
data.body.splice(i + 1, 0, {
type: item.type - 1,
items: item.items.splice(q, j - q).map(function (i) {
return {char: i.start, code: i.code };
})
});
i++;
} else {
item.type -= 1;
item.items = item.items.map(function (i) {
return {char: i.start, code: i.code };
});
}
continue;
}
j++;
}
}
i++;
}
// find sequences of increasing code/ranges order
var i = 0;
while (i < data.body.length) {
var item = data.body[i];
if (item.type >= 2 && item.type <= 5) {
var j = 1;
var startProp = item.type === 2 || item.type === 4 ? 'char' : 'start';
var endProp = item.type === 2 || item.type === 4 ? 'char' : 'end';
while (j < item.items.length) {
var q = j - 1;
while (j < item.items.length && incHex(item.items[j - 1][endProp]) === item.items[j][startProp]) {
j++;
}
if ((j - q) >= 9) {
if (j < item.items.length) {
data.body.splice(i + 1, 0, {
type: item.type,
items: item.items.splice(j, item.items.length - j)
});
}
if (q > 0) {
data.body.splice(i + 1, 0, {
type: item.type,
items: item.items.splice(q, j - q),
sequence: true
});
i++;
} else {
item.sequence = true;
}
continue;
}
j++;
}
}
i++;
}
// split non-sequences two groups where codes are close
var i = 0;
while (i < data.body.length) {
var item = data.body[i];
if (!item.sequence && (item.type === 2 || item.type === 3)) {
var subitems = item.items;
var codes = subitems.map(function (i) {
return i.code;
});
codes.sort(function (a, b) {
return a - b;
});
var maxDistance = 100, minItems = 10, itemsPerBucket = 50;
if (subitems.length > minItems && codes[codes.length - 1] - codes[0] > maxDistance) {
var gapsCount = Math.max(2, (subitems.length / itemsPerBucket) | 0);
var gaps = [];
for (var q = 0; q < gapsCount; q++) {
gaps.push({length: 0});
}
for (var j = 1; j < codes.length; j++) {
var gapLength = codes[j] - codes[j - 1];
var q = 0;
while (q < gaps.length && gaps[q].length > gapLength) {
q++;
}
if (q >= gaps.length) {
continue;
}
var q0 = q;
while (q < gaps.length) {
if (gaps[q].length < gaps[q0].length) {
q0 = q;
}
q++;
}
gaps[q0] = {length: gapLength, boundary: codes[j]};
}
var groups = gaps.filter(function (g) {
return g.length >= maxDistance;
}).map(function (g) {
return g.boundary;
});
groups.sort(function (a, b) {
return a - b;
});
if (groups.length > 1) {
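// Bucket 0 reuses item.items (reset to an empty array here); the original entries
// stay reachable through the local subitems variable and are redistributed below.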
var buckets = [item.items = []];
for (var j = 0; j < groups.length; j++) {
var newItem = {type: item.type, items: []};
buckets.push(newItem.items);
i++;
data.body.splice(i, 0, newItem);
}
for (var j = 0; j < subitems.length; j++) {
var code = subitems[j].code;
var q = 0;
while (q < groups.length && groups[q] <= code) {
q++;
}
buckets[q].push(subitems[j]);
}
}
}
}
i++;
}
};
function incHex(a) {
var c = 1, s = '';
for (var i = a.length - 1; i >= 0; i--) {
c += parseInt(a[i], 16);
if (c >= 16) {
s = '0' + s;
c = 1;
} else {
s = c.toString(16) + s;
c = 0;
}
}
return s;
}

@@ -0,0 +1,101 @@
/* Copyright 2014 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
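// parseAdobeCMap extracts the begincmap ... endcmap body of an Adobe CMap file and
// converts its codespacerange/notdefrange/cidchar/cidrange/bfchar/bfrange blocks into
// the intermediate {type, wmode, comment, usecmap, body} structure that optimize.js
// and compress.js consume.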
exports.parseAdobeCMap = function (content) {
var m = /(\bbegincmap\b[\s\S]*?)\bendcmap\b/.exec(content);
if (!m) {
throw new Error('cmap was not found');
}
var body = m[1].replace(/\r\n?/g, '\n');
var result = {
type: 1,
wmode: 0,
comment: 'Copyright 1990-2009 Adobe Systems Incorporated.\nAll rights reserved.\nSee ./LICENSE',
usecmap: null,
body: []
};
m = /\/CMapType\s+(\d+)+\s+def\b/.exec(body);
result.type = +m[1];
m = /\/WMode\s+(\d+)+\s+def\b/.exec(body);
result.wmode = +m[1];
m = /\/([\w\-]+)\s+usecmap\b/.exec(body);
if (m) {
result.usecmap = m[1];
}
var re = /(\d+)\s+(begincodespacerange|beginnotdefrange|begincidchar|begincidrange|beginbfchar|beginbfrange)\n([\s\S]*?)\n(endcodespacerange|endnotdefrange|endcidchar|endcidrange|endbfchar|endbfrange)/g;
while (m = re.exec(body)) {
var lines = m[3].toLowerCase().split('\n');
var m2;
switch (m[2]) {
case 'begincodespacerange':
result.body.push({
type: 0,
items: lines.map(function (line) {
var m = /<(\w+)>\s+<(\w+)>/.exec(line);
return {start: m[1], end: m[2]};
})
});
break;
case 'beginnotdefrange':
result.body.push({
type: 1,
items: lines.map(function (line) {
var m = /<(\w+)>\s+<(\w+)>\s+(\d+)/.exec(line);
return {start: m[1], end: m[2], code: +m[3]};
})
});
break;
case 'begincidchar':
result.body.push({
type: 2,
items: lines.map(function (line) {
var m = /<(\w+)>\s+(\d+)/.exec(line);
return {char: m[1], code: +m[2]};
})
});
break;
case 'begincidrange':
result.body.push({
type: 3,
items: lines.map(function (line) {
var m = /<(\w+)>\s+<(\w+)>\s+(\d+)/.exec(line);
return {start: m[1], end: m[2], code: +m[3]};
})
});
break;
case 'beginbfchar':
result.body.push({
type: 4,
items: lines.map(function (line) {
var m = /<(\w+)>\s+<(\w+)>/.exec(line);
return {char: m[1], code: m[2]};
})
});
break;
case 'beginbfrange':
result.body.push({
type: 5,
items: lines.map(function (line) {
var m = /<(\w+)>\s+<(\w+)>\s+<(\w+)>/.exec(line);
return {start: m[1], end: m[2], code: m[3]};
})
});
break;
}
}
return result;
};
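// Example invocation (a sketch; the file path below is an assumption and is not part
// of this file):
//
//   var fs = require('fs');
//   var parseAdobeCMap = require('./parse.js').parseAdobeCMap;
//   var data = parseAdobeCMap(fs.readFileSync('../cmap/Adobe-Japan1-6').toString());
//   console.log(data.type, data.wmode, data.body.length);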